Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.8/logging/handlers.py: 19%

683 statements  

« prev     ^ index     » next       coverage.py v7.0.1, created at 2022-12-25 06:11 +0000

1# Copyright 2001-2016 by Vinay Sajip. All Rights Reserved. 

2# 

3# Permission to use, copy, modify, and distribute this software and its 

4# documentation for any purpose and without fee is hereby granted, 

5# provided that the above copyright notice appear in all copies and that 

6# both that copyright notice and this permission notice appear in 

7# supporting documentation, and that the name of Vinay Sajip 

8# not be used in advertising or publicity pertaining to distribution 

9# of the software without specific, written prior permission. 

10# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING 

11# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL 

12# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR 

13# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER 

14# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT 

15# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 

16 

17""" 

18Additional handlers for the logging package for Python. The core package is 

19based on PEP 282 and comments thereto in comp.lang.python. 

20 

21Copyright (C) 2001-2016 Vinay Sajip. All Rights Reserved. 

22 

23To use, simply 'import logging.handlers' and log away! 

24""" 

25 

26import logging, socket, os, pickle, struct, time, re 

27from stat import ST_DEV, ST_INO, ST_MTIME 

28import queue 

29import threading 

30import copy 

31 

#
# Some constants...
#

# Default ports used by the network logging handlers when the caller does
# not supply one.  These are IANA-unassigned "private" values historically
# chosen by this module, not standardized service ports.
DEFAULT_TCP_LOGGING_PORT = 9020
DEFAULT_UDP_LOGGING_PORT = 9021
DEFAULT_HTTP_LOGGING_PORT = 9022
DEFAULT_SOAP_LOGGING_PORT = 9023
# Standard syslog ports (RFC 5424 / traditional BSD syslog).
SYSLOG_UDP_PORT = 514
SYSLOG_TCP_PORT = 514

_MIDNIGHT = 24 * 60 * 60  # number of seconds in a day

44 

class BaseRotatingHandler(logging.FileHandler):
    """
    Abstract base for file handlers that switch to a fresh log file at
    some trigger point.

    Not meant to be instantiated directly; use RotatingFileHandler or
    TimedRotatingFileHandler instead.  Subclasses must provide
    shouldRollover() and doRollover().
    """
    def __init__(self, filename, mode, encoding=None, delay=False):
        """Open *filename* as the stream used for logging output."""
        logging.FileHandler.__init__(self, filename, mode, encoding, delay)
        self.mode = mode
        self.encoding = encoding
        # Optional hooks letting users customise rollover behaviour:
        # 'namer' maps a default rotated filename to the one actually used,
        # 'rotator' performs the rotation itself.  Both default to None.
        self.namer = None
        self.rotator = None

    def emit(self, record):
        """
        Emit a record.

        Performs a rollover first if the subclass's shouldRollover()
        reports one is due, then writes the record to the file.
        """
        try:
            if self.shouldRollover(record):
                self.doRollover()
            logging.FileHandler.emit(self, record)
        except Exception:
            self.handleError(record)

    def rotation_filename(self, default_name):
        """
        Compute the filename to use for a rotated log file.

        If the handler's 'namer' attribute is callable it is invoked with
        *default_name* and its result is returned; otherwise (the default,
        None) *default_name* is returned unchanged.

        :param default_name: The default name for the log file.
        """
        namer = self.namer
        return namer(default_name) if callable(namer) else default_name

    def rotate(self, source, dest):
        """
        Perform the actual rotation of the current log.

        A callable 'rotator' attribute, if set, is given full control and
        is passed *source* and *dest*.  Otherwise the source file is simply
        renamed to the destination -- when it exists: with delay=True the
        file may never have been created (see Issue 18940).

        :param source: The source filename, normally the base filename,
                       e.g. 'test.log'.
        :param dest:   The destination filename, normally what the source
                       rotates to, e.g. 'test.log.1'.
        """
        rotator = self.rotator
        if callable(rotator):
            rotator(source, dest)
        elif os.path.exists(source):
            os.rename(source, dest)

class RotatingFileHandler(BaseRotatingHandler):
    """
    Handler that logs to a set of files, moving on to the next file once
    the current one reaches a certain size.
    """
    def __init__(self, filename, mode='a', maxBytes=0, backupCount=0, encoding=None, delay=False):
        """
        Open the specified file and use it as the stream for logging.

        By default the file grows without bound.  Supplying maxBytes and
        backupCount enables size-based rollover: whenever the current log
        file is about to exceed maxBytes, it is closed and renamed to
        "<filename>.1", with any existing "<filename>.1" ... shifted to
        ".2", ".3" and so on, keeping at most backupCount backups.  The
        file actually written to is always *filename* itself.

        A maxBytes of zero disables rollover entirely.
        """
        # Rollover only makes sense in append mode: honouring e.g. 'w'
        # would truncate the log on every run of the application and lose
        # the history the rotation is meant to preserve.
        if maxBytes > 0:
            mode = 'a'
        BaseRotatingHandler.__init__(self, filename, mode, encoding, delay)
        self.maxBytes = maxBytes
        self.backupCount = backupCount

    def doRollover(self):
        """
        Do a rollover, as described in __init__().
        """
        if self.stream:
            self.stream.close()
            self.stream = None
        if self.backupCount > 0:
            # Shift the existing backups up by one:
            # .(backupCount-1) -> .backupCount, ..., .1 -> .2
            for index in reversed(range(1, self.backupCount)):
                older = self.rotation_filename("%s.%d" % (self.baseFilename,
                                                          index))
                newer = self.rotation_filename("%s.%d" % (self.baseFilename,
                                                          index + 1))
                if os.path.exists(older):
                    if os.path.exists(newer):
                        os.remove(newer)
                    os.rename(older, newer)
            first_backup = self.rotation_filename(self.baseFilename + ".1")
            if os.path.exists(first_backup):
                os.remove(first_backup)
            self.rotate(self.baseFilename, first_backup)
        if not self.delay:
            self.stream = self._open()

    def shouldRollover(self, record):
        """
        Determine whether rollover should occur: would writing *record*
        push the current file past the configured size limit?
        """
        if self.stream is None:                 # delay was set...
            self.stream = self._open()
        if self.maxBytes > 0:                   # are we rolling over?
            formatted = "%s\n" % self.format(record)
            # On Windows the append-mode file position is not reliably at
            # EOF, so seek there explicitly before measuring the size.
            self.stream.seek(0, 2)
            if self.stream.tell() + len(formatted) >= self.maxBytes:
                return 1
        return 0

190 

class TimedRotatingFileHandler(BaseRotatingHandler):
    """
    Handler for logging to a file, rotating the log file at certain timed
    intervals.

    If backupCount is > 0, when rollover is done, no more than backupCount
    files are kept - the oldest ones are deleted.
    """
    def __init__(self, filename, when='h', interval=1, backupCount=0, encoding=None, delay=False, utc=False, atTime=None):
        """
        Open *filename* for appending and schedule rollovers according to
        *when* and *interval*.  *atTime* (a datetime.time) pins the exact
        time of day for midnight/weekly rollovers; *utc* selects UTC
        instead of local time for all time computations.
        """
        BaseRotatingHandler.__init__(self, filename, 'a', encoding, delay)
        self.when = when.upper()
        self.backupCount = backupCount
        self.utc = utc
        self.atTime = atTime
        # Calculate the real rollover interval, which is just the number of
        # seconds between rollovers. Also set the filename suffix used when
        # a rollover occurs. Current 'when' events supported:
        # S - Seconds
        # M - Minutes
        # H - Hours
        # D - Days
        # midnight - roll over at midnight
        # W{0-6} - roll over on a certain day; 0 - Monday
        #
        # Case of the 'when' specifier is not important; lower or upper case
        # will work.
        if self.when == 'S':
            self.interval = 1 # one second
            self.suffix = "%Y-%m-%d_%H-%M-%S"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}(\.\w+)?$"
        elif self.when == 'M':
            self.interval = 60 # one minute
            self.suffix = "%Y-%m-%d_%H-%M"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}(\.\w+)?$"
        elif self.when == 'H':
            self.interval = 60 * 60 # one hour
            self.suffix = "%Y-%m-%d_%H"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}(\.\w+)?$"
        elif self.when == 'D' or self.when == 'MIDNIGHT':
            self.interval = 60 * 60 * 24 # one day
            self.suffix = "%Y-%m-%d"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}(\.\w+)?$"
        elif self.when.startswith('W'):
            self.interval = 60 * 60 * 24 * 7 # one week
            if len(self.when) != 2:
                raise ValueError("You must specify a day for weekly rollover from 0 to 6 (0 is Monday): %s" % self.when)
            if self.when[1] < '0' or self.when[1] > '6':
                raise ValueError("Invalid day specified for weekly rollover: %s" % self.when)
            self.dayOfWeek = int(self.when[1])
            self.suffix = "%Y-%m-%d"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}(\.\w+)?$"
        else:
            raise ValueError("Invalid rollover interval specified: %s" % self.when)

        # extMatch (compiled here) recognises the date/time suffixes that
        # doRollover() appends, so getFilesToDelete() can find old backups.
        self.extMatch = re.compile(self.extMatch, re.ASCII)
        self.interval = self.interval * interval # multiply by units requested
        # The following line added because the filename passed in could be a
        # path object (see Issue #27493), but self.baseFilename will be a string
        filename = self.baseFilename
        if os.path.exists(filename):
            # Base the first rollover on the file's last-modification time so
            # restarts don't reset the schedule.
            t = os.stat(filename)[ST_MTIME]
        else:
            t = int(time.time())
        self.rolloverAt = self.computeRollover(t)

    def computeRollover(self, currentTime):
        """
        Work out the rollover time based on the specified time.
        """
        result = currentTime + self.interval
        # If we are rolling over at midnight or weekly, then the interval is already known.
        # What we need to figure out is WHEN the next interval is.  In other words,
        # if you are rolling over at midnight, then your base interval is 1 day,
        # but you want to start that one day clock at midnight, not now.  So, we
        # have to fudge the rolloverAt value in order to trigger the first rollover
        # at the right time.  After that, the regular interval will take care of
        # the rest.  Note that this code doesn't care about leap seconds. :)
        if self.when == 'MIDNIGHT' or self.when.startswith('W'):
            # This could be done with less code, but I wanted it to be clear
            if self.utc:
                t = time.gmtime(currentTime)
            else:
                t = time.localtime(currentTime)
            currentHour = t[3]
            currentMinute = t[4]
            currentSecond = t[5]
            currentDay = t[6]
            # r is the number of seconds left between now and the next rotation
            if self.atTime is None:
                rotate_ts = _MIDNIGHT
            else:
                rotate_ts = ((self.atTime.hour * 60 + self.atTime.minute)*60 +
                    self.atTime.second)

            r = rotate_ts - ((currentHour * 60 + currentMinute) * 60 +
                currentSecond)
            if r < 0:
                # Rotate time is before the current time (for example when
                # self.rotateAt is 13:45 and it now 14:15), rotation is
                # tomorrow.
                r += _MIDNIGHT
                currentDay = (currentDay + 1) % 7
            result = currentTime + r
            # If we are rolling over on a certain day, add in the number of days until
            # the next rollover, but offset by 1 since we just calculated the time
            # until the next day starts.  There are three cases:
            # Case 1) The day to rollover is today; in this case, do nothing
            # Case 2) The day to rollover is further in the interval (i.e., today is
            #         day 2 (Wednesday) and rollover is on day 6 (Sunday).  Days to
            #         next rollover is simply 6 - 2 - 1, or 3.
            # Case 3) The day to rollover is behind us in the interval (i.e., today
            #         is day 5 (Saturday) and rollover is on day 3 (Thursday).
            #         Days to rollover is 6 - 5 + 3, or 4.  In this case, it's the
            #         number of days left in the current week (1) plus the number
            #         of days in the next week until the rollover day (3).
            # The calculations described in 2) and 3) above need to have a day added.
            # This is because the above time calculation takes us to midnight on this
            # day, i.e. the start of the next day.
            if self.when.startswith('W'):
                day = currentDay # 0 is Monday
                if day != self.dayOfWeek:
                    if day < self.dayOfWeek:
                        daysToWait = self.dayOfWeek - day
                    else:
                        daysToWait = 6 - day + self.dayOfWeek + 1
                    newRolloverAt = result + (daysToWait * (60 * 60 * 24))
                    if not self.utc:
                        dstNow = t[-1]
                        dstAtRollover = time.localtime(newRolloverAt)[-1]
                        if dstNow != dstAtRollover:
                            if not dstNow:  # DST kicks in before next rollover, so we need to deduct an hour
                                addend = -3600
                            else:           # DST bows out before next rollover, so we need to add an hour
                                addend = 3600
                            newRolloverAt += addend
                    result = newRolloverAt
        return result

    def shouldRollover(self, record):
        """
        Determine if rollover should occur.

        record is not used, as we are just comparing times, but it is needed so
        the method signatures are the same
        """
        t = int(time.time())
        if t >= self.rolloverAt:
            return 1
        return 0

    def getFilesToDelete(self):
        """
        Determine the files to delete when rolling over.

        More specific than the earlier method, which just used glob.glob().
        """
        dirName, baseName = os.path.split(self.baseFilename)
        fileNames = os.listdir(dirName)
        result = []
        # Only files that look like "<base>.<timestamp-suffix>" are backup
        # candidates; extMatch guards against unrelated files with the prefix.
        prefix = baseName + "."
        plen = len(prefix)
        for fileName in fileNames:
            if fileName[:plen] == prefix:
                suffix = fileName[plen:]
                if self.extMatch.match(suffix):
                    result.append(os.path.join(dirName, fileName))
        if len(result) < self.backupCount:
            result = []
        else:
            # Sorted lexicographically == chronologically for these suffixes;
            # keep only the newest backupCount files.
            result.sort()
            result = result[:len(result) - self.backupCount]
        return result

    def doRollover(self):
        """
        do a rollover; in this case, a date/time stamp is appended to the filename
        when the rollover happens.  However, you want the file to be named for the
        start of the interval, not the current time.  If there is a backup count,
        then we have to get a list of matching filenames, sort them and remove
        the one with the oldest suffix.
        """
        if self.stream:
            self.stream.close()
            self.stream = None
        # get the time that this sequence started at and make it a TimeTuple
        currentTime = int(time.time())
        dstNow = time.localtime(currentTime)[-1]
        t = self.rolloverAt - self.interval
        if self.utc:
            timeTuple = time.gmtime(t)
        else:
            timeTuple = time.localtime(t)
            dstThen = timeTuple[-1]
            # If DST changed between the interval start and now, shift the
            # timestamp so the suffix names the interval's local start time.
            if dstNow != dstThen:
                if dstNow:
                    addend = 3600
                else:
                    addend = -3600
                timeTuple = time.localtime(t + addend)
        dfn = self.rotation_filename(self.baseFilename + "." +
                                     time.strftime(self.suffix, timeTuple))
        if os.path.exists(dfn):
            os.remove(dfn)
        self.rotate(self.baseFilename, dfn)
        if self.backupCount > 0:
            for s in self.getFilesToDelete():
                os.remove(s)
        if not self.delay:
            self.stream = self._open()
        newRolloverAt = self.computeRollover(currentTime)
        while newRolloverAt <= currentTime:
            newRolloverAt = newRolloverAt + self.interval
        #If DST changes and midnight or weekly rollover, adjust for this.
        if (self.when == 'MIDNIGHT' or self.when.startswith('W')) and not self.utc:
            dstAtRollover = time.localtime(newRolloverAt)[-1]
            if dstNow != dstAtRollover:
                if not dstNow:  # DST kicks in before next rollover, so we need to deduct an hour
                    addend = -3600
                else:           # DST bows out before next rollover, so we need to add an hour
                    addend = 3600
                newRolloverAt += addend
        self.rolloverAt = newRolloverAt

413 

class WatchedFileHandler(logging.FileHandler):
    """
    A file handler that watches its file for external changes, as caused
    by log-rotation tools such as newsyslog and logrotate.

    Before each emit the file is checked (by device and inode number); if
    it has been moved or replaced since the stream was opened, the stale
    stream is closed and the file is reopened.  Intended for Unix: on
    Windows open log files cannot be moved or renamed (logging opens them
    with exclusive locks) and ST_INO is always reported as zero, so this
    handler is neither needed nor useful there.

    Based on a suggestion and patch by Chad J. Schroeder.
    """
    def __init__(self, filename, mode='a', encoding=None, delay=False):
        logging.FileHandler.__init__(self, filename, mode, encoding, delay)
        self.dev, self.ino = -1, -1
        self._statstream()

    def _statstream(self):
        # Remember the device/inode of the stream we currently have open,
        # so reopenIfNeeded() can detect replacement of the file.
        if not self.stream:
            return
        st = os.fstat(self.stream.fileno())
        self.dev, self.ino = st[ST_DEV], st[ST_INO]

    def reopenIfNeeded(self):
        """
        Reopen the log file if it has been moved or replaced.

        Compares the on-disk file's device/inode with those recorded for
        the open stream; on mismatch (or if the path no longer exists) the
        old stream is closed and the file reopened.  To reduce race
        windows we stat by path only once, then fstat the new descriptor
        (issue #14632, thanks to John Mulligan).
        """
        try:
            # stat the file by path, checking for existence
            st = os.stat(self.baseFilename)
        except FileNotFoundError:
            st = None
        unchanged = (st is not None and
                     st[ST_DEV] == self.dev and
                     st[ST_INO] == self.ino)
        if unchanged:
            return
        if self.stream is not None:
            # we have an open file handle, clean it up
            self.stream.flush()
            self.stream.close()
            self.stream = None  # See Issue #21742: _open() might fail.
        # open a new file handle and get new stat info from that fd
        self.stream = self._open()
        self._statstream()

    def emit(self, record):
        """
        Emit a record, first reopening the underlying file if it has
        changed since the last emit.
        """
        self.reopenIfNeeded()
        logging.FileHandler.emit(self, record)

481 

482 

class SocketHandler(logging.Handler):
    """
    Writes logging records, in pickle format, to a streaming socket which
    is kept open across logging calls; if the peer resets it, a reconnect
    is attempted on the next call.  What is pickled is the LogRecord's
    attribute dictionary (__dict__), so the receiving end does not need
    the logging package installed to process the events; use the
    makeLogRecord function there to rebuild a LogRecord.
    """

    def __init__(self, host, port):
        """
        Initializes the handler with a specific host address and port.

        If *port* is None, *host* is taken to be the path of a Unix domain
        socket.  When the attribute *closeOnError* is set to True, a
        socket error silently closes the socket, which is then reopened
        on the next logging call.
        """
        logging.Handler.__init__(self)
        self.host = host
        self.port = port
        self.address = host if port is None else (host, port)
        self.sock = None
        self.closeOnError = False
        self.retryTime = None
        # Exponential backoff parameters for reconnect attempts.
        self.retryStart = 1.0
        self.retryMax = 30.0
        self.retryFactor = 2.0

    def makeSocket(self, timeout=1):
        """
        Factory method allowing subclasses to define the precise type of
        socket used.
        """
        if self.port is None:
            sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            sock.settimeout(timeout)
            try:
                sock.connect(self.address)
            except OSError:
                sock.close()  # Issue 19182
                raise
            return sock
        return socket.create_connection(self.address, timeout=timeout)

    def createSocket(self):
        """
        Try to create a socket, using an exponential backoff with a max
        retry time.  Thanks to Robert Olson for the original patch
        (SF #815911) which has been slightly refactored.
        """
        now = time.time()
        # Attempt when this is the first try after a disconnect
        # (retryTime is None) or when the backoff period has elapsed.
        if self.retryTime is not None and now < self.retryTime:
            return
        try:
            self.sock = self.makeSocket()
            self.retryTime = None  # next time, no delay before trying
        except OSError:
            # Creation failed: schedule the next attempt, doubling the
            # wait up to retryMax.
            if self.retryTime is None:
                self.retryPeriod = self.retryStart
            else:
                self.retryPeriod = min(self.retryPeriod * self.retryFactor,
                                       self.retryMax)
            self.retryTime = now + self.retryPeriod

    def send(self, s):
        """
        Send a pickled byte string to the socket, allowing for the partial
        sends that can happen when the network is busy.
        """
        if self.sock is None:
            self.createSocket()
        # self.sock remains None while we are inside the backoff window,
        # or when the reconnect attempt itself just failed.
        if self.sock:
            try:
                self.sock.sendall(s)
            except OSError:  # pragma: no cover
                self.sock.close()
                self.sock = None  # so we can call createSocket next time

    def makePickle(self, record):
        """
        Pickle the record's attribute dict in binary format with a length
        prefix, ready for transmission across the socket.
        """
        if record.exc_info:
            # Called for its side effect: puts traceback text into
            # record.exc_text so the receiver gets it.
            self.format(record)
        # See issue #14436: msg or args may be arbitrary objects the
        # receiving end cannot reconstruct, so bake msg % args into a
        # plain string and drop the originals.
        d = dict(record.__dict__)
        d['msg'] = record.getMessage()
        d['args'] = None
        d['exc_info'] = None
        # Issue #25685: delete 'message' if present: redundant with 'msg'
        d.pop('message', None)
        payload = pickle.dumps(d, 1)
        return struct.pack(">L", len(payload)) + payload

    def handleError(self, record):
        """
        Handle an error during logging (most likely a lost connection):
        with closeOnError set, close the socket so the next event retries
        the connection; otherwise defer to the base implementation.
        """
        if self.closeOnError and self.sock:
            self.sock.close()
            self.sock = None  # try to reconnect next time
        else:
            logging.Handler.handleError(self, record)

    def emit(self, record):
        """
        Pickle the record and write it to the socket in binary format.
        Any problem with the socket causes the packet to be silently
        dropped (and the connection re-established on a later call).
        """
        try:
            self.send(self.makePickle(record))
        except Exception:
            self.handleError(record)

    def close(self):
        """
        Closes the socket.
        """
        self.acquire()
        try:
            sock, self.sock = self.sock, None
            if sock:
                sock.close()
            logging.Handler.close(self)
        finally:
            self.release()

649 

class DatagramHandler(SocketHandler):
    """
    Writes logging records, in pickle format, to a datagram socket.  As
    with SocketHandler, the pickle sent is the LogRecord's attribute
    dictionary (__dict__), so the receiver does not need the logging
    package installed; use the makeLogRecord function there to rebuild a
    LogRecord.
    """
    def __init__(self, host, port):
        """
        Initializes the handler with a specific host address and port.
        """
        SocketHandler.__init__(self, host, port)
        self.closeOnError = False

    def makeSocket(self):
        """
        Override SocketHandler's factory to produce a UDP socket
        (SOCK_DGRAM) instead of a stream socket.
        """
        family = socket.AF_UNIX if self.port is None else socket.AF_INET
        return socket.socket(family, socket.SOCK_DGRAM)

    def send(self, s):
        """
        Send the pickled byte string in a single datagram.

        Unlike the stream version this makes no attempt at partial-send
        handling: UDP neither guarantees delivery nor preserves ordering,
        so there is nothing useful to retry.
        """
        if self.sock is None:
            self.createSocket()
        self.sock.sendto(s, self.address)

691 

692class SysLogHandler(logging.Handler): 

693 """ 

694 A handler class which sends formatted logging records to a syslog 

695 server. Based on Sam Rushing's syslog module: 

696 http://www.nightmare.com/squirl/python-ext/misc/syslog.py 

697 Contributed by Nicolas Untz (after which minor refactoring changes 

698 have been made). 

699 """ 

700 

    # from <linux/sys/syslog.h>:
    # ======================================================================
    # priorities/facilities are encoded into a single 32-bit quantity, where
    # the bottom 3 bits are the priority (0-7) and the top 28 bits are the
    # facility (0-big number).  Both the priorities and the facilities map
    # roughly one-to-one to strings in the syslogd(8) source code.  This
    # mapping is included in this file.
    #
    # priorities (these are ordered)

    LOG_EMERG     = 0       #  system is unusable
    LOG_ALERT     = 1       #  action must be taken immediately
    LOG_CRIT      = 2       #  critical conditions
    LOG_ERR       = 3       #  error conditions
    LOG_WARNING   = 4       #  warning conditions
    LOG_NOTICE    = 5       #  normal but significant condition
    LOG_INFO      = 6       #  informational
    LOG_DEBUG     = 7       #  debug-level messages

    #  facility codes
    LOG_KERN      = 0       #  kernel messages
    LOG_USER      = 1       #  random user-level messages
    LOG_MAIL      = 2       #  mail system
    LOG_DAEMON    = 3       #  system daemons
    LOG_AUTH      = 4       #  security/authorization messages
    LOG_SYSLOG    = 5       #  messages generated internally by syslogd
    LOG_LPR       = 6       #  line printer subsystem
    LOG_NEWS      = 7       #  network news subsystem
    LOG_UUCP      = 8       #  UUCP subsystem
    LOG_CRON      = 9       #  clock daemon
    LOG_AUTHPRIV  = 10      #  security/authorization messages (private)
    LOG_FTP       = 11      #  FTP daemon

    #  other codes through 15 reserved for system use
    LOG_LOCAL0    = 16      #  reserved for local use
    LOG_LOCAL1    = 17      #  reserved for local use
    LOG_LOCAL2    = 18      #  reserved for local use
    LOG_LOCAL3    = 19      #  reserved for local use
    LOG_LOCAL4    = 20      #  reserved for local use
    LOG_LOCAL5    = 21      #  reserved for local use
    LOG_LOCAL6    = 22      #  reserved for local use
    LOG_LOCAL7    = 23      #  reserved for local use

    # String aliases accepted by encodePriority() for priorities.
    priority_names = {
        "alert":    LOG_ALERT,
        "crit":     LOG_CRIT,
        "critical": LOG_CRIT,
        "debug":    LOG_DEBUG,
        "emerg":    LOG_EMERG,
        "err":      LOG_ERR,
        "error":    LOG_ERR,        #  DEPRECATED
        "info":     LOG_INFO,
        "notice":   LOG_NOTICE,
        "panic":    LOG_EMERG,      #  DEPRECATED
        "warn":     LOG_WARNING,    #  DEPRECATED
        "warning":  LOG_WARNING,
        }

    # String aliases accepted by encodePriority() for facilities.
    facility_names = {
        "auth":     LOG_AUTH,
        "authpriv": LOG_AUTHPRIV,
        "cron":     LOG_CRON,
        "daemon":   LOG_DAEMON,
        "ftp":      LOG_FTP,
        "kern":     LOG_KERN,
        "lpr":      LOG_LPR,
        "mail":     LOG_MAIL,
        "news":     LOG_NEWS,
        "security": LOG_AUTH,       #  DEPRECATED
        "syslog":   LOG_SYSLOG,
        "user":     LOG_USER,
        "uucp":     LOG_UUCP,
        "local0":   LOG_LOCAL0,
        "local1":   LOG_LOCAL1,
        "local2":   LOG_LOCAL2,
        "local3":   LOG_LOCAL3,
        "local4":   LOG_LOCAL4,
        "local5":   LOG_LOCAL5,
        "local6":   LOG_LOCAL6,
        "local7":   LOG_LOCAL7,
        }

    #The map below appears to be trivially lowercasing the key. However,
    #there's more to it than meets the eye - in some locales, lowercasing
    #gives unexpected results. See SF #1524081: in the Turkish locale,
    #"INFO".lower() != "info"
    priority_map = {
        "DEBUG" : "debug",
        "INFO" : "info",
        "WARNING" : "warning",
        "ERROR" : "error",
        "CRITICAL" : "critical"
    }

794 

    def __init__(self, address=('localhost', SYSLOG_UDP_PORT),
                 facility=LOG_USER, socktype=None):
        """
        Initialize a handler.

        If address is specified as a string, a UNIX socket is used. To log to a
        local syslogd, "SysLogHandler(address="/dev/log")" can be used.
        If facility is not specified, LOG_USER is used. If socktype is
        specified as socket.SOCK_DGRAM or socket.SOCK_STREAM, that specific
        socket type will be used. For Unix sockets, you can also specify a
        socktype of None, in which case socket.SOCK_DGRAM will be used, falling
        back to socket.SOCK_STREAM.
        """
        logging.Handler.__init__(self)

        self.address = address
        self.facility = facility
        self.socktype = socktype

        if isinstance(address, str):
            self.unixsocket = True
            # Syslog server may be unavailable during handler initialisation.
            # C's openlog() function also ignores connection errors.
            # Moreover, we ignore these errors while logging, so it not worse
            # to ignore it also here.
            try:
                self._connect_unixsocket(address)
            except OSError:
                pass
        else:
            self.unixsocket = False
            if socktype is None:
                socktype = socket.SOCK_DGRAM
            host, port = address
            ress = socket.getaddrinfo(host, port, 0, socktype)
            if not ress:
                raise OSError("getaddrinfo returns an empty list")
            # Try each resolved address in turn; a datagram socket needs no
            # connect, a stream socket must connect before we accept it.
            # 'err' is reset to None each iteration, so a later success
            # forgets an earlier failure; only if every candidate fails is
            # the last error re-raised below.
            for res in ress:
                af, socktype, proto, _, sa = res
                err = sock = None
                try:
                    sock = socket.socket(af, socktype, proto)
                    if socktype == socket.SOCK_STREAM:
                        sock.connect(sa)
                    break
                except OSError as exc:
                    err = exc
                    if sock is not None:
                        sock.close()
            if err is not None:
                raise err
            self.socket = sock
            # Record the socket type actually used (getaddrinfo may have
            # refined the requested one).
            self.socktype = socktype

848 

    def _connect_unixsocket(self, address):
        """
        Connect to the Unix domain socket at *address*.

        When the user left socktype as None, try SOCK_DGRAM first and fall
        back to SOCK_STREAM; when a specific socktype was requested, fail
        immediately if it cannot connect.
        """
        use_socktype = self.socktype
        if use_socktype is None:
            use_socktype = socket.SOCK_DGRAM
        self.socket = socket.socket(socket.AF_UNIX, use_socktype)
        try:
            self.socket.connect(address)
            # it worked, so set self.socktype to the used type
            self.socktype = use_socktype
        except OSError:
            self.socket.close()
            if self.socktype is not None:
                # user didn't specify falling back, so fail
                raise
            use_socktype = socket.SOCK_STREAM
            self.socket = socket.socket(socket.AF_UNIX, use_socktype)
            try:
                self.socket.connect(address)
                # it worked, so set self.socktype to the used type
                self.socktype = use_socktype
            except OSError:
                self.socket.close()
                raise

872 

def encodePriority(self, facility, priority):
    """
    Combine facility and priority into a single syslog PRI value.

    Either argument may be given as a string, in which case it is
    translated to an integer via the facility_names / priority_names
    mapping dictionaries.
    """
    fac = self.facility_names[facility] if isinstance(facility, str) else facility
    pri = self.priority_names[priority] if isinstance(priority, str) else priority
    # Syslog PRI encoding: facility * 8 + severity (RFC 3164 / RFC 5424).
    return (fac << 3) | pri

885 

def close(self):
    """
    Close the handler.

    Shuts down the syslog socket and then delegates to the base-class
    close, holding the handler's I/O lock for the whole operation.
    """
    self.acquire()
    try:
        self.socket.close()
        logging.Handler.close(self)
    finally:
        self.release()

896 

def mapPriority(self, levelName):
    """
    Translate a logging level name into a priority_names key.

    Useful when custom levels are in play, or when lowercasing the
    level name directly would be wrong for locale reasons (see
    SF #1524081). Unknown names map to "warning".
    """
    try:
        return self.priority_map[levelName]
    except KeyError:
        return "warning"

906 

ident = ''          # prepended to all messages (e.g. a program/tag prefix for syslogd)
append_nul = True   # some old syslog daemons expect a NUL terminator on each message

909 

def emit(self, record):
    """
    Emit a record.

    The record is formatted, and then sent to the syslog server. If
    exception information is present, it is NOT sent to the server.
    Any failure is routed through handleError() rather than raised.
    """
    try:
        msg = self.format(record)
        if self.ident:
            msg = self.ident + msg
        if self.append_nul:
            # Some old syslog daemons require a NUL-terminated message.
            msg += '\000'

        # We need to convert record level to lowercase, maybe this will
        # change in the future.
        prio = '<%d>' % self.encodePriority(self.facility,
                                            self.mapPriority(record.levelname))
        prio = prio.encode('utf-8')
        # Message is a string. Convert to bytes as required by RFC 5424
        msg = msg.encode('utf-8')
        msg = prio + msg
        if self.unixsocket:
            try:
                self.socket.send(msg)
            except OSError:
                # The connection may have been dropped (e.g. syslogd was
                # restarted): reconnect once and retry the send.
                self.socket.close()
                self._connect_unixsocket(self.address)
                self.socket.send(msg)
        elif self.socktype == socket.SOCK_DGRAM:
            self.socket.sendto(msg, self.address)
        else:
            # Stream (TCP) socket: ensure the whole payload is written.
            self.socket.sendall(msg)
    except Exception:
        self.handleError(record)

945 

class SMTPHandler(logging.Handler):
    """
    A handler class which sends an SMTP email for each logging event.
    """
    def __init__(self, mailhost, fromaddr, toaddrs, subject,
                 credentials=None, secure=None, timeout=5.0):
        """
        Initialize the handler.

        Initialize the instance with the from and to addresses and subject
        line of the email. To specify a non-standard SMTP port, use the
        (host, port) tuple format for the mailhost argument. To specify
        authentication credentials, supply a (username, password) tuple
        for the credentials argument. To specify the use of a secure
        protocol (TLS), pass in a tuple for the secure argument. This will
        only be used when authentication credentials are supplied. The tuple
        will be either an empty tuple, or a single-value tuple with the name
        of a keyfile, or a 2-value tuple with the names of the keyfile and
        certificate file. (This tuple is passed to the `starttls` method).
        A timeout in seconds can be specified for the SMTP connection (the
        default is 5 seconds).
        """
        logging.Handler.__init__(self)
        if isinstance(mailhost, (list, tuple)):
            self.mailhost, self.mailport = mailhost
        else:
            self.mailhost, self.mailport = mailhost, None
        if isinstance(credentials, (list, tuple)):
            self.username, self.password = credentials
        else:
            # Keep the attribute pair consistent whether or not credentials
            # were supplied, so callers can always read self.password.
            self.username = self.password = None
        self.fromaddr = fromaddr
        if isinstance(toaddrs, str):
            # A single recipient may be given as a bare string.
            toaddrs = [toaddrs]
        self.toaddrs = toaddrs
        self.subject = subject
        self.secure = secure
        self.timeout = timeout

    def getSubject(self, record):
        """
        Determine the subject for the email.

        If you want to specify a subject line which is record-dependent,
        override this method.
        """
        return self.subject

    def emit(self, record):
        """
        Emit a record.

        Format the record and send it to the specified addressees.
        Any failure is routed through handleError() rather than raised.
        """
        try:
            import smtplib
            from email.message import EmailMessage
            import email.utils

            port = self.mailport
            if not port:
                port = smtplib.SMTP_PORT
            smtp = smtplib.SMTP(self.mailhost, port, timeout=self.timeout)
            msg = EmailMessage()
            msg['From'] = self.fromaddr
            msg['To'] = ','.join(self.toaddrs)
            msg['Subject'] = self.getSubject(record)
            msg['Date'] = email.utils.localtime()
            msg.set_content(self.format(record))
            if self.username:
                if self.secure is not None:
                    # starttls() requires an EHLO exchange before and after.
                    smtp.ehlo()
                    smtp.starttls(*self.secure)
                    smtp.ehlo()
                smtp.login(self.username, self.password)
            smtp.send_message(msg)
            smtp.quit()
        except Exception:
            self.handleError(record)

1025 

class NTEventLogHandler(logging.Handler):
    """
    A handler class which sends events to the NT Event Log. Adds a
    registry entry for the specified application name. If no dllname is
    provided, win32service.pyd (which contains some basic message
    placeholders) is used. Note that use of these placeholders will make
    your event logs big, as the entire message source is held in the log.
    If you want slimmer logs, you have to pass in the name of your own DLL
    which contains the message definitions you want to use in the event log.
    """
    def __init__(self, appname, dllname=None, logtype="Application"):
        logging.Handler.__init__(self)
        try:
            import win32evtlogutil, win32evtlog
            self.appname = appname
            self._welu = win32evtlogutil
            if not dllname:
                # Default to win32service.pyd, which lives two directory
                # levels up from win32evtlogutil itself.
                head = os.path.split(self._welu.__file__)
                head = os.path.split(head[0])
                dllname = os.path.join(head[0], r'win32service.pyd')
            self.dllname = dllname
            self.logtype = logtype
            self._welu.AddSourceToRegistry(appname, dllname, logtype)
            self.deftype = win32evtlog.EVENTLOG_ERROR_TYPE
            self.typemap = {
                logging.DEBUG:    win32evtlog.EVENTLOG_INFORMATION_TYPE,
                logging.INFO:     win32evtlog.EVENTLOG_INFORMATION_TYPE,
                logging.WARNING:  win32evtlog.EVENTLOG_WARNING_TYPE,
                logging.ERROR:    win32evtlog.EVENTLOG_ERROR_TYPE,
                logging.CRITICAL: win32evtlog.EVENTLOG_ERROR_TYPE,
            }
        except ImportError:
            # Without the pywin32 extensions the handler degrades to a no-op.
            print("The Python Win32 extensions for NT (service, event "
                  "logging) appear not to be available.")
            self._welu = None

    def getMessageID(self, record):
        """
        Return the message ID for the event record. If you are using your
        own messages, you could do this by having the msg passed to the
        logger being an ID rather than a formatting string. Then, in here,
        you could use a dictionary lookup to get the message ID. This
        version returns 1, which is the base message ID in win32service.pyd.
        """
        return 1

    def getEventCategory(self, record):
        """
        Return the event category for the record.

        Override this if you want to specify your own categories. This version
        returns 0.
        """
        return 0

    def getEventType(self, record):
        """
        Return the event type for the record.

        Override this if you want to specify your own types. This version does
        a mapping using the handler's typemap attribute, which is set up in
        __init__() to a dictionary which contains mappings for DEBUG, INFO,
        WARNING, ERROR and CRITICAL. If you are using your own levels you will
        either need to override this method or place a suitable dictionary in
        the handler's typemap attribute.
        """
        return self.typemap.get(record.levelno, self.deftype)

    def emit(self, record):
        """
        Emit a record.

        Determine the message ID, event category and event type, then
        log the message in the NT event log. Does nothing when the win32
        extensions are unavailable.
        """
        if not self._welu:
            return
        try:
            event_id = self.getMessageID(record)
            category = self.getEventCategory(record)
            ev_type = self.getEventType(record)
            text = self.format(record)
            self._welu.ReportEvent(self.appname, event_id, category,
                                   ev_type, [text])
        except Exception:
            self.handleError(record)

    def close(self):
        """
        Clean up this handler.

        You can remove the application name from the registry as a
        source of event log entries. However, if you do this, you will
        not be able to see the events as you intended in the Event Log
        Viewer - it needs to be able to access the registry to get the
        DLL name.
        """
        #self._welu.RemoveSourceFromRegistry(self.appname, self.logtype)
        logging.Handler.close(self)

1123 

class HTTPHandler(logging.Handler):
    """
    A class which sends records to a Web server, using either GET or
    POST semantics.
    """
    def __init__(self, host, url, method="GET", secure=False, credentials=None,
                 context=None):
        """
        Initialize the instance with the host, the request URL, and the method
        ("GET" or "POST")
        """
        logging.Handler.__init__(self)
        method = method.upper()
        if method not in ["GET", "POST"]:
            raise ValueError("method must be GET or POST")
        if not secure and context is not None:
            raise ValueError("context parameter only makes sense "
                             "with secure=True")
        self.host = host
        self.url = url
        self.method = method
        self.secure = secure
        self.credentials = credentials
        self.context = context

    def mapLogRecord(self, record):
        """
        Default implementation of mapping the log record into a dict
        that is sent as the CGI data. Overwrite in your class.
        Contributed by Franz Glasner.
        """
        return record.__dict__

    def emit(self, record):
        """
        Emit a record.

        Send the record to the Web server as a percent-encoded dictionary.
        Any failure is routed through handleError() rather than raised.
        """
        try:
            import http.client, urllib.parse
            host = self.host
            if self.secure:
                conn = http.client.HTTPSConnection(host, context=self.context)
            else:
                conn = http.client.HTTPConnection(host)
            url = self.url
            data = urllib.parse.urlencode(self.mapLogRecord(record))
            if self.method == "GET":
                # Append the form data to the query string.
                sep = '&' if url.find('?') >= 0 else '?'
                url = url + "%c%s" % (sep, data)
            conn.putrequest(self.method, url)
            # support multiple hosts on one IP address...
            # need to strip optional :port from host, if present
            colon = host.find(":")
            if colon >= 0:
                host = host[:colon]
            # See issue #30904: putrequest call above already adds this header
            # on Python 3.x.
            # conn.putheader("Host", host)
            if self.method == "POST":
                conn.putheader("Content-type",
                               "application/x-www-form-urlencoded")
                conn.putheader("Content-length", str(len(data)))
            if self.credentials:
                import base64
                raw = ('%s:%s' % self.credentials).encode('utf-8')
                auth = 'Basic ' + base64.b64encode(raw).strip().decode('ascii')
                conn.putheader('Authorization', auth)
            conn.endheaders()
            if self.method == "POST":
                conn.send(data.encode('utf-8'))
            conn.getresponse()  # can't do anything with the result
        except Exception:
            self.handleError(record)

1202 

class BufferingHandler(logging.Handler):
    """
    A handler class which buffers logging records in memory. Whenever each
    record is added to the buffer, a check is made to see if the buffer should
    be flushed. If it should, then flush() is expected to do what's needed.
    """
    def __init__(self, capacity):
        """
        Initialize the handler with the buffer size.
        """
        logging.Handler.__init__(self)
        self.capacity = capacity
        self.buffer = []

    def shouldFlush(self, record):
        """
        Should the handler flush its buffer?

        Returns true if the buffer is up to capacity. This method can be
        overridden to implement custom flushing strategies.
        """
        return len(self.buffer) >= self.capacity

    def emit(self, record):
        """
        Emit a record.

        Append the record; when shouldFlush() says the buffer is full,
        call flush() to process it.
        """
        self.buffer.append(record)
        if self.shouldFlush(record):
            self.flush()

    def flush(self):
        """
        Override to implement custom flushing behaviour.

        This version just zaps the buffer to empty.
        """
        self.acquire()
        try:
            # Rebind (rather than clear in place) so any records already
            # handed out keep referring to the old list.
            self.buffer = []
        finally:
            self.release()

    def close(self):
        """
        Close the handler.

        This version just flushes and chains to the parent class' close().
        """
        try:
            self.flush()
        finally:
            logging.Handler.close(self)

1259 

class MemoryHandler(BufferingHandler):
    """
    A handler class which buffers logging records in memory, periodically
    flushing them to a target handler. Flushing occurs whenever the buffer
    is full, or when an event of a certain severity or greater is seen.
    """
    def __init__(self, capacity, flushLevel=logging.ERROR, target=None,
                 flushOnClose=True):
        """
        Initialize the handler with the buffer size, the level at which
        flushing should occur and an optional target.

        Note that without a target being set either here or via setTarget(),
        a MemoryHandler is no use to anyone!

        The ``flushOnClose`` argument is ``True`` for backward compatibility
        reasons - the old behaviour is that when the handler is closed, the
        buffer is flushed, even if the flush level hasn't been exceeded nor the
        capacity exceeded. To prevent this, set ``flushOnClose`` to ``False``.
        """
        BufferingHandler.__init__(self, capacity)
        self.flushLevel = flushLevel
        self.target = target
        # See Issue #26559 for why this has been added
        self.flushOnClose = flushOnClose

    def shouldFlush(self, record):
        """
        Check for buffer full or a record at the flushLevel or higher.
        """
        if record.levelno >= self.flushLevel:
            return True
        return len(self.buffer) >= self.capacity

    def setTarget(self, target):
        """
        Set the target handler for this handler.
        """
        self.target = target

    def flush(self):
        """
        For a MemoryHandler, flushing means just sending the buffered
        records to the target, if there is one. Override if you want
        different behaviour.

        The record buffer is also cleared by this operation.
        """
        self.acquire()
        try:
            if self.target:
                for buffered in self.buffer:
                    self.target.handle(buffered)
                self.buffer = []
        finally:
            self.release()

    def close(self):
        """
        Flush, if appropriately configured, set the target to None and lose the
        buffer.
        """
        try:
            if self.flushOnClose:
                self.flush()
        finally:
            self.acquire()
            try:
                self.target = None
                BufferingHandler.close(self)
            finally:
                self.release()

1331 

1332 

class QueueHandler(logging.Handler):
    """
    This handler sends events to a queue. Typically, it would be used together
    with a multiprocessing Queue to centralise logging to file in one process
    (in a multi-process application), so as to avoid file write contention
    between processes.

    This code is new in Python 3.2, but this class can be copy pasted into
    user code for use with earlier Python versions.
    """

    def __init__(self, queue):
        """
        Initialise an instance, using the passed queue.
        """
        logging.Handler.__init__(self)
        self.queue = queue

    def enqueue(self, record):
        """
        Enqueue a record.

        The base implementation uses put_nowait. You may want to override
        this method if you want to use blocking, timeouts or custom queue
        implementations.
        """
        self.queue.put_nowait(record)

    def prepare(self, record):
        """
        Prepares a record for queuing. The object returned by this method is
        enqueued.

        The base implementation formats the record to merge the message
        and arguments, and removes unpickleable items from a copy of the
        record.

        You might want to override this method if you want to convert
        the record to a dict or JSON string, or send a modified copy
        of the record while leaving the original intact.
        """
        # Formatting merges msg and args (and renders any traceback into
        # exc_text), so the potentially unpickleable msg/args/exc_info can
        # be dropped from what we enqueue.
        rendered = self.format(record)
        # bpo-35726: work on a copy so other handlers in the chain still
        # see the original record unchanged.
        clone = copy.copy(record)
        clone.message = rendered
        clone.msg = rendered
        clone.args = None
        clone.exc_info = None
        clone.exc_text = None
        return clone

    def emit(self, record):
        """
        Emit a record.

        Writes the LogRecord to the queue, preparing it for pickling first.
        """
        try:
            self.enqueue(self.prepare(record))
        except Exception:
            self.handleError(record)

1400 

1401 

class QueueListener(object):
    """
    This class implements an internal threaded listener which watches for
    LogRecords being added to a queue, removes them and passes them to a
    list of handlers for processing.
    """
    _sentinel = None  # enqueued to tell the monitor thread to shut down

    def __init__(self, queue, *handlers, respect_handler_level=False):
        """
        Initialise an instance with the specified queue and
        handlers.
        """
        self.queue = queue
        self.handlers = handlers
        self._thread = None
        self.respect_handler_level = respect_handler_level

    def dequeue(self, block):
        """
        Dequeue a record and return it, optionally blocking.

        The base implementation uses get. You may want to override this method
        if you want to use timeouts or work with custom queue implementations.
        """
        return self.queue.get(block)

    def start(self):
        """
        Start the listener.

        This starts up a background thread to monitor the queue for
        LogRecords to process.
        """
        self._thread = t = threading.Thread(target=self._monitor)
        t.daemon = True
        t.start()

    def prepare(self, record):
        """
        Prepare a record for handling.

        This method just returns the passed-in record. You may want to
        override this method if you need to do any custom marshalling or
        manipulation of the record before passing it to the handlers.
        """
        return record

    def handle(self, record):
        """
        Handle a record.

        This just loops through the handlers offering them the record
        to handle.
        """
        record = self.prepare(record)
        for handler in self.handlers:
            if not self.respect_handler_level:
                process = True
            else:
                process = record.levelno >= handler.level
            if process:
                handler.handle(record)

    def _monitor(self):
        """
        Monitor the queue for records, and ask the handler
        to deal with them.

        This method runs on a separate, internal thread.
        The thread will terminate if it sees a sentinel object in the queue.
        """
        q = self.queue
        has_task_done = hasattr(q, 'task_done')
        while True:
            try:
                record = self.dequeue(True)
                if record is self._sentinel:
                    if has_task_done:
                        q.task_done()
                    break
                self.handle(record)
                if has_task_done:
                    q.task_done()
            except queue.Empty:
                break

    def enqueue_sentinel(self):
        """
        This is used to enqueue the sentinel record.

        The base implementation uses put_nowait. You may want to override this
        method if you want to use timeouts or work with custom queue
        implementations.
        """
        self.queue.put_nowait(self._sentinel)

    def stop(self):
        """
        Stop the listener.

        This asks the thread to terminate, and then waits for it to do so.
        Note that if you don't call this before your application exits, there
        may be some records still left on the queue, which won't be processed.

        Calling stop() on a listener that was never started, or calling it
        more than once, is a harmless no-op (see gh-114706) instead of
        raising AttributeError on the None thread.
        """
        if self._thread:  # fix: guard against double stop / never started
            self.enqueue_sentinel()
            self._thread.join()
            self._thread = None