Coverage for /pythoncovmergedfiles/medio/medio/usr/lib/python3.9/logging/handlers.py: 19%

695 statements  

« prev     ^ index     » next       coverage.py v7.3.1, created at 2023-09-25 06:05 +0000

1# Copyright 2001-2016 by Vinay Sajip. All Rights Reserved. 

2# 

3# Permission to use, copy, modify, and distribute this software and its 

4# documentation for any purpose and without fee is hereby granted, 

5# provided that the above copyright notice appear in all copies and that 

6# both that copyright notice and this permission notice appear in 

7# supporting documentation, and that the name of Vinay Sajip 

8# not be used in advertising or publicity pertaining to distribution 

9# of the software without specific, written prior permission. 

10# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING 

11# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL 

12# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR 

13# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER 

14# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT 

15# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 

16 

17""" 

18Additional handlers for the logging package for Python. The core package is 

19based on PEP 282 and comments thereto in comp.lang.python. 

20 

21Copyright (C) 2001-2016 Vinay Sajip. All Rights Reserved. 

22 

23To use, simply 'import logging.handlers' and log away! 

24""" 

25 

26import logging, socket, os, pickle, struct, time, re 

27from stat import ST_DEV, ST_INO, ST_MTIME 

28import queue 

29import threading 

30import copy 

31 

32# 

33# Some constants... 

34# 

35 

# Default ports used by the network-based logging handlers.
DEFAULT_TCP_LOGGING_PORT    = 9020
DEFAULT_UDP_LOGGING_PORT    = 9021
DEFAULT_HTTP_LOGGING_PORT   = 9022
DEFAULT_SOAP_LOGGING_PORT   = 9023
# Standard syslog port (the same number is used for UDP and TCP).
SYSLOG_UDP_PORT             = 514
SYSLOG_TCP_PORT             = 514

_MIDNIGHT = 24 * 60 * 60  # number of seconds in a day

44 

class BaseRotatingHandler(logging.FileHandler):
    """
    Base class for handlers that rotate log files at a certain point.

    Not meant to be instantiated directly; use RotatingFileHandler or
    TimedRotatingFileHandler instead.
    """
    # Optional user-supplied callables that customise rotation:
    # 'namer' maps a default rotated filename to the name actually used,
    # 'rotator' performs the rotation itself (default is a rename).
    namer = None
    rotator = None

    def __init__(self, filename, mode, encoding=None, delay=False, errors=None):
        """
        Use the specified filename for streamed logging.
        """
        logging.FileHandler.__init__(self, filename, mode=mode,
                                     encoding=encoding, delay=delay,
                                     errors=errors)
        self.mode = mode
        self.encoding = encoding
        self.errors = errors

    def emit(self, record):
        """
        Emit a record.

        Output the record to the file, rolling the file over first when
        shouldRollover() (implemented by subclasses) says so.
        """
        try:
            if self.shouldRollover(record):
                self.doRollover()
            logging.FileHandler.emit(self, record)
        except Exception:
            self.handleError(record)

    def rotation_filename(self, default_name):
        """
        Modify the filename of a log file when rotating.

        Delegates to the 'namer' attribute when it is callable; otherwise
        (the default, None) the name is returned unchanged.

        :param default_name: The default name for the log file.
        """
        if callable(self.namer):
            return self.namer(default_name)
        return default_name

    def rotate(self, source, dest):
        """
        When rotating, rotate the current log.

        Delegates to the 'rotator' attribute when it is callable; otherwise
        (the default, None) the source is simply renamed to the destination.

        :param source: The source filename. This is normally the base
                       filename, e.g. 'test.log'
        :param dest:   The destination filename. This is normally
                       what the source is rotated to, e.g. 'test.log.1'.
        """
        if callable(self.rotator):
            self.rotator(source, dest)
        else:
            # Issue 18940: the file may not have been created yet when
            # delay is True, so only rename it if it exists.
            if os.path.exists(source):
                os.rename(source, dest)

118 

class RotatingFileHandler(BaseRotatingHandler):
    """
    Handler for logging to a set of files, which switches from one file
    to the next when the current file reaches a certain size.
    """
    def __init__(self, filename, mode='a', maxBytes=0, backupCount=0,
                 encoding=None, delay=False, errors=None):
        """
        Open the specified file and use it as the stream for logging.

        By default, the file grows indefinitely. You can specify particular
        values of maxBytes and backupCount to allow the file to rollover at
        a predetermined size.

        Rollover occurs whenever the current log file is nearly maxBytes in
        length. If backupCount is >= 1, the system will successively create
        new files with the same pathname as the base file, but with extensions
        ".1", ".2" etc. appended to it. For example, with a backupCount of 5
        and a base file name of "app.log", you would get "app.log",
        "app.log.1", "app.log.2", ... through to "app.log.5". The file being
        written to is always "app.log" - when it gets filled up, it is closed
        and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc.
        exist, then they are renamed to "app.log.2", "app.log.3" etc.
        respectively.

        If maxBytes is zero, rollover never occurs.
        """
        # Rotation only makes sense in append mode: honouring e.g. 'w' would
        # truncate the log on every run of the application, losing the
        # records from previous runs.
        if maxBytes > 0:
            mode = 'a'
        BaseRotatingHandler.__init__(self, filename, mode, encoding=encoding,
                                     delay=delay, errors=errors)
        self.maxBytes = maxBytes
        self.backupCount = backupCount

    def doRollover(self):
        """
        Do a rollover, as described in __init__().
        """
        if self.stream:
            self.stream.close()
            self.stream = None
        if self.backupCount > 0:
            # Shift app.log.1 -> app.log.2, app.log.2 -> app.log.3, ...,
            # discarding the oldest backup.
            for i in range(self.backupCount - 1, 0, -1):
                sfn = self.rotation_filename("%s.%d" % (self.baseFilename, i))
                dfn = self.rotation_filename("%s.%d" % (self.baseFilename,
                                                        i + 1))
                if os.path.exists(sfn):
                    if os.path.exists(dfn):
                        os.remove(dfn)
                    os.rename(sfn, dfn)
            dfn = self.rotation_filename(self.baseFilename + ".1")
            if os.path.exists(dfn):
                os.remove(dfn)
            self.rotate(self.baseFilename, dfn)
        if not self.delay:
            self.stream = self._open()

    def shouldRollover(self, record):
        """
        Determine if rollover should occur.

        Basically, see if the supplied record would cause the file to exceed
        the size limit we have.
        """
        if self.stream is None:                 # delay was set...
            self.stream = self._open()
        if self.maxBytes > 0:                   # are we rolling over?
            msg = "%s\n" % self.format(record)
            # Seek to EOF explicitly: in append mode on Windows the file
            # position is not guaranteed to be at the end (non-POSIX quirk).
            self.stream.seek(0, 2)
            if self.stream.tell() + len(msg) >= self.maxBytes:
                return 1
        return 0

196 

class TimedRotatingFileHandler(BaseRotatingHandler):
    """
    Handler for logging to a file, rotating the log file at certain timed
    intervals.

    If backupCount is > 0, when rollover is done, no more than backupCount
    files are kept - the oldest ones are deleted.
    """
    def __init__(self, filename, when='h', interval=1, backupCount=0,
                 encoding=None, delay=False, utc=False, atTime=None,
                 errors=None):
        BaseRotatingHandler.__init__(self, filename, 'a', encoding=encoding,
                                     delay=delay, errors=errors)
        self.when = when.upper()
        self.backupCount = backupCount
        self.utc = utc
        self.atTime = atTime
        # Work out the base rollover interval in seconds, the strftime
        # suffix appended to rotated filenames, and a regex matching old
        # suffixes.  Supported 'when' values (case-insensitive):
        #   S - seconds            M - minutes
        #   H - hours              D - days
        #   MIDNIGHT - roll over at midnight
        #   W{0-6}   - roll over on a certain day; 0 is Monday
        if self.when == 'S':
            self.interval = 1  # one second
            self.suffix = "%Y-%m-%d_%H-%M-%S"
            extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}(\.\w+)?$"
        elif self.when == 'M':
            self.interval = 60  # one minute
            self.suffix = "%Y-%m-%d_%H-%M"
            extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}(\.\w+)?$"
        elif self.when == 'H':
            self.interval = 60 * 60  # one hour
            self.suffix = "%Y-%m-%d_%H"
            extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}(\.\w+)?$"
        elif self.when in ('D', 'MIDNIGHT'):
            self.interval = 60 * 60 * 24  # one day
            self.suffix = "%Y-%m-%d"
            extMatch = r"^\d{4}-\d{2}-\d{2}(\.\w+)?$"
        elif self.when.startswith('W'):
            self.interval = 60 * 60 * 24 * 7  # one week
            if len(self.when) != 2:
                raise ValueError("You must specify a day for weekly rollover from 0 to 6 (0 is Monday): %s" % self.when)
            if self.when[1] < '0' or self.when[1] > '6':
                raise ValueError("Invalid day specified for weekly rollover: %s" % self.when)
            self.dayOfWeek = int(self.when[1])
            self.suffix = "%Y-%m-%d"
            extMatch = r"^\d{4}-\d{2}-\d{2}(\.\w+)?$"
        else:
            raise ValueError("Invalid rollover interval specified: %s" % self.when)

        self.extMatch = re.compile(extMatch, re.ASCII)
        self.interval = self.interval * interval  # multiply by units requested
        # The filename passed in could have been a path object (see Issue
        # #27493); self.baseFilename is always a string.
        filename = self.baseFilename
        # Anchor the first rollover to the existing file's mtime if there
        # is one, so restarting the application preserves the schedule.
        if os.path.exists(filename):
            t = os.stat(filename)[ST_MTIME]
        else:
            t = int(time.time())
        self.rolloverAt = self.computeRollover(t)

    def computeRollover(self, currentTime):
        """
        Work out the rollover time based on the specified time.
        """
        result = currentTime + self.interval
        # For MIDNIGHT and weekly rollovers the interval length is already
        # known; what must be worked out is WHEN the next interval starts.
        # E.g. a midnight rollover has a one-day interval, but the one-day
        # clock starts at midnight (or atTime), not "now" - so the first
        # rolloverAt is fudged here and the regular interval takes over
        # afterwards.  Leap seconds are deliberately ignored. :)
        if self.when == 'MIDNIGHT' or self.when.startswith('W'):
            # This could be done with less code, but clarity wins.
            if self.utc:
                t = time.gmtime(currentTime)
            else:
                t = time.localtime(currentTime)
            currentHour = t[3]
            currentMinute = t[4]
            currentSecond = t[5]
            currentDay = t[6]
            # Seconds after midnight at which rotation should happen.
            if self.atTime is None:
                rotate_ts = _MIDNIGHT
            else:
                rotate_ts = ((self.atTime.hour * 60 + self.atTime.minute) * 60 +
                             self.atTime.second)

            # r is the number of seconds left between now and the next rotation
            r = rotate_ts - ((currentHour * 60 + currentMinute) * 60 +
                             currentSecond)
            if r < 0:
                # Rotate time is before the current time (for example when
                # self.rotateAt is 13:45 and it now 14:15): rotation is
                # tomorrow.
                r += _MIDNIGHT
                currentDay = (currentDay + 1) % 7
            result = currentTime + r
            # For weekly rollover, add in the number of days until the next
            # rollover, offset by 1 since the calculation above already took
            # us to the start of the next day.  Three cases:
            #   1) rollover day is today: nothing more to add;
            #   2) rollover day is later this week (today is day 2, rollover
            #      day 6): wait 6 - 2 - 1 extra days (+1 below);
            #   3) rollover day already passed this week (today is day 5,
            #      rollover day 3): wait the days left this week plus the
            #      days into next week, 6 - 5 + 3 (+1 below).
            if self.when.startswith('W'):
                day = currentDay  # 0 is Monday
                if day != self.dayOfWeek:
                    if day < self.dayOfWeek:
                        daysToWait = self.dayOfWeek - day
                    else:
                        daysToWait = 6 - day + self.dayOfWeek + 1
                    newRolloverAt = result + (daysToWait * (60 * 60 * 24))
                    if not self.utc:
                        # Compensate if a DST transition happens before the
                        # computed rollover moment.
                        dstNow = t[-1]
                        dstAtRollover = time.localtime(newRolloverAt)[-1]
                        if dstNow != dstAtRollover:
                            if not dstNow:
                                # DST kicks in before next rollover: deduct an hour.
                                addend = -3600
                            else:
                                # DST bows out before next rollover: add an hour.
                                addend = 3600
                            newRolloverAt += addend
                    result = newRolloverAt
        return result

    def shouldRollover(self, record):
        """
        Determine if rollover should occur.

        record is not used, as we are just comparing times, but it is needed so
        the method signatures are the same
        """
        t = int(time.time())
        return 1 if t >= self.rolloverAt else 0

    def getFilesToDelete(self):
        """
        Determine the files to delete when rolling over.

        More specific than the earlier method, which just used glob.glob().
        """
        dirName, baseName = os.path.split(self.baseFilename)
        prefix = baseName + "."
        plen = len(prefix)
        result = []
        # Collect siblings that look like rotated copies of this log file.
        for fileName in os.listdir(dirName):
            if fileName[:plen] == prefix:
                suffix = fileName[plen:]
                if self.extMatch.match(suffix):
                    result.append(os.path.join(dirName, fileName))
        if len(result) < self.backupCount:
            result = []
        else:
            # Suffixes sort chronologically, so the oldest files sort first.
            result.sort()
            result = result[:len(result) - self.backupCount]
        return result

    def doRollover(self):
        """
        do a rollover; in this case, a date/time stamp is appended to the filename
        when the rollover happens.  However, you want the file to be named for the
        start of the interval, not the current time.  If there is a backup count,
        then we have to get a list of matching filenames, sort them and remove
        the one with the oldest suffix.
        """
        if self.stream:
            self.stream.close()
            self.stream = None
        # Name the rotated file for the START of the interval that just
        # ended, not for the current time.
        currentTime = int(time.time())
        dstNow = time.localtime(currentTime)[-1]
        t = self.rolloverAt - self.interval
        if self.utc:
            timeTuple = time.gmtime(t)
        else:
            timeTuple = time.localtime(t)
            dstThen = timeTuple[-1]
            if dstNow != dstThen:
                # A DST transition occurred during the interval; shift the
                # timestamp so the suffix reflects local wall-clock time.
                addend = 3600 if dstNow else -3600
                timeTuple = time.localtime(t + addend)
        dfn = self.rotation_filename(self.baseFilename + "." +
                                     time.strftime(self.suffix, timeTuple))
        if os.path.exists(dfn):
            os.remove(dfn)
        self.rotate(self.baseFilename, dfn)
        if self.backupCount > 0:
            for s in self.getFilesToDelete():
                os.remove(s)
        if not self.delay:
            self.stream = self._open()
        newRolloverAt = self.computeRollover(currentTime)
        while newRolloverAt <= currentTime:
            newRolloverAt = newRolloverAt + self.interval
        # If DST changes and midnight or weekly rollover, adjust for this.
        if (self.when == 'MIDNIGHT' or self.when.startswith('W')) and not self.utc:
            dstAtRollover = time.localtime(newRolloverAt)[-1]
            if dstNow != dstAtRollover:
                if not dstNow:
                    # DST kicks in before next rollover: deduct an hour.
                    addend = -3600
                else:
                    # DST bows out before next rollover: add an hour.
                    addend = 3600
                newRolloverAt += addend
        self.rolloverAt = newRolloverAt

422 

class WatchedFileHandler(logging.FileHandler):
    """
    A handler for logging to a file, which watches the file
    to see if it has changed while in use. This can happen because of
    usage of programs such as newsyslog and logrotate which perform
    log file rotation. This handler, intended for use under Unix,
    watches the file to see if it has changed since the last emit.
    (A file has changed if its device or inode have changed.)
    If it has changed, the old file stream is closed, and the file
    opened to get a new stream.

    This handler is not appropriate for use under Windows, because
    under Windows open files cannot be moved or renamed - logging
    opens the files with exclusive locks - and so there is no need
    for such a handler. Furthermore, ST_INO is not supported under
    Windows; stat always returns zero for this value.

    This handler is based on a suggestion and patch by Chad J.
    Schroeder.
    """
    def __init__(self, filename, mode='a', encoding=None, delay=False,
                 errors=None):
        logging.FileHandler.__init__(self, filename, mode=mode,
                                     encoding=encoding, delay=delay,
                                     errors=errors)
        # Sentinels meaning "no stream stat'ed yet".
        self.dev, self.ino = -1, -1
        self._statstream()

    def _statstream(self):
        # Remember the device/inode of the currently open stream so a later
        # stat of the path can reveal whether the file was replaced.
        if self.stream:
            sres = os.fstat(self.stream.fileno())
            self.dev, self.ino = sres[ST_DEV], sres[ST_INO]

    def reopenIfNeeded(self):
        """
        Reopen log file if needed.

        Checks if the underlying file has changed, and if it
        has, close the old stream and reopen the file to get the
        current stream.
        """
        # Reduce the chance of race conditions by stat'ing by path only
        # once and then fstat'ing our new fd if we opened a new log stream.
        # See issue #14632: Thanks to John Mulligan for the problem report
        # and patch.
        try:
            # stat the file by path, checking for existence
            sres = os.stat(self.baseFilename)
        except FileNotFoundError:
            sres = None
        # Compare the file-system stat with that of our stream's handle.
        changed = (not sres or
                   sres[ST_DEV] != self.dev or
                   sres[ST_INO] != self.ino)
        if changed:
            if self.stream is not None:
                # we have an open file handle, clean it up
                self.stream.flush()
                self.stream.close()
                self.stream = None  # See Issue #21742: _open () might fail.
            # Open a new handle and take fresh stat info from its fd.
            self.stream = self._open()
            self._statstream()

    def emit(self, record):
        """
        Emit a record.

        If underlying file has changed, reopen the file before emitting the
        record to it.
        """
        self.reopenIfNeeded()
        logging.FileHandler.emit(self, record)

493 

494 

class SocketHandler(logging.Handler):
    """
    A handler class which writes logging records, in pickle format, to
    a streaming socket. The socket is kept open across logging calls.
    If the peer resets it, an attempt is made to reconnect on the next call.
    The pickle which is sent is that of the LogRecord's attribute dictionary
    (__dict__), so that the receiver does not need to have the logging module
    installed in order to process the logging event.

    To unpickle the record at the receiving end into a LogRecord, use the
    makeLogRecord function.
    """

    def __init__(self, host, port):
        """
        Initializes the handler with a specific host address and port.

        When the attribute *closeOnError* is set to True - if a socket error
        occurs, the socket is silently closed and then reopened on the next
        logging call.
        """
        logging.Handler.__init__(self)
        self.host = host
        self.port = port
        # A None port means 'host' is really a Unix-domain socket path.
        self.address = host if port is None else (host, port)
        self.sock = None
        self.closeOnError = False
        self.retryTime = None
        # Exponential backoff parameters for reconnection attempts.
        self.retryStart = 1.0
        self.retryMax = 30.0
        self.retryFactor = 2.0

    def makeSocket(self, timeout=1):
        """
        A factory method which allows subclasses to define the precise
        type of socket they want.
        """
        if self.port is None:
            # Unix-domain socket: connect by path.
            result = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            result.settimeout(timeout)
            try:
                result.connect(self.address)
            except OSError:
                result.close()  # Issue 19182
                raise
        else:
            result = socket.create_connection(self.address, timeout=timeout)
        return result

    def createSocket(self):
        """
        Try to create a socket, using an exponential backoff with
        a max retry time. Thanks to Robert Olson for the original patch
        (SF #815911) which has been slightly refactored.
        """
        now = time.time()
        # retryTime is None on the first attempt after a disconnect;
        # otherwise only retry once the backoff deadline has passed.
        attempt = self.retryTime is None or now >= self.retryTime
        if attempt:
            try:
                self.sock = self.makeSocket()
                self.retryTime = None  # next time, no delay before trying
            except OSError:
                # Creation failed: compute the next retry deadline.
                if self.retryTime is None:
                    self.retryPeriod = self.retryStart
                else:
                    self.retryPeriod = self.retryPeriod * self.retryFactor
                    if self.retryPeriod > self.retryMax:
                        self.retryPeriod = self.retryMax
                self.retryTime = now + self.retryPeriod

    def send(self, s):
        """
        Send a pickled string to the socket.

        This function allows for partial sends which can happen when the
        network is busy.
        """
        if self.sock is None:
            self.createSocket()
        # self.sock can still be None here: either the retry deadline has
        # not arrived yet, or reconnecting failed again.
        if self.sock:
            try:
                self.sock.sendall(s)
            except OSError:  # pragma: no cover
                self.sock.close()
                self.sock = None  # so we can call createSocket next time

    def makePickle(self, record):
        """
        Pickles the record in binary format with a length prefix, and
        returns it ready for transmission across the socket.
        """
        ei = record.exc_info
        if ei:
            # Calling format() has the side effect of caching the traceback
            # text in record.exc_text (exc_info itself is not picklable).
            dummy = self.format(record)
        # See issue #14436: If msg or args are objects, they may not be
        # available on the receiving end. So we convert the msg % args
        # to a string, save it as msg and zap the args.
        d = dict(record.__dict__)
        d['msg'] = record.getMessage()
        d['args'] = None
        d['exc_info'] = None
        # Issue #25685: delete 'message' if present: redundant with 'msg'
        d.pop('message', None)
        s = pickle.dumps(d, 1)
        return struct.pack(">L", len(s)) + s

    def handleError(self, record):
        """
        Handle an error during logging.

        An error has occurred during logging. Most likely cause -
        connection lost. Close the socket so that we can retry on the
        next event.
        """
        if self.closeOnError and self.sock:
            self.sock.close()
            self.sock = None  # try to reconnect next time
        else:
            logging.Handler.handleError(self, record)

    def emit(self, record):
        """
        Emit a record.

        Pickles the record and writes it to the socket in binary format.
        If there is an error with the socket, silently drop the packet.
        If there was a problem with the socket, re-establishes the
        socket.
        """
        try:
            self.send(self.makePickle(record))
        except Exception:
            self.handleError(record)

    def close(self):
        """
        Closes the socket.
        """
        self.acquire()
        try:
            sock, self.sock = self.sock, None
            if sock:
                sock.close()
            logging.Handler.close(self)
        finally:
            self.release()

661 

class DatagramHandler(SocketHandler):
    """
    A handler class which writes logging records, in pickle format, to
    a datagram socket.  The pickle which is sent is that of the LogRecord's
    attribute dictionary (__dict__), so that the receiver does not need to
    have the logging module installed in order to process the logging event.

    To unpickle the record at the receiving end into a LogRecord, use the
    makeLogRecord function.

    """
    def __init__(self, host, port):
        """
        Initializes the handler with a specific host address and port.
        """
        SocketHandler.__init__(self, host, port)
        self.closeOnError = False

    def makeSocket(self):
        """
        The factory method of SocketHandler is here overridden to create
        a UDP socket (SOCK_DGRAM).
        """
        # A None port means the address is a Unix-domain socket path.
        family = socket.AF_UNIX if self.port is None else socket.AF_INET
        return socket.socket(family, socket.SOCK_DGRAM)

    def send(self, s):
        """
        Send a pickled string to a socket.

        This function no longer allows for partial sends which can happen
        when the network is busy - UDP does not guarantee delivery and
        can deliver packets out of sequence.
        """
        if self.sock is None:
            self.createSocket()
        self.sock.sendto(s, self.address)

703 

class SysLogHandler(logging.Handler):
    """
    A handler class which sends formatted logging records to a syslog
    server. Based on Sam Rushing's syslog module:
    http://www.nightmare.com/squirl/python-ext/misc/syslog.py
    Contributed by Nicolas Untz (after which minor refactoring changes
    have been made).
    """

    # from <linux/sys/syslog.h>:
    # ======================================================================
    # priorities/facilities are encoded into a single 32-bit quantity, where
    # the bottom 3 bits are the priority (0-7) and the top 28 bits are the
    # facility (0-big number). Both the priorities and the facilities map
    # roughly one-to-one to strings in the syslogd(8) source code. This
    # mapping is included in this file.
    #
    # priorities (these are ordered)

    LOG_EMERG     = 0       #  system is unusable
    LOG_ALERT     = 1       #  action must be taken immediately
    LOG_CRIT      = 2       #  critical conditions
    LOG_ERR       = 3       #  error conditions
    LOG_WARNING   = 4       #  warning conditions
    LOG_NOTICE    = 5       #  normal but significant condition
    LOG_INFO      = 6       #  informational
    LOG_DEBUG     = 7       #  debug-level messages

    #  facility codes
    LOG_KERN      = 0       #  kernel messages
    LOG_USER      = 1       #  random user-level messages
    LOG_MAIL      = 2       #  mail system
    LOG_DAEMON    = 3       #  system daemons
    LOG_AUTH      = 4       #  security/authorization messages
    LOG_SYSLOG    = 5       #  messages generated internally by syslogd
    LOG_LPR       = 6       #  line printer subsystem
    LOG_NEWS      = 7       #  network news subsystem
    LOG_UUCP      = 8       #  UUCP subsystem
    LOG_CRON      = 9       #  clock daemon
    LOG_AUTHPRIV  = 10      #  security/authorization messages (private)
    LOG_FTP       = 11      #  FTP daemon
    LOG_NTP       = 12      #  NTP subsystem
    LOG_SECURITY  = 13      #  Log audit
    LOG_CONSOLE   = 14      #  Log alert
    LOG_SOLCRON   = 15      #  Scheduling daemon (Solaris)

    #  other codes through 15 reserved for system use
    LOG_LOCAL0    = 16      #  reserved for local use
    LOG_LOCAL1    = 17      #  reserved for local use
    LOG_LOCAL2    = 18      #  reserved for local use
    LOG_LOCAL3    = 19      #  reserved for local use
    LOG_LOCAL4    = 20      #  reserved for local use
    LOG_LOCAL5    = 21      #  reserved for local use
    LOG_LOCAL6    = 22      #  reserved for local use
    LOG_LOCAL7    = 23      #  reserved for local use

    # Map syslog.conf-style priority names to the numeric codes above.
    priority_names = {
        "alert":    LOG_ALERT,
        "crit":     LOG_CRIT,
        "critical": LOG_CRIT,
        "debug":    LOG_DEBUG,
        "emerg":    LOG_EMERG,
        "err":      LOG_ERR,
        "error":    LOG_ERR,        #  DEPRECATED
        "info":     LOG_INFO,
        "notice":   LOG_NOTICE,
        "panic":    LOG_EMERG,      #  DEPRECATED
        "warn":     LOG_WARNING,    #  DEPRECATED
        "warning":  LOG_WARNING,
        }

    # Map syslog.conf-style facility names to the numeric codes above.
    facility_names = {
        "auth":         LOG_AUTH,
        "authpriv":     LOG_AUTHPRIV,
        "console":      LOG_CONSOLE,
        "cron":         LOG_CRON,
        "daemon":       LOG_DAEMON,
        "ftp":          LOG_FTP,
        "kern":         LOG_KERN,
        "lpr":          LOG_LPR,
        "mail":         LOG_MAIL,
        "news":         LOG_NEWS,
        "ntp":          LOG_NTP,
        "security":     LOG_SECURITY,
        "solaris-cron": LOG_SOLCRON,
        "syslog":       LOG_SYSLOG,
        "user":         LOG_USER,
        "uucp":         LOG_UUCP,
        "local0":       LOG_LOCAL0,
        "local1":       LOG_LOCAL1,
        "local2":       LOG_LOCAL2,
        "local3":       LOG_LOCAL3,
        "local4":       LOG_LOCAL4,
        "local5":       LOG_LOCAL5,
        "local6":       LOG_LOCAL6,
        "local7":       LOG_LOCAL7,
        }

    # The map below appears to be trivially lowercasing the key. However,
    # there's more to it than meets the eye - in some locales, lowercasing
    # gives unexpected results. See SF #1524081: in the Turkish locale,
    # "INFO".lower() != "info"
    priority_map = {
        "DEBUG" : "debug",
        "INFO" : "info",
        "WARNING" : "warning",
        "ERROR" : "error",
        "CRITICAL" : "critical"
    }

813 

    def __init__(self, address=('localhost', SYSLOG_UDP_PORT),
                 facility=LOG_USER, socktype=None):
        """
        Initialize a handler.

        If address is specified as a string, a UNIX socket is used. To log to a
        local syslogd, "SysLogHandler(address="/dev/log")" can be used.
        If facility is not specified, LOG_USER is used. If socktype is
        specified as socket.SOCK_DGRAM or socket.SOCK_STREAM, that specific
        socket type will be used. For Unix sockets, you can also specify a
        socktype of None, in which case socket.SOCK_DGRAM will be used, falling
        back to socket.SOCK_STREAM.
        """
        logging.Handler.__init__(self)

        self.address = address
        self.facility = facility
        self.socktype = socktype

        if isinstance(address, str):
            self.unixsocket = True
            # Syslog server may be unavailable during handler initialisation.
            # C's openlog() function also ignores connection errors.
            # Moreover, we ignore these errors while logging, so it not worse
            # to ignore it also here.
            try:
                self._connect_unixsocket(address)
            except OSError:
                pass
        else:
            self.unixsocket = False
            if socktype is None:
                socktype = socket.SOCK_DGRAM
            host, port = address
            # Try each address returned by getaddrinfo until a socket can
            # be created (and, for stream sockets, connected).  Note that
            # the loop rebinds 'socktype' to the concrete type from each
            # addrinfo result.
            ress = socket.getaddrinfo(host, port, 0, socktype)
            if not ress:
                raise OSError("getaddrinfo returns an empty list")
            for res in ress:
                af, socktype, proto, _, sa = res
                err = sock = None
                try:
                    sock = socket.socket(af, socktype, proto)
                    if socktype == socket.SOCK_STREAM:
                        # Datagram sockets need no connect; stream ones do.
                        sock.connect(sa)
                    break
                except OSError as exc:
                    err = exc
                    if sock is not None:
                        sock.close()
            # Only re-raise if the last candidate also failed (a successful
            # attempt breaks out of the loop with err reset to None).
            if err is not None:
                raise err
            self.socket = sock
            # Record the socket type that was actually used.
            self.socktype = socktype

867 

def _connect_unixsocket(self, address):
    # Connect self.socket to the Unix-domain syslog socket at `address`.
    # When the user left socktype as None, try SOCK_DGRAM first and fall
    # back to SOCK_STREAM; an explicitly requested type is not retried.
    use_socktype = self.socktype
    if use_socktype is None:
        use_socktype = socket.SOCK_DGRAM
    self.socket = socket.socket(socket.AF_UNIX, use_socktype)
    try:
        self.socket.connect(address)
        # it worked, so set self.socktype to the used type
        self.socktype = use_socktype
    except OSError:
        self.socket.close()
        if self.socktype is not None:
            # user didn't specify falling back, so fail
            raise
        # Retry with a stream socket: some syslog daemons only listen
        # on SOCK_STREAM.
        use_socktype = socket.SOCK_STREAM
        self.socket = socket.socket(socket.AF_UNIX, use_socktype)
        try:
            self.socket.connect(address)
            # it worked, so set self.socktype to the used type
            self.socktype = use_socktype
        except OSError:
            self.socket.close()
            raise

891 

def encodePriority(self, facility, priority):
    """
    Return the syslog PRI value for *facility* and *priority*.

    Either argument may be an integer or a string; string values are
    translated through the facility_names / priority_names mapping
    dictionaries defined on the class.
    """
    fac = self.facility_names[facility] if isinstance(facility, str) else facility
    pri = self.priority_names[priority] if isinstance(priority, str) else priority
    # Per the syslog protocol, the facility occupies the high bits and
    # the severity the low three bits.
    return (fac << 3) | pri

904 

def close(self):
    """
    Closes the socket.
    """
    # Serialise with emit(), which may be mid-send on another thread.
    self.acquire()
    try:
        self.socket.close()
        logging.Handler.close(self)
    finally:
        self.release()

915 

def mapPriority(self, levelName):
    """
    Translate a logging level name into a priority_map key.

    Covers two situations: custom logging levels that have no syslog
    equivalent, and locales where lowercasing the level name does not
    produce the expected key (see SF #1524081). Unknown names map to
    "warning".
    """
    try:
        return self.priority_map[levelName]
    except KeyError:
        return "warning"

925 

# Class-level defaults; may be overridden on instances or subclasses.
ident = ''              # prepended to all messages
append_nul = True       # some old syslog daemons expect a NUL terminator

928 

def emit(self, record):
    """
    Emit a record.

    The record is formatted, and then sent to the syslog server. If
    exception information is present, it is NOT sent to the server.
    """
    try:
        msg = self.format(record)
        if self.ident:
            msg = self.ident + msg
        if self.append_nul:
            msg += '\000'

        # We need to convert record level to lowercase, maybe this will
        # change in the future.
        prio = '<%d>' % self.encodePriority(self.facility,
                                            self.mapPriority(record.levelname))
        prio = prio.encode('utf-8')
        # Message is a string. Convert to bytes as required by RFC 5424
        msg = msg.encode('utf-8')
        msg = prio + msg
        if self.unixsocket:
            try:
                self.socket.send(msg)
            except OSError:
                # The local syslog daemon may have been restarted and the
                # socket gone stale: reconnect once and retry the send.
                self.socket.close()
                self._connect_unixsocket(self.address)
                self.socket.send(msg)
        elif self.socktype == socket.SOCK_DGRAM:
            # Datagram sockets are unconnected; address every packet.
            self.socket.sendto(msg, self.address)
        else:
            # Stream socket was connected in __init__.
            self.socket.sendall(msg)
    except Exception:
        self.handleError(record)

964 

class SMTPHandler(logging.Handler):
    """
    A handler class which sends an SMTP email for each logging event.
    """
    def __init__(self, mailhost, fromaddr, toaddrs, subject,
                 credentials=None, secure=None, timeout=5.0):
        """
        Initialize the handler.

        Initialize the instance with the from and to addresses and subject
        line of the email. To specify a non-standard SMTP port, use the
        (host, port) tuple format for the mailhost argument. To specify
        authentication credentials, supply a (username, password) tuple
        for the credentials argument. To specify the use of a secure
        protocol (TLS), pass in a tuple for the secure argument. This will
        only be used when authentication credentials are supplied. The tuple
        will be either an empty tuple, or a single-value tuple with the name
        of a keyfile, or a 2-value tuple with the names of the keyfile and
        certificate file. (This tuple is passed to the `starttls` method).
        A timeout in seconds can be specified for the SMTP connection (the
        default is 5 seconds).
        """
        logging.Handler.__init__(self)
        if isinstance(mailhost, (list, tuple)):
            self.mailhost, self.mailport = mailhost
        else:
            self.mailhost, self.mailport = mailhost, None
        if isinstance(credentials, (list, tuple)):
            self.username, self.password = credentials
        else:
            # Fix: initialise password alongside username so both
            # attributes always exist; previously password was left unset
            # when no credentials were supplied.
            self.username = None
            self.password = None
        self.fromaddr = fromaddr
        if isinstance(toaddrs, str):
            # Accept a single address for convenience.
            toaddrs = [toaddrs]
        self.toaddrs = toaddrs
        self.subject = subject
        self.secure = secure
        self.timeout = timeout

    def getSubject(self, record):
        """
        Determine the subject for the email.

        If you want to specify a subject line which is record-dependent,
        override this method.
        """
        return self.subject

    def emit(self, record):
        """
        Emit a record.

        Format the record and send it to the specified addressees.
        """
        try:
            # Imported lazily so the handler costs nothing until first use.
            import smtplib
            from email.message import EmailMessage
            import email.utils

            port = self.mailport
            if not port:
                port = smtplib.SMTP_PORT
            smtp = smtplib.SMTP(self.mailhost, port, timeout=self.timeout)
            msg = EmailMessage()
            msg['From'] = self.fromaddr
            msg['To'] = ','.join(self.toaddrs)
            msg['Subject'] = self.getSubject(record)
            msg['Date'] = email.utils.localtime()
            msg.set_content(self.format(record))
            if self.username:
                if self.secure is not None:
                    # Upgrade the connection to TLS before authenticating.
                    smtp.ehlo()
                    smtp.starttls(*self.secure)
                    smtp.ehlo()
                smtp.login(self.username, self.password)
            smtp.send_message(msg)
            smtp.quit()
        except Exception:
            self.handleError(record)

1044 

class NTEventLogHandler(logging.Handler):
    """
    Sends each logging event to the NT Event Log, registering the given
    application name in the registry on construction. When no dllname is
    supplied, win32service.pyd (which ships basic message placeholders)
    is used; that keeps setup simple but makes event logs large, since
    the whole message source is stored in the log. Supply your own DLL
    with message definitions if you want slimmer logs.
    """
    def __init__(self, appname, dllname=None, logtype="Application"):
        logging.Handler.__init__(self)
        try:
            import win32evtlogutil, win32evtlog
        except ImportError:
            # pywin32 is missing: degrade to a no-op handler.
            print("The Python Win32 extensions for NT (service, event "
                  "logging) appear not to be available.")
            self._welu = None
            return
        self.appname = appname
        self._welu = win32evtlogutil
        if not dllname:
            # Default DLL: win32service.pyd, found two directories above
            # the win32evtlogutil module.
            pkg_dir = os.path.split(self._welu.__file__)[0]
            dllname = os.path.join(os.path.split(pkg_dir)[0],
                                   r'win32service.pyd')
        self.dllname = dllname
        self.logtype = logtype
        self._welu.AddSourceToRegistry(appname, dllname, logtype)
        self.deftype = win32evtlog.EVENTLOG_ERROR_TYPE
        # Standard logging levels -> NT event types.
        self.typemap = {
            logging.DEBUG : win32evtlog.EVENTLOG_INFORMATION_TYPE,
            logging.INFO : win32evtlog.EVENTLOG_INFORMATION_TYPE,
            logging.WARNING : win32evtlog.EVENTLOG_WARNING_TYPE,
            logging.ERROR : win32evtlog.EVENTLOG_ERROR_TYPE,
            logging.CRITICAL: win32evtlog.EVENTLOG_ERROR_TYPE,
        }

    def getMessageID(self, record):
        """
        Return the message ID for the event record.

        This version always returns 1, the base message ID in
        win32service.pyd. If you use your own messages, pass an ID as the
        logger's msg and look it up here.
        """
        return 1

    def getEventCategory(self, record):
        """
        Return the event category for the record.

        Always 0 here; override to supply your own categories.
        """
        return 0

    def getEventType(self, record):
        """
        Return the event type for the record.

        Looks the record's level number up in the handler's typemap
        attribute (initialised in __init__ with entries for DEBUG, INFO,
        WARNING, ERROR and CRITICAL), falling back to the default type.
        Override this method, or install a richer typemap, when using
        custom levels.
        """
        return self.typemap.get(record.levelno, self.deftype)

    def emit(self, record):
        """
        Emit a record.

        Compute the message ID, category and event type, then write the
        formatted message to the NT event log. Does nothing when the
        Win32 extensions were unavailable at construction time.
        """
        if not self._welu:
            return
        try:
            msg_id = self.getMessageID(record)
            category = self.getEventCategory(record)
            event_type = self.getEventType(record)
            message = self.format(record)
            self._welu.ReportEvent(self.appname, msg_id, category,
                                   event_type, [message])
        except Exception:
            self.handleError(record)

    def close(self):
        """
        Clean up this handler.

        You can remove the application name from the registry as a
        source of event log entries. However, if you do this, you will
        not be able to see the events as you intended in the Event Log
        Viewer - it needs to be able to access the registry to get the
        DLL name.
        """
        #self._welu.RemoveSourceFromRegistry(self.appname, self.logtype)
        logging.Handler.close(self)

1142 

class HTTPHandler(logging.Handler):
    """
    Ships each record to a web server, using either GET or POST
    semantics.
    """
    def __init__(self, host, url, method="GET", secure=False, credentials=None,
                 context=None):
        """
        Record the target host, request URL and HTTP method ("GET" or
        "POST"); optionally enable HTTPS (secure=True, with an optional
        ssl context) and Basic-auth credentials as a (user, pw) tuple.
        """
        logging.Handler.__init__(self)
        method = method.upper()
        if method != "GET" and method != "POST":
            raise ValueError("method must be GET or POST")
        if context is not None and not secure:
            raise ValueError("context parameter only makes sense "
                             "with secure=True")
        self.host = host
        self.url = url
        self.method = method
        self.secure = secure
        self.credentials = credentials
        self.context = context

    def mapLogRecord(self, record):
        """
        Map the log record to the dict sent as CGI data; the default is
        simply the record's __dict__. Override in a subclass to customise.
        (Originally contributed by Franz Glasner.)
        """
        return record.__dict__

    def getConnection(self, host, secure):
        """
        Return an HTTPConnection or HTTPSConnection for *host*.

        Override when a custom connection is needed, e.g. via a proxy.
        """
        import http.client
        if secure:
            return http.client.HTTPSConnection(host, context=self.context)
        return http.client.HTTPConnection(host)

    def emit(self, record):
        """
        Emit a record.

        Percent-encode the mapped record and send it to the web server.
        """
        try:
            import urllib.parse
            host = self.host
            conn = self.getConnection(host, self.secure)
            url = self.url
            data = urllib.parse.urlencode(self.mapLogRecord(record))
            if self.method == "GET":
                # Append the query string, respecting any existing one.
                sep = '&' if url.find('?') >= 0 else '?'
                url = url + "%c%s" % (sep, data)
            conn.putrequest(self.method, url)
            # support multiple hosts on one IP address...
            # need to strip optional :port from host, if present
            colon = host.find(":")
            if colon >= 0:
                host = host[:colon]
            # See issue #30904: putrequest call above already adds this header
            # on Python 3.x.
            # conn.putheader("Host", host)
            if self.method == "POST":
                conn.putheader("Content-type",
                               "application/x-www-form-urlencoded")
                conn.putheader("Content-length", str(len(data)))
            if self.credentials:
                import base64
                raw = ('%s:%s' % self.credentials).encode('utf-8')
                auth = 'Basic ' + base64.b64encode(raw).strip().decode('ascii')
                conn.putheader('Authorization', auth)
            conn.endheaders()
            if self.method == "POST":
                conn.send(data.encode('utf-8'))
            conn.getresponse()    #can't do anything with the result
        except Exception:
            self.handleError(record)

1232 

class BufferingHandler(logging.Handler):
    """
    Collects logging records in an in-memory list. After every record is
    appended, shouldFlush() is consulted; when it answers true, flush()
    is called to do whatever is needed with the buffered records.
    """
    def __init__(self, capacity):
        """
        Set up an empty buffer that triggers a flush at *capacity* records.
        """
        super().__init__()
        self.capacity = capacity
        self.buffer = []

    def shouldFlush(self, record):
        """
        Decide whether the buffer must be flushed.

        True once the buffer has reached capacity; override to implement
        a custom flushing strategy.
        """
        return len(self.buffer) >= self.capacity

    def emit(self, record):
        """
        Emit a record.

        Buffer it, then flush if shouldFlush() asks for it.
        """
        self.buffer.append(record)
        if self.shouldFlush(record):
            self.flush()

    def flush(self):
        """
        Override to implement custom flushing behaviour.

        The base version simply empties the buffer.
        """
        self.acquire()
        try:
            del self.buffer[:]
        finally:
            self.release()

    def close(self):
        """
        Close the handler: flush, then chain to the parent close().
        """
        try:
            self.flush()
        finally:
            super().close()

1289 

class MemoryHandler(BufferingHandler):
    """
    Buffers logging records in memory and periodically forwards the whole
    buffer to a target handler — whenever the buffer fills up, or when a
    record of sufficient severity arrives.
    """
    def __init__(self, capacity, flushLevel=logging.ERROR, target=None,
                 flushOnClose=True):
        """
        Set up the buffer size, the severity at which flushing is forced,
        and an optional target handler.

        A MemoryHandler with no target (here or via setTarget()) forwards
        nothing, so be sure to set one.

        ``flushOnClose`` defaults to ``True`` for backward compatibility:
        closing the handler flushes the buffer even when neither the flush
        level nor the capacity was reached. Pass ``False`` to suppress that.
        """
        BufferingHandler.__init__(self, capacity)
        self.flushLevel = flushLevel
        self.target = target
        # See Issue #26559 for why this has been added
        self.flushOnClose = flushOnClose

    def shouldFlush(self, record):
        """
        Flush when the buffer is full, or on a record at flushLevel or above.
        """
        if len(self.buffer) >= self.capacity:
            return True
        return record.levelno >= self.flushLevel

    def setTarget(self, target):
        """
        Set the target handler, serialised against concurrent flushes.
        """
        self.acquire()
        try:
            self.target = target
        finally:
            self.release()

    def flush(self):
        """
        Send every buffered record to the target, if one is set, then
        clear the buffer. With no target the buffer is left untouched.
        Override for different behaviour.
        """
        self.acquire()
        try:
            target = self.target
            if target:
                for buffered in self.buffer:
                    target.handle(buffered)
                self.buffer.clear()
        finally:
            self.release()

    def close(self):
        """
        Flush (when so configured), drop the target and close the buffer.
        """
        try:
            if self.flushOnClose:
                self.flush()
        finally:
            self.acquire()
            try:
                self.target = None
                BufferingHandler.close(self)
            finally:
                self.release()

1365 

1366 

class QueueHandler(logging.Handler):
    """
    Routes each logging event onto a queue rather than emitting it
    directly. The usual use is with a multiprocessing Queue, so that one
    process does all the file logging for a multi-process application and
    file write contention is avoided.

    New in Python 3.2, though the class is self-contained enough to be
    copied into code targeting older versions.
    """

    def __init__(self, queue):
        """
        Remember the queue onto which records will be placed.
        """
        logging.Handler.__init__(self)
        self.queue = queue

    def enqueue(self, record):
        """
        Place a prepared record on the queue.

        Uses put_nowait; override for blocking behaviour, timeouts, or
        custom queue implementations.
        """
        self.queue.put_nowait(record)

    def prepare(self, record):
        """
        Produce the object that will actually be enqueued for *record*.

        The base implementation returns a copy of the record whose message
        and arguments have been merged into plain text, and whose
        unpickleable fields have been cleared.

        Override to enqueue a dict or JSON string instead, or to apply
        other transformations.
        """
        # Formatting must happen first: it merges msg + args into text and
        # fills in exc_text from any exception data, leaving us with plain
        # strings to carry instead of potentially unpicklable objects. The
        # exc_info/exc_text fields are then zapped as they are no longer
        # needed and typically won't pickle.
        rendered = self.format(record)
        # bpo-35726: mutate a copy so other handlers in the chain still see
        # the original record unchanged.
        clone = copy.copy(record)
        clone.message = rendered
        clone.msg = rendered
        clone.args = None
        clone.exc_info = None
        clone.exc_text = None
        return clone

    def emit(self, record):
        """
        Emit a record: prepare it for pickling, then enqueue it.
        """
        try:
            self.enqueue(self.prepare(record))
        except Exception:
            self.handleError(record)

1434 

1435 

class QueueListener(object):
    """
    Runs an internal background thread that watches a queue for incoming
    LogRecords, removes them, and offers each one to a list of handlers.
    """
    # Placed on the queue to tell the worker thread to exit.
    _sentinel = None

    def __init__(self, queue, *handlers, respect_handler_level=False):
        """
        Remember the queue and the handlers that will process its records.
        """
        self.queue = queue
        self.handlers = handlers
        self._thread = None
        self.respect_handler_level = respect_handler_level

    def dequeue(self, block):
        """
        Remove and return one record from the queue, blocking if *block*
        is true. Uses get(); override for timeouts or custom queue types.
        """
        return self.queue.get(block)

    def start(self):
        """
        Start the listener: spin up the daemon thread that monitors the
        queue for LogRecords.
        """
        worker = threading.Thread(target=self._monitor)
        worker.daemon = True
        self._thread = worker
        worker.start()

    def prepare(self, record):
        """
        Hook for transforming a record before it is handled; the default
        returns it unchanged. Override for custom marshalling.
        """
        return record

    def handle(self, record):
        """
        Offer the (prepared) record to every handler, honouring each
        handler's level when respect_handler_level is set.
        """
        record = self.prepare(record)
        for handler in self.handlers:
            if not self.respect_handler_level or record.levelno >= handler.level:
                handler.handle(record)

    def _monitor(self):
        """
        Worker loop, run on the internal thread: drain the queue, passing
        each record to handle(), and stop on the sentinel.
        """
        q = self.queue
        # task_done() is optional (e.g. SimpleQueue lacks it).
        tracks_tasks = hasattr(q, 'task_done')
        while True:
            try:
                record = self.dequeue(True)
                if record is self._sentinel:
                    if tracks_tasks:
                        q.task_done()
                    break
                self.handle(record)
                if tracks_tasks:
                    q.task_done()
            except queue.Empty:
                break

    def enqueue_sentinel(self):
        """
        Put the shutdown sentinel on the queue (non-blocking). Override
        together with dequeue() for custom queue implementations.
        """
        self.queue.put_nowait(self._sentinel)

    def stop(self):
        """
        Stop the listener: ask the worker thread to finish and join it.

        Records still on the queue after this call are not processed.
        """
        self.enqueue_sentinel()
        self._thread.join()
        self._thread = None