1# This file is dual licensed under the terms of the Apache License, Version
2# 2.0, and the MIT License. See the LICENSE file in the root of this
3# repository for complete details.
4
5"""
6Processors and helpers specific to the :mod:`logging` module from the `Python
7standard library <https://docs.python.org/>`_.
8
9See also :doc:`structlog's standard library support <standard-library>`.
10"""
11
12from __future__ import annotations
13
14import asyncio
15import contextvars
16import functools
17import logging
18import sys
19import warnings
20
21from functools import partial
22from typing import Any, Callable, Collection, Dict, Iterable, Sequence, cast
23
24
25if sys.version_info >= (3, 11):
26 from typing import Self
27else:
28 from typing_extensions import Self
29
30
31from . import _config
32from ._base import BoundLoggerBase
33from ._frames import _find_first_app_frame_and_name, _format_stack
34from ._log_levels import LEVEL_TO_NAME, NAME_TO_LEVEL, add_log_level
35from .contextvars import _ASYNC_CALLING_STACK, merge_contextvars
36from .exceptions import DropEvent
37from .processors import StackInfoRenderer
38from .typing import (
39 Context,
40 EventDict,
41 ExcInfo,
42 Processor,
43 ProcessorReturnValue,
44 WrappedLogger,
45)
46
47
48__all__ = [
49 "BoundLogger",
50 "ExtraAdder",
51 "LoggerFactory",
52 "PositionalArgumentsFormatter",
53 "ProcessorFormatter",
54 "add_log_level",
55 "add_log_level_number",
56 "add_logger_name",
57 "filter_by_level",
58 "get_logger",
59 "recreate_defaults",
60 "render_to_log_args_and_kwargs",
61 "render_to_log_kwargs",
62]
63
64
def recreate_defaults(*, log_level: int | None = logging.NOTSET) -> None:
    """
    Recreate defaults on top of standard library's logging.

    The output looks the same, but goes through `logging`.

    As with vanilla defaults, the backwards-compatibility guarantees don't
    apply to the settings applied here.

    Args:
        log_level:
            If `None`, don't configure standard library logging **at all**.

            Otherwise configure it to log to `sys.stdout` at *log_level*
            (``logging.NOTSET`` being the default).

            If you need more control over `logging`, pass `None` here and
            configure it yourself.

    .. versionadded:: 22.1.0
    .. versionchanged:: 23.3.0 Added `add_logger_name`.
    .. versionchanged:: 25.1.0 Added `PositionalArgumentsFormatter`.
    """
    if log_level is not None:
        logging.basicConfig(
            format="%(message)s",
            stream=sys.stdout,
            level=log_level,
            # Replace any handlers that are already installed so repeated
            # calls don't stack them (keyword available since Python 3.8).
            force=True,
        )

    _config.reset_defaults()
    _config.configure(
        processors=[
            PositionalArgumentsFormatter(),  # handled by native loggers
            merge_contextvars,
            add_log_level,
            add_logger_name,
            StackInfoRenderer(),
            _config._BUILTIN_DEFAULT_PROCESSORS[-2],  # TimeStamper
            _config._BUILTIN_DEFAULT_PROCESSORS[-1],  # ConsoleRenderer
        ],
        wrapper_class=BoundLogger,
        logger_factory=LoggerFactory(),
    )
112
113
# Module-private sentinel for distinguishing "argument not passed" from an
# explicit `None`.  NOTE(review): not referenced in this part of the file --
# presumably used further down or kept for API stability; confirm before
# removing.
_SENTINEL = object()
115
116
class _FixedFindCallerLogger(logging.Logger):
    """
    A `logging.Logger` subclass whose `findCaller` copes with *structlog*'s
    extra wrapper frames.
    """

    def findCaller(
        self, stack_info: bool = False, stacklevel: int = 1
    ) -> tuple[str, int, str, str | None]:
        """
        Return caller info from the first frame outside of *structlog* (and
        the stdlib ``logging`` package) so that ``%(filename)s`` & friends
        point at application code.

        This logger gets set as the default one when using LoggerFactory.
        """
        frame, _ = _find_first_app_frame_and_name(["logging"])
        stack: str | None = _format_stack(frame) if stack_info else None
        code = frame.f_code

        return code.co_filename, frame.f_lineno, code.co_name, stack
137
138
class BoundLogger(BoundLoggerBase):
    """
    Python Standard Library version of `structlog.BoundLogger`.

    Works exactly like the generic one except that it takes advantage of
    knowing the logging methods in advance.

    Use it like::

        structlog.configure(
            wrapper_class=structlog.stdlib.BoundLogger,
        )

    It also contains a bunch of properties that pass-through to the wrapped
    `logging.Logger` which should make it work as a drop-in replacement.

    .. versionadded:: 23.1.0
       Async variants `alog()`, `adebug()`, `ainfo()`, and so forth.

    .. versionchanged:: 24.2.0
       Callsite parameters are now also collected by
       `structlog.processors.CallsiteParameterAdder` for async log methods.
    """

    # Narrow the wrapped-logger type for the pass-through members below.
    _logger: logging.Logger

    def bind(self, **new_values: Any) -> Self:
        """
        Return a new logger with *new_values* added to the existing ones.
        """
        return super().bind(**new_values)

    def unbind(self, *keys: str) -> Self:
        """
        Return a new logger with *keys* removed from the context.

        Raises:
            KeyError: If the key is not part of the context.
        """
        return super().unbind(*keys)

    def try_unbind(self, *keys: str) -> Self:
        """
        Like :meth:`unbind`, but best effort: missing keys are ignored.

        .. versionadded:: 18.2.0
        """
        return super().try_unbind(*keys)

    def new(self, **new_values: Any) -> Self:
        """
        Clear context and binds *initial_values* using `bind`.

        Only necessary with dict implementations that keep global state like
        those wrapped by `structlog.threadlocal.wrap_dict` when threads
        are reused.
        """
        return super().new(**new_values)

    def debug(self, event: str | None = None, *args: Any, **kw: Any) -> Any:
        """
        Process event and call `logging.Logger.debug` with the result.
        """
        return self._proxy_to_logger("debug", event, *args, **kw)

    def info(self, event: str | None = None, *args: Any, **kw: Any) -> Any:
        """
        Process event and call `logging.Logger.info` with the result.
        """
        return self._proxy_to_logger("info", event, *args, **kw)

    def warning(self, event: str | None = None, *args: Any, **kw: Any) -> Any:
        """
        Process event and call `logging.Logger.warning` with the result.
        """
        return self._proxy_to_logger("warning", event, *args, **kw)

    # Alias kept for parity with the stdlib logger interface.
    warn = warning

    def error(self, event: str | None = None, *args: Any, **kw: Any) -> Any:
        """
        Process event and call `logging.Logger.error` with the result.
        """
        return self._proxy_to_logger("error", event, *args, **kw)

    def critical(self, event: str | None = None, *args: Any, **kw: Any) -> Any:
        """
        Process event and call `logging.Logger.critical` with the result.
        """
        return self._proxy_to_logger("critical", event, *args, **kw)

    def fatal(self, event: str | None = None, *args: Any, **kw: Any) -> Any:
        """
        Process event and call `logging.Logger.critical` with the result.
        """
        return self._proxy_to_logger("critical", event, *args, **kw)

    def exception(
        self, event: str | None = None, *args: Any, **kw: Any
    ) -> Any:
        """
        Process event and call `logging.Logger.exception` with the result,
        after setting ``exc_info`` to `True` if it's not already set.
        """
        kw.setdefault("exc_info", True)
        return self._proxy_to_logger("exception", event, *args, **kw)

    def log(
        self, level: int, event: str | None = None, *args: Any, **kw: Any
    ) -> Any:
        """
        Process *event* and call the appropriate logging method depending on
        *level*.
        """
        return self._proxy_to_logger(LEVEL_TO_NAME[level], event, *args, **kw)

    def _proxy_to_logger(
        self,
        method_name: str,
        event: str | None = None,
        *event_args: str,
        **event_kw: Any,
    ) -> Any:
        """
        Propagate a method call to the wrapped logger.

        This is the same as the superclass implementation, except that
        it also preserves positional arguments in the ``event_dict`` so
        that the stdlib's support for format strings can be used.
        """
        if event_args:
            event_kw["positional_args"] = event_args

        return super()._proxy_to_logger(method_name, event=event, **event_kw)

    # Pass-through attributes and methods to mimic the stdlib's logger
    # interface.

    @property
    def name(self) -> str:
        """
        Returns :attr:`logging.Logger.name`
        """
        return self._logger.name

    @property
    def level(self) -> int:
        """
        Returns :attr:`logging.Logger.level`
        """
        return self._logger.level

    @property
    def parent(self) -> Any:
        """
        Returns :attr:`logging.Logger.parent`
        """
        return self._logger.parent

    @property
    def propagate(self) -> bool:
        """
        Returns :attr:`logging.Logger.propagate`
        """
        return self._logger.propagate

    @property
    def handlers(self) -> Any:
        """
        Returns :attr:`logging.Logger.handlers`
        """
        return self._logger.handlers

    @property
    def disabled(self) -> int:
        """
        Returns :attr:`logging.Logger.disabled`
        """
        return self._logger.disabled

    def setLevel(self, level: int) -> None:
        """
        Calls :meth:`logging.Logger.setLevel` with unmodified arguments.
        """
        self._logger.setLevel(level)

    def findCaller(
        self, stack_info: bool = False
    ) -> tuple[str, int, str, str | None]:
        """
        Calls :meth:`logging.Logger.findCaller` with unmodified arguments.
        """
        return self._logger.findCaller(stack_info=stack_info)

    def makeRecord(
        self,
        name: str,
        level: int,
        fn: str,
        lno: int,
        msg: str,
        args: tuple[Any, ...],
        exc_info: ExcInfo,
        func: str | None = None,
        extra: Any = None,
    ) -> logging.LogRecord:
        """
        Calls :meth:`logging.Logger.makeRecord` with unmodified arguments.
        """
        return self._logger.makeRecord(
            name, level, fn, lno, msg, args, exc_info, func=func, extra=extra
        )

    def handle(self, record: logging.LogRecord) -> None:
        """
        Calls :meth:`logging.Logger.handle` with unmodified arguments.
        """
        self._logger.handle(record)

    def addHandler(self, hdlr: logging.Handler) -> None:
        """
        Calls :meth:`logging.Logger.addHandler` with unmodified arguments.
        """
        self._logger.addHandler(hdlr)

    def removeHandler(self, hdlr: logging.Handler) -> None:
        """
        Calls :meth:`logging.Logger.removeHandler` with unmodified arguments.
        """
        self._logger.removeHandler(hdlr)

    def hasHandlers(self) -> bool:
        """
        Calls :meth:`logging.Logger.hasHandlers` with unmodified arguments.

        Exists only in Python 3.
        """
        return self._logger.hasHandlers()

    def callHandlers(self, record: logging.LogRecord) -> None:
        """
        Calls :meth:`logging.Logger.callHandlers` with unmodified arguments.
        """
        self._logger.callHandlers(record)

    def getEffectiveLevel(self) -> int:
        """
        Calls :meth:`logging.Logger.getEffectiveLevel` with unmodified
        arguments.
        """
        return self._logger.getEffectiveLevel()

    def isEnabledFor(self, level: int) -> bool:
        """
        Calls :meth:`logging.Logger.isEnabledFor` with unmodified arguments.
        """
        return self._logger.isEnabledFor(level)

    def getChild(self, suffix: str) -> logging.Logger:
        """
        Calls :meth:`logging.Logger.getChild` with unmodified arguments.
        """
        return self._logger.getChild(suffix)

    # Non-Standard Async
    async def _dispatch_to_sync(
        self,
        meth: Callable[..., Any],
        event: str,
        args: tuple[Any, ...],
        kw: dict[str, Any],
    ) -> None:
        """
        Merge contextvars and log using the sync logger in a thread pool.
        """
        # Remember the caller's frame (two frames up: past this method and
        # the a-prefixed log method) so callsite-based processors can report
        # the real call site despite the thread-pool hop.
        scs_token = _ASYNC_CALLING_STACK.set(sys._getframe().f_back.f_back)  # type: ignore[union-attr, arg-type, unused-ignore]
        ctx = contextvars.copy_context()

        try:
            await asyncio.get_running_loop().run_in_executor(
                None,
                lambda: ctx.run(lambda: meth(event, *args, **kw)),
            )
        finally:
            _ASYNC_CALLING_STACK.reset(scs_token)

    async def adebug(self, event: str, *args: Any, **kw: Any) -> None:
        """
        Log using `debug()`, but asynchronously in a separate thread.

        .. versionadded:: 23.1.0
        """
        await self._dispatch_to_sync(self.debug, event, args, kw)

    async def ainfo(self, event: str, *args: Any, **kw: Any) -> None:
        """
        Log using `info()`, but asynchronously in a separate thread.

        .. versionadded:: 23.1.0
        """
        await self._dispatch_to_sync(self.info, event, args, kw)

    async def awarning(self, event: str, *args: Any, **kw: Any) -> None:
        """
        Log using `warning()`, but asynchronously in a separate thread.

        .. versionadded:: 23.1.0
        """
        await self._dispatch_to_sync(self.warning, event, args, kw)

    async def aerror(self, event: str, *args: Any, **kw: Any) -> None:
        """
        Log using `error()`, but asynchronously in a separate thread.

        .. versionadded:: 23.1.0
        """
        await self._dispatch_to_sync(self.error, event, args, kw)

    async def acritical(self, event: str, *args: Any, **kw: Any) -> None:
        """
        Log using `critical()`, but asynchronously in a separate thread.

        .. versionadded:: 23.1.0
        """
        await self._dispatch_to_sync(self.critical, event, args, kw)

    async def afatal(self, event: str, *args: Any, **kw: Any) -> None:
        """
        Log using `critical()`, but asynchronously in a separate thread.

        .. versionadded:: 23.1.0
        """
        await self._dispatch_to_sync(self.critical, event, args, kw)

    async def aexception(self, event: str, *args: Any, **kw: Any) -> None:
        """
        Log using `exception()`, but asynchronously in a separate thread.

        .. versionadded:: 23.1.0
        """
        # To make `log.exception("foo")` work, we have to check if the user
        # passed an explicit exc_info and if not, supply our own.  This must
        # happen here, before handing off to the worker thread, where the
        # active exception would no longer be visible.
        if kw.get("exc_info", True) is True and kw.get("exception") is None:
            kw["exc_info"] = sys.exc_info()

        await self._dispatch_to_sync(self.exception, event, args, kw)

    async def alog(
        self, level: Any, event: str, *args: Any, **kw: Any
    ) -> None:
        """
        Log using `log()`, but asynchronously in a separate thread.

        .. versionadded:: 23.1.0
        """
        await self._dispatch_to_sync(partial(self.log, level), event, args, kw)
495
496
def get_logger(*args: Any, **initial_values: Any) -> BoundLogger:
    """
    Only calls `structlog.get_logger`, but has the correct type hints.

    .. warning::

       Does **not** check whether -- or ensure that -- you've configured
       *structlog* for standard library :mod:`logging`!

       See :doc:`standard-library` for details.

    .. versionadded:: 20.2.0
    """
    # Pure delegation; the narrowed return annotation is the whole point.
    return _config.get_logger(*args, **initial_values)
511
512
class AsyncBoundLogger:
    """
    Wraps a `BoundLogger` & exposes its logging methods as ``async`` versions.

    Instead of blocking the program, they are run asynchronously in a thread
    pool executor.

    This means more computational overhead per log call. But it also means that
    the processor chain (e.g. JSON serialization) and I/O won't block your
    whole application.

    Only available for Python 3.7 and later.

    .. versionadded:: 20.2.0
    .. versionchanged:: 20.2.0 fix _dispatch_to_sync contextvars usage
    .. deprecated:: 23.1.0
       Use the regular `BoundLogger` with its a-prefixed methods instead.
    .. versionchanged:: 23.3.0
       Callsite parameters are now also collected for async log methods.
    """

    __slots__ = ("_loop", "sync_bl")

    #: The wrapped synchronous logger. It is useful to be able to log
    #: synchronously occasionally.
    sync_bl: BoundLogger

    # Blatant lie, we use a property for _context. Need this for Protocol
    # though.
    _context: Context

    # None means run_in_executor() falls back to the loop's default executor.
    _executor = None
    _bound_logger_factory = BoundLogger

    def __init__(
        self,
        logger: logging.Logger,
        processors: Iterable[Processor],
        context: Context,
        *,
        # Only as an optimization for binding!
        _sync_bl: Any = None,  # *vroom vroom* over purity.
        _loop: Any = None,
    ):
        if _sync_bl:
            # Fast path used by bind()/new()/unbind()/try_unbind(): reuse the
            # already-built sync logger instead of constructing a new one.
            self.sync_bl = _sync_bl
            self._loop = _loop

            return

        self.sync_bl = self._bound_logger_factory(
            logger=logger, processors=processors, context=context
        )
        self._loop = asyncio.get_running_loop()

    # We have to ignore the type because we've already declared it to ensure
    # we're a BindableLogger.
    # Instances would've been correctly recognized as such, however the class
    # not and we need the class in `structlog.configure()`.
    @property  # type: ignore[no-redef]
    def _context(self) -> Context:
        return self.sync_bl._context

    def bind(self, **new_values: Any) -> AsyncBoundLogger:
        return AsyncBoundLogger(
            # logger, processors and context are within sync_bl. These
            # arguments are ignored if _sync_bl is passed. *vroom vroom* over
            # purity.
            logger=None,  # type: ignore[arg-type]
            processors=(),
            context={},
            _sync_bl=self.sync_bl.bind(**new_values),
            _loop=self._loop,
        )

    def new(self, **new_values: Any) -> AsyncBoundLogger:
        return AsyncBoundLogger(
            # c.f. comment in bind
            logger=None,  # type: ignore[arg-type]
            processors=(),
            context={},
            _sync_bl=self.sync_bl.new(**new_values),
            _loop=self._loop,
        )

    def unbind(self, *keys: str) -> AsyncBoundLogger:
        return AsyncBoundLogger(
            # c.f. comment in bind
            logger=None,  # type: ignore[arg-type]
            processors=(),
            context={},
            _sync_bl=self.sync_bl.unbind(*keys),
            _loop=self._loop,
        )

    def try_unbind(self, *keys: str) -> AsyncBoundLogger:
        return AsyncBoundLogger(
            # c.f. comment in bind
            logger=None,  # type: ignore[arg-type]
            processors=(),
            context={},
            _sync_bl=self.sync_bl.try_unbind(*keys),
            _loop=self._loop,
        )

    async def _dispatch_to_sync(
        self,
        meth: Callable[..., Any],
        event: str,
        args: tuple[Any, ...],
        kw: dict[str, Any],
    ) -> None:
        """
        Merge contextvars and log using the sync logger in a thread pool.
        """
        # Remember the caller's frame (two frames up: past this method and
        # the async log method) for callsite-based processors.
        scs_token = _ASYNC_CALLING_STACK.set(sys._getframe().f_back.f_back)  # type: ignore[union-attr, arg-type, unused-ignore]
        ctx = contextvars.copy_context()

        try:
            await asyncio.get_running_loop().run_in_executor(
                self._executor,
                lambda: ctx.run(lambda: meth(event, *args, **kw)),
            )
        finally:
            _ASYNC_CALLING_STACK.reset(scs_token)

    async def debug(self, event: str, *args: Any, **kw: Any) -> None:
        await self._dispatch_to_sync(self.sync_bl.debug, event, args, kw)

    async def info(self, event: str, *args: Any, **kw: Any) -> None:
        await self._dispatch_to_sync(self.sync_bl.info, event, args, kw)

    async def warning(self, event: str, *args: Any, **kw: Any) -> None:
        await self._dispatch_to_sync(self.sync_bl.warning, event, args, kw)

    async def warn(self, event: str, *args: Any, **kw: Any) -> None:
        await self._dispatch_to_sync(self.sync_bl.warning, event, args, kw)

    async def error(self, event: str, *args: Any, **kw: Any) -> None:
        await self._dispatch_to_sync(self.sync_bl.error, event, args, kw)

    async def critical(self, event: str, *args: Any, **kw: Any) -> None:
        await self._dispatch_to_sync(self.sync_bl.critical, event, args, kw)

    async def fatal(self, event: str, *args: Any, **kw: Any) -> None:
        await self._dispatch_to_sync(self.sync_bl.critical, event, args, kw)

    async def exception(self, event: str, *args: Any, **kw: Any) -> None:
        # To make `log.exception("foo")` work, we have to check if the user
        # passed an explicit exc_info and if not, supply our own.  Captured
        # here, before the thread-pool hop, where the active exception would
        # no longer be visible.
        ei = kw.pop("exc_info", None)
        if ei is None and kw.get("exception") is None:
            ei = sys.exc_info()

        kw["exc_info"] = ei

        await self._dispatch_to_sync(self.sync_bl.exception, event, args, kw)

    async def log(self, level: Any, event: str, *args: Any, **kw: Any) -> None:
        await self._dispatch_to_sync(
            partial(self.sync_bl.log, level), event, args, kw
        )
675
676
class LoggerFactory:
    """
    Build a standard library logger when an *instance* is called.

    Sets a custom logger using :func:`logging.setLoggerClass` so variables in
    log format are expanded properly.

    >>> from structlog import configure
    >>> from structlog.stdlib import LoggerFactory
    >>> configure(logger_factory=LoggerFactory())

    Args:
        ignore_frame_names:
            When guessing the name of a logger, skip frames whose names *start*
            with one of these. For example, in pyramid applications you'll
            want to set it to ``["venusian", "pyramid.config"]``. This argument
            is called *additional_ignores* in other APIs throughout
            *structlog*.
    """

    def __init__(self, ignore_frame_names: list[str] | None = None):
        self._ignore = ignore_frame_names
        # Install our caller-finding logger class so that %-style format
        # fields resolve to application frames instead of structlog's.
        logging.setLoggerClass(_FixedFindCallerLogger)

    def __call__(self, *args: Any) -> logging.Logger:
        """
        Deduce the caller's module name and create a stdlib logger.

        If an optional argument is passed, it will be used as the logger name
        instead of guesswork. This optional argument would be passed from the
        :func:`structlog.get_logger` call. For example
        ``structlog.get_logger("foo")`` would cause this method to be called
        with ``"foo"`` as its first positional argument.

        .. versionchanged:: 0.4.0
           Added support for optional positional arguments. Using the first
           one for naming the constructed logger.
        """
        # An explicitly passed name wins over any frame inspection.
        if args:
            return logging.getLogger(args[0])

        # Otherwise skip all frames that originate from within structlog or
        # one of the configured ignore prefixes and use that frame's module.
        _, guessed_name = _find_first_app_frame_and_name(self._ignore)

        return logging.getLogger(guessed_name)
723
724
class PositionalArgumentsFormatter:
    """
    Apply stdlib-like string formatting to the ``event`` key.

    If the ``positional_args`` key in the event dict is set, it must
    contain a tuple that is used for formatting (using the ``%s`` string
    formatting operator) of the value from the ``event`` key. This works
    in the same way as the stdlib handles arguments to the various log
    methods: if the tuple contains only a single `dict` argument it is
    used for keyword placeholders in the ``event`` string, otherwise it
    will be used for positional placeholders.

    ``positional_args`` is populated by `structlog.stdlib.BoundLogger` or
    can be set manually.

    The *remove_positional_args* flag can be set to `False` to keep the
    ``positional_args`` key in the event dict; by default it will be
    removed from the event dict after formatting a message.
    """

    def __init__(self, remove_positional_args: bool = True) -> None:
        self.remove_positional_args = remove_positional_args

    def __call__(
        self, _: WrappedLogger, __: str, event_dict: EventDict
    ) -> EventDict:
        positional = event_dict.get("positional_args")

        if positional:
            # Mirror LogRecord.__init__(): a lone, non-empty dict argument is
            # used for %(key)s-style keyword substitution, anything else for
            # positional placeholders.
            if (
                len(positional) == 1
                and isinstance(positional[0], dict)
                and positional[0]
            ):
                positional = positional[0]

            event_dict["event"] = event_dict["event"] % positional

        if self.remove_positional_args and positional is not None:
            event_dict.pop("positional_args")

        return event_dict
767
768
def filter_by_level(
    logger: logging.Logger, method_name: str, event_dict: EventDict
) -> EventDict:
    """
    Check whether logging is configured to accept messages from this log level.

    Should be the first processor if stdlib's filtering by level is used so
    possibly expensive processors like exception formatters are avoided in the
    first place.

    >>> import logging
    >>> from structlog.stdlib import filter_by_level
    >>> logging.basicConfig(level=logging.WARN)
    >>> logger = logging.getLogger()
    >>> filter_by_level(logger, 'warn', {})
    {}
    >>> filter_by_level(logger, 'debug', {})
    Traceback (most recent call last):
    ...
    DropEvent
    """
    # logger.isEnabledFor() can't be used here: on Python 3.14 and later it
    # always reports disabled while a log entry is in flight, so the level
    # comparison is done by hand.
    if logger.disabled:
        raise DropEvent

    if NAME_TO_LEVEL[method_name] < logger.getEffectiveLevel():
        raise DropEvent

    return event_dict
799
800
def add_log_level_number(
    logger: logging.Logger, method_name: str, event_dict: EventDict
) -> EventDict:
    """
    Add the log level number to the event dict.

    Log level numbers map to the log level names. The Python stdlib uses them
    for filtering logic. This adds the same numbers so users can leverage
    similar filtering. Compare::

       level in ("warning", "error", "critical")
       level_number >= 30

    The mapping of names to numbers is in
    ``structlog.stdlib._log_levels._NAME_TO_LEVEL``.

    .. versionadded:: 18.2.0
    """
    # The method name ("debug", "info", ...) is the key into the shared
    # name-to-number table.
    level_number = NAME_TO_LEVEL[method_name]
    event_dict["level_number"] = level_number

    return event_dict
822
823
def add_logger_name(
    logger: logging.Logger, method_name: str, event_dict: EventDict
) -> EventDict:
    """
    Add the logger name to the event dict.
    """
    # Entries that came through ProcessorFormatter carry the original
    # LogRecord under "_record"; prefer its name when present.
    record = event_dict.get("_record")
    event_dict["logger"] = logger.name if record is None else record.name

    return event_dict
836
837
# Attribute names present on a plain LogRecord.  Anything else found on a
# record's __dict__ must have been injected via ``extra=`` (used by
# ExtraAdder below to tell them apart).
_LOG_RECORD_KEYS = logging.LogRecord(
    "name", 0, "pathname", 0, "msg", (), None
).__dict__.keys()
841
842
class ExtraAdder:
    """
    Add extra attributes of `logging.LogRecord` objects to the event
    dictionary.

    This processor can be used for adding data passed in the ``extra``
    parameter of the `logging` module's log methods to the event dictionary.

    Args:
        allow:
            An optional collection of attributes that, if present in
            `logging.LogRecord` objects, will be copied to event dictionaries.

            If ``allow`` is None all attributes of `logging.LogRecord` objects
            that do not exist on a standard `logging.LogRecord` object will be
            copied to event dictionaries.

    .. versionadded:: 21.5.0
    """

    __slots__ = ("_copier",)

    def __init__(self, allow: Collection[str] | None = None) -> None:
        self._copier: Callable[[EventDict, logging.LogRecord], None]
        if allow is None:
            self._copier = self._copy_all
        else:
            # Snapshot *allow* so that later mutation of the caller's
            # collection cannot change this processor's behavior.
            self._copier = functools.partial(self._copy_allowed, list(allow))

    def __call__(
        self, logger: logging.Logger, name: str, event_dict: EventDict
    ) -> EventDict:
        record: logging.LogRecord | None = event_dict.get("_record")
        if record is not None:
            self._copier(event_dict, record)
        return event_dict

    @classmethod
    def _copy_all(
        cls, event_dict: EventDict, record: logging.LogRecord
    ) -> None:
        # Anything not on a vanilla LogRecord must have come from ``extra=``.
        for key, value in record.__dict__.items():
            if key not in _LOG_RECORD_KEYS:
                event_dict[key] = value

    @classmethod
    def _copy_allowed(
        cls,
        allow: Collection[str],
        event_dict: EventDict,
        record: logging.LogRecord,
    ) -> None:
        attrs = record.__dict__
        for key in allow:
            if key in attrs:
                event_dict[key] = attrs[key]
901
902
# Keyword arguments that `logging.Logger` log methods accept natively; the
# renderers below extract these from the event dict and pass them through
# instead of stuffing them into ``extra``.
LOG_KWARG_NAMES = ("exc_info", "stack_info", "stacklevel")
904
905
def render_to_log_args_and_kwargs(
    _: logging.Logger, __: str, event_dict: EventDict
) -> tuple[tuple[Any, ...], dict[str, Any]]:
    """
    Render ``event_dict`` into positional and keyword arguments for
    `logging.Logger` logging methods.
    See `logging.Logger.debug` method for keyword arguments reference.

    The ``event`` field is passed in the first positional argument, positional
    arguments from ``positional_args`` field are passed in subsequent positional
    arguments, keyword arguments are extracted from the *event_dict* and the
    rest of the *event_dict* is added as ``extra``.

    This allows you to defer formatting to `logging`.

    .. versionadded:: 25.1.0
    """
    event = event_dict.pop("event")
    positional = event_dict.pop("positional_args", ())

    kwargs: dict[str, Any] = {}
    for name in LOG_KWARG_NAMES:
        if name in event_dict:
            kwargs[name] = event_dict.pop(name)

    # Whatever remains travels as ``extra`` so it ends up on the LogRecord.
    if event_dict:
        kwargs["extra"] = event_dict

    return (event, *positional), kwargs
934
935
def render_to_log_kwargs(
    _: logging.Logger, __: str, event_dict: EventDict
) -> EventDict:
    """
    Render ``event_dict`` into keyword arguments for `logging.Logger` logging
    methods.
    See `logging.Logger.debug` method for keyword arguments reference.

    The ``event`` field is translated into ``msg``, keyword arguments are
    extracted from the *event_dict* and the rest of the *event_dict* is added as
    ``extra``.

    This allows you to defer formatting to `logging`.

    .. versionadded:: 17.1.0
    .. versionchanged:: 22.1.0
       ``exc_info``, ``stack_info``, and ``stacklevel`` are passed as proper
       kwargs and not put into ``extra``.
    .. versionchanged:: 24.2.0
       ``stackLevel`` corrected to ``stacklevel``.
    """
    msg = event_dict.pop("event")

    # Pull out the kwargs that `logging` understands natively; everything
    # still left in *event_dict* travels as ``extra`` (same dict object).
    passthrough = {
        name: event_dict.pop(name)
        for name in LOG_KWARG_NAMES
        if name in event_dict
    }

    return {"msg": msg, "extra": event_dict, **passthrough}
966
967
968class ProcessorFormatter(logging.Formatter):
969 r"""
970 Call *structlog* processors on `logging.LogRecord`\s.
971
972 This is an implementation of a `logging.Formatter` that can be used to
973 format log entries from both *structlog* and `logging`.
974
975 Its static method `wrap_for_formatter` must be the final processor in
976 *structlog*'s processor chain.
977
978 Please refer to :ref:`processor-formatter` for examples.
979
980 Args:
981 foreign_pre_chain:
982 If not `None`, it is used as a processor chain that is applied to
983 **non**-*structlog* log entries before the event dictionary is
984 passed to *processors*. (default: `None`)
985
986 processors:
987 A chain of *structlog* processors that is used to process **all**
988 log entries. The last one must render to a `str` which then gets
989 passed on to `logging` for output.
990
991 Compared to *structlog*'s regular processor chains, there's a few
992 differences:
993
994 - The event dictionary contains two additional keys:
995
996 #. ``_record``: a `logging.LogRecord` that either was created
997 using `logging` APIs, **or** is a wrapped *structlog* log
998 entry created by `wrap_for_formatter`.
999
1000 #. ``_from_structlog``: a `bool` that indicates whether or not
1001 ``_record`` was created by a *structlog* logger.
1002
1003 Since you most likely don't want ``_record`` and
1004 ``_from_structlog`` in your log files, we've added the static
1005 method `remove_processors_meta` to ``ProcessorFormatter`` that
1006 you can add just before your renderer.
1007
1008 - Since this is a `logging` *formatter*, raising
1009 `structlog.DropEvent` will crash your application.
1010
1011 keep_exc_info:
1012 ``exc_info`` on `logging.LogRecord`\ s is added to the
1013 ``event_dict`` and removed afterwards. Set this to ``True`` to keep
1014 it on the `logging.LogRecord`. (default: False)
1015
1016 keep_stack_info:
1017 Same as *keep_exc_info* except for ``stack_info``. (default: False)
1018
1019 logger:
1020 Logger which we want to push through the *structlog* processor
1021 chain. This parameter is necessary for some of the processors like
1022 `filter_by_level`. (default: None)
1023
1024 pass_foreign_args:
1025 If True, pass a foreign log record's ``args`` attribute to the
1026 ``event_dict`` under ``positional_args`` key. (default: False)
1027
1028 processor:
1029 A single *structlog* processor used for rendering the event
1030 dictionary before passing it off to `logging`. Must return a `str`.
1031 The event dictionary does **not** contain ``_record`` and
1032 ``_from_structlog``.
1033
1034 This parameter exists for historic reasons. Please use *processors*
1035 instead.
1036
1037 use_get_message:
1038 If True, use ``record.getMessage`` to get a fully rendered log
1039 message, otherwise use ``str(record.msg)``. (default: True)
1040
1041 Raises:
1042 TypeError: If both or neither *processor* and *processors* are passed.
1043
1044 .. versionadded:: 17.1.0
1045 .. versionadded:: 17.2.0 *keep_exc_info* and *keep_stack_info*
1046 .. versionadded:: 19.2.0 *logger*
1047 .. versionadded:: 19.2.0 *pass_foreign_args*
1048 .. versionadded:: 21.3.0 *processors*
1049 .. deprecated:: 21.3.0
1050 *processor* (singular) in favor of *processors* (plural). Removal is not
1051 planned.
1052 .. versionadded:: 23.3.0 *use_get_message*
1053 """
1054
1055 def __init__(
1056 self,
1057 processor: Processor | None = None,
1058 processors: Sequence[Processor] | None = (),
1059 foreign_pre_chain: Sequence[Processor] | None = None,
1060 keep_exc_info: bool = False,
1061 keep_stack_info: bool = False,
1062 logger: logging.Logger | None = None,
1063 pass_foreign_args: bool = False,
1064 use_get_message: bool = True,
1065 *args: Any,
1066 **kwargs: Any,
1067 ) -> None:
1068 fmt = kwargs.pop("fmt", "%(message)s")
1069 super().__init__(*args, fmt=fmt, **kwargs) # type: ignore[misc]
1070
1071 if processor and processors:
1072 msg = (
1073 "The `processor` and `processors` arguments are mutually"
1074 " exclusive."
1075 )
1076 raise TypeError(msg)
1077
1078 self.processors: Sequence[Processor]
1079 if processor is not None:
1080 self.processors = (self.remove_processors_meta, processor)
1081 elif processors:
1082 self.processors = processors
1083 else:
1084 msg = "Either `processor` or `processors` must be passed."
1085 raise TypeError(msg)
1086
1087 self.foreign_pre_chain = foreign_pre_chain
1088 self.keep_exc_info = keep_exc_info
1089 self.keep_stack_info = keep_stack_info
1090 self.logger = logger
1091 self.pass_foreign_args = pass_foreign_args
1092 self.use_get_message = use_get_message
1093
    def format(self, record: logging.LogRecord) -> str:
        """
        Extract *structlog*'s `event_dict` from ``record.msg`` and format it.

        *record* has been patched by `wrap_for_formatter` first though, so the
        type isn't quite right.

        Args:
            record:
                The record to render.  For *structlog*-originated entries,
                ``record.msg`` is the event dict itself and ``_logger`` /
                ``_name`` were attached by `wrap_for_formatter`; for foreign
                `logging` records, an event dict is synthesized here.

        Returns:
            The log line as produced by the configured *processors* and then
            passed through `logging.Formatter.format`.
        """
        # Make a shallow copy of the record to let other handlers/formatters
        # process the original one
        record = logging.makeLogRecord(record.__dict__)

        # _SENTINEL distinguishes "attribute absent" from a logger that is
        # legitimately None.
        logger = getattr(record, "_logger", _SENTINEL)
        meth_name = getattr(record, "_name", "__structlog_sentinel__")

        ed: ProcessorReturnValue
        if logger is not _SENTINEL and meth_name != "__structlog_sentinel__":
            # Both attached by wrap_for_formatter
            if self.logger is not None:
                # An explicitly configured logger wins over the one that was
                # captured at wrap time.
                logger = self.logger
            meth_name = cast(str, record._name)  # type:ignore[attr-defined]

            # We need to copy because it's possible that the same record gets
            # processed by multiple logging formatters. LogRecord.getMessage
            # would transform our dict into a str.
            ed = cast(Dict[str, Any], record.msg).copy()
            ed["_record"] = record
            ed["_from_structlog"] = True
        else:
            # Foreign record from plain `logging`: build an event dict from
            # the record's own message and level.
            logger = self.logger
            meth_name = record.levelname.lower()
            ed = {
                "event": (
                    record.getMessage()
                    if self.use_get_message
                    else str(record.msg)
                ),
                "_record": record,
                "_from_structlog": False,
            }

            if self.pass_foreign_args:
                ed["positional_args"] = record.args

            # Clear args so the base Formatter doesn't try to %-interpolate
            # the already-rendered message a second time.
            record.args = ()

            # Add stack-related attributes to the event dict
            if record.exc_info:
                ed["exc_info"] = record.exc_info
            if record.stack_info:
                ed["stack_info"] = record.stack_info

            # Non-structlog allows to run through a chain to prepare it for the
            # final processor (e.g. adding timestamps and log levels).
            for proc in self.foreign_pre_chain or ():
                ed = cast(EventDict, proc(logger, meth_name, ed))

        # If required, unset stack-related attributes on the record copy so
        # that the base implementation doesn't append stacktraces to the
        # output.
        if not self.keep_exc_info:
            record.exc_text = None
            record.exc_info = None
        if not self.keep_stack_info:
            record.stack_info = None

        for p in self.processors:
            ed = p(logger, meth_name, ed)  # type: ignore[arg-type]

        # The chain must end in a renderer; anything else would crash deep in
        # `logging`, so warn here with a pointer to the offending processor.
        if not isinstance(ed, str):
            warnings.warn(
                "The last processor in ProcessorFormatter.processors must "
                f"return a string, but {self.processors[-1]} returned a "
                f"{type(ed)} instead.",
                category=RuntimeWarning,
                stacklevel=1,
            )
            ed = cast(str, ed)

        # The base implementation reads the final message from record.msg.
        record.msg = ed

        return super().format(record)
1175
1176 @staticmethod
1177 def wrap_for_formatter(
1178 logger: logging.Logger, name: str, event_dict: EventDict
1179 ) -> tuple[tuple[EventDict], dict[str, dict[str, Any]]]:
1180 """
1181 Wrap *logger*, *name*, and *event_dict*.
1182
1183 The result is later unpacked by `ProcessorFormatter` when formatting
1184 log entries.
1185
1186 Use this static method as the renderer (in other words, final
1187 processor) if you want to use `ProcessorFormatter` in your `logging`
1188 configuration.
1189 """
1190 return (event_dict,), {"extra": {"_logger": logger, "_name": name}}
1191
1192 @staticmethod
1193 def remove_processors_meta(
1194 _: WrappedLogger, __: str, event_dict: EventDict
1195 ) -> EventDict:
1196 """
1197 Remove ``_record`` and ``_from_structlog`` from *event_dict*.
1198
1199 These keys are added to the event dictionary, before
1200 `ProcessorFormatter`'s *processors* are run.
1201
1202 .. versionadded:: 21.3.0
1203 """
1204 del event_dict["_record"]
1205 del event_dict["_from_structlog"]
1206
1207 return event_dict