Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.11/site-packages/structlog/stdlib.py: 39%
Shortcuts on this page
r m x toggle line displays
j k next/prev highlighted chunk
0 (zero) top of page
1 (one) first highlighted chunk
Shortcuts on this page
r m x toggle line displays
j k next/prev highlighted chunk
0 (zero) top of page
1 (one) first highlighted chunk
1# This file is dual licensed under the terms of the Apache License, Version
2# 2.0, and the MIT License. See the LICENSE file in the root of this
3# repository for complete details.
5"""
6Processors and helpers specific to the :mod:`logging` module from the `Python
7standard library <https://docs.python.org/>`_.
9See also :doc:`structlog's standard library support <standard-library>`.
10"""
12from __future__ import annotations
14import asyncio
15import contextvars
16import functools
17import logging
18import sys
19import warnings
21from functools import partial
22from typing import Any, Callable, Collection, Dict, Iterable, Sequence, cast
25if sys.version_info >= (3, 11):
26 from typing import Self
27else:
28 from typing_extensions import Self
31from . import _config
32from ._base import BoundLoggerBase
33from ._frames import _find_first_app_frame_and_name, _format_stack
34from ._log_levels import LEVEL_TO_NAME, NAME_TO_LEVEL, add_log_level
35from .contextvars import _ASYNC_CALLING_STACK, merge_contextvars
36from .exceptions import DropEvent
37from .processors import StackInfoRenderer
38from .typing import (
39 Context,
40 EventDict,
41 ExcInfo,
42 Processor,
43 ProcessorReturnValue,
44 WrappedLogger,
45)
# Public names exported by this module (kept sorted: classes first,
# then functions, each group alphabetically).
__all__ = [
    "BoundLogger",
    "ExtraAdder",
    "LoggerFactory",
    "PositionalArgumentsFormatter",
    "ProcessorFormatter",
    "add_log_level",
    "add_log_level_number",
    "add_logger_name",
    "filter_by_level",
    "get_logger",
    "recreate_defaults",
    "render_to_log_args_and_kwargs",
    "render_to_log_kwargs",
]
def recreate_defaults(*, log_level: int | None = logging.NOTSET) -> None:
    """
    Recreate defaults on top of standard library's logging.

    The output looks the same, but goes through `logging`.

    As with vanilla defaults, the backwards-compatibility guarantees don't
    apply to the settings applied here.

    Args:
        log_level:
            If `None`, don't configure standard library logging **at all**.

            Otherwise configure it to log to `sys.stdout` at *log_level*
            (``logging.NOTSET`` being the default).

            If you need more control over `logging`, pass `None` here and
            configure it yourself.

    .. versionadded:: 22.1.0
    .. versionchanged:: 23.3.0 Added `add_logger_name`.
    .. versionchanged:: 25.1.0 Added `PositionalArgumentsFormatter`.
    """
    if log_level is not None:
        # force=True replaces any handlers already attached to the root
        # logger, so repeated calls don't stack handlers. It is a regular
        # keyword argument since Python 3.8, so the former ``**kw``
        # indirection (and its ``type: ignore``) is unnecessary.
        logging.basicConfig(
            format="%(message)s",
            stream=sys.stdout,
            level=log_level,
            force=True,
        )

    _config.reset_defaults()
    _config.configure(
        processors=[
            PositionalArgumentsFormatter(),  # handled by native loggers
            merge_contextvars,
            add_log_level,
            add_logger_name,
            StackInfoRenderer(),
            _config._BUILTIN_DEFAULT_PROCESSORS[-2],  # TimeStamper
            _config._BUILTIN_DEFAULT_PROCESSORS[-1],  # ConsoleRenderer
        ],
        wrapper_class=BoundLogger,
        logger_factory=LoggerFactory(),
    )
# Module-private sentinel: a unique object compared by identity, for use
# where `None` may be a legitimate value. (Its consumers are not visible in
# this chunk.)
_SENTINEL = object()
class _FixedFindCallerLogger(logging.Logger):
    """
    A `logging.Logger` whose ``findCaller`` copes with *structlog*'s extra
    frames.

    Installed as the default logger class by `LoggerFactory` so caller info
    points at application code rather than structlog internals.
    """

    def findCaller(
        self, stack_info: bool = False, stacklevel: int = 1
    ) -> tuple[str, int, str, str | None]:
        """
        Return ``(filename, lineno, func_name, sinfo)`` of the first frame
        outside of structlog and the ``logging`` package.
        """
        # Stdlib log methods such as .warning() pass stacklevel=1, but the
        # "logging" frames are dropped explicitly below, so the effective
        # stacklevel must be lowered by one. Frames from "logging" have to
        # be skipped manually because structlog itself sometimes calls
        # logging methods, which breaks pure stacklevel offsets. A falsy
        # stacklevel disables the adjustment.
        if stacklevel:
            effective_level = stacklevel - 1 if stacklevel > 1 else 0
        else:
            effective_level = None

        frame, _ = _find_first_app_frame_and_name(
            ["logging"], stacklevel=effective_level
        )
        stack: str | None = _format_stack(frame) if stack_info else None

        return (
            frame.f_code.co_filename,
            frame.f_lineno,
            frame.f_code.co_name,
            stack,
        )
class BoundLogger(BoundLoggerBase):
    """
    Python Standard Library version of `structlog.BoundLogger`.

    Works exactly like the generic one except that it takes advantage of
    knowing the logging methods in advance.

    Use it like::

        structlog.configure(
            wrapper_class=structlog.stdlib.BoundLogger,
        )

    It also contains a bunch of properties that pass-through to the wrapped
    `logging.Logger` which should make it work as a drop-in replacement.

    .. versionadded:: 23.1.0
       Async variants `alog()`, `adebug()`, `ainfo()`, and so forth.

    .. versionchanged:: 24.2.0
       Callsite parameters are now also collected by
       `structlog.processors.CallsiteParameterAdder` for async log methods.
    """

    # The wrapped standard library logger instance.
    _logger: logging.Logger

    def bind(self, **new_values: Any) -> Self:
        """
        Return a new logger with *new_values* added to the existing ones.
        """
        return super().bind(**new_values)

    def unbind(self, *keys: str) -> Self:
        """
        Return a new logger with *keys* removed from the context.

        Raises:
            KeyError: If the key is not part of the context.
        """
        return super().unbind(*keys)

    def try_unbind(self, *keys: str) -> Self:
        """
        Like :meth:`unbind`, but best effort: missing keys are ignored.

        .. versionadded:: 18.2.0
        """
        return super().try_unbind(*keys)

    def new(self, **new_values: Any) -> Self:
        """
        Clear context and binds *initial_values* using `bind`.

        Only necessary with dict implementations that keep global state like
        those wrapped by `structlog.threadlocal.wrap_dict` when threads
        are reused.
        """
        return super().new(**new_values)

    def debug(self, event: str | None = None, *args: Any, **kw: Any) -> Any:
        """
        Process event and call `logging.Logger.debug` with the result.
        """
        return self._proxy_to_logger("debug", event, *args, **kw)

    def info(self, event: str | None = None, *args: Any, **kw: Any) -> Any:
        """
        Process event and call `logging.Logger.info` with the result.
        """
        return self._proxy_to_logger("info", event, *args, **kw)

    def warning(self, event: str | None = None, *args: Any, **kw: Any) -> Any:
        """
        Process event and call `logging.Logger.warning` with the result.
        """
        return self._proxy_to_logger("warning", event, *args, **kw)

    # Alias mirroring the stdlib's (deprecated there) Logger.warn.
    warn = warning

    def error(self, event: str | None = None, *args: Any, **kw: Any) -> Any:
        """
        Process event and call `logging.Logger.error` with the result.
        """
        return self._proxy_to_logger("error", event, *args, **kw)

    def critical(self, event: str | None = None, *args: Any, **kw: Any) -> Any:
        """
        Process event and call `logging.Logger.critical` with the result.
        """
        return self._proxy_to_logger("critical", event, *args, **kw)

    def fatal(self, event: str | None = None, *args: Any, **kw: Any) -> Any:
        """
        Process event and call `logging.Logger.critical` with the result.
        """
        return self._proxy_to_logger("critical", event, *args, **kw)

    def exception(
        self, event: str | None = None, *args: Any, **kw: Any
    ) -> Any:
        """
        Process event and call `logging.Logger.exception` with the result,
        after setting ``exc_info`` to `True` if it's not already set.
        """
        # setdefault keeps an explicitly passed exc_info (e.g. a captured
        # exception tuple) intact.
        kw.setdefault("exc_info", True)
        return self._proxy_to_logger("exception", event, *args, **kw)

    def log(
        self, level: int, event: str | None = None, *args: Any, **kw: Any
    ) -> Any:
        """
        Process *event* and call the appropriate logging method depending on
        *level*.
        """
        return self._proxy_to_logger(LEVEL_TO_NAME[level], event, *args, **kw)

    def _proxy_to_logger(
        self,
        method_name: str,
        event: str | None = None,
        *event_args: str,
        **event_kw: Any,
    ) -> Any:
        """
        Propagate a method call to the wrapped logger.

        This is the same as the superclass implementation, except that
        it also preserves positional arguments in the ``event_dict`` so
        that the stdlib's support for format strings can be used.
        """
        if event_args:
            event_kw["positional_args"] = event_args

        return super()._proxy_to_logger(method_name, event=event, **event_kw)

    # Pass-through attributes and methods to mimic the stdlib's logger
    # interface.

    @property
    def name(self) -> str:
        """
        Returns :attr:`logging.Logger.name`
        """
        return self._logger.name

    @property
    def level(self) -> int:
        """
        Returns :attr:`logging.Logger.level`
        """
        return self._logger.level

    @property
    def parent(self) -> Any:
        """
        Returns :attr:`logging.Logger.parent`
        """
        return self._logger.parent

    @property
    def propagate(self) -> bool:
        """
        Returns :attr:`logging.Logger.propagate`
        """
        return self._logger.propagate

    @property
    def handlers(self) -> Any:
        """
        Returns :attr:`logging.Logger.handlers`
        """
        return self._logger.handlers

    @property
    def disabled(self) -> int:
        """
        Returns :attr:`logging.Logger.disabled`
        """
        return self._logger.disabled

    def setLevel(self, level: int) -> None:
        """
        Calls :meth:`logging.Logger.setLevel` with unmodified arguments.
        """
        self._logger.setLevel(level)

    def findCaller(
        self, stack_info: bool = False, stacklevel: int = 1
    ) -> tuple[str, int, str, str | None]:
        """
        Calls :meth:`logging.Logger.findCaller` with unmodified arguments.
        """
        # No need for stacklevel-adjustments since we're within structlog and
        # our frames are ignored unconditionally.
        return self._logger.findCaller(
            stack_info=stack_info, stacklevel=stacklevel
        )

    def makeRecord(
        self,
        name: str,
        level: int,
        fn: str,
        lno: int,
        msg: str,
        args: tuple[Any, ...],
        exc_info: ExcInfo,
        func: str | None = None,
        extra: Any = None,
    ) -> logging.LogRecord:
        """
        Calls :meth:`logging.Logger.makeRecord` with unmodified arguments.
        """
        return self._logger.makeRecord(
            name, level, fn, lno, msg, args, exc_info, func=func, extra=extra
        )

    def handle(self, record: logging.LogRecord) -> None:
        """
        Calls :meth:`logging.Logger.handle` with unmodified arguments.
        """
        self._logger.handle(record)

    def addHandler(self, hdlr: logging.Handler) -> None:
        """
        Calls :meth:`logging.Logger.addHandler` with unmodified arguments.
        """
        self._logger.addHandler(hdlr)

    def removeHandler(self, hdlr: logging.Handler) -> None:
        """
        Calls :meth:`logging.Logger.removeHandler` with unmodified arguments.
        """
        self._logger.removeHandler(hdlr)

    def hasHandlers(self) -> bool:
        """
        Calls :meth:`logging.Logger.hasHandlers` with unmodified arguments.

        Exists only in Python 3.
        """
        return self._logger.hasHandlers()

    def callHandlers(self, record: logging.LogRecord) -> None:
        """
        Calls :meth:`logging.Logger.callHandlers` with unmodified arguments.
        """
        self._logger.callHandlers(record)

    def getEffectiveLevel(self) -> int:
        """
        Calls :meth:`logging.Logger.getEffectiveLevel` with unmodified
        arguments.
        """
        return self._logger.getEffectiveLevel()

    def isEnabledFor(self, level: int) -> bool:
        """
        Calls :meth:`logging.Logger.isEnabledFor` with unmodified arguments.
        """
        return self._logger.isEnabledFor(level)

    def getChild(self, suffix: str) -> logging.Logger:
        """
        Calls :meth:`logging.Logger.getChild` with unmodified arguments.
        """
        return self._logger.getChild(suffix)

    # Non-Standard Async
    async def _dispatch_to_sync(
        self,
        meth: Callable[..., Any],
        event: str,
        args: tuple[Any, ...],
        kw: dict[str, Any],
    ) -> None:
        """
        Merge contextvars and log using the sync logger in a thread pool.
        """
        # NOTE: ``f_back.f_back`` records the frame of the *user's* call
        # site (two frames up: past this method and the a*-method that
        # called it) so callsite processors report the right location.
        # This only works while the a*-methods below call this method
        # directly -- don't introduce intermediate frames.
        scs_token = _ASYNC_CALLING_STACK.set(sys._getframe().f_back.f_back)  # type: ignore[union-attr, arg-type, unused-ignore]
        # Copy the context *after* setting the var so the executor thread
        # sees it.
        ctx = contextvars.copy_context()

        try:
            await asyncio.get_running_loop().run_in_executor(
                None,
                lambda: ctx.run(lambda: meth(event, *args, **kw)),
            )
        finally:
            _ASYNC_CALLING_STACK.reset(scs_token)

    async def adebug(self, event: str, *args: Any, **kw: Any) -> None:
        """
        Log using `debug()`, but asynchronously in a separate thread.

        .. versionadded:: 23.1.0
        """
        await self._dispatch_to_sync(self.debug, event, args, kw)

    async def ainfo(self, event: str, *args: Any, **kw: Any) -> None:
        """
        Log using `info()`, but asynchronously in a separate thread.

        .. versionadded:: 23.1.0
        """
        await self._dispatch_to_sync(self.info, event, args, kw)

    async def awarning(self, event: str, *args: Any, **kw: Any) -> None:
        """
        Log using `warning()`, but asynchronously in a separate thread.

        .. versionadded:: 23.1.0
        """
        await self._dispatch_to_sync(self.warning, event, args, kw)

    async def aerror(self, event: str, *args: Any, **kw: Any) -> None:
        """
        Log using `error()`, but asynchronously in a separate thread.

        .. versionadded:: 23.1.0
        """
        await self._dispatch_to_sync(self.error, event, args, kw)

    async def acritical(self, event: str, *args: Any, **kw: Any) -> None:
        """
        Log using `critical()`, but asynchronously in a separate thread.

        .. versionadded:: 23.1.0
        """
        await self._dispatch_to_sync(self.critical, event, args, kw)

    async def afatal(self, event: str, *args: Any, **kw: Any) -> None:
        """
        Log using `critical()`, but asynchronously in a separate thread.

        .. versionadded:: 23.1.0
        """
        await self._dispatch_to_sync(self.critical, event, args, kw)

    async def aexception(self, event: str, *args: Any, **kw: Any) -> None:
        """
        Log using `exception()`, but asynchronously in a separate thread.

        .. versionadded:: 23.1.0
        """
        # To make `log.exception("foo") work, we have to check if the user
        # passed an explicit exc_info and if not, supply our own. It has to
        # be captured *here*, before the call hops to the executor thread
        # where the active exception is no longer visible.
        if kw.get("exc_info", True) is True and kw.get("exception") is None:
            kw["exc_info"] = sys.exc_info()

        await self._dispatch_to_sync(self.exception, event, args, kw)

    async def alog(
        self, level: Any, event: str, *args: Any, **kw: Any
    ) -> None:
        """
        Log using `log()`, but asynchronously in a separate thread.

        .. versionadded:: 23.1.0
        """
        await self._dispatch_to_sync(partial(self.log, level), event, args, kw)
def get_logger(*args: Any, **initial_values: Any) -> BoundLogger:
    """
    Only calls `structlog.get_logger`, but has the correct type hints.

    .. warning::

       Does **not** check whether -- or ensure that -- you've configured
       *structlog* for standard library :mod:`logging`!

       See :doc:`standard-library` for details.

    .. versionadded:: 20.2.0
    """
    # Thin typed wrapper: *args* is an optional logger name,
    # *initial_values* get bound into the initial context.
    return _config.get_logger(*args, **initial_values)
class AsyncBoundLogger:
    """
    Wraps a `BoundLogger` & exposes its logging methods as ``async`` versions.

    Instead of blocking the program, they are run asynchronously in a thread
    pool executor.

    This means more computational overhead per log call. But it also means that
    the processor chain (e.g. JSON serialization) and I/O won't block your
    whole application.

    Only available for Python 3.7 and later.

    .. versionadded:: 20.2.0
    .. versionchanged:: 20.2.0 fix _dispatch_to_sync contextvars usage
    .. deprecated:: 23.1.0
       Use the regular `BoundLogger` with its a-prefixed methods instead.
    .. versionchanged:: 23.3.0
       Callsite parameters are now also collected for async log methods.
    """

    __slots__ = ("_loop", "sync_bl")

    #: The wrapped synchronous logger. It is useful to be able to log
    #: synchronously occasionally.
    sync_bl: BoundLogger

    # Executor used by run_in_executor; None means the loop's default pool.
    _executor = None
    # Factory for the wrapped synchronous logger; overridable in subclasses.
    _bound_logger_factory = BoundLogger

    def __init__(
        self,
        logger: logging.Logger,
        processors: Iterable[Processor],
        context: Context,
        *,
        # Only as an optimization for binding!
        _sync_bl: Any = None,  # *vroom vroom* over purity.
        _loop: Any = None,
    ):
        # Fast path used by bind()/new()/unbind()/try_unbind(): reuse an
        # already-built sync logger instead of constructing a new one.
        if _sync_bl:
            self.sync_bl = _sync_bl
            self._loop = _loop

            return

        self.sync_bl = self._bound_logger_factory(
            logger=logger, processors=processors, context=context
        )
        self._loop = asyncio.get_running_loop()

    # Instances would've been correctly recognized as such, however the class
    # not and we need the class in `structlog.configure()`.
    @property
    def _context(self) -> Context:
        return self.sync_bl._context

    def bind(self, **new_values: Any) -> Self:
        return self.__class__(
            # logger, processors and context are within sync_bl. These
            # arguments are ignored if _sync_bl is passed. *vroom vroom* over
            # purity.
            logger=None,  # type: ignore[arg-type]
            processors=(),
            context={},
            _sync_bl=self.sync_bl.bind(**new_values),
            _loop=self._loop,
        )

    def new(self, **new_values: Any) -> Self:
        return self.__class__(
            # c.f. comment in bind
            logger=None,  # type: ignore[arg-type]
            processors=(),
            context={},
            _sync_bl=self.sync_bl.new(**new_values),
            _loop=self._loop,
        )

    def unbind(self, *keys: str) -> Self:
        return self.__class__(
            # c.f. comment in bind
            logger=None,  # type: ignore[arg-type]
            processors=(),
            context={},
            _sync_bl=self.sync_bl.unbind(*keys),
            _loop=self._loop,
        )

    def try_unbind(self, *keys: str) -> Self:
        return self.__class__(
            # c.f. comment in bind
            logger=None,  # type: ignore[arg-type]
            processors=(),
            context={},
            _sync_bl=self.sync_bl.try_unbind(*keys),
            _loop=self._loop,
        )

    async def _dispatch_to_sync(
        self,
        meth: Callable[..., Any],
        event: str,
        args: tuple[Any, ...],
        kw: dict[str, Any],
    ) -> None:
        """
        Merge contextvars and log using the sync logger in a thread pool.
        """
        # NOTE: ``f_back.f_back`` records the *user's* frame (two frames up)
        # for callsite parameter collection; it relies on the log methods
        # below calling this method directly.
        scs_token = _ASYNC_CALLING_STACK.set(sys._getframe().f_back.f_back)  # type: ignore[union-attr, arg-type, unused-ignore]
        ctx = contextvars.copy_context()

        try:
            await asyncio.get_running_loop().run_in_executor(
                self._executor,
                lambda: ctx.run(lambda: meth(event, *args, **kw)),
            )
        finally:
            _ASYNC_CALLING_STACK.reset(scs_token)

    async def debug(self, event: str, *args: Any, **kw: Any) -> None:
        await self._dispatch_to_sync(self.sync_bl.debug, event, args, kw)

    async def info(self, event: str, *args: Any, **kw: Any) -> None:
        await self._dispatch_to_sync(self.sync_bl.info, event, args, kw)

    async def warning(self, event: str, *args: Any, **kw: Any) -> None:
        await self._dispatch_to_sync(self.sync_bl.warning, event, args, kw)

    async def warn(self, event: str, *args: Any, **kw: Any) -> None:
        await self._dispatch_to_sync(self.sync_bl.warning, event, args, kw)

    async def error(self, event: str, *args: Any, **kw: Any) -> None:
        await self._dispatch_to_sync(self.sync_bl.error, event, args, kw)

    async def critical(self, event: str, *args: Any, **kw: Any) -> None:
        await self._dispatch_to_sync(self.sync_bl.critical, event, args, kw)

    async def fatal(self, event: str, *args: Any, **kw: Any) -> None:
        await self._dispatch_to_sync(self.sync_bl.critical, event, args, kw)

    async def exception(self, event: str, *args: Any, **kw: Any) -> None:
        # To make `log.exception("foo") work, we have to check if the user
        # passed an explicit exc_info and if not, supply our own. Captured
        # here because the active exception is invisible in the executor
        # thread.
        ei = kw.pop("exc_info", None)
        if ei is None and kw.get("exception") is None:
            ei = sys.exc_info()

        kw["exc_info"] = ei

        await self._dispatch_to_sync(self.sync_bl.exception, event, args, kw)

    async def log(self, level: Any, event: str, *args: Any, **kw: Any) -> None:
        await self._dispatch_to_sync(
            partial(self.sync_bl.log, level), event, args, kw
        )
class LoggerFactory:
    """
    Build a standard library logger when an *instance* is called.

    Sets a custom logger using :func:`logging.setLoggerClass` so variables in
    log format are expanded properly.

    >>> from structlog import configure
    >>> from structlog.stdlib import LoggerFactory
    >>> configure(logger_factory=LoggerFactory())

    Args:
        ignore_frame_names:
            When guessing the name of a logger, skip frames whose names
            *start* with one of these. For example, in pyramid applications
            you'll want to set it to ``["venusian", "pyramid.config"]``.
            This argument is called *additional_ignores* in other APIs
            throughout *structlog*.
    """

    def __init__(self, ignore_frame_names: list[str] | None = None):
        self._ignore = ignore_frame_names
        # Install the caller-aware logger class globally so caller info
        # resolves to application code instead of structlog internals.
        logging.setLoggerClass(_FixedFindCallerLogger)

    def __call__(self, *args: Any) -> logging.Logger:
        """
        Deduce the caller's module name and create a stdlib logger.

        If an optional argument is passed, it will be used as the logger name
        instead of guesswork. This optional argument would be passed from the
        :func:`structlog.get_logger` call. For example
        ``structlog.get_logger("foo")`` would cause this method to be called
        with ``"foo"`` as its first positional argument.

        .. versionchanged:: 0.4.0
           Added support for optional positional arguments. Using the first
           one for naming the constructed logger.
        """
        # Without an explicit name, skip all frames originating from within
        # structlog or one of the configured ignore prefixes and use the
        # first application frame's module name.
        if not args:
            _, guessed_name = _find_first_app_frame_and_name(self._ignore)
            return logging.getLogger(guessed_name)

        return logging.getLogger(args[0])
class PositionalArgumentsFormatter:
    """
    Apply stdlib-like ``%``-string formatting to the ``event`` key.

    When the event dict carries a ``positional_args`` tuple, the value under
    ``event`` is interpolated with it -- mirroring how the stdlib's log
    methods treat their arguments: a tuple containing exactly one non-empty
    `dict` supplies keyword placeholders (``%(name)s``), anything else
    supplies positional placeholders.

    ``positional_args`` is populated by `structlog.stdlib.BoundLogger` or
    can be set manually.

    Pass ``remove_positional_args=False`` to keep the ``positional_args``
    key in the event dict; by default it is removed after formatting.
    """

    def __init__(self, remove_positional_args: bool = True) -> None:
        self.remove_positional_args = remove_positional_args

    def __call__(
        self, _: WrappedLogger, __: str, event_dict: EventDict
    ) -> EventDict:
        positional = event_dict.get("positional_args")

        if positional:
            # Same "single dict argument" special case that the stdlib
            # performs in LogRecord.__init__().
            is_single_mapping = (
                len(positional) == 1
                and isinstance(positional[0], dict)
                and positional[0]
            )
            fmt_args = positional[0] if is_single_mapping else positional
            event_dict["event"] = event_dict["event"] % fmt_args

        if positional is not None and self.remove_positional_args:
            del event_dict["positional_args"]

        return event_dict
def filter_by_level(
    logger: logging.Logger, method_name: str, event_dict: EventDict
) -> EventDict:
    """
    Check whether logging is configured to accept messages from this log level.

    Should be the first processor if stdlib's filtering by level is used so
    possibly expensive processors like exception formatters are avoided in the
    first place.

    >>> import logging
    >>> from structlog.stdlib import filter_by_level
    >>> logging.basicConfig(level=logging.WARN)
    >>> logger = logging.getLogger()
    >>> filter_by_level(logger, 'warn', {})
    {}
    >>> filter_by_level(logger, 'debug', {})
    Traceback (most recent call last):
    ...
    DropEvent

    Raises:
        DropEvent: If the logger is disabled or the entry's level is below
            the logger's effective level.
    """
    # logger.isEnabledFor() can't be used here: on Python 3.14 and later it
    # always reports disabled while a log entry is in flight.
    if logger.disabled:
        raise DropEvent

    if NAME_TO_LEVEL[method_name] < logger.getEffectiveLevel():
        raise DropEvent

    return event_dict
def add_log_level_number(
    logger: logging.Logger, method_name: str, event_dict: EventDict
) -> EventDict:
    """
    Add the numeric log level to the event dict as ``level_number``.

    Log level numbers map to the log level names. The Python stdlib uses them
    for filtering logic. This adds the same numbers so users can leverage
    similar filtering. Compare::

        level in ("warning", "error", "critical")
        level_number >= 30

    The mapping of names to numbers is in
    ``structlog.stdlib._log_levels._NAME_TO_LEVEL``.

    .. versionadded:: 18.2.0
    """
    # The method name ("debug", "warning", ...) determines the number.
    event_dict["level_number"] = NAME_TO_LEVEL[method_name]

    return event_dict
def add_logger_name(
    logger: logging.Logger, method_name: str, event_dict: EventDict
) -> EventDict:
    """
    Add the logger name to the event dict under the ``logger`` key.

    If the event dict carries a ``_record`` (a `logging.LogRecord`), that
    record's name wins; otherwise the wrapped logger's own name is used.
    """
    record = event_dict.get("_record")
    event_dict["logger"] = logger.name if record is None else record.name

    return event_dict
# Attribute names present on every plain `logging.LogRecord`. `ExtraAdder`
# uses this to tell standard record attributes apart from ones injected via
# the ``extra`` parameter of stdlib log calls.
_LOG_RECORD_KEYS = logging.LogRecord(
    "name", 0, "pathname", 0, "msg", (), None
).__dict__.keys()
class ExtraAdder:
    """
    Copy extra attributes of `logging.LogRecord` objects into the event
    dictionary.

    Useful for pulling data passed via the ``extra`` parameter of the
    `logging` module's log methods into the event dictionary.

    Args:
        allow:
            An optional collection of attribute names that, when present on
            a `logging.LogRecord`, are copied into event dictionaries.

            If ``allow`` is None, every attribute that does not exist on a
            plain `logging.LogRecord` is copied.

    .. versionadded:: 21.5.0
    """

    __slots__ = ("_copier",)

    def __init__(self, allow: Collection[str] | None = None) -> None:
        self._copier: Callable[[EventDict, logging.LogRecord], None]
        if allow is None:
            self._copier = self._copy_all
        else:
            # Snapshot *allow* so later mutation of the caller's collection
            # can't change this processor's behavior.
            self._copier = functools.partial(self._copy_allowed, list(allow))

    def __call__(
        self, logger: logging.Logger, name: str, event_dict: EventDict
    ) -> EventDict:
        record: logging.LogRecord | None = event_dict.get("_record")
        if record is not None:
            self._copier(event_dict, record)

        return event_dict

    @classmethod
    def _copy_all(
        cls, event_dict: EventDict, record: logging.LogRecord
    ) -> None:
        # Everything that isn't a standard LogRecord attribute must have
        # come in via ``extra``.
        event_dict.update(
            (key, value)
            for key, value in record.__dict__.items()
            if key not in _LOG_RECORD_KEYS
        )

    @classmethod
    def _copy_allowed(
        cls,
        allow: Collection[str],
        event_dict: EventDict,
        record: logging.LogRecord,
    ) -> None:
        attrs = record.__dict__
        for key in allow:
            if key in attrs:
                event_dict[key] = attrs[key]
# Keyword arguments that stdlib logging methods accept directly; everything
# else goes into ``extra``.
LOG_KWARG_NAMES = ("exc_info", "stack_info", "stacklevel")


def render_to_log_args_and_kwargs(
    _: logging.Logger, __: str, event_dict: EventDict
) -> tuple[tuple[Any, ...], dict[str, Any]]:
    """
    Render ``event_dict`` into positional and keyword arguments for
    `logging.Logger` logging methods.

    See `logging.Logger.debug` method for keyword arguments reference.

    The ``event`` field is passed in the first positional argument, positional
    arguments from ``positional_args`` field are passed in subsequent positional
    arguments, keyword arguments are extracted from the *event_dict* and the
    rest of the *event_dict* is added as ``extra``.

    This allows you to defer formatting to `logging`.

    .. versionadded:: 25.1.0
    """
    positional = event_dict.pop("positional_args", ())
    args: tuple[Any, ...] = (event_dict.pop("event"), *positional)

    kwargs: dict[str, Any] = {}
    for name in LOG_KWARG_NAMES:
        if name in event_dict:
            kwargs[name] = event_dict.pop(name)

    # Whatever remains becomes ``extra`` so it survives on the LogRecord.
    if event_dict:
        kwargs["extra"] = event_dict

    return args, kwargs
def render_to_log_kwargs(
    _: logging.Logger, __: str, event_dict: EventDict
) -> EventDict:
    """
    Render ``event_dict`` into keyword arguments for `logging.Logger` logging
    methods.

    See `logging.Logger.debug` method for keyword arguments reference.

    The ``event`` field is translated into ``msg``, keyword arguments are
    extracted from the *event_dict* and the rest of the *event_dict* is added as
    ``extra``.

    This allows you to defer formatting to `logging`.

    .. versionadded:: 17.1.0
    .. versionchanged:: 22.1.0
       ``exc_info``, ``stack_info``, and ``stacklevel`` are passed as proper
       kwargs and not put into ``extra``.
    .. versionchanged:: 24.2.0
       ``stackLevel`` corrected to ``stacklevel``.
    """
    kwargs: EventDict = {"msg": event_dict.pop("event"), "extra": event_dict}

    # Lift the stdlib logging kwargs out of the event dict so they reach
    # `logging` as real keyword arguments. ``extra`` aliases *event_dict*,
    # so popping here removes them from ``extra`` as well.
    for name in LOG_KWARG_NAMES:
        if name in event_dict:
            kwargs[name] = event_dict.pop(name)

    return kwargs
974class ProcessorFormatter(logging.Formatter):
975 r"""
976 Call *structlog* processors on `logging.LogRecord`\s.
978 This is an implementation of a `logging.Formatter` that can be used to
979 format log entries from both *structlog* and `logging`.
981 Its static method `wrap_for_formatter` must be the final processor in
982 *structlog*'s processor chain.
984 Please refer to :ref:`processor-formatter` for examples.
986 Args:
987 foreign_pre_chain:
988 If not `None`, it is used as a processor chain that is applied to
989 **non**-*structlog* log entries before the event dictionary is
990 passed to *processors*. (default: `None`)
992 processors:
993 A chain of *structlog* processors that is used to process **all**
994 log entries. The last one must render to a `str` which then gets
995 passed on to `logging` for output.
997 Compared to *structlog*'s regular processor chains, there's a few
998 differences:
1000 - The event dictionary contains two additional keys:
1002 #. ``_record``: a `logging.LogRecord` that either was created
1003 using `logging` APIs, **or** is a wrapped *structlog* log
1004 entry created by `wrap_for_formatter`.
1006 #. ``_from_structlog``: a `bool` that indicates whether or not
1007 ``_record`` was created by a *structlog* logger.
1009 Since you most likely don't want ``_record`` and
1010 ``_from_structlog`` in your log files, we've added the static
1011 method `remove_processors_meta` to ``ProcessorFormatter`` that
1012 you can add just before your renderer.
1014 - Since this is a `logging` *formatter*, raising
1015 `structlog.DropEvent` will crash your application.
1017 keep_exc_info:
1018 ``exc_info`` on `logging.LogRecord`\ s is added to the
1019 ``event_dict`` and removed afterwards. Set this to ``True`` to keep
1020 it on the `logging.LogRecord`. (default: False)
1022 keep_stack_info:
1023 Same as *keep_exc_info* except for ``stack_info``. (default: False)
1025 logger:
1026 Logger which we want to push through the *structlog* processor
1027 chain. This parameter is necessary for some of the processors like
1028 `filter_by_level`. (default: None)
1030 pass_foreign_args:
1031 If True, pass a foreign log record's ``args`` attribute to the
1032 ``event_dict`` under ``positional_args`` key. (default: False)
1034 processor:
1035 A single *structlog* processor used for rendering the event
1036 dictionary before passing it off to `logging`. Must return a `str`.
1037 The event dictionary does **not** contain ``_record`` and
1038 ``_from_structlog``.
1040 This parameter exists for historic reasons. Please use *processors*
1041 instead.
1043 use_get_message:
1044 If True, use ``record.getMessage`` to get a fully rendered log
1045 message, otherwise use ``str(record.msg)``. (default: True)
1047 Raises:
1048 TypeError: If both or neither *processor* and *processors* are passed.
1050 .. versionadded:: 17.1.0
1051 .. versionadded:: 17.2.0 *keep_exc_info* and *keep_stack_info*
1052 .. versionadded:: 19.2.0 *logger*
1053 .. versionadded:: 19.2.0 *pass_foreign_args*
1054 .. versionadded:: 21.3.0 *processors*
1055 .. deprecated:: 21.3.0
1056 *processor* (singular) in favor of *processors* (plural). Removal is not
1057 planned.
1058 .. versionadded:: 23.3.0 *use_get_message*
1059 """
1061 def __init__(
1062 self,
1063 processor: Processor | None = None,
1064 processors: Sequence[Processor] | None = (),
1065 foreign_pre_chain: Sequence[Processor] | None = None,
1066 keep_exc_info: bool = False,
1067 keep_stack_info: bool = False,
1068 logger: logging.Logger | None = None,
1069 pass_foreign_args: bool = False,
1070 use_get_message: bool = True,
1071 *args: Any,
1072 **kwargs: Any,
1073 ) -> None:
1074 fmt = kwargs.pop("fmt", "%(message)s")
1075 super().__init__(*args, fmt=fmt, **kwargs) # type: ignore[misc]
1077 if processor and processors:
1078 msg = (
1079 "The `processor` and `processors` arguments are mutually"
1080 " exclusive."
1081 )
1082 raise TypeError(msg)
1084 self.processors: Sequence[Processor]
1085 if processor is not None:
1086 self.processors = (self.remove_processors_meta, processor)
1087 elif processors:
1088 self.processors = processors
1089 else:
1090 msg = "Either `processor` or `processors` must be passed."
1091 raise TypeError(msg)
1093 self.foreign_pre_chain = foreign_pre_chain
1094 self.keep_exc_info = keep_exc_info
1095 self.keep_stack_info = keep_stack_info
1096 self.logger = logger
1097 self.pass_foreign_args = pass_foreign_args
1098 self.use_get_message = use_get_message
    def format(self, record: logging.LogRecord) -> str:
        """
        Extract *structlog*'s `event_dict` from ``record.msg`` and format it.

        *record* has been patched by `wrap_for_formatter` first though, so the
        type isn't quite right.
        """
        # Make a shallow copy of the record to let other handlers/formatters
        # process the original one.  Everything below mutates only the copy.
        record = logging.makeLogRecord(record.__dict__)

        # `wrap_for_formatter` smuggles the logger and method name onto the
        # record via ``extra``; fall back to sentinel values if they are
        # absent (i.e. the record came from plain `logging`).
        logger = getattr(record, "_logger", _SENTINEL)
        meth_name = getattr(record, "_name", "__structlog_sentinel__")

        ed: ProcessorReturnValue
        if logger is not _SENTINEL and meth_name != "__structlog_sentinel__":
            # Both attached by wrap_for_formatter -- this is a structlog
            # entry whose event dict is sitting in ``record.msg``.
            if self.logger is not None:
                # An explicitly configured logger wins over the one attached
                # to the record.
                logger = self.logger
            meth_name = cast(str, record._name)  # type:ignore[attr-defined]

            # We need to copy because it's possible that the same record gets
            # processed by multiple logging formatters.  LogRecord.getMessage
            # would transform our dict into a str.
            ed = cast(Dict[str, Any], record.msg).copy()
            ed["_record"] = record
            ed["_from_structlog"] = True
        else:
            # Foreign (non-structlog) record: synthesize an event dict from
            # the record's rendered message and level name.
            logger = self.logger
            meth_name = record.levelname.lower()
            ed = {
                "event": (
                    record.getMessage()
                    if self.use_get_message
                    else str(record.msg)
                ),
                "_record": record,
                "_from_structlog": False,
            }

            if self.pass_foreign_args:
                ed["positional_args"] = record.args

            # Clear args so the base Formatter doesn't try to %-format the
            # message a second time with them.
            record.args = ()

            # Add stack-related attributes to the event dict so processors
            # (e.g. exception renderers) can act on them.
            if record.exc_info:
                ed["exc_info"] = record.exc_info
            if record.stack_info:
                ed["stack_info"] = record.stack_info

            # Non-structlog records are allowed to run through a pre-chain to
            # prepare them for the final processors (e.g. adding timestamps
            # and log levels).
            for proc in self.foreign_pre_chain or ():
                ed = cast(EventDict, proc(logger, meth_name, ed))

        # If required, unset stack-related attributes on the record copy so
        # that the base implementation doesn't append stacktraces to the
        # output.
        if not self.keep_exc_info:
            record.exc_text = None
            record.exc_info = None
        if not self.keep_stack_info:
            record.stack_info = None

        # Run the main chain; the last processor must render to a str.
        for p in self.processors:
            ed = p(logger, meth_name, ed)  # type: ignore[arg-type]

        if not isinstance(ed, str):
            warnings.warn(
                "The last processor in ProcessorFormatter.processors must "
                f"return a string, but {self.processors[-1]} returned a "
                f"{type(ed)} instead.",
                category=RuntimeWarning,
                stacklevel=1,
            )
            ed = cast(str, ed)

        # Hand the rendered message back to logging for final output.
        record.msg = ed

        return super().format(record)
1182 @staticmethod
1183 def wrap_for_formatter(
1184 logger: logging.Logger, name: str, event_dict: EventDict
1185 ) -> tuple[tuple[EventDict], dict[str, dict[str, Any]]]:
1186 """
1187 Wrap *logger*, *name*, and *event_dict*.
1189 The result is later unpacked by `ProcessorFormatter` when formatting
1190 log entries.
1192 Use this static method as the renderer (in other words, final
1193 processor) if you want to use `ProcessorFormatter` in your `logging`
1194 configuration.
1195 """
1196 return (event_dict,), {"extra": {"_logger": logger, "_name": name}}
1198 @staticmethod
1199 def remove_processors_meta(
1200 _: WrappedLogger, __: str, event_dict: EventDict
1201 ) -> EventDict:
1202 """
1203 Remove ``_record`` and ``_from_structlog`` from *event_dict*.
1205 These keys are added to the event dictionary, before
1206 `ProcessorFormatter`'s *processors* are run.
1208 .. versionadded:: 21.3.0
1209 """
1210 del event_dict["_record"]
1211 del event_dict["_from_structlog"]
1213 return event_dict