Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.8/site-packages/absl/logging/__init__.py: 60%
436 statements
« prev ^ index » next coverage.py v7.4.0, created at 2024-01-03 07:57 +0000
1# Copyright 2017 The Abseil Authors.
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7# http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
15"""Abseil Python logging module implemented on top of standard logging.
17Simple usage::
19 from absl import logging
21 logging.info('Interesting Stuff')
22 logging.info('Interesting Stuff with Arguments: %d', 42)
24 logging.set_verbosity(logging.INFO)
25 logging.log(logging.DEBUG, 'This will *not* be printed')
26 logging.set_verbosity(logging.DEBUG)
27 logging.log(logging.DEBUG, 'This will be printed')
29 logging.warning('Worrying Stuff')
30 logging.error('Alarming Stuff')
31 logging.fatal('AAAAHHHHH!!!!') # Process exits.
33Usage note: Do not pre-format the strings in your program code.
34Instead, let the logging module perform argument interpolation.
35This saves cycles because strings that don't need to be printed
36are never formatted. Note that this module does not attempt to
37interpolate arguments when no arguments are given. In other words::
39 logging.info('Interesting Stuff: %s')
41does not raise an exception because logging.info() has only one
42argument, the message string.
44"Lazy" evaluation for debugging
45-------------------------------
47If you do something like this::
49 logging.debug('Thing: %s', thing.ExpensiveOp())
51then the ExpensiveOp will be evaluated even if nothing
52is printed to the log. To avoid this, use the level_debug() function::
54 if logging.level_debug():
55 logging.debug('Thing: %s', thing.ExpensiveOp())
57Per file level logging is supported by logging.vlog() and
58logging.vlog_is_on(). For example::
60 if logging.vlog_is_on(2):
61 logging.vlog(2, very_expensive_debug_message())
63Notes on Unicode
64----------------
66The log output is encoded as UTF-8. Don't pass data in other encodings in
67bytes() instances -- instead pass unicode string instances when you need to
68(for both the format string and arguments).
70Note on critical and fatal:
71Standard logging module defines fatal as an alias to critical, but it's not
72documented, and it does NOT actually terminate the program.
73This module only defines fatal but not critical, and it DOES terminate the
74program.
76The differences in behavior are historical and unfortunate.
77"""
79import collections
80from collections import abc
81import getpass
82import io
83import itertools
84import logging
85import os
86import socket
87import struct
88import sys
89import tempfile
90import threading
91import time
92import timeit
93import traceback
94import types
95import warnings
97from absl import flags
98from absl.logging import converter
100# pylint: disable=g-import-not-at-top
101try:
102 from typing import NoReturn
103except ImportError:
104 pass
106# pylint: enable=g-import-not-at-top
FLAGS = flags.FLAGS


# Logging levels, expressed on the absl scale (see absl.logging.converter).
FATAL = converter.ABSL_FATAL
ERROR = converter.ABSL_ERROR
WARNING = converter.ABSL_WARNING
WARN = converter.ABSL_WARNING  # Deprecated name.
INFO = converter.ABSL_INFO
DEBUG = converter.ABSL_DEBUG

# Regex to match/parse log line prefixes.
ABSL_LOGGING_PREFIX_REGEX = (
    r'^(?P<severity>[IWEF])'
    r'(?P<month>\d\d)(?P<day>\d\d) '
    r'(?P<hour>\d\d):(?P<minute>\d\d):(?P<second>\d\d)'
    r'\.(?P<microsecond>\d\d\d\d\d\d) +'
    r'(?P<thread_id>-?\d+) '
    r'(?P<filename>[a-zA-Z<][\w._<>-]+):(?P<line>\d+)')


# Mask to convert integer thread ids to unsigned quantities for logging
# purposes (sized to the platform's C `unsigned long`).
_THREAD_ID_MASK = 2 ** (struct.calcsize('L') * 8) - 1

# Extra property set on the LogRecord created by ABSLLogger when its level is
# CRITICAL/FATAL.
_ABSL_LOG_FATAL = '_absl_log_fatal'
# Extra prefix added to the log message when a non-absl logger logs a
# CRITICAL/FATAL message.
_CRITICAL_PREFIX = 'CRITICAL - '

# Used by findCaller to skip callers from */logging/__init__.py.
_LOGGING_FILE_PREFIX = os.path.join('logging', '__init__.')

# The ABSL logger instance, initialized in _initialize().
_absl_logger = None
# The ABSL handler instance, initialized in _initialize().
_absl_handler = None


# Maps (case-normalized) level names to Abseil C++ numeric level strings.
_CPP_NAME_TO_LEVELS = {
    'debug': '0',  # Abseil C++ has no DEBUG level, mapping it to INFO here.
    'info': '0',
    'warning': '1',
    'warn': '1',
    'error': '2',
    'fatal': '3'
}

# Inverse mapping: Abseil C++ numeric level string to canonical level name.
_CPP_LEVEL_TO_NAMES = {
    '0': 'info',
    '1': 'warning',
    '2': 'error',
    '3': 'fatal',
}
class _VerbosityFlag(flags.Flag):
  """Flag class for -v/--verbosity.

  Setting the flag's value also pushes the new verbosity into the Python
  logging tree via _update_logging_levels().
  """

  def __init__(self, *args, **kwargs):
    super(_VerbosityFlag, self).__init__(
        flags.IntegerParser(),
        flags.ArgumentSerializer(),
        *args, **kwargs)

  @property
  def value(self):
    return self._value

  @value.setter
  def value(self, v):
    # Side effect: any assignment to the flag re-synchronizes logger levels.
    self._value = v
    self._update_logging_levels()

  def _update_logging_levels(self):
    """Updates absl logging levels to the current verbosity.

    Visibility: module-private
    """
    # Before _initialize() runs there is no absl logger to update yet.
    if not _absl_logger:
      return

    if self._value <= converter.ABSL_DEBUG:
      standard_verbosity = converter.absl_to_standard(self._value)
    else:
      # --verbosity is set to higher than 1 for vlog.
      standard_verbosity = logging.DEBUG - (self._value - 1)

    # Also update root level when absl_handler is used.
    if _absl_handler in logging.root.handlers:
      # Make absl logger inherit from the root logger. absl logger might have
      # a non-NOTSET value if logging.set_verbosity() is called at import time.
      _absl_logger.setLevel(logging.NOTSET)
      logging.root.setLevel(standard_verbosity)
    else:
      _absl_logger.setLevel(standard_verbosity)
class _LoggerLevelsFlag(flags.Flag):
  """Flag class for --logger_levels."""

  def __init__(self, *args, **kwargs):
    super(_LoggerLevelsFlag, self).__init__(
        _LoggerLevelsParser(),
        _LoggerLevelsSerializer(),
        *args, **kwargs)

  @property
  def value(self):
    # For lack of an immutable type, be defensive and return a copy.
    # Modifications to the dict aren't supported and won't have any effect.
    # While Py3 could use MappingProxyType, that isn't deepcopy friendly, so
    # just return a copy.
    return self._value.copy()

  @value.setter
  def value(self, v):
    # Normalize a None assignment to an empty mapping so that
    # _update_logger_levels() can iterate unconditionally.
    self._value = {} if v is None else v
    self._update_logger_levels()

  def _update_logger_levels(self):
    # Visibility: module-private.
    # This is called by absl.app.run() during initialization.
    for name, level in self._value.items():
      logging.getLogger(name).setLevel(level)
class _LoggerLevelsParser(flags.ArgumentParser):
  """Parser for --logger_levels flag."""

  def parse(self, value):
    """Parses a `name:level,...` CSV string into an ordered mapping."""
    # A mapping (e.g. the dict default value) is already parsed.
    if isinstance(value, abc.Mapping):
      return value

    # Preserve insertion order so that serialization is deterministic.
    parsed = collections.OrderedDict()
    for entry in value.split(','):
      entry = entry.strip()
      if not entry:
        continue
      logger_name, logger_level = entry.split(':', 1)
      parsed[logger_name.strip()] = logger_level.strip()
    return parsed
255class _LoggerLevelsSerializer(object):
256 """Serializer for --logger_levels flag."""
258 def serialize(self, value):
259 if isinstance(value, str):
260 return value
261 return ','.join(
262 '{}:{}'.format(name, level) for name, level in value.items())
class _StderrthresholdFlag(flags.Flag):
  """Flag class for --stderrthreshold."""

  def __init__(self, *args, **kwargs):
    super(_StderrthresholdFlag, self).__init__(
        flags.ArgumentParser(),
        flags.ArgumentSerializer(),
        *args, **kwargs)

  @property
  def value(self):
    return self._value

  @value.setter
  def value(self, v):
    """Validates and normalizes the threshold to a canonical level name.

    Args:
      v: str, either an Abseil C++ numeric level string ('0'..'3') or a
        case-insensitive level name ('debug', 'info', 'warning'/'warn',
        'error', 'fatal').

    Raises:
      ValueError: Raised when v is neither a valid numeric string nor a
        valid level name.
    """
    # NOTE: the original code also computed an unused `cpp_value = int(...)`
    # in both branches; that dead local has been removed.
    if v in _CPP_LEVEL_TO_NAMES:
      # --stderrthreshold also accepts numeric strings whose values are
      # Abseil C++ log levels; normalize those to level names.
      v = _CPP_LEVEL_TO_NAMES[v]
    elif v.lower() in _CPP_NAME_TO_LEVELS:
      v = v.lower()
      if v == 'warn':
        v = 'warning'  # Use 'warning' as the canonical name.
    else:
      raise ValueError(
          '--stderrthreshold must be one of (case-insensitive) '
          "'debug', 'info', 'warning', 'error', 'fatal', "
          "or '0', '1', '2', '3', not '%s'" % v)

    self._value = v
# Command-line flags controlling absl logging behavior. These mirror the
# flags defined by the Abseil C++ logging library, hence the
# allow_override_cpp/allow_hide_cpp annotations.
LOGTOSTDERR = flags.DEFINE_boolean(
    'logtostderr',
    False,
    'Should only log to stderr?',
    allow_override_cpp=True,
)
ALSOLOGTOSTDERR = flags.DEFINE_boolean(
    'alsologtostderr',
    False,
    'also log to stderr?',
    allow_override_cpp=True,
)
LOG_DIR = flags.DEFINE_string(
    'log_dir',
    os.getenv('TEST_TMPDIR', ''),
    'directory to write logfiles into',
    allow_override_cpp=True,
)
VERBOSITY = flags.DEFINE_flag(
    _VerbosityFlag(
        'verbosity',
        -1,
        (
            'Logging verbosity level. Messages logged at this level or lower'
            ' will be included. Set to 1 for debug logging. If the flag was not'
            ' set or supplied, the value will be changed from the default of -1'
            ' (warning) to 0 (info) after flags are parsed.'
        ),
        short_name='v',
        allow_hide_cpp=True,
    )
)
LOGGER_LEVELS = flags.DEFINE_flag(
    _LoggerLevelsFlag(
        'logger_levels',
        {},
        (
            'Specify log level of loggers. The format is a CSV list of '
            '`name:level`. Where `name` is the logger name used with '
            '`logging.getLogger()`, and `level` is a level name (INFO, DEBUG, '
            'etc). e.g. `myapp.foo:INFO,other.logger:DEBUG`'
        ),
    )
)
STDERRTHRESHOLD = flags.DEFINE_flag(
    _StderrthresholdFlag(
        'stderrthreshold',
        'fatal',
        (
            'log messages at this level, or more severe, to stderr in '
            'addition to the logfile. Possible values are '
            "'debug', 'info', 'warning', 'error', and 'fatal'. "
            'Obsoletes --alsologtostderr. Using --alsologtostderr '
            'cancels the effect of this flag. Please also note that '
            'this flag is subject to --verbosity and requires logfile '
            'not be stderr.'
        ),
        allow_hide_cpp=True,
    )
)
SHOWPREFIXFORINFO = flags.DEFINE_boolean(
    'showprefixforinfo',
    True,
    (
        'If False, do not prepend prefix to info messages '
        "when it's logged to stderr, "
        '--verbosity is set to INFO level, '
        'and python logging is used.'
    ),
)
def get_verbosity():
  """Returns the current logging verbosity (absl level)."""
  # Read via the flag object so the flag's own value property is used.
  return FLAGS['verbosity'].value
def set_verbosity(v):
  """Sets the logging verbosity.

  Causes all messages of level <= v to be logged, and all messages of
  level > v to be silently discarded.

  Args:
    v: int|str, the verbosity level as an integer or string. Legal string
      values are those that can be coerced to an integer as well as
      case-insensitive 'debug', 'info', 'warning', 'error', and 'fatal'.
  """
  try:
    level = int(v)
  except ValueError:
    # Not numeric: treat it as a level name such as 'info'.
    level = converter.ABSL_NAMES[v.upper()]
  FLAGS.verbosity = level
def set_stderrthreshold(s):
  """Sets the stderr threshold to the value passed in.

  Args:
    s: str|int, valid strings values are case-insensitive 'debug',
      'info', 'warning', 'error', and 'fatal'; valid integer values are
      logging.DEBUG|INFO|WARNING|ERROR|FATAL.

  Raises:
    ValueError: Raised when s is an invalid value.
  """
  # Numeric absl level: store its canonical string name.
  if s in converter.ABSL_LEVELS:
    FLAGS.stderrthreshold = converter.ABSL_LEVELS[s]
    return
  # Level name: stored as given (validated case-insensitively).
  if isinstance(s, str) and s.upper() in converter.ABSL_NAMES:
    FLAGS.stderrthreshold = s
    return
  raise ValueError(
      'set_stderrthreshold only accepts integer absl logging level '
      'from -3 to 1, or case-insensitive string values '
      "'debug', 'info', 'warning', 'error', and 'fatal'. "
      'But found "{}" ({}).'.format(s, type(s)))
def fatal(msg, *args, **kwargs):
  # type: (Any, Any, Any) -> NoReturn
  """Logs a message at FATAL severity; the process exits afterwards."""
  log(FATAL, msg, *args, **kwargs)
def error(msg, *args, **kwargs):
  """Logs a message at ERROR severity."""
  log(ERROR, msg, *args, **kwargs)
def warning(msg, *args, **kwargs):
  """Logs a message at WARNING severity."""
  log(WARNING, msg, *args, **kwargs)
def warn(msg, *args, **kwargs):
  """Deprecated, use 'warning' instead."""
  # Emit a DeprecationWarning attributed to the caller (stacklevel 2).
  warnings.warn("The 'warn' function is deprecated, use 'warning' instead",
                DeprecationWarning, 2)
  log(WARNING, msg, *args, **kwargs)
def info(msg, *args, **kwargs):
  """Logs a message at INFO severity."""
  log(INFO, msg, *args, **kwargs)
def debug(msg, *args, **kwargs):
  """Logs a message at DEBUG severity."""
  log(DEBUG, msg, *args, **kwargs)
def exception(msg, *args, exc_info=True, **kwargs):
  """Logs an ERROR message with the current exception's traceback attached."""
  error(msg, *args, exc_info=exc_info, **kwargs)
455# Counter to keep track of number of log entries per token.
456_log_counter_per_token = {}
459def _get_next_log_count_per_token(token):
460 """Wrapper for _log_counter_per_token. Thread-safe.
462 Args:
463 token: The token for which to look up the count.
465 Returns:
466 The number of times this function has been called with
467 *token* as an argument (starting at 0).
468 """
469 # Can't use a defaultdict because defaultdict isn't atomic, whereas
470 # setdefault is.
471 return next(_log_counter_per_token.setdefault(token, itertools.count()))
def log_every_n(level, msg, n, *args):
  """Logs ``msg % args`` at level 'level' once per 'n' times.

  Logs the 1st call, (N+1)st call, (2N+1)st call, etc.
  Not threadsafe.

  Args:
    level: int, the absl logging level at which to log.
    msg: str, the message to be logged.
    n: int, the number of times this should be called before it is logged.
    *args: The args to be substituted into the msg.
  """
  # The call site (file + line) is the counting token.
  caller = get_absl_logger().findCaller()
  count = _get_next_log_count_per_token(caller)
  log_if(level, msg, count % n == 0, *args)
490# Keeps track of the last log time of the given token.
491# Note: must be a dict since set/get is atomic in CPython.
492# Note: entries are never released as their number is expected to be low.
493_log_timer_per_token = {}
496def _seconds_have_elapsed(token, num_seconds):
497 """Tests if 'num_seconds' have passed since 'token' was requested.
499 Not strictly thread-safe - may log with the wrong frequency if called
500 concurrently from multiple threads. Accuracy depends on resolution of
501 'timeit.default_timer()'.
503 Always returns True on the first call for a given 'token'.
505 Args:
506 token: The token for which to look up the count.
507 num_seconds: The number of seconds to test for.
509 Returns:
510 Whether it has been >= 'num_seconds' since 'token' was last requested.
511 """
512 now = timeit.default_timer()
513 then = _log_timer_per_token.get(token, None)
514 if then is None or (now - then) >= num_seconds:
515 _log_timer_per_token[token] = now
516 return True
517 else:
518 return False
def log_every_n_seconds(level, msg, n_seconds, *args):
  """Logs ``msg % args`` at level ``level`` iff ``n_seconds`` elapsed since last call.

  Logs the first call, logs subsequent calls if 'n' seconds have elapsed since
  the last logging call from the same call site (file + line). Not thread-safe.

  Args:
    level: int, the absl logging level at which to log.
    msg: str, the message to be logged.
    n_seconds: float or int, seconds which should elapse before logging again.
    *args: The args to be substituted into the msg.
  """
  # The call site (file + line) is the timing token.
  caller = get_absl_logger().findCaller()
  log_if(level, msg, _seconds_have_elapsed(caller, n_seconds), *args)
def log_first_n(level, msg, n, *args):
  """Logs ``msg % args`` at level ``level`` only first ``n`` times.

  Not threadsafe.

  Args:
    level: int, the absl logging level at which to log.
    msg: str, the message to be logged.
    n: int, the maximal number of times the message is logged.
    *args: The args to be substituted into the msg.
  """
  # The call site (file + line) is the counting token.
  caller = get_absl_logger().findCaller()
  count = _get_next_log_count_per_token(caller)
  log_if(level, msg, count < n, *args)
def log_if(level, msg, condition, *args):
  """Logs ``msg % args`` at level ``level`` only if condition is fulfilled."""
  if not condition:
    return
  log(level, msg, *args)
def log(level, msg, *args, **kwargs):
  """Logs ``msg % args`` at absl logging level ``level``.

  If no args are given just print msg, ignoring any interpolation specifiers.

  Args:
    level: int, the absl logging level at which to log the message
      (logging.DEBUG|INFO|WARNING|ERROR|FATAL). While some C++ verbose logging
      level constants are also supported, callers should prefer explicit
      logging.vlog() calls for such purpose.
    msg: str, the message to be logged.
    *args: The args to be substituted into the msg.
    **kwargs: May contain exc_info to add exception traceback to message.
  """
  if level > converter.ABSL_DEBUG:
    # Levels above DEBUG are vlog levels; users should prefer logging.vlog
    # for those. Map vlog level N onto the standard scale below DEBUG
    # (vlog 1 is equivalent to DEBUG).
    standard_level = converter.STANDARD_DEBUG - (level - 1)
  else:
    # Clamp anything more severe than FATAL up to FATAL before converting.
    standard_level = converter.absl_to_standard(
        max(level, converter.ABSL_FATAL))

  # Match standard logging's behavior. Before use_absl_handler() is called
  # and logging is configured, there is no handler attached on _absl_logger
  # nor logging.root, so logs would go nowhere; basicConfig installs one.
  if not logging.root.handlers:
    logging.basicConfig()

  _absl_logger.log(standard_level, msg, *args, **kwargs)
def vlog(level, msg, *args, **kwargs):
  """Log ``msg % args`` at C++ vlog level ``level``.

  Args:
    level: int, the C++ verbose logging level at which to log the message,
      e.g. 1, 2, 3, 4... While absl level constants are also supported,
      callers should prefer logging.log|debug|info|... calls for such purpose.
    msg: str, the message to be logged.
    *args: The args to be substituted into the msg.
    **kwargs: May contain exc_info to add exception traceback to message.
  """
  # log() understands both vlog levels (> DEBUG) and absl levels.
  log(level, msg, *args, **kwargs)
def vlog_is_on(level):
  """Checks if vlog is enabled for the given level in caller's source file.

  Args:
    level: int, the C++ verbose logging level at which to log the message,
      e.g. 1, 2, 3, 4... While absl level constants are also supported,
      callers should prefer level_debug|level_info|... calls for
      checking those.

  Returns:
    True if logging is turned on for that level.
  """
  if level > converter.ABSL_DEBUG:
    # Levels above DEBUG are vlog levels; users should prefer logging.vlog
    # for those. Map vlog level N onto the standard scale below DEBUG
    # (vlog 1 is equivalent to DEBUG).
    standard_level = converter.STANDARD_DEBUG - (level - 1)
  else:
    # Clamp anything more severe than FATAL up to FATAL before converting.
    standard_level = converter.absl_to_standard(
        max(level, converter.ABSL_FATAL))
  return _absl_logger.isEnabledFor(standard_level)
def flush():
  """Flushes all log files managed by the absl handler."""
  get_absl_handler().flush()
def level_debug():
  """Returns True when the current verbosity admits DEBUG messages."""
  return get_verbosity() >= DEBUG
def level_info():
  """Returns True when the current verbosity admits INFO messages."""
  return get_verbosity() >= INFO
def level_warning():
  """Returns True when the current verbosity admits WARNING messages."""
  return get_verbosity() >= WARNING
# Deprecated alias kept for backward compatibility; use level_warning.
level_warn = level_warning  # Deprecated function.
def level_error():
  """Returns True when the current verbosity admits ERROR messages."""
  return get_verbosity() >= ERROR
def get_log_file_name(level=INFO):
  """Returns the name of the log file.

  For Python logging, only one file is used and level is ignored. And it
  returns empty string if it logs to stderr/stdout or the log stream has no
  `name` attribute.

  Args:
    level: int, the absl.logging level.

  Raises:
    ValueError: Raised when `level` has an invalid value.
  """
  if level not in converter.ABSL_LEVELS:
    raise ValueError('Invalid absl.logging level {}'.format(level))
  stream = get_absl_handler().python_handler.stream
  # stderr/stdout and nameless streams have no meaningful file name.
  if stream == sys.stderr or stream == sys.stdout:
    return ''
  if not hasattr(stream, 'name'):
    return ''
  return stream.name
def find_log_dir_and_names(program_name=None, log_dir=None):
  """Computes the directory and filename prefix for log file.

  Args:
    program_name: str|None, the filename part of the path to the program that
      is running without its extension. e.g: if your program is called
      ``usr/bin/foobar.py`` this method should probably be called with
      ``program_name='foobar`` However, this is just a convention, you can
      pass in any string you want, and it will be used as part of the
      log filename. If you don't pass in anything, the default behavior
      is as described in the example. In python standard logging mode,
      the program_name will be prepended with ``py_`` if it is the
      ``program_name`` argument is omitted.
    log_dir: str|None, the desired log directory.

  Returns:
    (log_dir, file_prefix, symlink_prefix)

  Raises:
    FileNotFoundError: raised in Python 3 when it cannot find a log directory.
    OSError: raised in Python 2 when it cannot find a log directory.
  """
  if not program_name:
    # Strip the extension (foobar.par becomes foobar, and fubar.py becomes
    # fubar) so the log file names resemble C++ log file names, then prepend
    # py_ so that python gets a unique file and C++ libraries do not try to
    # write to the same log files as us.
    base = os.path.basename(sys.argv[0])
    program_name = 'py_%s' % os.path.splitext(base)[0]

  actual_log_dir = find_log_dir(log_dir=log_dir)

  try:
    username = getpass.getuser()
  except KeyError:
    # This can happen, e.g. when running under docker w/o passwd file.
    # Windows doesn't have os.getuid, hence the hasattr check.
    username = str(os.getuid()) if hasattr(os, 'getuid') else 'unknown'
  hostname = socket.gethostname()
  file_prefix = '%s.%s.%s.log' % (program_name, hostname, username)

  return actual_log_dir, file_prefix, program_name
def find_log_dir(log_dir=None):
  """Returns the most suitable directory to put log files into.

  Args:
    log_dir: str|None, if specified, the logfile(s) will be created in that
      directory. Otherwise if the --log_dir command-line flag is provided,
      the logfile will be created in that directory. Otherwise the logfile
      will be created in a standard location.

  Raises:
    FileNotFoundError: raised in Python 3 when it cannot find a log directory.
    OSError: raised in Python 2 when it cannot find a log directory.
  """
  # Candidate directories, tried in order.
  # NOTE: Google's internal implementation has a special handling for Google
  # machines, which uses a list of directories. Hence the plural here.
  if log_dir:
    # log_dir was explicitly specified as an arg, so use it and it alone.
    candidates = [log_dir]
  elif FLAGS['log_dir'].value:
    # log_dir flag was provided, so use it and it alone (this mimics the
    # behavior of the same flag in logging.cc).
    candidates = [FLAGS['log_dir'].value]
  else:
    candidates = [tempfile.gettempdir()]

  # Return the first usable (existing, writable) directory.
  for candidate in candidates:
    if os.path.isdir(candidate) and os.access(candidate, os.W_OK):
      return candidate
  raise FileNotFoundError(
      "Can't find a writable directory for logs, tried %s" % candidates)
def get_absl_log_prefix(record):
  """Returns the absl log prefix for the log record.

  Args:
    record: logging.LogRecord, the record to get prefix for.
  """
  created = time.localtime(record.created)
  # Fractional part of the record timestamp, expressed in microseconds.
  microseconds = int(record.created % 1.0 * 1e6)

  critical_prefix = ''
  level = record.levelno
  if _is_non_absl_fatal_record(record):
    # A FATAL record not logged through absl is shown with ERROR severity in
    # the prefix and tagged with an explicit CRITICAL marker instead.
    level = logging.ERROR
    critical_prefix = _CRITICAL_PREFIX
  severity = converter.get_initial_for_level(level)

  return '%c%02d%02d %02d:%02d:%02d.%06d %5d %s:%d] %s' % (
      severity,
      created.tm_mon,
      created.tm_mday,
      created.tm_hour,
      created.tm_min,
      created.tm_sec,
      microseconds,
      _get_thread_id(),
      record.filename,
      record.lineno,
      critical_prefix)
def skip_log_prefix(func):
  """Skips reporting the prefix of a given function or name by :class:`~absl.logging.ABSLLogger`.

  This is a convenience wrapper function / decorator for
  :meth:`~absl.logging.ABSLLogger.register_frame_to_skip`.

  If a callable function is provided, only that function will be skipped.
  If a function name is provided, all functions with the same name in the
  file that this is called in will be skipped.

  This can be used as a decorator of the intended function to be skipped.

  Args:
    func: Callable function or its name as a string.

  Returns:
    func (the input, unchanged).

  Raises:
    ValueError: The input is callable but does not have a function code object.
    TypeError: The input is neither callable nor a string.
  """
  if callable(func):
    code = getattr(func, '__code__', None)
    if code is None:
      raise ValueError('Input callable does not have a function code object.')
    file_name = code.co_filename
    func_name = code.co_name
    func_lineno = code.co_firstlineno
  elif isinstance(func, str):
    # Skip by name: applies to all functions of that name in the caller's
    # file, so no specific line number is recorded.
    file_name = get_absl_logger().findCaller()[0]
    func_name = func
    func_lineno = None
  else:
    raise TypeError('Input is neither callable nor a string.')
  ABSLLogger.register_frame_to_skip(file_name, func_name, func_lineno)
  return func
def _is_non_absl_fatal_record(log_record):
  """True for FATAL records that lack ABSLLogger's fatal marker."""
  is_fatal = log_record.levelno >= logging.FATAL
  return is_fatal and not log_record.__dict__.get(_ABSL_LOG_FATAL, False)
def _is_absl_fatal_record(log_record):
  """True for FATAL records that carry ABSLLogger's fatal marker."""
  is_fatal = log_record.levelno >= logging.FATAL
  return is_fatal and log_record.__dict__.get(_ABSL_LOG_FATAL, False)
# Indicates if we still need to warn about pre-init logs going to stderr.
# Cleared by PythonHandler.emit after the warning is written once.
_warn_preinit_stderr = True
class PythonHandler(logging.StreamHandler):
  """The handler class used by Abseil Python logging implementation."""

  def __init__(self, stream=None, formatter=None):
    super(PythonHandler, self).__init__(stream)
    # Default to the absl prefix-aware formatter when none is supplied.
    self.setFormatter(formatter or PythonFormatter())

  def start_logging_to_file(self, program_name=None, log_dir=None):
    """Starts logging messages to files instead of standard error."""
    FLAGS.logtostderr = False

    actual_log_dir, file_prefix, symlink_prefix = find_log_dir_and_names(
        program_name=program_name, log_dir=log_dir)

    # File name pattern: <prefix>.INFO.<timestamp>.<pid>.
    basename = '%s.INFO.%s.%d' % (
        file_prefix,
        time.strftime('%Y%m%d-%H%M%S', time.localtime(time.time())),
        os.getpid())
    filename = os.path.join(actual_log_dir, basename)

    # Open in append mode so restarts do not clobber an existing file.
    self.stream = open(filename, 'a', encoding='utf-8')

    # os.symlink is not available on Windows Python 2.
    if getattr(os, 'symlink', None):
      # Create a symlink to the log file with a canonical name.
      symlink = os.path.join(actual_log_dir, symlink_prefix + '.INFO')
      try:
        if os.path.islink(symlink):
          os.unlink(symlink)
        os.symlink(os.path.basename(filename), symlink)
      except EnvironmentError:
        # If it fails, we're sad but it's no error. Commonly, this
        # fails because the symlink was created by another user and so
        # we can't modify it
        pass

  def use_absl_log_file(self, program_name=None, log_dir=None):
    """Conditionally logs to files, based on --logtostderr."""
    if FLAGS['logtostderr'].value:
      self.stream = sys.stderr
    else:
      self.start_logging_to_file(program_name=program_name, log_dir=log_dir)

  def flush(self):
    """Flushes all log files."""
    # Serialize against concurrent emit()/close() on this handler.
    self.acquire()
    try:
      if self.stream and hasattr(self.stream, 'flush'):
        self.stream.flush()
    except (EnvironmentError, ValueError):
      # A ValueError is thrown if we try to flush a closed file.
      pass
    finally:
      self.release()

  def _log_to_stderr(self, record):
    """Emits the record to stderr.

    This temporarily sets the handler stream to stderr, calls
    StreamHandler.emit, then reverts the stream back.

    Args:
      record: logging.LogRecord, the record to log.
    """
    # emit() is protected by a lock in logging.Handler, so we don't need to
    # protect here again.
    old_stream = self.stream
    self.stream = sys.stderr
    try:
      super(PythonHandler, self).emit(record)
    finally:
      # Restore the original stream even if emit raised.
      self.stream = old_stream

  def emit(self, record):
    """Prints a record out to some streams.

    1. If ``FLAGS.logtostderr`` is set, it will print to ``sys.stderr`` ONLY.
    2. If ``FLAGS.alsologtostderr`` is set, it will print to ``sys.stderr``.
    3. If ``FLAGS.logtostderr`` is not set, it will log to the stream
       associated with the current thread.

    Args:
      record: :class:`logging.LogRecord`, the record to emit.
    """
    # People occasionally call logging functions at import time before
    # our flags may have even been defined yet, let alone even parsed, as we
    # rely on the C++ side to define some flags for us and app init to
    # deal with parsing. Match the C++ library behavior of notify and emit
    # such messages to stderr. It encourages people to clean-up and does
    # not hide the message.
    level = record.levelno
    if not FLAGS.is_parsed():  # Also implies "before flag has been defined".
      global _warn_preinit_stderr
      if _warn_preinit_stderr:
        sys.stderr.write(
            'WARNING: Logging before flag parsing goes to stderr.\n')
        _warn_preinit_stderr = False
      self._log_to_stderr(record)
    elif FLAGS['logtostderr'].value:
      self._log_to_stderr(record)
    else:
      super(PythonHandler, self).emit(record)
      stderr_threshold = converter.string_to_standard(
          FLAGS['stderrthreshold'].value)
      # Duplicate to stderr when requested (or severe enough), unless this
      # handler is already writing to stderr.
      if ((FLAGS['alsologtostderr'].value or level >= stderr_threshold) and
          self.stream != sys.stderr):
        self._log_to_stderr(record)
    # Die when the record is created from ABSLLogger and level is FATAL.
    if _is_absl_fatal_record(record):
      self.flush()  # Flush the log before dying.

      # In threaded python, sys.exit() from a non-main thread only
      # exits the thread in question.
      os.abort()

  def close(self):
    """Closes the stream to which we are writing."""
    self.acquire()
    try:
      self.flush()
      try:
        # Do not close the stream if it's sys.stderr|stdout. They may be
        # redirected or overridden to files, which should be managed by users
        # explicitly.
        user_managed = sys.stderr, sys.stdout, sys.__stderr__, sys.__stdout__
        if self.stream not in user_managed and (
            not hasattr(self.stream, 'isatty') or not self.stream.isatty()):
          self.stream.close()
      except ValueError:
        # A ValueError is thrown if we try to run isatty() on a closed file.
        pass
      super(PythonHandler, self).close()
    finally:
      self.release()
class ABSLHandler(logging.Handler):
  """Abseil Python logging module's log handler.

  A thin dispatcher: all real work is delegated to the currently active
  handler (the Python handler by default).
  """

  def __init__(self, python_logging_formatter):
    super(ABSLHandler, self).__init__()

    self._python_handler = PythonHandler(formatter=python_logging_formatter)
    self.activate_python_handler()

  def format(self, record):
    return self._current_handler.format(record)

  def setFormatter(self, fmt):
    self._current_handler.setFormatter(fmt)

  def emit(self, record):
    self._current_handler.emit(record)

  def flush(self):
    self._current_handler.flush()

  def close(self):
    super(ABSLHandler, self).close()
    self._current_handler.close()

  def handle(self, record):
    # Apply this handler's own filters before delegating.
    rv = self.filter(record)
    if not rv:
      return rv
    return self._current_handler.handle(record)

  @property
  def python_handler(self):
    return self._python_handler

  def activate_python_handler(self):
    """Uses the Python logging handler as the current logging handler."""
    self._current_handler = self._python_handler

  def use_absl_log_file(self, program_name=None, log_dir=None):
    self._current_handler.use_absl_log_file(program_name, log_dir)

  def start_logging_to_file(self, program_name=None, log_dir=None):
    self._current_handler.start_logging_to_file(program_name, log_dir)
class PythonFormatter(logging.Formatter):
  """Formatter class used by :class:`~absl.logging.PythonHandler`."""

  def format(self, record):
    """Formats the record, prepending the absl log prefix when appropriate.

    Args:
      record: logging.LogRecord, the record to be formatted.

    Returns:
      The formatted string representing the record.
    """
    # The prefix is omitted only for plain INFO records headed to stderr
    # while running at the default (ABSL_INFO) verbosity with
    # --showprefixforinfo disabled; all other records keep the prefix.
    omit_prefix = (
        not FLAGS['showprefixforinfo'].value
        and FLAGS['verbosity'].value == converter.ABSL_INFO
        and record.levelno == logging.INFO
        and _absl_handler.python_handler.stream == sys.stderr)
    prefix = '' if omit_prefix else get_absl_log_prefix(record)
    return prefix + super(PythonFormatter, self).format(record)
class ABSLLogger(logging.getLoggerClass()):
  """A logger that will create LogRecords while skipping some stack frames.

  This class maintains an internal list of filenames and method names
  for use when determining who called the currently executing stack
  frame. Any method names from specific source files are skipped when
  walking backwards through the stack.

  Client code should use the register_frame_to_skip method to let the
  ABSLLogger know which method from which file should be
  excluded from the walk backwards through the stack.
  """

  # Class-level registry consulted by findCaller. Entries are either
  # (filename, function_name) or (filename, function_name, first_lineno)
  # tuples; see register_frame_to_skip below.
  _frames_to_skip = set()

  def findCaller(self, stack_info=False, stacklevel=1):
    """Finds the frame of the calling method on the stack.

    This method skips any frames registered with the
    ABSLLogger and any methods from this file, and whatever
    method is currently being used to generate the prefix for the log
    line. Then it returns the file name, line number, and method name
    of the calling method. An optional fourth item may be returned,
    callers who only need things from the first three are advised to
    always slice or index the result rather than using direct unpacking
    assignment.

    Args:
      stack_info: bool, when True, include the stack trace as a fourth item
        returned. On Python 3 there are always four items returned - the
        fourth will be None when this is False. On Python 2 the stdlib
        base class API only returns three items. We do the same when this
        new parameter is unspecified or False for compatibility.
      stacklevel: int, accepted for signature compatibility with the stdlib
        ``Logger.findCaller``; not consulted by this implementation.

    Returns:
      (filename, lineno, methodname[, sinfo]) of the calling method, or
      None (implicitly) if every frame on the stack is skipped.
    """
    f_to_skip = ABSLLogger._frames_to_skip
    # Use sys._getframe(2) instead of logging.currentframe(), it's slightly
    # faster because there is one less frame to traverse.
    frame = sys._getframe(2)  # pylint: disable=protected-access

    while frame:
      code = frame.f_code
      # Accept the first frame that is neither inside the logging package
      # itself nor registered for skipping (with or without a line number).
      if (_LOGGING_FILE_PREFIX not in code.co_filename and
          (code.co_filename, code.co_name,
           code.co_firstlineno) not in f_to_skip and
          (code.co_filename, code.co_name) not in f_to_skip):
        sinfo = None
        if stack_info:
          out = io.StringIO()
          out.write(u'Stack (most recent call last):\n')
          traceback.print_stack(frame, file=out)
          sinfo = out.getvalue().rstrip(u'\n')
        return (code.co_filename, frame.f_lineno, code.co_name, sinfo)
      frame = frame.f_back

  def critical(self, msg, *args, **kwargs):
    """Logs ``msg % args`` with severity ``CRITICAL``."""
    self.log(logging.CRITICAL, msg, *args, **kwargs)

  def fatal(self, msg, *args, **kwargs):
    """Logs ``msg % args`` with severity ``FATAL``."""
    self.log(logging.FATAL, msg, *args, **kwargs)

  def error(self, msg, *args, **kwargs):
    """Logs ``msg % args`` with severity ``ERROR``."""
    self.log(logging.ERROR, msg, *args, **kwargs)

  def warn(self, msg, *args, **kwargs):
    """Logs ``msg % args`` with severity ``WARN``.

    Deprecated in favor of ``warning``; emits a DeprecationWarning attributed
    to the caller (stacklevel 2) before logging.
    """
    warnings.warn("The 'warn' method is deprecated, use 'warning' instead",
                  DeprecationWarning, 2)
    self.log(logging.WARN, msg, *args, **kwargs)

  def warning(self, msg, *args, **kwargs):
    """Logs ``msg % args`` with severity ``WARNING``."""
    self.log(logging.WARNING, msg, *args, **kwargs)

  def info(self, msg, *args, **kwargs):
    """Logs ``msg % args`` with severity ``INFO``."""
    self.log(logging.INFO, msg, *args, **kwargs)

  def debug(self, msg, *args, **kwargs):
    """Logs ``msg % args`` with severity ``DEBUG``."""
    self.log(logging.DEBUG, msg, *args, **kwargs)

  def log(self, level, msg, *args, **kwargs):
    """Logs a message at a certain level substituting in the supplied arguments.

    This method behaves differently in python and c++ modes.

    Args:
      level: int, the standard logging level at which to log the message.
      msg: str, the text of the message to log.
      *args: The arguments to substitute in the message.
      **kwargs: The keyword arguments to substitute in the message.
    """
    if level >= logging.FATAL:
      # Add property to the LogRecord created by this logger.
      # This will be used by the ABSLHandler to determine whether it should
      # treat CRITICAL/FATAL logs as really FATAL.
      extra = kwargs.setdefault('extra', {})
      extra[_ABSL_LOG_FATAL] = True
    super(ABSLLogger, self).log(level, msg, *args, **kwargs)

  def handle(self, record):
    """Calls handlers without checking ``Logger.disabled``.

    Non-root loggers are set to disabled after setup with :func:`logging.config`
    if it's not explicitly specified. Historically, absl logging will not be
    disabled by that. To maintain this behavior, this function skips
    checking the ``Logger.disabled`` bit.

    This logger can still be disabled by adding a filter that filters out
    everything.

    Args:
      record: logging.LogRecord, the record to handle.
    """
    if self.filter(record):
      self.callHandlers(record)

  @classmethod
  def register_frame_to_skip(cls, file_name, function_name, line_number=None):
    """Registers a function name to skip when walking the stack.

    The :class:`~absl.logging.ABSLLogger` sometimes skips method calls on the
    stack to make the log messages meaningful in their appropriate context.
    This method registers a function from a particular file as one
    which should be skipped.

    Args:
      file_name: str, the name of the file that contains the function.
      function_name: str, the name of the function to skip.
      line_number: int, if provided, only the function with this starting line
        number will be skipped. Otherwise, all functions with the same name
        in the file will be skipped.
    """
    if line_number is not None:
      cls._frames_to_skip.add((file_name, function_name, line_number))
    else:
      cls._frames_to_skip.add((file_name, function_name))
def _get_thread_id():
  """Returns the current thread's id for logging as an unsigned quantity.

  If pywrapbase is linked, returns GetTID() for the thread ID to be
  consistent with C++ logging. Otherwise, returns the numeric thread id.
  The quantities are made unsigned by masking with 2*sys.maxint + 1.

  Returns:
    Thread ID unique to this process (unsigned)
  """
  # Mask off the sign so the id always logs as an unsigned value.
  return threading.get_ident() & _THREAD_ID_MASK
def get_absl_logger():
  """Returns the absl logger instance."""
  logger = _absl_logger
  # _initialize() runs at module import time, so this is never None here.
  assert logger is not None
  return logger
def get_absl_handler():
  """Returns the absl handler instance."""
  handler = _absl_handler
  # _initialize() runs at module import time, so this is never None here.
  assert handler is not None
  return handler
def use_python_logging(quiet=False):
  """Uses the python implementation of the logging code.

  Args:
    quiet: No logging message about switching logging type.
  """
  get_absl_handler().activate_python_handler()
  if quiet:
    return
  info('Restoring pure python logging')
# Tracks whether use_absl_handler() has already attempted to remove
# pre-existing stderr StreamHandlers from the root logger; the removal is
# only tried once per process.
_attempted_to_remove_stderr_stream_handlers = False
def use_absl_handler():
  """Uses the ABSL logging handler for logging.

  This method is called in :func:`app.run()<absl.app.run>` so the absl handler
  is used in absl apps.
  """
  global _attempted_to_remove_stderr_stream_handlers
  if not _attempted_to_remove_stderr_stream_handlers:
    # The absl handler writes to stderr by default, so any StreamHandler
    # already attached to the root logger and pointed at stderr (most
    # commonly added by a logging.info/debug call made before
    # use_absl_handler()) would double-log. Best effort: drop them once.
    stderr_handlers = [
        handler for handler in logging.root.handlers
        if (isinstance(handler, logging.StreamHandler) and
            handler.stream == sys.stderr)
    ]
    for stderr_handler in stderr_handlers:
      logging.root.removeHandler(stderr_handler)
    _attempted_to_remove_stderr_stream_handlers = True

  absl_handler = get_absl_handler()
  if absl_handler not in logging.root.handlers:
    logging.root.addHandler(absl_handler)
    # Re-apply the flag-controlled verbosity and per-logger levels now that
    # the absl handler is installed.
    FLAGS['verbosity']._update_logging_levels()  # pylint: disable=protected-access
    FLAGS['logger_levels']._update_logger_levels()  # pylint: disable=protected-access
def _initialize():
  """Initializes loggers and handlers."""
  global _absl_logger, _absl_handler

  # Idempotent: a second call is a no-op once the logger exists.
  if _absl_logger:
    return

  # Swap the logger class only around the creation of the 'absl' logger so
  # loggers created elsewhere keep the previously-configured class.
  saved_logger_class = logging.getLoggerClass()
  logging.setLoggerClass(ABSLLogger)
  _absl_logger = logging.getLogger('absl')
  logging.setLoggerClass(saved_logger_class)

  _absl_handler = ABSLHandler(PythonFormatter())
# Create the absl logger and handler eagerly at import time so that logging
# calls made before use_absl_handler()/app.run() already have them available.
_initialize()