API refactor
All checks were successful
continuous-integration/drone/push Build is passing

This commit is contained in:
2025-10-07 16:25:52 +09:00
parent 76d0d86211
commit 91c7e04474
1171 changed files with 81940 additions and 44117 deletions

View File

@@ -29,7 +29,7 @@ from structlog._config import (
wrap_logger,
)
from structlog._generic import BoundLogger
from structlog._log_levels import make_filtering_bound_logger
from structlog._native import make_filtering_bound_logger
from structlog._output import (
BytesLogger,
BytesLoggerFactory,
@@ -61,23 +61,25 @@ __all__ = [
"BoundLoggerBase",
"BytesLogger",
"BytesLoggerFactory",
"configure_once",
"DropEvent",
"PrintLogger",
"PrintLoggerFactory",
"ReturnLogger",
"ReturnLoggerFactory",
"WriteLogger",
"WriteLoggerFactory",
"configure",
"configure_once",
"contextvars",
"dev",
"DropEvent",
"getLogger",
"get_config",
"get_context",
"get_logger",
"getLogger",
"is_configured",
"make_filtering_bound_logger",
"PrintLogger",
"PrintLoggerFactory",
"processors",
"reset_defaults",
"ReturnLogger",
"ReturnLoggerFactory",
"stdlib",
"testing",
"threadlocal",
@@ -86,8 +88,6 @@ __all__ = [
"types",
"typing",
"wrap_logger",
"WriteLogger",
"WriteLoggerFactory",
]

View File

@@ -9,6 +9,8 @@ Logger wrapper and helper class.
from __future__ import annotations
import sys
from typing import Any, Iterable, Mapping, Sequence
from structlog.exceptions import DropEvent
@@ -16,6 +18,12 @@ from structlog.exceptions import DropEvent
from .typing import BindableLogger, Context, Processor, WrappedLogger
if sys.version_info >= (3, 11):
from typing import Self
else:
from typing_extensions import Self
class BoundLoggerBase:
"""
Immutable context carrier.
@@ -51,9 +59,7 @@ class BoundLoggerBase:
self._context = context
def __repr__(self) -> str:
return "<{}(context={!r}, processors={!r})>".format(
self.__class__.__name__, self._context, self._processors
)
return f"<{self.__class__.__name__}(context={self._context!r}, processors={self._processors!r})>"
def __eq__(self, other: object) -> bool:
try:
@@ -64,7 +70,7 @@ class BoundLoggerBase:
def __ne__(self, other: object) -> bool:
return not self.__eq__(other)
def bind(self, **new_values: Any) -> BoundLoggerBase:
def bind(self, **new_values: Any) -> Self:
"""
Return a new logger with *new_values* added to the existing ones.
"""
@@ -74,12 +80,11 @@ class BoundLoggerBase:
self._context.__class__(self._context, **new_values),
)
def unbind(self, *keys: str) -> BoundLoggerBase:
def unbind(self, *keys: str) -> Self:
"""
Return a new logger with *keys* removed from the context.
Raises:
KeyError: If the key is not part of the context.
"""
bl = self.bind()
@@ -88,7 +93,7 @@ class BoundLoggerBase:
return bl
def try_unbind(self, *keys: str) -> BoundLoggerBase:
def try_unbind(self, *keys: str) -> Self:
"""
Like :meth:`unbind`, but best effort: missing keys are ignored.
@@ -100,13 +105,13 @@ class BoundLoggerBase:
return bl
def new(self, **new_values: Any) -> BoundLoggerBase:
def new(self, **new_values: Any) -> Self:
"""
Clear context and binds *new_values* using `bind`.
Only necessary with dict implementations that keep global state like
those wrapped by `structlog.threadlocal.wrap_dict` when threads
are re-used.
are reused.
"""
self._context.clear()
@@ -123,8 +128,7 @@ class BoundLoggerBase:
Call it to combine your *event* and *context* into an event_dict and
process using the processor chain.
Arguments:
Args:
method_name:
The name of the logger method. Is passed into the processors.
@@ -137,7 +141,6 @@ class BoundLoggerBase:
*event_kw* ``{"bar": 42}``.
Raises:
structlog.DropEvent: if log entry should be dropped.
ValueError:
@@ -148,7 +151,6 @@ class BoundLoggerBase:
`tuple` of ``(*args, **kw)``
.. note::
Despite the leading underscore, it is available to custom wrapper classes.
See also `custom-wrappers`.
@@ -176,7 +178,7 @@ class BoundLoggerBase:
if isinstance(event_dict, tuple):
# In this case we assume that the last processor returned a tuple
# of ``(args, kwargs)`` and pass it right through.
return event_dict # type: ignore[return-value]
return event_dict
if isinstance(event_dict, dict):
return (), event_dict
@@ -197,8 +199,7 @@ class BoundLoggerBase:
handling :exc:`structlog.DropEvent`, and finally calls *method_name* on
:attr:`_logger` with the result.
Arguments:
Args:
method_name:
The name of the method that's going to get called. Technically
it should be identical to the method the user called because it
@@ -213,7 +214,6 @@ class BoundLoggerBase:
*event_kw* ``{"bar": 42}``.
.. note::
Despite the leading underscore, it is available to custom wrapper classes.
See also `custom-wrappers`.
@@ -232,12 +232,10 @@ def get_context(bound_logger: BindableLogger) -> Context:
The type of *bound_logger* and the type returned depend on your
configuration.
Arguments:
Args:
bound_logger: The bound logger whose context you want.
Returns:
The *actual* context from *bound_logger*. It is *not* copied first.
.. versionadded:: 20.2.0

View File

@@ -15,7 +15,7 @@ import warnings
from typing import Any, Callable, Iterable, Sequence, Type, cast
from ._log_levels import make_filtering_bound_logger
from ._native import make_filtering_bound_logger
from ._output import PrintLoggerFactory
from .contextvars import merge_contextvars
from .dev import ConsoleRenderer, _has_colors, set_exc_info
@@ -63,9 +63,9 @@ class _Configuration:
default_processors: Iterable[Processor] = _BUILTIN_DEFAULT_PROCESSORS[:]
default_context_class: type[Context] = _BUILTIN_DEFAULT_CONTEXT_CLASS
default_wrapper_class: Any = _BUILTIN_DEFAULT_WRAPPER_CLASS
logger_factory: Callable[
..., WrappedLogger
] = _BUILTIN_DEFAULT_LOGGER_FACTORY
logger_factory: Callable[..., WrappedLogger] = (
_BUILTIN_DEFAULT_LOGGER_FACTORY
)
cache_logger_on_first_use: bool = _BUILTIN_CACHE_LOGGER_ON_FIRST_USE
@@ -114,8 +114,7 @@ def get_logger(*args: Any, **initial_values: Any) -> Any:
>>> log.info("hello", x=42)
y=23 x=42 event='hello'
Arguments:
Args:
args:
*Optional* positional arguments that are passed unmodified to the
logger factory. Therefore it depends on the factory what they
@@ -124,7 +123,6 @@ def get_logger(*args: Any, **initial_values: Any) -> Any:
initial_values: Values that are used to pre-populate your contexts.
Returns:
A proxy that creates a correctly configured bound logger when
necessary. The type of that bound logger depends on your configuration
and is `structlog.BoundLogger` by default.
@@ -169,8 +167,7 @@ def wrap_logger(
In other words: selective overwriting of the defaults while keeping some
*is* possible.
Arguments:
Args:
initial_values: Values that are used to pre-populate your contexts.
logger_factory_args:
@@ -178,7 +175,6 @@ def wrap_logger(
the logger factory if not `None`.
Returns:
A proxy that creates a correctly configured bound logger when
necessary.
@@ -217,8 +213,7 @@ def configure(
Use `reset_defaults` to undo your changes.
Arguments:
Args:
processors: The processor chain. See :doc:`processors` for details.
wrapper_class:
@@ -269,7 +264,6 @@ def configure_once(
`configure_once` before.
Raises:
RuntimeWarning: if repeated configuration is attempted.
"""
if not _CONFIG.is_configured:
@@ -315,6 +309,11 @@ class BoundLoggerLazyProxy:
.. versionchanged:: 0.4.0 Added support for *logger_factory_args*.
"""
    # fulfill BindableLogger protocol without carrying accidental state
    @property
    def _context(self) -> dict[str, str]:
        # The proxy has no bound logger yet, so its "context" is simply the
        # initial values it was created with; a real context dict is only
        # assembled once the proxy materializes a bound logger.
        return self._initial_values
def __init__(
self,
logger: WrappedLogger | None,
@@ -364,7 +363,9 @@ class BoundLoggerLazyProxy:
# Looks like Protocols ignore definitions of __init__ so we have to
# silence Mypy here.
logger = cls(
_logger, processors=procs, context=ctx # type: ignore[call-arg]
_logger,
processors=procs,
context=ctx, # type: ignore[call-arg]
)
def finalized_bind(**new_values: Any) -> BindableLogger:

View File

@@ -10,7 +10,9 @@ import traceback
from io import StringIO
from types import FrameType
from typing import Callable
from .contextvars import _ASYNC_CALLING_STACK
from .typing import ExcInfo
@@ -20,9 +22,6 @@ def _format_exception(exc_info: ExcInfo) -> str:
Shamelessly stolen from stdlib's logging module.
"""
if exc_info == (None, None, None): # type: ignore[comparison-overlap]
return "MISSING"
sio = StringIO()
traceback.print_exception(exc_info[0], exc_info[1], exc_info[2], None, sio)
@@ -36,23 +35,27 @@ def _format_exception(exc_info: ExcInfo) -> str:
def _find_first_app_frame_and_name(
additional_ignores: list[str] | None = None,
*,
_getframe: Callable[[], FrameType] = sys._getframe,
) -> tuple[FrameType, str]:
"""
Remove all intra-structlog calls and return the relevant app frame.
Arguments:
Args:
additional_ignores:
Additional names with which the first frame must not start.
Returns:
_getframe:
Callable to find current frame. Only for testing to avoid
monkeypatching of sys._getframe.
Returns:
tuple of (frame, name)
"""
ignores = ["structlog"] + (additional_ignores or [])
f = sys._getframe()
ignores = tuple(["structlog"] + (additional_ignores or []))
f = _ASYNC_CALLING_STACK.get(_getframe())
name = f.f_globals.get("__name__") or "?"
while any(tuple(name.startswith(i) for i in ignores)):
while name.startswith(ignores):
if f.f_back is None:
name = "?"
break

View File

@@ -9,15 +9,9 @@ Extracted log level data used by both stdlib and native log level filters.
from __future__ import annotations
import asyncio
import contextvars
import logging
import sys
from typing import Any
from typing import Any, Callable
from ._base import BoundLoggerBase
from .typing import EventDict, FilteringBoundLogger
from .typing import EventDict
# Adapted from the stdlib
@@ -30,7 +24,7 @@ INFO = 20
DEBUG = 10
NOTSET = 0
_NAME_TO_LEVEL = {
NAME_TO_LEVEL = {
"critical": CRITICAL,
"exception": ERROR,
"error": ERROR,
@@ -41,15 +35,31 @@ _NAME_TO_LEVEL = {
"notset": NOTSET,
}
_LEVEL_TO_NAME = {
LEVEL_TO_NAME = {
v: k
for k, v in _NAME_TO_LEVEL.items()
for k, v in NAME_TO_LEVEL.items()
if k not in ("warn", "exception", "notset")
}
# Keep around for backwards-compatibility in case someone imported them.
_LEVEL_TO_NAME = LEVEL_TO_NAME
_NAME_TO_LEVEL = NAME_TO_LEVEL
def map_method_name(method_name: str) -> str:
    """
    Map a logger method name to its canonical log level name.

    ``warn`` is just a deprecated alias for ``warning`` in the stdlib, and
    calling ``exception("")`` is the same as ``error("", exc_info=True)`` --
    both are folded into their canonical names; anything else passes through
    unchanged.
    """
    return {"warn": "warning", "exception": "error"}.get(
        method_name, method_name
    )
def add_log_level(
logger: logging.Logger, method_name: str, event_dict: EventDict
logger: Any, method_name: str, event_dict: EventDict
) -> EventDict:
"""
Add the log level to the event dict under the ``level`` key.
@@ -62,189 +72,10 @@ def add_log_level(
.. versionchanged:: 20.2.0
Importable from `structlog.processors` (additionally to
`structlog.stdlib`).
.. versionchanged:: 24.1.0
Added mapping from "exception" to "error"
"""
if method_name == "warn":
# The stdlib has an alias
method_name = "warning"
event_dict["level"] = method_name
event_dict["level"] = map_method_name(method_name)
return event_dict
def _nop(self: Any, event: str, *args: Any, **kw: Any) -> Any:
return None
async def _anop(self: Any, event: str, *args: Any, **kw: Any) -> Any:
return None
def exception(
self: FilteringBoundLogger, event: str, *args: Any, **kw: Any
) -> Any:
kw.setdefault("exc_info", True)
return self.error(event, *args, **kw)
async def aexception(
self: FilteringBoundLogger, event: str, *args: Any, **kw: Any
) -> Any:
# Exception info has to be extracted this early, because it is no longer
# available once control is passed to the executor.
if kw.get("exc_info", True) is True:
kw["exc_info"] = sys.exc_info()
ctx = contextvars.copy_context()
return await asyncio.get_running_loop().run_in_executor(
None,
lambda: ctx.run(lambda: self.error(event, *args, **kw)),
)
def make_filtering_bound_logger(min_level: int) -> type[FilteringBoundLogger]:
"""
Create a new `FilteringBoundLogger` that only logs *min_level* or higher.
The logger is optimized such that log levels below *min_level* only consist
of a ``return None``.
All familiar log methods are present, with async variants of each that are
prefixed by an ``a``. Therefore, the async version of ``log.info("hello")``
is ``await log.ainfo("hello")``.
Additionally it has a ``log(self, level: int, **kw: Any)`` method to mirror
`logging.Logger.log` and `structlog.stdlib.BoundLogger.log`.
Compared to using *structlog*'s standard library integration and the
`structlog.stdlib.filter_by_level` processor:
- It's faster because once the logger is built at program start; it's a
static class.
- For the same reason you can't change the log level once configured. Use
the dynamic approach of `standard-library` instead, if you need this
feature.
- You *can* have (much) more fine-grained filtering by :ref:`writing a
simple processor <finer-filtering>`.
Arguments:
min_level:
The log level as an integer. You can use the constants from
`logging` like ``logging.INFO`` or pass the values directly. See
`this table from the logging docs
<https://docs.python.org/3/library/logging.html#levels>`_ for
possible values.
.. versionadded:: 20.2.0
.. versionchanged:: 21.1.0 The returned loggers are now pickleable.
.. versionadded:: 20.1.0 The ``log()`` method.
.. versionadded:: 22.2.0
Async variants ``alog()``, ``adebug()``, ``ainfo()``, and so forth.
"""
return _LEVEL_TO_FILTERING_LOGGER[min_level]
def _make_filtering_bound_logger(min_level: int) -> type[FilteringBoundLogger]:
"""
Create a new `FilteringBoundLogger` that only logs *min_level* or higher.
The logger is optimized such that log levels below *min_level* only consist
of a ``return None``.
"""
def make_method(
level: int,
) -> tuple[Callable[..., Any], Callable[..., Any]]:
if level < min_level:
return _nop, _anop
name = _LEVEL_TO_NAME[level]
def meth(self: Any, event: str, *args: Any, **kw: Any) -> Any:
if not args:
return self._proxy_to_logger(name, event, **kw)
return self._proxy_to_logger(name, event % args, **kw)
async def ameth(self: Any, event: str, *args: Any, **kw: Any) -> Any:
if args:
event = event % args
ctx = contextvars.copy_context()
await asyncio.get_running_loop().run_in_executor(
None,
lambda: ctx.run(
lambda: self._proxy_to_logger(name, event, **kw)
),
)
meth.__name__ = name
ameth.__name__ = f"a{name}"
return meth, ameth
def log(self: Any, level: int, event: str, *args: Any, **kw: Any) -> Any:
if level < min_level:
return None
name = _LEVEL_TO_NAME[level]
if not args:
return self._proxy_to_logger(name, event, **kw)
return self._proxy_to_logger(name, event % args, **kw)
async def alog(
self: Any, level: int, event: str, *args: Any, **kw: Any
) -> Any:
if level < min_level:
return None
name = _LEVEL_TO_NAME[level]
if args:
event = event % args
ctx = contextvars.copy_context()
return await asyncio.get_running_loop().run_in_executor(
None,
lambda: ctx.run(lambda: self._proxy_to_logger(name, event, **kw)),
)
meths: dict[str, Callable[..., Any]] = {"log": log, "alog": alog}
for lvl, name in _LEVEL_TO_NAME.items():
meths[name], meths[f"a{name}"] = make_method(lvl)
meths["exception"] = exception
meths["aexception"] = aexception
meths["fatal"] = meths["error"]
meths["afatal"] = meths["aerror"]
meths["warn"] = meths["warning"]
meths["awarn"] = meths["awarning"]
meths["msg"] = meths["info"]
meths["amsg"] = meths["ainfo"]
return type(
"BoundLoggerFilteringAt%s"
% (_LEVEL_TO_NAME.get(min_level, "Notset").capitalize()),
(BoundLoggerBase,),
meths,
)
# Pre-create all possible filters to make them pickleable.
BoundLoggerFilteringAtNotset = _make_filtering_bound_logger(NOTSET)
BoundLoggerFilteringAtDebug = _make_filtering_bound_logger(DEBUG)
BoundLoggerFilteringAtInfo = _make_filtering_bound_logger(INFO)
BoundLoggerFilteringAtWarning = _make_filtering_bound_logger(WARNING)
BoundLoggerFilteringAtError = _make_filtering_bound_logger(ERROR)
BoundLoggerFilteringAtCritical = _make_filtering_bound_logger(CRITICAL)
_LEVEL_TO_FILTERING_LOGGER = {
CRITICAL: BoundLoggerFilteringAtCritical,
ERROR: BoundLoggerFilteringAtError,
WARNING: BoundLoggerFilteringAtWarning,
INFO: BoundLoggerFilteringAtInfo,
DEBUG: BoundLoggerFilteringAtDebug,
NOTSET: BoundLoggerFilteringAtNotset,
}

View File

@@ -0,0 +1,248 @@
# SPDX-License-Identifier: MIT OR Apache-2.0
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the MIT License. See the LICENSE file in the root of this
# repository for complete details.
"""
structlog's native high-performance loggers.
"""
from __future__ import annotations
import asyncio
import contextvars
import sys
from typing import Any, Callable
from ._base import BoundLoggerBase
from ._log_levels import (
CRITICAL,
DEBUG,
ERROR,
INFO,
LEVEL_TO_NAME,
NAME_TO_LEVEL,
NOTSET,
WARNING,
)
from .contextvars import _ASYNC_CALLING_STACK
from .typing import FilteringBoundLogger
def _nop(self: Any, event: str, *args: Any, **kw: Any) -> Any:
    # Sync no-op used for log methods below the configured minimum level.
    return None
async def _anop(self: Any, event: str, *args: Any, **kw: Any) -> Any:
    # Async no-op used for log methods below the configured minimum level.
    return None
def exception(
    self: FilteringBoundLogger, event: str, *args: Any, **kw: Any
) -> Any:
    """
    Log *event* at error level, defaulting ``exc_info`` to `True` so the
    currently handled exception is attached to the entry.
    """
    if "exc_info" not in kw:
        kw["exc_info"] = True

    return self.error(event, *args, **kw)
async def aexception(
    self: FilteringBoundLogger, event: str, *args: Any, **kw: Any
) -> Any:
    """
    Async variant of `exception`: log *event* at error level with
    ``exc_info`` enabled by default, off the event loop via an executor.

    .. versionchanged:: 23.3.0
       Callsite parameters are now also collected under asyncio.
    """
    # Exception info has to be extracted this early, because it is no longer
    # available once control is passed to the executor.
    if kw.get("exc_info", True) is True:
        kw["exc_info"] = sys.exc_info()

    # Stash the caller's frame so callsite processors resolve the app frame
    # instead of the executor thread's frame.
    scs_token = _ASYNC_CALLING_STACK.set(sys._getframe().f_back)  # type: ignore[arg-type]
    ctx = contextvars.copy_context()
    try:
        runner = await asyncio.get_running_loop().run_in_executor(
            None,
            lambda: ctx.run(lambda: self.error(event, *args, **kw)),
        )
    finally:
        # Always restore the contextvar, even if the executor call raised.
        _ASYNC_CALLING_STACK.reset(scs_token)

    return runner
def make_filtering_bound_logger(
    min_level: int | str,
) -> type[FilteringBoundLogger]:
    """
    Create a new `FilteringBoundLogger` that only logs *min_level* or higher.

    The logger is optimized such that log levels below *min_level* only consist
    of a ``return None``.

    All familiar log methods are present, with async variants of each that are
    prefixed by an ``a``. Therefore, the async version of ``log.info("hello")``
    is ``await log.ainfo("hello")``.

    Additionally it has a ``log(self, level: int, **kw: Any)`` method to mirror
    `logging.Logger.log` and `structlog.stdlib.BoundLogger.log`.

    Compared to using *structlog*'s standard library integration and the
    `structlog.stdlib.filter_by_level` processor:

    - It's faster because once the logger is built at program start; it's a
      static class.
    - For the same reason you can't change the log level once configured. Use
      the dynamic approach of `standard-library` instead, if you need this
      feature.
    - You *can* have (much) more fine-grained filtering by :ref:`writing a
      simple processor <finer-filtering>`.

    Args:
        min_level:
            The log level as an integer. You can use the constants from
            `logging` like ``logging.INFO`` or pass the values directly. See
            `this table from the logging docs
            <https://docs.python.org/3/library/logging.html#levels>`_ for
            possible values.

            If you pass a string, it must be one of: ``critical``, ``error``,
            ``warning``, ``info``, ``debug``, ``notset`` (upper/lower case
            doesn't matter).

    .. versionadded:: 20.2.0
    .. versionchanged:: 21.1.0 The returned loggers are now pickleable.
    .. versionadded:: 20.1.0 The ``log()`` method.
    .. versionadded:: 22.2.0
       Async variants ``alog()``, ``adebug()``, ``ainfo()``, and so forth.
    .. versionchanged:: 25.1.0 *min_level* can now be a string.
    """
    # Normalize string names to their numeric levels, then hand out the
    # matching pre-built (and therefore pickleable) logger class.
    level = (
        NAME_TO_LEVEL[min_level.lower()]
        if isinstance(min_level, str)
        else min_level
    )

    return LEVEL_TO_FILTERING_LOGGER[level]
def _make_filtering_bound_logger(min_level: int) -> type[FilteringBoundLogger]:
    """
    Create a new `FilteringBoundLogger` that only logs *min_level* or higher.

    The logger is optimized such that log levels below *min_level* only consist
    of a ``return None``.
    """

    def make_method(
        level: int,
    ) -> tuple[Callable[..., Any], Callable[..., Any]]:
        # Build the (sync, async) pair of log methods for *level*; levels
        # below the threshold compile down to shared no-ops.
        if level < min_level:
            return _nop, _anop

        name = LEVEL_TO_NAME[level]

        def meth(self: Any, event: str, *args: Any, **kw: Any) -> Any:
            # Positional args are %-interpolated into the event string.
            if not args:
                return self._proxy_to_logger(name, event, **kw)

            return self._proxy_to_logger(name, event % args, **kw)

        async def ameth(self: Any, event: str, *args: Any, **kw: Any) -> Any:
            """
            .. versionchanged:: 23.3.0
               Callsite parameters are now also collected under asyncio.
            """
            if args:
                event = event % args

            # Record the caller's frame so callsite processors find the app
            # frame although the actual call runs in an executor thread.
            scs_token = _ASYNC_CALLING_STACK.set(sys._getframe().f_back)  # type: ignore[arg-type]
            ctx = contextvars.copy_context()
            try:
                await asyncio.get_running_loop().run_in_executor(
                    None,
                    lambda: ctx.run(
                        lambda: self._proxy_to_logger(name, event, **kw)
                    ),
                )
            finally:
                _ASYNC_CALLING_STACK.reset(scs_token)

        meth.__name__ = name
        ameth.__name__ = f"a{name}"

        return meth, ameth

    def log(self: Any, level: int, event: str, *args: Any, **kw: Any) -> Any:
        # Mirrors `logging.Logger.log`: the level is dynamic, so it's
        # filtered at call time instead of at class-build time.
        if level < min_level:
            return None
        name = LEVEL_TO_NAME[level]

        if not args:
            return self._proxy_to_logger(name, event, **kw)

        return self._proxy_to_logger(name, event % args, **kw)

    async def alog(
        self: Any, level: int, event: str, *args: Any, **kw: Any
    ) -> Any:
        """
        .. versionchanged:: 23.3.0
           Callsite parameters are now also collected under asyncio.
        """
        if level < min_level:
            return None
        name = LEVEL_TO_NAME[level]
        if args:
            event = event % args

        # Same frame-stashing dance as in ameth above.
        scs_token = _ASYNC_CALLING_STACK.set(sys._getframe().f_back)  # type: ignore[arg-type]
        ctx = contextvars.copy_context()
        try:
            runner = await asyncio.get_running_loop().run_in_executor(
                None,
                lambda: ctx.run(
                    lambda: self._proxy_to_logger(name, event, **kw)
                ),
            )
        finally:
            _ASYNC_CALLING_STACK.reset(scs_token)

        return runner

    # Assemble the class namespace: one (sync, async) pair per level plus
    # the dynamic log()/alog() entry points.
    meths: dict[str, Callable[..., Any]] = {"log": log, "alog": alog}
    for lvl, name in LEVEL_TO_NAME.items():
        meths[name], meths[f"a{name}"] = make_method(lvl)

    # Aliases kept for stdlib and historical compatibility.
    meths["exception"] = exception
    meths["aexception"] = aexception
    meths["fatal"] = meths["critical"]
    meths["afatal"] = meths["acritical"]
    meths["warn"] = meths["warning"]
    meths["awarn"] = meths["awarning"]
    meths["msg"] = meths["info"]
    meths["amsg"] = meths["ainfo"]

    # Introspection
    meths["is_enabled_for"] = lambda self, level: level >= min_level
    meths["get_effective_level"] = lambda self: min_level

    return type(
        f"BoundLoggerFilteringAt{LEVEL_TO_NAME.get(min_level, 'Notset').capitalize()}",
        (BoundLoggerBase,),
        meths,
    )
# Pre-create all possible filters to make them pickleable.
BoundLoggerFilteringAtNotset = _make_filtering_bound_logger(NOTSET)
BoundLoggerFilteringAtDebug = _make_filtering_bound_logger(DEBUG)
BoundLoggerFilteringAtInfo = _make_filtering_bound_logger(INFO)
BoundLoggerFilteringAtWarning = _make_filtering_bound_logger(WARNING)
BoundLoggerFilteringAtError = _make_filtering_bound_logger(ERROR)
BoundLoggerFilteringAtCritical = _make_filtering_bound_logger(CRITICAL)

# Lookup table used by `make_filtering_bound_logger` to map a numeric level
# to its pre-built class.
LEVEL_TO_FILTERING_LOGGER = {
    CRITICAL: BoundLoggerFilteringAtCritical,
    ERROR: BoundLoggerFilteringAtError,
    WARNING: BoundLoggerFilteringAtWarning,
    INFO: BoundLoggerFilteringAtInfo,
    DEBUG: BoundLoggerFilteringAtDebug,
    NOTSET: BoundLoggerFilteringAtNotset,
}

View File

@@ -17,8 +17,6 @@ from pickle import PicklingError
from sys import stderr, stdout
from typing import IO, Any, BinaryIO, TextIO
from structlog._utils import until_not_interrupted
WRITE_LOCKS: dict[IO[Any], threading.Lock] = {}
@@ -36,8 +34,7 @@ class PrintLogger:
"""
Print events into a file.
Arguments:
Args:
file: File to print to. (default: `sys.stdout`)
>>> from structlog import PrintLogger
@@ -110,7 +107,7 @@ class PrintLogger:
"""
f = self._file if self._file is not stdout else None
with self._lock:
until_not_interrupted(print, message, file=f, flush=True)
print(message, file=f, flush=True)
log = debug = info = warn = warning = msg
fatal = failure = err = error = critical = exception = msg
@@ -122,8 +119,7 @@ class PrintLoggerFactory:
To be used with `structlog.configure`\ 's ``logger_factory``.
Arguments:
Args:
file: File to print to. (default: `sys.stdout`)
Positional arguments are silently ignored.
@@ -142,8 +138,7 @@ class WriteLogger:
"""
Write events into a file.
Arguments:
Args:
file: File to print to. (default: `sys.stdout`)
>>> from structlog import WriteLogger
@@ -219,8 +214,8 @@ class WriteLogger:
Write and flush *message*.
"""
with self._lock:
until_not_interrupted(self._write, message + "\n")
until_not_interrupted(self._flush)
self._write(message + "\n")
self._flush()
log = debug = info = warn = warning = msg
fatal = failure = err = error = critical = exception = msg
@@ -232,8 +227,7 @@ class WriteLoggerFactory:
To be used with `structlog.configure`\ 's ``logger_factory``.
Arguments:
Args:
file: File to print to. (default: `sys.stdout`)
Positional arguments are silently ignored.
@@ -252,7 +246,7 @@ class BytesLogger:
r"""
Writes bytes into a file.
Arguments:
Args:
file: File to print to. (default: `sys.stdout`\ ``.buffer``)
Useful if you follow `current logging best practices
@@ -261,7 +255,8 @@ class BytesLogger:
.. versionadded:: 20.2.0
"""
__slots__ = ("_file", "_write", "_flush", "_lock")
__slots__ = ("_file", "_flush", "_lock", "_write")
def __init__(self, file: BinaryIO | None = None):
self._file = file or sys.stdout.buffer
@@ -323,8 +318,8 @@ class BytesLogger:
Write *message*.
"""
with self._lock:
until_not_interrupted(self._write, message + b"\n")
until_not_interrupted(self._flush)
self._write(message + b"\n")
self._flush()
log = debug = info = warn = warning = msg
fatal = failure = err = error = critical = exception = msg
@@ -336,14 +331,14 @@ class BytesLoggerFactory:
To be used with `structlog.configure`\ 's ``logger_factory``.
Arguments:
Args:
file: File to print to. (default: `sys.stdout`\ ``.buffer``)
Positional arguments are silently ignored.
.. versionadded:: 20.2.0
"""
__slots__ = ("_file",)
def __init__(self, file: BinaryIO | None = None):

View File

@@ -9,32 +9,10 @@ Generic utilities.
from __future__ import annotations
import errno
import sys
from contextlib import suppress
from typing import Any, Callable
def until_not_interrupted(f: Callable[..., Any], *args: Any, **kw: Any) -> Any:
    """
    Call *f* repeatedly until it returns, swallowing EINTR interruptions.

    Args:
        f: A callable like a function.

        *args: Positional arguments for *f*.

        **kw: Keyword arguments for *f*.
    """
    while True:
        try:
            return f(*args, **kw)
        except OSError as exc:  # noqa: PERF203
            # Anything other than an interrupted system call is a real error.
            if exc.args[0] != errno.EINTR:
                raise
from typing import Any
def get_processname() -> str:

View File

@@ -11,6 +11,8 @@ Python 3.7 as :mod:`contextvars`.
.. versionchanged:: 21.1.0
Reimplemented without using a single dict as context carrier for improved
isolation. Every key-value pair is a separate `contextvars.ContextVar` now.
.. versionchanged:: 23.3.0
Callsite parameters are now also collected under asyncio.
See :doc:`contextvars`.
"""
@@ -20,6 +22,7 @@ from __future__ import annotations
import contextlib
import contextvars
from types import FrameType
from typing import Any, Generator, Mapping
import structlog
@@ -30,6 +33,10 @@ from .typing import BindableLogger, EventDict, WrappedLogger
STRUCTLOG_KEY_PREFIX = "structlog_"
STRUCTLOG_KEY_PREFIX_LEN = len(STRUCTLOG_KEY_PREFIX)
_ASYNC_CALLING_STACK: contextvars.ContextVar[FrameType] = (
contextvars.ContextVar("_ASYNC_CALLING_STACK")
)
# For proper isolation, we have to use a dict of ContextVars instead of a
# single ContextVar with a dict.
# See https://github.com/hynek/structlog/pull/302 for details.

View File

@@ -6,7 +6,7 @@
"""
Helpers that make development with *structlog* more pleasant.
See also the narrative documentation in `development`.
See also the narrative documentation in `console-output`.
"""
from __future__ import annotations
@@ -20,13 +20,14 @@ from io import StringIO
from types import ModuleType
from typing import (
Any,
Iterable,
Callable,
Literal,
Protocol,
Sequence,
TextIO,
Type,
Union,
cast,
)
from ._frames import _format_exception
@@ -52,12 +53,12 @@ try:
except ImportError:
rich = None # type: ignore[assignment]
__all__ = [
"ConsoleRenderer",
"RichTracebackFormatter",
"better_traceback",
"plain_traceback",
"rich_traceback",
"better_traceback",
]
_IS_WINDOWS = sys.platform == "win32"
@@ -72,7 +73,7 @@ def _pad(s: str, length: int) -> str:
"""
missing = length - len(s)
return s + " " * (missing if missing > 0 else 0)
return s + " " * (max(0, missing))
if colorama is not None:
@@ -164,6 +165,167 @@ class _PlainStyles:
kv_value = ""
class ColumnFormatter(Protocol):
    """
    :class:`~typing.Protocol` for column formatters.

    See `KeyValueColumnFormatter` and `LogLevelColumnFormatter` for examples.

    .. versionadded:: 23.3.0
    """

    def __call__(self, key: str, value: object) -> str:
        """
        Format *value* for *key*.

        This method is responsible for formatting *key*, the ``=``, and the
        *value*. That means that it can use any string instead of the ``=``
        and it can leave out either the *key* or the *value*.

        If it returns an empty string, the column is omitted completely.
        """
@dataclass
class Column:
    """
    A column defines the way a key-value pair is formatted, and, by its
    position in the *columns* argument of `ConsoleRenderer`, the order in
    which it is rendered.

    Args:
        key:
            The key for which this column is responsible. Leave empty to
            define it as the default formatter.

        formatter: The formatter for columns with *key*.

    .. versionadded:: 23.3.0
    """

    # The key this column renders; "" marks the default/fallback column.
    key: str
    # Callable conforming to the ColumnFormatter protocol.
    formatter: ColumnFormatter
@dataclass
class KeyValueColumnFormatter:
    """
    Render a single key-value pair as a styled ``key=value`` string.

    Args:
        key_style: The style to apply to the key. If None, the key is omitted.

        value_style: The style to apply to the value.

        reset_style: The style to apply whenever a style is no longer needed.

        value_repr:
            A callable that returns the string representation of the value.

        width: The width to pad the value to. If 0, no padding is done.

        prefix:
            A string to prepend to the formatted key-value pair. May contain
            styles.

        postfix:
            A string to append to the formatted key-value pair. May contain
            styles.

    .. versionadded:: 23.3.0
    """

    key_style: str | None
    value_style: str
    reset_style: str
    value_repr: Callable[[object], str]
    width: int = 0
    prefix: str = ""
    postfix: str = ""

    def __call__(self, key: str, value: object) -> str:
        # Collect the styled fragments and join them in a single pass.
        pieces: list[str] = []

        if self.prefix:
            pieces += [self.prefix, self.reset_style]

        if self.key_style is not None:
            pieces += [self.key_style, key, self.reset_style, "="]

        pieces += [
            self.value_style,
            _pad(self.value_repr(value), self.width),
            self.reset_style,
        ]

        if self.postfix:
            pieces += [self.postfix, self.reset_style]

        return "".join(pieces)
class LogLevelColumnFormatter:
    """
    Format a log level according to *level_styles*.

    The width is padded to the longest level name (if *level_styles* is passed
    -- otherwise there's no way to know the lengths of all levels).

    Args:
        level_styles:
            A dictionary of level names to styles that are applied to it. If
            None, the level is formatted as a plain ``[level]``.

        reset_style:
            What to use to reset the style after the level name. Ignored if
            *level_styles* is None.

        width:
            The width to pad the level to. If 0, no padding is done.

    .. versionadded:: 23.3.0
    .. versionadded:: 24.2.0 *width*
    """

    level_styles: dict[str, str] | None
    reset_style: str
    width: int

    def __init__(
        self,
        level_styles: dict[str, str],
        reset_style: str,
        width: int | None = None,
    ) -> None:
        self.level_styles = level_styles
        if level_styles:
            # NOTE(review): any non-zero *width* is replaced by the length of
            # the longest level name -- only width == 0 disables padding.
            # Presumably intentional (pad-to-longest); confirm against docs.
            self.width = (
                0
                if width == 0
                else len(max(self.level_styles.keys(), key=lambda e: len(e)))
            )
            self.reset_style = reset_style
        else:
            # Without styles there is nothing to reset and no known widths.
            self.width = 0
            self.reset_style = ""

    def __call__(self, key: str, value: object) -> str:
        # *value* carries the level name; style lookup falls back to "".
        level = cast(str, value)
        style = (
            ""
            if self.level_styles is None
            else self.level_styles.get(level, "")
        )

        return f"[{style}{_pad(level, self.width)}{self.reset_style}]"
_NOTHING = object()
def plain_traceback(sio: TextIO, exc_info: ExcInfo) -> None:
"""
"Pretty"-print *exc_info* to *sio* using our own plain formatter.
@@ -214,7 +376,9 @@ class RichTracebackFormatter:
sio.write("\n")
Console(file=sio, color_system=self.color_system).print(
Console(
file=sio, color_system=self.color_system, width=self.width
).print(
Traceback.from_exception(
*exc_info,
show_locals=self.show_locals,
@@ -268,36 +432,48 @@ else:
class ConsoleRenderer:
"""
r"""
Render ``event_dict`` nicely aligned, possibly in colors, and ordered.
If ``event_dict`` contains a true-ish ``exc_info`` key, it will be rendered
*after* the log line. If Rich_ or better-exceptions_ are present, in colors
and with extra context.
Arguments:
Args:
columns:
A list of `Column` objects defining both the order and format of
the key-value pairs in the output. If passed, most other arguments
become meaningless.
pad_event: Pad the event to this many characters.
**Must** contain a column with ``key=''`` that defines the default
formatter.
.. seealso:: `columns-config`
pad_event:
Pad the event to this many characters. Ignored if *columns* are
passed.
colors:
Use colors for a nicer output. `True` by default. On Windows only
if Colorama_ is installed.
if Colorama_ is installed. Ignored if *columns* are passed.
force_colors:
Force colors even for non-tty destinations. Use this option if your
logs are stored in a file that is meant to be streamed to the
console. Only meaningful on Windows.
console. Only meaningful on Windows. Ignored if *columns* are
passed.
repr_native_str:
When `True`, `repr` is also applied to native strings (i.e. unicode
on Python 3 and bytes on Python 2). Setting this to `False` is
useful if you want to have human-readable non-ASCII output on
Python 2. The ``event`` key is *never* `repr` -ed.
When `True`, `repr` is also applied to ``str``\ s. The ``event``
key is *never* `repr` -ed. Ignored if *columns* are passed.
level_styles:
When present, use these styles for colors. This must be a dict from
level names (strings) to Colorama styles. The default can be
obtained by calling `ConsoleRenderer.get_default_level_styles`
level names (strings) to terminal sequences (for example, Colorama)
styles. The default can be obtained by calling
`ConsoleRenderer.get_default_level_styles`. Ignored when *columns*
are passed.
exception_formatter:
A callable to render ``exc_infos``. If Rich_ or better-exceptions_
@@ -307,18 +483,29 @@ class ConsoleRenderer:
`RichTracebackFormatter` like `rich_traceback`, or implement your
own.
sort_keys: Whether to sort keys when formatting. `True` by default.
sort_keys:
Whether to sort keys when formatting. `True` by default. Ignored if
*columns* are passed.
event_key:
The key to look for the main log message. Needed when you rename it
e.g. using `structlog.processors.EventRenamer`.
e.g. using `structlog.processors.EventRenamer`. Ignored if
*columns* are passed.
timestamp_key:
The key to look for timestamp of the log message. Needed when you
rename it e.g. using `structlog.processors.EventRenamer`.
rename it e.g. using `structlog.processors.EventRenamer`. Ignored
if *columns* are passed.
pad_level:
Whether to pad log level with blanks to the longest amongst all
level label.
Requires the Colorama_ package if *colors* is `True` **on Windows**.
Raises:
ValueError: If there's not exactly one default column formatter.
.. _Colorama: https://pypi.org/project/colorama/
.. _better-exceptions: https://pypi.org/project/better-exceptions/
.. _Rich: https://pypi.org/project/rich/
@@ -352,20 +539,73 @@ class ConsoleRenderer:
.. versionadded:: 21.3.0 *sort_keys*
.. versionadded:: 22.1.0 *event_key*
.. versionadded:: 23.2.0 *timestamp_key*
.. versionadded:: 23.3.0 *columns*
.. versionadded:: 24.2.0 *pad_level*
"""
def __init__(
def __init__( # noqa: PLR0912, PLR0915
self,
pad_event: int = _EVENT_WIDTH,
colors: bool = _has_colors,
force_colors: bool = False,
repr_native_str: bool = False,
level_styles: Styles | None = None,
level_styles: dict[str, str] | None = None,
exception_formatter: ExceptionRenderer = default_exception_formatter,
sort_keys: bool = True,
event_key: str = "event",
timestamp_key: str = "timestamp",
columns: list[Column] | None = None,
pad_level: bool = True,
):
self._exception_formatter = exception_formatter
self._sort_keys = sort_keys
if columns is not None:
to_warn = []
def add_meaningless_arg(arg: str) -> None:
to_warn.append(
f"The `{arg}` argument is ignored when passing `columns`.",
)
if pad_event != _EVENT_WIDTH:
add_meaningless_arg("pad_event")
if colors != _has_colors:
add_meaningless_arg("colors")
if force_colors is not False:
add_meaningless_arg("force_colors")
if repr_native_str is not False:
add_meaningless_arg("repr_native_str")
if level_styles is not None:
add_meaningless_arg("level_styles")
if event_key != "event":
add_meaningless_arg("event_key")
if timestamp_key != "timestamp":
add_meaningless_arg("timestamp_key")
for w in to_warn:
warnings.warn(w, stacklevel=2)
defaults = [col for col in columns if col.key == ""]
if not defaults:
raise ValueError(
"Must pass a default column formatter (a column with `key=''`)."
)
if len(defaults) > 1:
raise ValueError("Only one default column formatter allowed.")
self._default_column_formatter = defaults[0].formatter
self._columns = [col for col in columns if col.key]
return
# Create default columns configuration.
styles: Styles
if colors:
if _IS_WINDOWS: # pragma: no cover
@@ -391,24 +631,69 @@ class ConsoleRenderer:
styles = _PlainStyles
self._styles = styles
self._pad_event = pad_event
if level_styles is None:
self._level_to_color = self.get_default_level_styles(colors)
else:
self._level_to_color = level_styles
level_to_color = (
self.get_default_level_styles(colors)
if level_styles is None
else level_styles
).copy()
for key in self._level_to_color:
self._level_to_color[key] += styles.bright
for key in level_to_color:
level_to_color[key] += styles.bright
self._longest_level = len(
max(self._level_to_color.keys(), key=lambda e: len(e))
max(level_to_color.keys(), key=lambda e: len(e))
)
self._repr_native_str = repr_native_str
self._exception_formatter = exception_formatter
self._sort_keys = sort_keys
self._event_key = event_key
self._timestamp_key = timestamp_key
self._default_column_formatter = KeyValueColumnFormatter(
styles.kv_key,
styles.kv_value,
styles.reset,
value_repr=self._repr,
width=0,
)
logger_name_formatter = KeyValueColumnFormatter(
key_style=None,
value_style=styles.bright + styles.logger_name,
reset_style=styles.reset,
value_repr=str,
prefix="[",
postfix="]",
)
level_width = 0 if not pad_level else None
self._columns = [
Column(
timestamp_key,
KeyValueColumnFormatter(
key_style=None,
value_style=styles.timestamp,
reset_style=styles.reset,
value_repr=str,
),
),
Column(
"level",
LogLevelColumnFormatter(
level_to_color, reset_style=styles.reset, width=level_width
),
),
Column(
event_key,
KeyValueColumnFormatter(
key_style=None,
value_style=styles.bright,
reset_style=styles.reset,
value_repr=str,
width=pad_event,
),
),
Column("logger", logger_name_formatter),
Column("logger_name", logger_name_formatter),
]
def _repr(self, val: Any) -> str:
"""
@@ -419,90 +704,39 @@ class ConsoleRenderer:
return repr(val)
if isinstance(val, str):
if set(val) & {" ", "\t", "=", "\r", "\n", '"', "'"}:
return repr(val)
return val
return repr(val)
def __call__( # noqa: PLR0912
def __call__(
self, logger: WrappedLogger, name: str, event_dict: EventDict
) -> str:
sio = StringIO()
ts = event_dict.pop(self._timestamp_key, None)
if ts is not None:
sio.write(
# can be a number if timestamp is UNIXy
self._styles.timestamp
+ str(ts)
+ self._styles.reset
+ " "
)
level = event_dict.pop("level", None)
if level is not None:
sio.write(
"["
+ self._level_to_color.get(level, "")
+ _pad(level, self._longest_level)
+ self._styles.reset
+ "] "
)
# force event to str for compatibility with standard library
event = event_dict.pop(self._event_key, None)
if not isinstance(event, str):
event = str(event)
if event_dict:
event = _pad(event, self._pad_event) + self._styles.reset + " "
else:
event += self._styles.reset
sio.write(self._styles.bright + event)
logger_name = event_dict.pop("logger", None)
if logger_name is None:
logger_name = event_dict.pop("logger_name", None)
if logger_name is not None:
sio.write(
"["
+ self._styles.logger_name
+ self._styles.bright
+ logger_name
+ self._styles.reset
+ "] "
)
stack = event_dict.pop("stack", None)
exc = event_dict.pop("exception", None)
exc_info = event_dict.pop("exc_info", None)
event_dict_keys: Iterable[str] = event_dict.keys()
if self._sort_keys:
event_dict_keys = sorted(event_dict_keys)
kvs = [
col.formatter(col.key, val)
for col in self._columns
if (val := event_dict.pop(col.key, _NOTHING)) is not _NOTHING
] + [
self._default_column_formatter(key, event_dict[key])
for key in (sorted(event_dict) if self._sort_keys else event_dict)
]
sio.write(
" ".join(
self._styles.kv_key
+ key
+ self._styles.reset
+ "="
+ self._styles.kv_value
+ self._repr(event_dict[key])
+ self._styles.reset
for key in event_dict_keys
)
)
sio = StringIO()
sio.write((" ".join(kv for kv in kvs if kv)).rstrip(" "))
if stack is not None:
sio.write("\n" + stack)
if exc_info or exc is not None:
sio.write("\n\n" + "=" * 79 + "\n")
exc_info = _figure_out_exc_info(exc_info)
if exc_info:
exc_info = _figure_out_exc_info(exc_info)
if exc_info != (None, None, None):
self._exception_formatter(sio, exc_info)
self._exception_formatter(sio, exc_info)
elif exc is not None:
if self._exception_formatter is not plain_traceback:
warnings.warn(
@@ -510,12 +744,13 @@ class ConsoleRenderer:
"if you want pretty exceptions.",
stacklevel=2,
)
sio.write("\n" + exc)
return sio.getvalue()
@staticmethod
def get_default_level_styles(colors: bool = True) -> Any:
def get_default_level_styles(colors: bool = True) -> dict[str, str]:
"""
Get the default styles for log levels
@@ -524,11 +759,10 @@ class ConsoleRenderer:
home-grown :func:`~structlog.stdlib.add_log_level` you could do::
my_styles = ConsoleRenderer.get_default_level_styles()
my_styles["EVERYTHING_IS_ON_FIRE"] = my_styles["critical"] renderer
= ConsoleRenderer(level_styles=my_styles)
Arguments:
my_styles["EVERYTHING_IS_ON_FIRE"] = my_styles["critical"]
renderer = ConsoleRenderer(level_styles=my_styles)
Args:
colors:
Whether to use colorful styles. This must match the *colors*
parameter to `ConsoleRenderer`. Default: `True`.

View File

@@ -11,7 +11,6 @@ from __future__ import annotations
import datetime
import enum
import inspect
import json
import logging
import operator
@@ -20,6 +19,7 @@ import sys
import threading
import time
from types import FrameType, TracebackType
from typing import (
Any,
Callable,
@@ -28,6 +28,7 @@ from typing import (
NamedTuple,
Sequence,
TextIO,
cast,
)
from ._frames import (
@@ -35,27 +36,33 @@ from ._frames import (
_format_exception,
_format_stack,
)
from ._log_levels import _NAME_TO_LEVEL, add_log_level
from ._log_levels import NAME_TO_LEVEL, add_log_level
from ._utils import get_processname
from .tracebacks import ExceptionDictTransformer
from .typing import EventDict, ExceptionTransformer, ExcInfo, WrappedLogger
from .typing import (
EventDict,
ExceptionTransformer,
ExcInfo,
WrappedLogger,
)
__all__ = [
"_NAME_TO_LEVEL", # some people rely on it being here
"add_log_level",
"NAME_TO_LEVEL", # some people rely on it being here
"CallsiteParameter",
"CallsiteParameterAdder",
"dict_tracebacks",
"EventRenamer",
"ExceptionPrettyPrinter",
"format_exc_info",
"JSONRenderer",
"KeyValueRenderer",
"LogfmtRenderer",
"StackInfoRenderer",
"TimeStamper",
"UnicodeDecoder",
"UnicodeEncoder",
"add_log_level",
"dict_tracebacks",
"format_exc_info",
]
@@ -63,8 +70,7 @@ class KeyValueRenderer:
"""
Render ``event_dict`` as a list of ``Key=repr(Value)`` pairs.
Arguments:
Args:
sort_keys: Whether to sort keys when formatting.
key_order:
@@ -119,8 +125,7 @@ class LogfmtRenderer:
.. _logfmt: https://brandur.org/logfmt
Arguments:
Args:
sort_keys: Whether to sort keys when formatting.
key_order:
@@ -138,8 +143,7 @@ class LogfmtRenderer:
``flag=false``.
Raises:
ValueError: If a key contains non printable or space characters.
ValueError: If a key contains non-printable or whitespace characters.
.. versionadded:: 21.5.0
"""
@@ -173,9 +177,16 @@ class LogfmtRenderer:
continue
value = "true" if value else "false"
value = f"{value}".replace('"', '\\"')
value = str(value)
backslashes_need_escaping = (
" " in value or "=" in value or '"' in value
)
if backslashes_need_escaping and "\\" in value:
value = value.replace("\\", "\\\\")
if " " in value or "=" in value:
value = value.replace('"', '\\"').replace("\n", "\\n")
if backslashes_need_escaping:
value = f'"{value}"'
elements.append(f"{key}={value}")
@@ -237,8 +248,7 @@ class UnicodeEncoder:
"""
Encode unicode values in ``event_dict``.
Arguments:
Args:
encoding: Encoding to encode to (default: ``"utf-8"``).
errors:
@@ -272,8 +282,7 @@ class UnicodeDecoder:
"""
Decode byte string values in ``event_dict``.
Arguments:
Args:
encoding: Encoding to decode from (default: ``"utf-8"``).
errors: How to cope with encoding errors (default: ``"replace"``).
@@ -308,8 +317,7 @@ class JSONRenderer:
"""
Render the ``event_dict`` using ``serializer(event_dict, **dumps_kw)``.
Arguments:
Args:
dumps_kw:
Are passed unmodified to *serializer*. If *default* is passed, it
will disable support for ``__structlog__``-based serialization.
@@ -317,9 +325,9 @@ class JSONRenderer:
serializer:
A :func:`json.dumps`-compatible callable that will be used to
format the string. This can be used to use alternative JSON
encoders like `orjson <https://pypi.org/project/orjson/>`__ or
`RapidJSON <https://pypi.org/project/python-rapidjson/>`_
(default: :func:`json.dumps`).
encoders (default: :func:`json.dumps`).
.. seealso:: :doc:`performance` for examples.
.. versionadded:: 0.2.0 Support for ``__structlog__`` serialization method.
.. versionadded:: 15.4.0 *serializer* parameter.
@@ -385,8 +393,7 @@ class ExceptionRenderer:
If there is no ``exc_info`` key, the *event_dict* is not touched. This
behavior is analog to the one of the stdlib's logging.
Arguments:
Args:
exception_formatter:
A callable that is used to format the exception from the
``exc_info`` field into the ``exception`` field.
@@ -407,11 +414,9 @@ class ExceptionRenderer:
def __call__(
self, logger: WrappedLogger, name: str, event_dict: EventDict
) -> EventDict:
exc_info = event_dict.pop("exc_info", None)
exc_info = _figure_out_exc_info(event_dict.pop("exc_info", None))
if exc_info:
event_dict["exception"] = self.format_exception(
_figure_out_exc_info(exc_info)
)
event_dict["exception"] = self.format_exception(exc_info)
return event_dict
@@ -459,8 +464,7 @@ class TimeStamper:
"""
Add a timestamp to ``event_dict``.
Arguments:
Args:
fmt:
strftime format string, or ``"iso"`` for `ISO 8601
<https://en.wikipedia.org/wiki/ISO_8601>`_, or `None` for a `UNIX
@@ -473,7 +477,7 @@ class TimeStamper:
.. versionchanged:: 19.2.0 Can be pickled now.
"""
__slots__ = ("_stamper", "fmt", "utc", "key")
__slots__ = ("_stamper", "fmt", "key", "utc")
def __init__(
self,
@@ -521,7 +525,8 @@ def _make_stamper(
else:
def now() -> datetime.datetime:
# A naive local datetime is fine here, because we only format it.
# We don't need the TZ for our own formatting. We add it only for
# user-defined formats later.
return datetime.datetime.now() # noqa: DTZ005
if fmt is None:
@@ -548,12 +553,18 @@ def _make_stamper(
return stamper_iso_local
def stamper_fmt(event_dict: EventDict) -> EventDict:
event_dict[key] = now().strftime(fmt)
def stamper_fmt_local(event_dict: EventDict) -> EventDict:
event_dict[key] = now().astimezone().strftime(fmt)
return event_dict
return stamper_fmt
def stamper_fmt_utc(event_dict: EventDict) -> EventDict:
event_dict[key] = now().strftime(fmt)
return event_dict
if utc:
return stamper_fmt_utc
return stamper_fmt_local
class MaybeTimeStamper:
@@ -587,36 +598,49 @@ class MaybeTimeStamper:
return event_dict
def _figure_out_exc_info(v: Any) -> ExcInfo:
def _figure_out_exc_info(v: Any) -> ExcInfo | None:
"""
Depending on the Python version will try to do the smartest thing possible
to transform *v* into an ``exc_info`` tuple.
Try to convert *v* into an ``exc_info`` tuple.
Return ``None`` if *v* does not represent an exception or if there is no
current exception.
"""
if isinstance(v, BaseException):
return (v.__class__, v, v.__traceback__)
if isinstance(v, tuple):
return v # type: ignore[return-value]
if isinstance(v, tuple) and len(v) == 3:
has_type = isinstance(v[0], type) and issubclass(v[0], BaseException)
has_exc = isinstance(v[1], BaseException)
has_tb = v[2] is None or isinstance(v[2], TracebackType)
if has_type and has_exc and has_tb:
return v
if v:
return sys.exc_info() # type: ignore[return-value]
result = sys.exc_info()
if result == (None, None, None):
return None
return cast(ExcInfo, result)
return v
return None
class ExceptionPrettyPrinter:
"""
Pretty print exceptions and remove them from the ``event_dict``.
Arguments:
Pretty print exceptions rendered by *exception_formatter* and remove them
from the ``event_dict``.
Args:
file: Target file for output (default: ``sys.stdout``).
exception_formatter:
A callable that is used to format the exception from the
``exc_info`` field into the ``exception`` field.
This processor is mostly for development and testing so you can read
exceptions properly formatted.
It behaves like `format_exc_info` except it removes the exception data from
the event dictionary after printing it.
It behaves like `format_exc_info`, except that it removes the exception data
from the event dictionary after printing it using the passed
*exception_formatter*, which defaults to Python's built-in traceback formatting.
It's tolerant to having `format_exc_info` in front of itself in the
processor chain but doesn't require it. In other words, it handles both
@@ -626,6 +650,9 @@ class ExceptionPrettyPrinter:
.. versionchanged:: 16.0.0
Added support for passing exceptions as ``exc_info`` on Python 3.
.. versionchanged:: 25.4.0
Fixed *exception_formatter* so that it overrides the default if set.
"""
def __init__(
@@ -633,6 +660,7 @@ class ExceptionPrettyPrinter:
file: TextIO | None = None,
exception_formatter: ExceptionTransformer = _format_exception,
) -> None:
self.format_exception = exception_formatter
if file is not None:
self._file = file
else:
@@ -645,7 +673,7 @@ class ExceptionPrettyPrinter:
if exc is None:
exc_info = _figure_out_exc_info(event_dict.pop("exc_info", None))
if exc_info:
exc = _format_exception(exc_info)
exc = self.format_exception(exc_info)
if exc:
print(exc, file=self._file)
@@ -661,8 +689,7 @@ class StackInfoRenderer:
involving an exception and works analogously to the *stack_info* argument
of the Python standard library logging.
Arguments:
Args:
additional_ignores:
By default, stack frames coming from *structlog* are ignored. With
this argument you can add additional names that are ignored, before
@@ -727,6 +754,42 @@ class CallsiteParameter(enum.Enum):
PROCESS_NAME = "process_name"
def _get_callsite_pathname(module: str, frame: FrameType) -> Any:
return frame.f_code.co_filename
def _get_callsite_filename(module: str, frame: FrameType) -> Any:
return os.path.basename(frame.f_code.co_filename)
def _get_callsite_module(module: str, frame: FrameType) -> Any:
return os.path.splitext(os.path.basename(frame.f_code.co_filename))[0]
def _get_callsite_func_name(module: str, frame: FrameType) -> Any:
return frame.f_code.co_name
def _get_callsite_lineno(module: str, frame: FrameType) -> Any:
return frame.f_lineno
def _get_callsite_thread(module: str, frame: FrameType) -> Any:
return threading.get_ident()
def _get_callsite_thread_name(module: str, frame: FrameType) -> Any:
return threading.current_thread().name
def _get_callsite_process(module: str, frame: FrameType) -> Any:
return os.getpid()
def _get_callsite_process_name(module: str, frame: FrameType) -> Any:
return get_processname()
class CallsiteParameterAdder:
"""
Adds parameters of the callsite that an event dictionary originated from to
@@ -734,10 +797,6 @@ class CallsiteParameterAdder:
dictionaries with information such as the function name, line number and
filename that an event dictionary originated from.
.. warning::
This processor cannot detect the correct callsite for invocation of
async functions.
If the event dictionary has an embedded `logging.LogRecord` object and did
not originate from *structlog* then the callsite information will be
determined from the `logging.LogRecord` object. For event dictionaries
@@ -749,8 +808,7 @@ class CallsiteParameterAdder:
The keys used for callsite parameters in the event dictionary are the
string values of `CallsiteParameter` enum members.
Arguments:
Args:
parameters:
A collection of `CallsiteParameter` values that should be added to
the event dictionary.
@@ -773,35 +831,17 @@ class CallsiteParameterAdder:
"""
_handlers: ClassVar[
dict[CallsiteParameter, Callable[[str, inspect.Traceback], Any]]
dict[CallsiteParameter, Callable[[str, FrameType], Any]]
] = {
CallsiteParameter.PATHNAME: (
lambda module, frame_info: frame_info.filename
),
CallsiteParameter.FILENAME: (
lambda module, frame_info: os.path.basename(frame_info.filename)
),
CallsiteParameter.MODULE: (
lambda module, frame_info: os.path.splitext(
os.path.basename(frame_info.filename)
)[0]
),
CallsiteParameter.FUNC_NAME: (
lambda module, frame_info: frame_info.function
),
CallsiteParameter.LINENO: (
lambda module, frame_info: frame_info.lineno
),
CallsiteParameter.THREAD: (
lambda module, frame_info: threading.get_ident()
),
CallsiteParameter.THREAD_NAME: (
lambda module, frame_info: threading.current_thread().name
),
CallsiteParameter.PROCESS: (lambda module, frame_info: os.getpid()),
CallsiteParameter.PROCESS_NAME: (
lambda module, frame_info: get_processname()
),
CallsiteParameter.PATHNAME: _get_callsite_pathname,
CallsiteParameter.FILENAME: _get_callsite_filename,
CallsiteParameter.MODULE: _get_callsite_module,
CallsiteParameter.FUNC_NAME: _get_callsite_func_name,
CallsiteParameter.LINENO: _get_callsite_lineno,
CallsiteParameter.THREAD: _get_callsite_thread,
CallsiteParameter.THREAD_NAME: _get_callsite_thread_name,
CallsiteParameter.PROCESS: _get_callsite_process,
CallsiteParameter.PROCESS_NAME: _get_callsite_process_name,
}
_record_attribute_map: ClassVar[dict[CallsiteParameter, str]] = {
CallsiteParameter.PATHNAME: "pathname",
@@ -835,7 +875,7 @@ class CallsiteParameterAdder:
# module should not be logging using structlog.
self._additional_ignores = ["logging", *additional_ignores]
self._active_handlers: list[
tuple[CallsiteParameter, Callable[[str, inspect.Traceback], Any]]
tuple[CallsiteParameter, Callable[[str, FrameType], Any]]
] = []
self._record_mappings: list[CallsiteParameterAdder._RecordMapping] = []
for parameter in parameters:
@@ -865,9 +905,8 @@ class CallsiteParameterAdder:
frame, module = _find_first_app_frame_and_name(
additional_ignores=self._additional_ignores
)
frame_info = inspect.getframeinfo(frame)
for parameter, handler in self._active_handlers:
event_dict[parameter.value] = handler(module, frame_info)
event_dict[parameter.value] = handler(module, frame)
return event_dict
@@ -884,8 +923,7 @@ class EventRenamer:
some processors may rely on the presence and meaning of the ``event``
key.
Arguments:
Args:
to: Rename ``event_dict["event"]`` to ``event_dict[to]``
replace_by:

View File

@@ -16,32 +16,48 @@ import contextvars
import functools
import logging
import sys
import warnings
from functools import partial
from typing import Any, Callable, Collection, Iterable, Sequence
from typing import Any, Callable, Collection, Dict, Iterable, Sequence, cast
if sys.version_info >= (3, 11):
from typing import Self
else:
from typing_extensions import Self
from . import _config
from ._base import BoundLoggerBase
from ._frames import _find_first_app_frame_and_name, _format_stack
from ._log_levels import _LEVEL_TO_NAME, _NAME_TO_LEVEL, add_log_level
from .contextvars import merge_contextvars
from ._log_levels import LEVEL_TO_NAME, NAME_TO_LEVEL, add_log_level
from .contextvars import _ASYNC_CALLING_STACK, merge_contextvars
from .exceptions import DropEvent
from .processors import StackInfoRenderer
from .typing import Context, EventDict, ExcInfo, Processor, WrappedLogger
from .typing import (
Context,
EventDict,
ExcInfo,
Processor,
ProcessorReturnValue,
WrappedLogger,
)
__all__ = [
"add_log_level_number",
"add_log_level",
"add_logger_name",
"ExtraAdder",
"BoundLogger",
"filter_by_level",
"get_logger",
"ExtraAdder",
"LoggerFactory",
"PositionalArgumentsFormatter",
"ProcessorFormatter",
"add_log_level",
"add_log_level_number",
"add_logger_name",
"filter_by_level",
"get_logger",
"recreate_defaults",
"render_to_log_args_and_kwargs",
"render_to_log_kwargs",
]
@@ -55,8 +71,7 @@ def recreate_defaults(*, log_level: int | None = logging.NOTSET) -> None:
As with vanilla defaults, the backwards-compatibility guarantees don't
apply to the settings applied here.
Arguments:
Args:
log_level:
If `None`, don't configure standard library logging **at all**.
@@ -67,6 +82,8 @@ def recreate_defaults(*, log_level: int | None = logging.NOTSET) -> None:
configure it yourself.
.. versionadded:: 22.1.0
.. versionchanged:: 23.3.0 Added `add_logger_name`.
.. versionchanged:: 25.1.0 Added `PositionalArgumentsFormatter`.
"""
if log_level is not None:
kw = {"force": True}
@@ -81,8 +98,10 @@ def recreate_defaults(*, log_level: int | None = logging.NOTSET) -> None:
_config.reset_defaults()
_config.configure(
processors=[
PositionalArgumentsFormatter(), # handled by native loggers
merge_contextvars,
add_log_level,
add_logger_name,
StackInfoRenderer(),
_config._BUILTIN_DEFAULT_PROCESSORS[-2], # TimeStamper
_config._BUILTIN_DEFAULT_PROCESSORS[-1], # ConsoleRenderer
@@ -135,43 +154,46 @@ class BoundLogger(BoundLoggerBase):
.. versionadded:: 23.1.0
Async variants `alog()`, `adebug()`, `ainfo()`, and so forth.
.. versionchanged:: 24.2.0
Callsite parameters are now also collected by
`structlog.processors.CallsiteParameterAdder` for async log methods.
"""
_logger: logging.Logger
def bind(self, **new_values: Any) -> BoundLogger:
def bind(self, **new_values: Any) -> Self:
"""
Return a new logger with *new_values* added to the existing ones.
"""
return super().bind(**new_values) # type: ignore[return-value]
return super().bind(**new_values)
def unbind(self, *keys: str) -> BoundLogger:
def unbind(self, *keys: str) -> Self:
"""
Return a new logger with *keys* removed from the context.
Raises:
KeyError: If the key is not part of the context.
"""
return super().unbind(*keys) # type: ignore[return-value]
return super().unbind(*keys)
def try_unbind(self, *keys: str) -> BoundLogger:
def try_unbind(self, *keys: str) -> Self:
"""
Like :meth:`unbind`, but best effort: missing keys are ignored.
.. versionadded:: 18.2.0
"""
return super().try_unbind(*keys) # type: ignore[return-value]
return super().try_unbind(*keys)
def new(self, **new_values: Any) -> BoundLogger:
def new(self, **new_values: Any) -> Self:
"""
Clear context and binds *initial_values* using `bind`.
Only necessary with dict implementations that keep global state like
those wrapped by `structlog.threadlocal.wrap_dict` when threads
are re-used.
are reused.
"""
return super().new(**new_values) # type: ignore[return-value]
return super().new(**new_values)
def debug(self, event: str | None = None, *args: Any, **kw: Any) -> Any:
"""
@@ -205,16 +227,21 @@ class BoundLogger(BoundLoggerBase):
"""
return self._proxy_to_logger("critical", event, *args, **kw)
def fatal(self, event: str | None = None, *args: Any, **kw: Any) -> Any:
"""
Process event and call `logging.Logger.critical` with the result.
"""
return self._proxy_to_logger("critical", event, *args, **kw)
def exception(
self, event: str | None = None, *args: Any, **kw: Any
) -> Any:
"""
Process event and call `logging.Logger.error` with the result,
after setting ``exc_info`` to `True`.
Process event and call `logging.Logger.exception` with the result,
after setting ``exc_info`` to `True` if it's not already set.
"""
kw.setdefault("exc_info", True)
return self.error(event, *args, **kw)
return self._proxy_to_logger("exception", event, *args, **kw)
def log(
self, level: int, event: str | None = None, *args: Any, **kw: Any
@@ -223,9 +250,7 @@ class BoundLogger(BoundLoggerBase):
Process *event* and call the appropriate logging method depending on
*level*.
"""
return self._proxy_to_logger(_LEVEL_TO_NAME[level], event, *args, **kw)
fatal = critical
return self._proxy_to_logger(LEVEL_TO_NAME[level], event, *args, **kw)
def _proxy_to_logger(
self,
@@ -386,12 +411,16 @@ class BoundLogger(BoundLoggerBase):
"""
Merge contextvars and log using the sync logger in a thread pool.
"""
scs_token = _ASYNC_CALLING_STACK.set(sys._getframe().f_back.f_back) # type: ignore[union-attr, arg-type, unused-ignore]
ctx = contextvars.copy_context()
await asyncio.get_running_loop().run_in_executor(
None,
lambda: ctx.run(lambda: meth(event, *args, **kw)),
)
try:
await asyncio.get_running_loop().run_in_executor(
None,
lambda: ctx.run(lambda: meth(event, *args, **kw)),
)
finally:
_ASYNC_CALLING_STACK.reset(scs_token)
async def adebug(self, event: str, *args: Any, **kw: Any) -> None:
"""
@@ -433,7 +462,13 @@ class BoundLogger(BoundLoggerBase):
"""
await self._dispatch_to_sync(self.critical, event, args, kw)
afatal = acritical
async def afatal(self, event: str, *args: Any, **kw: Any) -> None:
"""
Log using `critical()`, but asynchronously in a separate thread.
.. versionadded:: 23.1.0
"""
await self._dispatch_to_sync(self.critical, event, args, kw)
async def aexception(self, event: str, *args: Any, **kw: Any) -> None:
"""
@@ -492,9 +527,11 @@ class AsyncBoundLogger:
.. versionchanged:: 20.2.0 fix _dispatch_to_sync contextvars usage
.. deprecated:: 23.1.0
Use the regular `BoundLogger` with its a-prefixed methods instead.
.. versionchanged:: 23.3.0
Callsite parameters are now also collected for async log methods.
"""
__slots__ = ("sync_bl", "_loop")
__slots__ = ("_loop", "sync_bl")
#: The wrapped synchronous logger. It is useful to be able to log
#: synchronously occasionally.
@@ -588,12 +625,16 @@ class AsyncBoundLogger:
"""
Merge contextvars and log using the sync logger in a thread pool.
"""
scs_token = _ASYNC_CALLING_STACK.set(sys._getframe().f_back.f_back) # type: ignore[union-attr, arg-type, unused-ignore]
ctx = contextvars.copy_context()
await asyncio.get_running_loop().run_in_executor(
self._executor,
lambda: ctx.run(lambda: meth(event, *args, **kw)),
)
try:
await asyncio.get_running_loop().run_in_executor(
self._executor,
lambda: ctx.run(lambda: meth(event, *args, **kw)),
)
finally:
_ASYNC_CALLING_STACK.reset(scs_token)
async def debug(self, event: str, *args: Any, **kw: Any) -> None:
await self._dispatch_to_sync(self.sync_bl.debug, event, args, kw)
@@ -604,7 +645,8 @@ class AsyncBoundLogger:
async def warning(self, event: str, *args: Any, **kw: Any) -> None:
await self._dispatch_to_sync(self.sync_bl.warning, event, args, kw)
warn = warning
async def warn(self, event: str, *args: Any, **kw: Any) -> None:
await self._dispatch_to_sync(self.sync_bl.warning, event, args, kw)
async def error(self, event: str, *args: Any, **kw: Any) -> None:
await self._dispatch_to_sync(self.sync_bl.error, event, args, kw)
@@ -612,7 +654,8 @@ class AsyncBoundLogger:
async def critical(self, event: str, *args: Any, **kw: Any) -> None:
await self._dispatch_to_sync(self.sync_bl.critical, event, args, kw)
fatal = critical
async def fatal(self, event: str, *args: Any, **kw: Any) -> None:
await self._dispatch_to_sync(self.sync_bl.critical, event, args, kw)
async def exception(self, event: str, *args: Any, **kw: Any) -> None:
# To make `log.exception("foo") work, we have to check if the user
@@ -642,8 +685,7 @@ class LoggerFactory:
>>> from structlog.stdlib import LoggerFactory
>>> configure(logger_factory=LoggerFactory())
Arguments:
Args:
ignore_frame_names:
When guessing the name of a logger, skip frames whose names *start*
with one of these. For example, in pyramid applications you'll
@@ -745,7 +787,12 @@ def filter_by_level(
...
DropEvent
"""
if logger.isEnabledFor(_NAME_TO_LEVEL[method_name]):
if (
# We can't use logger.isEnabledFor() because it's always disabled when
# a log entry is in flight on Python 3.14 and later,
not logger.disabled
and NAME_TO_LEVEL[method_name] >= logger.getEffectiveLevel()
):
return event_dict
raise DropEvent
@@ -769,7 +816,7 @@ def add_log_level_number(
.. versionadded:: 18.2.0
"""
event_dict["level_number"] = _NAME_TO_LEVEL[method_name]
event_dict["level_number"] = NAME_TO_LEVEL[method_name]
return event_dict
@@ -801,8 +848,7 @@ class ExtraAdder:
This processor can be used for adding data passed in the ``extra``
parameter of the `logging` module's log methods to the event dictionary.
Arguments:
Args:
allow:
An optional collection of attributes that, if present in
`logging.LogRecord` objects, will be copied to event dictionaries.
@@ -854,28 +900,66 @@ class ExtraAdder:
event_dict[key] = record.__dict__[key]
LOG_KWARG_NAMES = ("exc_info", "stack_info", "stacklevel")
def render_to_log_args_and_kwargs(
_: logging.Logger, __: str, event_dict: EventDict
) -> tuple[tuple[Any, ...], dict[str, Any]]:
"""
Render ``event_dict`` into positional and keyword arguments for
`logging.Logger` logging methods.
See `logging.Logger.debug` method for keyword arguments reference.
The ``event`` field is passed in the first positional argument, positional
arguments from ``positional_args`` field are passed in subsequent positional
arguments, keyword arguments are extracted from the *event_dict* and the
rest of the *event_dict* is added as ``extra``.
This allows you to defer formatting to `logging`.
.. versionadded:: 25.1.0
"""
args = (event_dict.pop("event"), *event_dict.pop("positional_args", ()))
kwargs = {
kwarg_name: event_dict.pop(kwarg_name)
for kwarg_name in LOG_KWARG_NAMES
if kwarg_name in event_dict
}
if event_dict:
kwargs["extra"] = event_dict
return args, kwargs
def render_to_log_kwargs(
_: logging.Logger, __: str, event_dict: EventDict
) -> EventDict:
"""
Render ``event_dict`` into keyword arguments for `logging.log`.
Render ``event_dict`` into keyword arguments for `logging.Logger` logging
methods.
See `logging.Logger.debug` method for keyword arguments reference.
The ``event`` field is translated into ``msg`` and the rest of the
*event_dict* is added as ``extra``.
The ``event`` field is translated into ``msg``, keyword arguments are
extracted from the *event_dict* and the rest of the *event_dict* is added as
``extra``.
This allows you to defer formatting to `logging`.
.. versionadded:: 17.1.0
.. versionchanged:: 22.1.0
``exc_info``, ``stack_info``, and ``stackLevel`` are passed as proper
``exc_info``, ``stack_info``, and ``stacklevel`` are passed as proper
kwargs and not put into ``extra``.
.. versionchanged:: 24.2.0
``stackLevel`` corrected to ``stacklevel``.
"""
return {
"msg": event_dict.pop("event"),
"extra": event_dict,
**{
kw: event_dict.pop(kw)
for kw in ("exc_info", "stack_info", "stackLevel")
for kw in LOG_KWARG_NAMES
if kw in event_dict
},
}
@@ -893,8 +977,7 @@ class ProcessorFormatter(logging.Formatter):
Please refer to :ref:`processor-formatter` for examples.
Arguments:
Args:
foreign_pre_chain:
If not `None`, it is used as a processor chain that is applied to
**non**-*structlog* log entries before the event dictionary is
@@ -951,8 +1034,11 @@ class ProcessorFormatter(logging.Formatter):
This parameter exists for historic reasons. Please use *processors*
instead.
Raises:
use_get_message:
If True, use ``record.getMessage`` to get a fully rendered log
message, otherwise use ``str(record.msg)``. (default: True)
Raises:
TypeError: If both or neither *processor* and *processors* are passed.
.. versionadded:: 17.1.0
@@ -963,6 +1049,7 @@ class ProcessorFormatter(logging.Formatter):
.. deprecated:: 21.3.0
*processor* (singular) in favor of *processors* (plural). Removal is not
planned.
.. versionadded:: 23.3.0 *use_get_message*
"""
def __init__(
@@ -974,6 +1061,7 @@ class ProcessorFormatter(logging.Formatter):
keep_stack_info: bool = False,
logger: logging.Logger | None = None,
pass_foreign_args: bool = False,
use_get_message: bool = True,
*args: Any,
**kwargs: Any,
) -> None:
@@ -981,7 +1069,10 @@ class ProcessorFormatter(logging.Formatter):
super().__init__(*args, fmt=fmt, **kwargs) # type: ignore[misc]
if processor and processors:
msg = "The `processor` and `processors` arguments are mutually exclusive."
msg = (
"The `processor` and `processors` arguments are mutually"
" exclusive."
)
raise TypeError(msg)
self.processors: Sequence[Processor]
@@ -998,6 +1089,7 @@ class ProcessorFormatter(logging.Formatter):
self.keep_stack_info = keep_stack_info
self.logger = logger
self.pass_foreign_args = pass_foreign_args
self.use_get_message = use_get_message
def format(self, record: logging.LogRecord) -> str:
"""
@@ -1013,23 +1105,28 @@ class ProcessorFormatter(logging.Formatter):
logger = getattr(record, "_logger", _SENTINEL)
meth_name = getattr(record, "_name", "__structlog_sentinel__")
ed: ProcessorReturnValue
if logger is not _SENTINEL and meth_name != "__structlog_sentinel__":
# Both attached by wrap_for_formatter
if self.logger is not None:
logger = self.logger
meth_name = record._name # type: ignore[attr-defined]
meth_name = cast(str, record._name) # type:ignore[attr-defined]
# We need to copy because it's possible that the same record gets
# processed by multiple logging formatters. LogRecord.getMessage
# processed by multiple logging formatters. LogRecord.getMessage
# would transform our dict into a str.
ed = record.msg.copy() # type: ignore[union-attr]
ed = cast(Dict[str, Any], record.msg).copy()
ed["_record"] = record
ed["_from_structlog"] = True
else:
logger = self.logger
meth_name = record.levelname.lower()
ed = {
"event": record.getMessage(),
"event": (
record.getMessage()
if self.use_get_message
else str(record.msg)
),
"_record": record,
"_from_structlog": False,
}
@@ -1039,27 +1136,38 @@ class ProcessorFormatter(logging.Formatter):
record.args = ()
# Add stack-related attributes to event_dict and unset them
# on the record copy so that the base implementation wouldn't
# append stacktraces to the output.
# Add stack-related attributes to the event dict
if record.exc_info:
ed["exc_info"] = record.exc_info
if record.stack_info:
ed["stack_info"] = record.stack_info
if not self.keep_exc_info:
record.exc_text = None
record.exc_info = None
if not self.keep_stack_info:
record.stack_info = None
# Non-structlog allows to run through a chain to prepare it for the
# final processor (e.g. adding timestamps and log levels).
for proc in self.foreign_pre_chain or ():
ed = proc(logger, meth_name, ed)
ed = cast(EventDict, proc(logger, meth_name, ed))
# If required, unset stack-related attributes on the record copy so
# that the base implementation doesn't append stacktraces to the
# output.
if not self.keep_exc_info:
record.exc_text = None
record.exc_info = None
if not self.keep_stack_info:
record.stack_info = None
for p in self.processors:
ed = p(logger, meth_name, ed)
ed = p(logger, meth_name, ed) # type: ignore[arg-type]
if not isinstance(ed, str):
warnings.warn(
"The last processor in ProcessorFormatter.processors must "
f"return a string, but {self.processors[-1]} returned a "
f"{type(ed)} instead.",
category=RuntimeWarning,
stacklevel=1,
)
ed = cast(str, ed)
record.msg = ed
@@ -1072,11 +1180,12 @@ class ProcessorFormatter(logging.Formatter):
"""
Wrap *logger*, *name*, and *event_dict*.
The result is later unpacked by `ProcessorFormatter` when
formatting log entries.
The result is later unpacked by `ProcessorFormatter` when formatting
log entries.
Use this static method as the renderer (i.e. final processor) if you
want to use `ProcessorFormatter` in your `logging` configuration.
Use this static method as the renderer (in other words, final
processor) if you want to use `ProcessorFormatter` in your `logging`
configuration.
"""
return (event_dict,), {"extra": {"_logger": logger, "_name": name}}

View File

@@ -17,6 +17,7 @@ from contextlib import contextmanager
from typing import Any, Generator, NamedTuple, NoReturn
from ._config import configure, get_config
from ._log_levels import map_method_name
from .exceptions import DropEvent
from .typing import EventDict, WrappedLogger
@@ -41,6 +42,10 @@ class LogCapture:
:ivar List[structlog.typing.EventDict] entries: The captured log entries.
.. versionadded:: 20.1.0
.. versionchanged:: 24.3.0
Added mapping from "exception" to "error"
Added mapping from "warn" to "warning"
"""
entries: list[EventDict]
@@ -51,7 +56,7 @@ class LogCapture:
def __call__(
self, _: WrappedLogger, method_name: str, event_dict: EventDict
) -> NoReturn:
event_dict["log_level"] = method_name
event_dict["log_level"] = map_method_name(method_name)
self.entries.append(event_dict)
raise DropEvent
@@ -139,8 +144,7 @@ class CapturedCall(NamedTuple):
Can also be unpacked like a tuple.
Arguments:
Args:
method_name: The method name that got called.
args: A tuple of the positional arguments.
@@ -173,7 +177,7 @@ class CapturingLogger:
self.calls = []
def __repr__(self) -> str:
return f"<CapturingLogger with { len(self.calls) } call(s)>"
return f"<CapturingLogger with {len(self.calls)} call(s)>"
def __getattr__(self, name: str) -> Any:
"""
@@ -190,7 +194,7 @@ class CapturingLoggerFactory:
r"""
Produce and cache `CapturingLogger`\ s.
Each factory produces and re-uses only **one** logger.
Each factory produces and reuses only **one** logger.
You can access it via the ``logger`` attribute.
@@ -200,6 +204,7 @@ class CapturingLoggerFactory:
.. versionadded:: 20.2.0
"""
logger: CapturingLogger
def __init__(self) -> None:

View File

@@ -83,8 +83,7 @@ def wrap_dict(dict_class: type[Context]) -> type[Context]:
The wrapped class and used to keep global in the current thread.
Arguments:
Args:
dict_class: Class used for keeping context.
.. deprecated:: 22.1.0
@@ -106,20 +105,18 @@ def as_immutable(logger: TLLogger) -> TLLogger:
"""
Extract the context from a thread local logger into an immutable logger.
Arguments:
Args:
logger (structlog.typing.BindableLogger):
A logger with *possibly* thread local state.
Returns:
:class:`~structlog.BoundLogger` with an immutable context.
.. deprecated:: 22.1.0
"""
_deprecated()
if isinstance(logger, BoundLoggerLazyProxy):
logger = logger.bind() # type: ignore[assignment]
logger = logger.bind()
try:
ctx = logger._context._tl.dict_.__class__( # type: ignore[union-attr]
@@ -150,9 +147,12 @@ def tmp_bind(
.. deprecated:: 22.1.0
"""
_deprecated()
if isinstance(logger, BoundLoggerLazyProxy):
logger = logger.bind()
saved = as_immutable(logger)._context
try:
yield logger.bind(**tmp_values) # type: ignore[misc]
yield logger.bind(**tmp_values)
finally:
logger._context.clear()
logger._context.update(saved)

View File

@@ -6,7 +6,7 @@
"""
Extract a structured traceback from an exception.
`Contributed by Will McGugan
Based on work by Will McGugan
<https://github.com/hynek/structlog/pull/407#issuecomment-1150926246>`_ from
`rich.traceback
<https://github.com/Textualize/rich/blob/972dedff/rich/traceback.py>`_.
@@ -15,11 +15,20 @@ Extract a structured traceback from an exception.
from __future__ import annotations
import os
import os.path
import sys
from dataclasses import asdict, dataclass, field
from traceback import walk_tb
from types import TracebackType
from typing import Any, Tuple, Union
from types import ModuleType, TracebackType
from typing import Any, Iterable, Sequence, Tuple, Union
try:
import rich
import rich.pretty
except ImportError:
rich = None # type: ignore[assignment]
from .typing import ExcInfo
@@ -37,6 +46,7 @@ __all__ = [
SHOW_LOCALS = True
LOCALS_MAX_LENGTH = 10
LOCALS_MAX_STRING = 80
MAX_FRAMES = 50
@@ -52,7 +62,6 @@ class Frame:
filename: str
lineno: int
name: str
line: str = ""
locals: dict[str, str] | None = None
@@ -73,13 +82,22 @@ class SyntaxError_: # noqa: N801
class Stack:
"""
Represents an exception and a list of stack frames.
.. versionchanged:: 25.2.0
Added the *exc_notes* field.
.. versionchanged:: 25.4.0
Added the *is_group* and *exceptions* fields.
"""
exc_type: str
exc_value: str
exc_notes: list[str] = field(default_factory=list)
syntax_error: SyntaxError_ | None = None
is_cause: bool = False
frames: list[Frame] = field(default_factory=list)
is_group: bool = False
exceptions: list[Trace] = field(default_factory=list)
@dataclass
@@ -92,27 +110,72 @@ class Trace:
def safe_str(_object: Any) -> str:
"""Don't allow exceptions from __str__ to propegate."""
"""Don't allow exceptions from __str__ to propagate."""
try:
return str(_object)
except Exception as error: # noqa: BLE001
return f"<str-error {str(error)!r}>"
def to_repr(obj: Any, max_string: int | None = None) -> str:
"""Get repr string for an object, but catch errors."""
if isinstance(obj, str):
obj_repr = obj
def to_repr(
obj: Any,
max_length: int | None = None,
max_string: int | None = None,
use_rich: bool = True,
) -> str:
"""
Get repr string for an object, but catch errors.
:func:`repr()` is used for strings, too, so that secret wrappers that
inherit from :func:`str` and overwrite ``__repr__()`` are handled correctly
(i.e. secrets are not logged in plain text).
Args:
obj: Object to get a string representation for.
max_length: Maximum length of containers before abbreviating, or
``None`` for no abbreviation.
max_string: Maximum length of string before truncating, or ``None`` to
disable truncating.
use_rich: If ``True`` (the default), use rich_ to compute the repr.
If ``False`` or if rich_ is not installed, fall back to a simpler
algorithm.
Returns:
The string representation of *obj*.
.. versionchanged:: 24.3.0
Added *max_length* argument. Use :program:`rich` to render locals if it
is available. Call :func:`repr()` on strings in fallback
implementation.
"""
if use_rich and rich is not None:
# Let rich render the repr if it is available.
# It produces much better results for containers and dataclasses/attrs.
obj_repr = rich.pretty.traverse(
obj, max_length=max_length, max_string=max_string
).render()
else:
# Generate a (truncated) repr if rich is not available.
# Handle str/bytes differently to get better results for truncated
# representations. Also catch all errors, similarly to "safe_str()".
try:
obj_repr = repr(obj)
if isinstance(obj, (str, bytes)):
if max_string is not None and len(obj) > max_string:
truncated = len(obj) - max_string
obj_repr = f"{obj[:max_string]!r}+{truncated}"
else:
obj_repr = repr(obj)
else:
obj_repr = repr(obj)
if max_string is not None and len(obj_repr) > max_string:
truncated = len(obj_repr) - max_string
obj_repr = f"{obj_repr[:max_string]!r}+{truncated}"
except Exception as error: # noqa: BLE001
obj_repr = f"<repr-error {str(error)!r}>"
if max_string is not None and len(obj_repr) > max_string:
truncated = len(obj_repr) - max_string
obj_repr = f"{obj_repr[:max_string]!r}+{truncated}"
return obj_repr
@@ -122,13 +185,16 @@ def extract(
traceback: TracebackType | None,
*,
show_locals: bool = False,
locals_max_length: int = LOCALS_MAX_LENGTH,
locals_max_string: int = LOCALS_MAX_STRING,
locals_hide_dunder: bool = True,
locals_hide_sunder: bool = False,
use_rich: bool = True,
) -> Trace:
"""
Extract traceback information.
Arguments:
Args:
exc_type: Exception type.
exc_value: Exception value.
@@ -137,15 +203,38 @@ def extract(
show_locals: Enable display of local variables. Defaults to False.
locals_max_string:
Maximum length of string before truncating, or ``None`` to disable.
locals_max_length:
Maximum length of containers before abbreviating, or ``None`` for
no abbreviation.
max_frames: Maximum number of frames in each stack
locals_max_string:
Maximum length of string before truncating, or ``None`` to disable
truncating.
locals_hide_dunder:
Hide locals prefixed with double underscore.
Defaults to True.
locals_hide_sunder:
Hide locals prefixed with single underscore.
This implies hiding *locals_hide_dunder*.
Defaults to False.
use_rich: If ``True`` (the default), use rich_ to compute the repr.
If ``False`` or if rich_ is not installed, fall back to a simpler
algorithm.
Returns:
A Trace instance with structured information about all exceptions.
.. versionadded:: 22.1.0
.. versionchanged:: 24.3.0
Added *locals_max_length*, *locals_hide_sunder*, *locals_hide_dunder*
and *use_rich* arguments.
.. versionchanged:: 25.4.0
Handle exception groups.
"""
stacks: list[Stack] = []
@@ -155,9 +244,30 @@ def extract(
stack = Stack(
exc_type=safe_str(exc_type.__name__),
exc_value=safe_str(exc_value),
exc_notes=[
safe_str(note) for note in getattr(exc_value, "__notes__", ())
],
is_cause=is_cause,
)
if sys.version_info >= (3, 11):
if isinstance(exc_value, (BaseExceptionGroup, ExceptionGroup)): # noqa: F821
stack.is_group = True
for exception in exc_value.exceptions:
stack.exceptions.append(
extract(
type(exception),
exception,
exception.__traceback__,
show_locals=show_locals,
locals_max_length=locals_max_length,
locals_max_string=locals_max_string,
locals_hide_dunder=locals_hide_dunder,
locals_hide_sunder=locals_hide_sunder,
use_rich=use_rich,
)
)
if isinstance(exc_value, SyntaxError):
stack.syntax_error = SyntaxError_(
offset=exc_value.offset or 0,
@@ -170,20 +280,47 @@ def extract(
stacks.append(stack)
append = stack.frames.append # pylint: disable=no-member
def get_locals(
iter_locals: Iterable[tuple[str, object]],
) -> Iterable[tuple[str, object]]:
"""Extract locals from an iterator of key pairs."""
if not (locals_hide_dunder or locals_hide_sunder):
yield from iter_locals
return
for key, value in iter_locals:
if locals_hide_dunder and key.startswith("__"):
continue
if locals_hide_sunder and key.startswith("_"):
continue
yield key, value
for frame_summary, line_no in walk_tb(traceback):
filename = frame_summary.f_code.co_filename
if filename and not filename.startswith("<"):
filename = os.path.abspath(filename)
# Rich has this, but we are not rich and like to keep all frames:
# if frame_summary.f_locals.get("_rich_traceback_omit", False):
# continue # noqa: ERA001
frame = Frame(
filename=filename or "?",
lineno=line_no,
name=frame_summary.f_code.co_name,
locals={
key: to_repr(value, max_string=locals_max_string)
for key, value in frame_summary.f_locals.items()
}
if show_locals
else None,
locals=(
{
key: to_repr(
value,
max_length=locals_max_length,
max_string=locals_max_string,
use_rich=use_rich,
)
for key, value in get_locals(
frame_summary.f_locals.items()
)
}
if show_locals
else None
),
)
append(frame)
@@ -220,15 +357,31 @@ class ExceptionDictTransformer:
These dictionaries are based on :class:`Stack` instances generated by
:func:`extract()` and can be dumped to JSON.
Arguments:
Args:
show_locals:
Whether or not to include the values of a stack frame's local
variables.
locals_max_length:
Maximum length of containers before abbreviating, or ``None`` for
no abbreviation.
locals_max_string:
The maximum length after which long string representations are
truncated.
Maximum length of string before truncating, or ``None`` to disable
truncating.
locals_hide_dunder:
Hide locals prefixed with double underscore.
Defaults to True.
locals_hide_sunder:
Hide locals prefixed with single underscore.
This implies hiding *locals_hide_dunder*.
Defaults to False.
suppress:
Optional sequence of modules or paths for which to suppress the
display of locals even if *show_locals* is ``True``.
max_frames:
Maximum number of frames in each stack. Frames are removed from
@@ -237,32 +390,78 @@ class ExceptionDictTransformer:
the exception actually happened. With larger web frameworks, this
does not always work, so you should stick with the default.
use_rich: If ``True`` (the default), use rich_ to compute the repr of
locals. If ``False`` or if rich_ is not installed, fall back to
a simpler algorithm.
.. seealso::
:doc:`exceptions` for a broader explanation of *structlog*'s exception
features.
.. versionchanged:: 24.3.0
Added *locals_max_length*, *locals_hide_sunder*, *locals_hide_dunder*,
*suppress* and *use_rich* arguments.
.. versionchanged:: 25.1.0
*locals_max_length* and *locals_max_string* may be None to disable
truncation.
.. versionchanged:: 25.4.0
Handle exception groups.
"""
def __init__(
self,
show_locals: bool = True,
*,
show_locals: bool = SHOW_LOCALS,
locals_max_length: int = LOCALS_MAX_LENGTH,
locals_max_string: int = LOCALS_MAX_STRING,
locals_hide_dunder: bool = True,
locals_hide_sunder: bool = False,
suppress: Iterable[str | ModuleType] = (),
max_frames: int = MAX_FRAMES,
use_rich: bool = True,
) -> None:
if locals_max_string < 0:
if locals_max_length is not None and locals_max_length < 0:
msg = f'"locals_max_length" must be >= 0: {locals_max_length}'
raise ValueError(msg)
if locals_max_string is not None and locals_max_string < 0:
msg = f'"locals_max_string" must be >= 0: {locals_max_string}'
raise ValueError(msg)
if max_frames < 2:
msg = f'"max_frames" must be >= 2: {max_frames}'
raise ValueError(msg)
self.show_locals = show_locals
self.locals_max_length = locals_max_length
self.locals_max_string = locals_max_string
self.locals_hide_dunder = locals_hide_dunder
self.locals_hide_sunder = locals_hide_sunder
self.suppress: Sequence[str] = []
for suppress_entity in suppress:
if not isinstance(suppress_entity, str):
if suppress_entity.__file__ is None:
msg = (
f'"suppress" item {suppress_entity!r} must be a '
f"module with '__file__' attribute"
)
raise ValueError(msg)
path = os.path.dirname(suppress_entity.__file__)
else:
path = suppress_entity
path = os.path.normpath(os.path.abspath(path))
self.suppress.append(path)
self.max_frames = max_frames
self.use_rich = use_rich
def __call__(self, exc_info: ExcInfo) -> list[dict[str, Any]]:
trace = extract(
*exc_info,
show_locals=self.show_locals,
locals_max_length=self.locals_max_length,
locals_max_string=self.locals_max_string,
locals_hide_dunder=self.locals_hide_dunder,
locals_hide_sunder=self.locals_hide_sunder,
use_rich=self.use_rich,
)
for stack in trace.stacks:
@@ -283,4 +482,21 @@ class ExceptionDictTransformer:
*stack.frames[-half:],
]
return [asdict(stack) for stack in trace.stacks]
return self._as_dict(trace)
def _as_dict(self, trace: Trace) -> list[dict[str, Any]]:
stack_dicts = []
for stack in trace.stacks:
stack_dict = asdict(stack)
for frame_dict in stack_dict["frames"]:
if frame_dict["locals"] is None or any(
frame_dict["filename"].startswith(path)
for path in self.suppress
):
del frame_dict["locals"]
if stack.is_group:
stack_dict["exceptions"] = [
self._as_dict(t) for t in stack.exceptions
]
stack_dicts.append(stack_dict)
return stack_dicts

View File

@@ -24,7 +24,6 @@ from zope.interface import implementer
from ._base import BoundLoggerBase
from ._config import _BUILTIN_DEFAULT_PROCESSORS
from ._utils import until_not_interrupted
from .processors import JSONRenderer as GenericJSONRenderer
from .typing import EventDict, WrappedLogger
@@ -204,8 +203,7 @@ class PlainFileLogObserver:
Great to just print JSON to stdout where you catch it with something like
runit.
Arguments:
Args:
file: File to print to.
.. versionadded:: 0.2.0
@@ -216,12 +214,11 @@ class PlainFileLogObserver:
self._flush = file.flush
def __call__(self, eventDict: EventDict) -> None:
until_not_interrupted(
self._write,
self._write(
textFromEventDict(eventDict) # type: ignore[arg-type, operator]
+ "\n",
)
until_not_interrupted(self._flush)
self._flush()
@implementer(ILogObserver)
@@ -229,8 +226,7 @@ class JSONLogObserverWrapper:
"""
Wrap a log *observer* and render non-`JSONRenderer` entries to JSON.
Arguments:
Args:
observer (ILogObserver):
Twisted log observer to wrap. For example
:class:`PlainFileObserver` or Twisted's stock `FileLogObserver
@@ -293,8 +289,7 @@ class EventAdapter:
<https://docs.twisted.org/en/stable/api/twisted.python.log.html#err>`_
behave as expected.
Arguments:
Args:
dictRenderer:
Renderer that is used for the actual log message. Please note that
structlog comes with a dedicated `JSONRenderer`.
@@ -306,8 +301,9 @@ class EventAdapter:
def __init__(
self,
dictRenderer: Callable[[WrappedLogger, str, EventDict], str]
| None = None,
dictRenderer: (
Callable[[WrappedLogger, str, EventDict], str] | None
) = None,
) -> None:
self._dictRenderer = dictRenderer or _BUILTIN_DEFAULT_PROCESSORS[-1]

View File

@@ -26,13 +26,13 @@ from .typing import (
__all__ = (
"WrappedLogger",
"BindableLogger",
"Context",
"EventDict",
"Processor",
"ExcInfo",
"ExceptionRenderer",
"ExceptionTransformer",
"BindableLogger",
"FilteringBoundLogger",
"Processor",
"WrappedLogger",
)

View File

@@ -14,6 +14,8 @@ probably change to something more elegant.
from __future__ import annotations
import sys
from types import TracebackType
from typing import (
Any,
@@ -31,6 +33,12 @@ from typing import (
)
if sys.version_info >= (3, 11):
from typing import Self
else:
from typing_extensions import Self
WrappedLogger = Any
"""
A logger that is wrapped by a bound logger and is ultimately responsible for
@@ -60,11 +68,15 @@ copy itself.
.. versionadded:: 20.2.0
"""
Processor = Callable[
[WrappedLogger, str, EventDict],
Union[Mapping[str, Any], str, bytes, bytearray, Tuple[Any, ...]],
ProcessorReturnValue = Union[
Mapping[str, Any], str, bytes, bytearray, Tuple[Any, ...]
]
"""
A value returned by a processor.
"""
Processor = Callable[[WrappedLogger, str, EventDict], ProcessorReturnValue]
"""
A callable that is part of the processor chain.
See :doc:`processors`.
@@ -102,20 +114,17 @@ class ExceptionTransformer(Protocol):
Used by `structlog.processors.format_exc_info()` and
`structlog.processors.ExceptionPrettyPrinter`.
Arguments:
Args:
exc_info: Is the exception tuple to format
Returns:
Anything that can be rendered by the last processor in your chain,
for example, a string or a JSON-serializable structure.
Anything that can be rendered by the last processor in your chain, for
example, a string or a JSON-serializable structure.
.. versionadded:: 22.1.0
"""
def __call__(self, exc_info: ExcInfo) -> Any:
...
def __call__(self, exc_info: ExcInfo) -> Any: ...
@runtime_checkable
@@ -129,17 +138,13 @@ class BindableLogger(Protocol):
_context: Context
def bind(self, **new_values: Any) -> BindableLogger:
...
def bind(self, **new_values: Any) -> Self: ...
def unbind(self, *keys: str) -> BindableLogger:
...
def unbind(self, *keys: str) -> Self: ...
def try_unbind(self, *keys: str) -> BindableLogger:
...
def try_unbind(self, *keys: str) -> Self: ...
def new(self, **new_values: Any) -> BindableLogger:
...
def new(self, **new_values: Any) -> Self: ...
class FilteringBoundLogger(BindableLogger, Protocol):
@@ -185,6 +190,20 @@ class FilteringBoundLogger(BindableLogger, Protocol):
.. versionadded:: 22.1.0
"""
def is_enabled_for(self, level: int) -> bool:
"""
Check whether the logger is enabled for *level*.
.. versionadded:: 25.1.0
"""
def get_effective_level(self) -> int:
"""
Return the effective level of the logger.
.. versionadded:: 25.1.0
"""
def debug(self, event: str, *args: Any, **kw: Any) -> Any:
"""
Log ``event % args`` with **kw** at **debug** level.