main commit

2025-10-16 16:30:25 +09:00
parent 91c7e04474
commit 537e7b363f
1146 changed files with 45926 additions and 77196 deletions


@@ -16,48 +16,32 @@ import contextvars
import functools
import logging
import sys
import warnings
from functools import partial
from typing import Any, Callable, Collection, Dict, Iterable, Sequence, cast
if sys.version_info >= (3, 11):
from typing import Self
else:
from typing_extensions import Self
from typing import Any, Callable, Collection, Iterable, Sequence
from . import _config
from ._base import BoundLoggerBase
from ._frames import _find_first_app_frame_and_name, _format_stack
from ._log_levels import LEVEL_TO_NAME, NAME_TO_LEVEL, add_log_level
from .contextvars import _ASYNC_CALLING_STACK, merge_contextvars
from ._log_levels import _LEVEL_TO_NAME, _NAME_TO_LEVEL, add_log_level
from .contextvars import merge_contextvars
from .exceptions import DropEvent
from .processors import StackInfoRenderer
from .typing import (
Context,
EventDict,
ExcInfo,
Processor,
ProcessorReturnValue,
WrappedLogger,
)
from .typing import Context, EventDict, ExcInfo, Processor, WrappedLogger
__all__ = [
"BoundLogger",
"add_log_level_number",
"add_log_level",
"add_logger_name",
"ExtraAdder",
"BoundLogger",
"filter_by_level",
"get_logger",
"LoggerFactory",
"PositionalArgumentsFormatter",
"ProcessorFormatter",
"add_log_level",
"add_log_level_number",
"add_logger_name",
"filter_by_level",
"get_logger",
"recreate_defaults",
"render_to_log_args_and_kwargs",
"render_to_log_kwargs",
]
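
A note on the import churn above: the removed block is the standard backport pattern for typing.Self, which only exists in the standard library from Python 3.11 on. A minimal sketch of the pattern, assuming typing_extensions is installed on older interpreters:

import sys

if sys.version_info >= (3, 11):
    from typing import Self
else:
    from typing_extensions import Self

class Query:
    def limit(self, n: int) -> Self:
        # Self preserves the concrete subclass type for chained calls,
        # which is why the newer code can drop the type: ignore comments.
        self._limit = n
        return self
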
@@ -71,7 +55,8 @@ def recreate_defaults(*, log_level: int | None = logging.NOTSET) -> None:
As with vanilla defaults, the backwards-compatibility guarantees don't
apply to the settings applied here.
Args:
Arguments:
log_level:
If `None`, don't configure standard library logging **at all**.
@@ -82,8 +67,6 @@ def recreate_defaults(*, log_level: int | None = logging.NOTSET) -> None:
configure it yourself.
.. versionadded:: 22.1.0
.. versionchanged:: 23.3.0 Added `add_logger_name`.
.. versionchanged:: 25.1.0 Added `PositionalArgumentsFormatter`.
"""
if log_level is not None:
kw = {"force": True}
@@ -98,10 +81,8 @@ def recreate_defaults(*, log_level: int | None = logging.NOTSET) -> None:
_config.reset_defaults()
_config.configure(
processors=[
PositionalArgumentsFormatter(), # handled by native loggers
merge_contextvars,
add_log_level,
add_logger_name,
StackInfoRenderer(),
_config._BUILTIN_DEFAULT_PROCESSORS[-2], # TimeStamper
_config._BUILTIN_DEFAULT_PROCESSORS[-1], # ConsoleRenderer
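
The dropped PositionalArgumentsFormatter() line matches its inline comment: stdlib loggers interpolate %-style positional arguments themselves, so the newer default pipeline leaves that to logging. A small usage sketch of recreate_defaults(); the exact output depends on the installed structlog version:

import logging
import structlog

# Reset structlog to stdlib-oriented defaults and configure logging itself.
structlog.stdlib.recreate_defaults(log_level=logging.INFO)

log = structlog.get_logger("demo")
log.info("user logged in", user_id=42)
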
@@ -154,46 +135,43 @@ class BoundLogger(BoundLoggerBase):
.. versionadded:: 23.1.0
Async variants `alog()`, `adebug()`, `ainfo()`, and so forth.
.. versionchanged:: 24.2.0
Callsite parameters are now also collected by
`structlog.processors.CallsiteParameterAdder` for async log methods.
"""
_logger: logging.Logger
def bind(self, **new_values: Any) -> Self:
def bind(self, **new_values: Any) -> BoundLogger:
"""
Return a new logger with *new_values* added to the existing ones.
"""
return super().bind(**new_values)
return super().bind(**new_values) # type: ignore[return-value]
def unbind(self, *keys: str) -> Self:
def unbind(self, *keys: str) -> BoundLogger:
"""
Return a new logger with *keys* removed from the context.
Raises:
KeyError: If the key is not part of the context.
"""
return super().unbind(*keys)
return super().unbind(*keys) # type: ignore[return-value]
def try_unbind(self, *keys: str) -> Self:
def try_unbind(self, *keys: str) -> BoundLogger:
"""
Like :meth:`unbind`, but best effort: missing keys are ignored.
.. versionadded:: 18.2.0
"""
return super().try_unbind(*keys)
return super().try_unbind(*keys) # type: ignore[return-value]
def new(self, **new_values: Any) -> Self:
def new(self, **new_values: Any) -> BoundLogger:
"""
Clear context and binds *initial_values* using `bind`.
Only necessary with dict implementations that keep global state like
those wrapped by `structlog.threadlocal.wrap_dict` when threads
are reused.
are re-used.
"""
return super().new(**new_values)
return super().new(**new_values) # type: ignore[return-value]
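
Both sides of the bind/unbind changes return a new logger and leave the original untouched; only the return annotation differs (Self versus a hard-coded BoundLogger plus type: ignore). A short usage sketch:

import structlog

log = structlog.stdlib.get_logger()
log = log.bind(request_id="abc123")  # new logger, context extended
log = log.unbind("request_id")       # raises KeyError for unknown keys
log = log.try_unbind("request_id")   # best effort, missing keys ignored
log = log.new(peer="10.0.0.1")       # clears context, then binds
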
def debug(self, event: str | None = None, *args: Any, **kw: Any) -> Any:
"""
@@ -227,21 +205,16 @@ class BoundLogger(BoundLoggerBase):
"""
return self._proxy_to_logger("critical", event, *args, **kw)
def fatal(self, event: str | None = None, *args: Any, **kw: Any) -> Any:
"""
Process event and call `logging.Logger.critical` with the result.
"""
return self._proxy_to_logger("critical", event, *args, **kw)
def exception(
self, event: str | None = None, *args: Any, **kw: Any
) -> Any:
"""
Process event and call `logging.Logger.exception` with the result,
after setting ``exc_info`` to `True` if it's not already set.
Process event and call `logging.Logger.error` with the result,
after setting ``exc_info`` to `True`.
"""
kw.setdefault("exc_info", True)
return self._proxy_to_logger("exception", event, *args, **kw)
return self.error(event, *args, **kw)
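
The exception() variants differ in routing: the removed version proxies to logging.Logger.exception and only defaults exc_info to True, while the restored one sets it and delegates to error(). From the caller's side both capture the active traceback:

import structlog

log = structlog.get_logger()

try:
    1 / 0
except ZeroDivisionError:
    # exc_info is filled in automatically, so the traceback is rendered.
    log.exception("division failed")
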
def log(
self, level: int, event: str | None = None, *args: Any, **kw: Any
@@ -250,7 +223,9 @@ class BoundLogger(BoundLoggerBase):
Process *event* and call the appropriate logging method depending on
*level*.
"""
return self._proxy_to_logger(LEVEL_TO_NAME[level], event, *args, **kw)
return self._proxy_to_logger(_LEVEL_TO_NAME[level], event, *args, **kw)
fatal = critical
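
The LEVEL_TO_NAME/_LEVEL_TO_NAME churn tracks structlog later making its level tables public; either way the lookup resolves a stdlib numeric level to the method name to proxy to. A sketch of the shape of that table (the real one lives in structlog's _log_levels module and covers all standard levels):

import logging

LEVEL_TO_NAME = {
    logging.CRITICAL: "critical",
    logging.ERROR: "error",
    logging.WARNING: "warning",
    logging.INFO: "info",
    logging.DEBUG: "debug",
}

assert LEVEL_TO_NAME[logging.WARNING] == "warning"
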
def _proxy_to_logger(
self,
@@ -411,16 +386,12 @@ class BoundLogger(BoundLoggerBase):
"""
Merge contextvars and log using the sync logger in a thread pool.
"""
scs_token = _ASYNC_CALLING_STACK.set(sys._getframe().f_back.f_back) # type: ignore[union-attr, arg-type, unused-ignore]
ctx = contextvars.copy_context()
try:
await asyncio.get_running_loop().run_in_executor(
None,
lambda: ctx.run(lambda: meth(event, *args, **kw)),
)
finally:
_ASYNC_CALLING_STACK.reset(scs_token)
await asyncio.get_running_loop().run_in_executor(
None,
lambda: ctx.run(lambda: meth(event, *args, **kw)),
)
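
Both versions of _dispatch_to_sync copy the current context and run the sync method in the default executor, so values bound via structlog.contextvars survive the thread hop; the removed lines additionally record the calling frame in _ASYNC_CALLING_STACK so callsite parameters point at the await site. The core technique, as a standalone sketch:

import asyncio
import contextvars

request_id = contextvars.ContextVar("request_id", default="-")

def sync_work() -> str:
    # Runs in a worker thread, but sees the copied context.
    return request_id.get()

async def main() -> None:
    request_id.set("abc123")
    ctx = contextvars.copy_context()
    loop = asyncio.get_running_loop()
    result = await loop.run_in_executor(None, lambda: ctx.run(sync_work))
    assert result == "abc123"

asyncio.run(main())
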
async def adebug(self, event: str, *args: Any, **kw: Any) -> None:
"""
@@ -462,13 +433,7 @@ class BoundLogger(BoundLoggerBase):
"""
await self._dispatch_to_sync(self.critical, event, args, kw)
async def afatal(self, event: str, *args: Any, **kw: Any) -> None:
"""
Log using `critical()`, but asynchronously in a separate thread.
.. versionadded:: 23.1.0
"""
await self._dispatch_to_sync(self.critical, event, args, kw)
afatal = acritical
async def aexception(self, event: str, *args: Any, **kw: Any) -> None:
"""
@@ -527,11 +492,9 @@ class AsyncBoundLogger:
.. versionchanged:: 20.2.0 fix _dispatch_to_sync contextvars usage
.. deprecated:: 23.1.0
Use the regular `BoundLogger` with its a-prefixed methods instead.
.. versionchanged:: 23.3.0
Callsite parameters are now also collected for async log methods.
"""
__slots__ = ("_loop", "sync_bl")
__slots__ = ("sync_bl", "_loop")
#: The wrapped synchronous logger. It is useful to be able to log
#: synchronously occasionally.
@@ -625,16 +588,12 @@ class AsyncBoundLogger:
"""
Merge contextvars and log using the sync logger in a thread pool.
"""
scs_token = _ASYNC_CALLING_STACK.set(sys._getframe().f_back.f_back) # type: ignore[union-attr, arg-type, unused-ignore]
ctx = contextvars.copy_context()
try:
await asyncio.get_running_loop().run_in_executor(
self._executor,
lambda: ctx.run(lambda: meth(event, *args, **kw)),
)
finally:
_ASYNC_CALLING_STACK.reset(scs_token)
await asyncio.get_running_loop().run_in_executor(
self._executor,
lambda: ctx.run(lambda: meth(event, *args, **kw)),
)
async def debug(self, event: str, *args: Any, **kw: Any) -> None:
await self._dispatch_to_sync(self.sync_bl.debug, event, args, kw)
@@ -645,8 +604,7 @@ class AsyncBoundLogger:
async def warning(self, event: str, *args: Any, **kw: Any) -> None:
await self._dispatch_to_sync(self.sync_bl.warning, event, args, kw)
async def warn(self, event: str, *args: Any, **kw: Any) -> None:
await self._dispatch_to_sync(self.sync_bl.warning, event, args, kw)
warn = warning
async def error(self, event: str, *args: Any, **kw: Any) -> None:
await self._dispatch_to_sync(self.sync_bl.error, event, args, kw)
@@ -654,8 +612,7 @@ class AsyncBoundLogger:
async def critical(self, event: str, *args: Any, **kw: Any) -> None:
await self._dispatch_to_sync(self.sync_bl.critical, event, args, kw)
async def fatal(self, event: str, *args: Any, **kw: Any) -> None:
await self._dispatch_to_sync(self.sync_bl.critical, event, args, kw)
fatal = critical
async def exception(self, event: str, *args: Any, **kw: Any) -> None:
# To make `log.exception("foo") work, we have to check if the user
@@ -685,7 +642,8 @@ class LoggerFactory:
>>> from structlog.stdlib import LoggerFactory
>>> configure(logger_factory=LoggerFactory())
Args:
Arguments:
ignore_frame_names:
When guessing the name of a logger, skip frames whose names *start*
with one of these. For example, in pyramid applications you'll
@@ -787,12 +745,7 @@ def filter_by_level(
...
DropEvent
"""
if (
# We can't use logger.isEnabledFor() because it's always disabled when
# a log entry is in flight on Python 3.14 and later,
not logger.disabled
and NAME_TO_LEVEL[method_name] >= logger.getEffectiveLevel()
):
if logger.isEnabledFor(_NAME_TO_LEVEL[method_name]):
return event_dict
raise DropEvent
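
The removed check is deliberate: per its inline comment, logger.isEnabledFor() is unreliable on Python 3.14+ while a log entry is in flight, so the newer code tests logger.disabled and getEffectiveLevel() by hand. Outside that edge case the two checks agree, assuming logging.disable() is not in effect:

import logging

logger = logging.getLogger("demo")
logger.setLevel(logging.WARNING)

level = logging.INFO
manual = (not logger.disabled) and level >= logger.getEffectiveLevel()
assert manual == logger.isEnabledFor(level)  # both False here
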
@@ -816,7 +769,7 @@ def add_log_level_number(
.. versionadded:: 18.2.0
"""
event_dict["level_number"] = NAME_TO_LEVEL[method_name]
event_dict["level_number"] = _NAME_TO_LEVEL[method_name]
return event_dict
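
add_log_level_number follows the standard processor signature (logger, method_name, event_dict) and returns the event dict. A self-contained sketch of the same idea, using the stdlib's name table instead of structlog's own:

import logging

def add_level_number_sketch(logger, method_name, event_dict):
    # "warning" -> "WARNING" -> 30; the real processor consults
    # structlog's name-to-level mapping instead.
    event_dict["level_number"] = logging.getLevelName(method_name.upper())
    return event_dict

ed = add_level_number_sketch(None, "warning", {"event": "hi"})
assert ed["level_number"] == logging.WARNING
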
@@ -848,7 +801,8 @@ class ExtraAdder:
This processor can be used for adding data passed in the ``extra``
parameter of the `logging` module's log methods to the event dictionary.
Args:
Arguments:
allow:
An optional collection of attributes that, if present in
`logging.LogRecord` objects, will be copied to event dictionaries.
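
ExtraAdder targets foreign (non-structlog) records: whatever was passed through logging's extra= shows up as LogRecord attributes, and the processor copies an allow-listed subset into the event dict. A minimal wiring sketch, assuming a structlog version that ships remove_processors_meta (21.3.0+):

import logging
import structlog

formatter = structlog.stdlib.ProcessorFormatter(
    processors=[
        structlog.stdlib.ProcessorFormatter.remove_processors_meta,
        structlog.dev.ConsoleRenderer(),
    ],
    foreign_pre_chain=[structlog.stdlib.ExtraAdder(allow={"user_id"})],
)

handler = logging.StreamHandler()
handler.setFormatter(formatter)
logging.basicConfig(handlers=[handler], level=logging.INFO, force=True)

logging.getLogger("app").info("login", extra={"user_id": 42})
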
@@ -900,66 +854,28 @@ class ExtraAdder:
event_dict[key] = record.__dict__[key]
LOG_KWARG_NAMES = ("exc_info", "stack_info", "stacklevel")
def render_to_log_args_and_kwargs(
_: logging.Logger, __: str, event_dict: EventDict
) -> tuple[tuple[Any, ...], dict[str, Any]]:
"""
Render ``event_dict`` into positional and keyword arguments for
`logging.Logger` logging methods.
See `logging.Logger.debug` method for keyword arguments reference.
The ``event`` field is passed in the first positional argument, positional
arguments from ``positional_args`` field are passed in subsequent positional
arguments, keyword arguments are extracted from the *event_dict* and the
rest of the *event_dict* is added as ``extra``.
This allows you to defer formatting to `logging`.
.. versionadded:: 25.1.0
"""
args = (event_dict.pop("event"), *event_dict.pop("positional_args", ()))
kwargs = {
kwarg_name: event_dict.pop(kwarg_name)
for kwarg_name in LOG_KWARG_NAMES
if kwarg_name in event_dict
}
if event_dict:
kwargs["extra"] = event_dict
return args, kwargs
def render_to_log_kwargs(
_: logging.Logger, __: str, event_dict: EventDict
) -> EventDict:
"""
Render ``event_dict`` into keyword arguments for `logging.Logger` logging
methods.
See `logging.Logger.debug` method for keyword arguments reference.
Render ``event_dict`` into keyword arguments for `logging.log`.
The ``event`` field is translated into ``msg``, keyword arguments are
extracted from the *event_dict* and the rest of the *event_dict* is added as
``extra``.
The ``event`` field is translated into ``msg`` and the rest of the
*event_dict* is added as ``extra``.
This allows you to defer formatting to `logging`.
.. versionadded:: 17.1.0
.. versionchanged:: 22.1.0
``exc_info``, ``stack_info``, and ``stacklevel`` are passed as proper
``exc_info``, ``stack_info``, and ``stackLevel`` are passed as proper
kwargs and not put into ``extra``.
.. versionchanged:: 24.2.0
``stackLevel`` corrected to ``stacklevel``.
"""
return {
"msg": event_dict.pop("event"),
"extra": event_dict,
**{
kw: event_dict.pop(kw)
for kw in LOG_KWARG_NAMES
for kw in ("exc_info", "stack_info", "stackLevel")
if kw in event_dict
},
}
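
For reference, the removed side carries the structlog 25.1.0 additions (render_to_log_args_and_kwargs and the shared LOG_KWARG_NAMES tuple) plus the 24.2.0 stackLevel -> stacklevel kwarg fix; the restored side still pops the misspelled stackLevel. What render_to_log_kwargs produces, sketched for the common case:

import logging
import structlog

logger = logging.getLogger("demo")
ed = {"event": "user logged in", "user_id": 42, "exc_info": False}

kw = structlog.stdlib.render_to_log_kwargs(logger, "info", ed)
# kw == {"msg": "user logged in", "extra": {"user_id": 42}, "exc_info": False}
logger.info(**kw)
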
@@ -977,7 +893,8 @@ class ProcessorFormatter(logging.Formatter):
Please refer to :ref:`processor-formatter` for examples.
Args:
Arguments:
foreign_pre_chain:
If not `None`, it is used as a processor chain that is applied to
**non**-*structlog* log entries before the event dictionary is
@@ -1034,11 +951,8 @@ class ProcessorFormatter(logging.Formatter):
This parameter exists for historic reasons. Please use *processors*
instead.
use_get_message:
If True, use ``record.getMessage`` to get a fully rendered log
message, otherwise use ``str(record.msg)``. (default: True)
Raises:
TypeError: If both or neither *processor* and *processors* are passed.
.. versionadded:: 17.1.0
@@ -1049,7 +963,6 @@ class ProcessorFormatter(logging.Formatter):
.. deprecated:: 21.3.0
*processor* (singular) in favor of *processors* (plural). Removal is not
planned.
.. versionadded:: 23.3.0 *use_get_message*
"""
def __init__(
@@ -1061,7 +974,6 @@ class ProcessorFormatter(logging.Formatter):
keep_stack_info: bool = False,
logger: logging.Logger | None = None,
pass_foreign_args: bool = False,
use_get_message: bool = True,
*args: Any,
**kwargs: Any,
) -> None:
@@ -1069,10 +981,7 @@ class ProcessorFormatter(logging.Formatter):
super().__init__(*args, fmt=fmt, **kwargs) # type: ignore[misc]
if processor and processors:
msg = (
"The `processor` and `processors` arguments are mutually"
" exclusive."
)
msg = "The `processor` and `processors` arguments are mutually exclusive."
raise TypeError(msg)
self.processors: Sequence[Processor]
@@ -1089,7 +998,6 @@ class ProcessorFormatter(logging.Formatter):
self.keep_stack_info = keep_stack_info
self.logger = logger
self.pass_foreign_args = pass_foreign_args
self.use_get_message = use_get_message
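
use_get_message was added in structlog 23.3.0 and appears only on the removed side of this diff; it selects between the fully interpolated message and the raw template when handling foreign records. The distinction in isolation:

import logging

record = logging.LogRecord(
    name="demo", level=logging.INFO, pathname=__file__, lineno=1,
    msg="hello %s", args=("world",), exc_info=None,
)

assert record.getMessage() == "hello world"  # use_get_message=True
assert str(record.msg) == "hello %s"         # use_get_message=False
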
def format(self, record: logging.LogRecord) -> str:
"""
@@ -1105,28 +1013,23 @@ class ProcessorFormatter(logging.Formatter):
logger = getattr(record, "_logger", _SENTINEL)
meth_name = getattr(record, "_name", "__structlog_sentinel__")
ed: ProcessorReturnValue
if logger is not _SENTINEL and meth_name != "__structlog_sentinel__":
# Both attached by wrap_for_formatter
if self.logger is not None:
logger = self.logger
meth_name = cast(str, record._name) # type:ignore[attr-defined]
meth_name = record._name # type: ignore[attr-defined]
# We need to copy because it's possible that the same record gets
# processed by multiple logging formatters. LogRecord.getMessage
# processed by multiple logging formatters. LogRecord.getMessage
# would transform our dict into a str.
ed = cast(Dict[str, Any], record.msg).copy()
ed = record.msg.copy() # type: ignore[union-attr]
ed["_record"] = record
ed["_from_structlog"] = True
else:
logger = self.logger
meth_name = record.levelname.lower()
ed = {
"event": (
record.getMessage()
if self.use_get_message
else str(record.msg)
),
"event": record.getMessage(),
"_record": record,
"_from_structlog": False,
}
@@ -1136,38 +1039,27 @@ class ProcessorFormatter(logging.Formatter):
record.args = ()
# Add stack-related attributes to the event dict
# Add stack-related attributes to event_dict and unset them
# on the record copy so that the base implementation wouldn't
# append stacktraces to the output.
if record.exc_info:
ed["exc_info"] = record.exc_info
if record.stack_info:
ed["stack_info"] = record.stack_info
if not self.keep_exc_info:
record.exc_text = None
record.exc_info = None
if not self.keep_stack_info:
record.stack_info = None
# Non-structlog allows to run through a chain to prepare it for the
# final processor (e.g. adding timestamps and log levels).
for proc in self.foreign_pre_chain or ():
ed = cast(EventDict, proc(logger, meth_name, ed))
# If required, unset stack-related attributes on the record copy so
# that the base implementation doesn't append stacktraces to the
# output.
if not self.keep_exc_info:
record.exc_text = None
record.exc_info = None
if not self.keep_stack_info:
record.stack_info = None
ed = proc(logger, meth_name, ed)
for p in self.processors:
ed = p(logger, meth_name, ed) # type: ignore[arg-type]
if not isinstance(ed, str):
warnings.warn(
"The last processor in ProcessorFormatter.processors must "
f"return a string, but {self.processors[-1]} returned a "
f"{type(ed)} instead.",
category=RuntimeWarning,
stacklevel=1,
)
ed = cast(str, ed)
ed = p(logger, meth_name, ed)
record.msg = ed
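
The removed warning encodes the contract that the last entry in ProcessorFormatter.processors must return a str, because the result is assigned to record.msg and then formatted by logging.Formatter. A minimal final renderer that honors it:

def key_value_renderer(logger, method_name, event_dict):
    # Drop the meta keys ProcessorFormatter injects, then flatten to str.
    event_dict.pop("_record", None)
    event_dict.pop("_from_structlog", None)
    return " ".join(f"{k}={v}" for k, v in sorted(event_dict.items()))

In practice structlog ships ProcessorFormatter.remove_processors_meta for the key-dropping step, so chains usually read [remove_processors_meta, <renderer>].
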
@@ -1180,12 +1072,11 @@ class ProcessorFormatter(logging.Formatter):
"""
Wrap *logger*, *name*, and *event_dict*.
The result is later unpacked by `ProcessorFormatter` when formatting
log entries.
The result is later unpacked by `ProcessorFormatter` when
formatting log entries.
Use this static method as the renderer (in other words, final
processor) if you want to use `ProcessorFormatter` in your `logging`
configuration.
Use this static method as the renderer (i.e. final processor) if you
want to use `ProcessorFormatter` in your `logging` configuration.
"""
return (event_dict,), {"extra": {"_logger": logger, "_name": name}}
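
Putting wrap_for_formatter and format() together, a minimal end-to-end wiring sketch in the shape the structlog docs recommend; the names are public structlog API, and the rendered output depends on the installed version:

import logging
import structlog

structlog.configure(
    processors=[
        structlog.stdlib.add_log_level,
        # Must be last: packs the event dict into (args, kwargs) for
        # ProcessorFormatter.format() to unpack.
        structlog.stdlib.ProcessorFormatter.wrap_for_formatter,
    ],
    logger_factory=structlog.stdlib.LoggerFactory(),
)

formatter = structlog.stdlib.ProcessorFormatter(
    processors=[
        structlog.stdlib.ProcessorFormatter.remove_processors_meta,
        structlog.dev.ConsoleRenderer(),
    ],
)

handler = logging.StreamHandler()
handler.setFormatter(formatter)
root = logging.getLogger()
root.addHandler(handler)
root.setLevel(logging.INFO)

structlog.get_logger().info("hello from structlog")
logging.getLogger("stdlib").warning("hello from stdlib")
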