API refactor

2025-10-07 16:25:52 +09:00
parent 76d0d86211
commit 91c7e04474
1171 changed files with 81940 additions and 44117 deletions


@@ -16,32 +16,48 @@ import contextvars
import functools
import logging
import sys
import warnings
from functools import partial
from typing import Any, Callable, Collection, Iterable, Sequence
from typing import Any, Callable, Collection, Dict, Iterable, Sequence, cast
if sys.version_info >= (3, 11):
from typing import Self
else:
from typing_extensions import Self
from . import _config
from ._base import BoundLoggerBase
from ._frames import _find_first_app_frame_and_name, _format_stack
from ._log_levels import _LEVEL_TO_NAME, _NAME_TO_LEVEL, add_log_level
from .contextvars import merge_contextvars
from ._log_levels import LEVEL_TO_NAME, NAME_TO_LEVEL, add_log_level
from .contextvars import _ASYNC_CALLING_STACK, merge_contextvars
from .exceptions import DropEvent
from .processors import StackInfoRenderer
from .typing import Context, EventDict, ExcInfo, Processor, WrappedLogger
from .typing import (
Context,
EventDict,
ExcInfo,
Processor,
ProcessorReturnValue,
WrappedLogger,
)
__all__ = [
"add_log_level_number",
"add_log_level",
"add_logger_name",
"ExtraAdder",
"BoundLogger",
"filter_by_level",
"get_logger",
"ExtraAdder",
"LoggerFactory",
"PositionalArgumentsFormatter",
"ProcessorFormatter",
"add_log_level",
"add_log_level_number",
"add_logger_name",
"filter_by_level",
"get_logger",
"recreate_defaults",
"render_to_log_args_and_kwargs",
"render_to_log_kwargs",
]
@@ -55,8 +71,7 @@ def recreate_defaults(*, log_level: int | None = logging.NOTSET) -> None:
As with vanilla defaults, the backwards-compatibility guarantees don't
apply to the settings applied here.
Arguments:
Args:
log_level:
If `None`, don't configure standard library logging **at all**.
@@ -67,6 +82,8 @@ def recreate_defaults(*, log_level: int | None = logging.NOTSET) -> None:
configure it yourself.
.. versionadded:: 22.1.0
.. versionchanged:: 23.3.0 Added `add_logger_name`.
.. versionchanged:: 25.1.0 Added `PositionalArgumentsFormatter`.
"""
if log_level is not None:
kw = {"force": True}
@@ -81,8 +98,10 @@ def recreate_defaults(*, log_level: int | None = logging.NOTSET) -> None:
_config.reset_defaults()
_config.configure(
processors=[
PositionalArgumentsFormatter(), # handled by native loggers
merge_contextvars,
add_log_level,
add_logger_name,
StackInfoRenderer(),
_config._BUILTIN_DEFAULT_PROCESSORS[-2], # TimeStamper
_config._BUILTIN_DEFAULT_PROCESSORS[-1], # ConsoleRenderer
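A minimal usage sketch of the defaults recreated above; the logger name "demo" is illustrative, and passing log_level=None would leave standard library logging untouched:

import logging

import structlog

structlog.stdlib.recreate_defaults(log_level=logging.INFO)

log = structlog.get_logger("demo")
log.info("hello", user="alice")  # rendered by ConsoleRenderer at INFO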
@@ -135,43 +154,46 @@ class BoundLogger(BoundLoggerBase):
.. versionadded:: 23.1.0
Async variants `alog()`, `adebug()`, `ainfo()`, and so forth.
.. versionchanged:: 24.2.0
Callsite parameters are now also collected by
`structlog.processors.CallsiteParameterAdder` for async log methods.
"""
_logger: logging.Logger
def bind(self, **new_values: Any) -> BoundLogger:
def bind(self, **new_values: Any) -> Self:
"""
Return a new logger with *new_values* added to the existing ones.
"""
return super().bind(**new_values) # type: ignore[return-value]
return super().bind(**new_values)
def unbind(self, *keys: str) -> BoundLogger:
def unbind(self, *keys: str) -> Self:
"""
Return a new logger with *keys* removed from the context.
Raises:
KeyError: If the key is not part of the context.
"""
return super().unbind(*keys) # type: ignore[return-value]
return super().unbind(*keys)
def try_unbind(self, *keys: str) -> BoundLogger:
def try_unbind(self, *keys: str) -> Self:
"""
Like :meth:`unbind`, but best effort: missing keys are ignored.
.. versionadded:: 18.2.0
"""
return super().try_unbind(*keys) # type: ignore[return-value]
return super().try_unbind(*keys)
def new(self, **new_values: Any) -> BoundLogger:
def new(self, **new_values: Any) -> Self:
"""
Clear context and bind *new_values* using `bind`.
Only necessary with dict implementations that keep global state like
those wrapped by `structlog.threadlocal.wrap_dict` when threads
are re-used.
are reused.
"""
return super().new(**new_values) # type: ignore[return-value]
return super().new(**new_values)
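With `Self` in the signatures, subclasses of `BoundLogger` keep their concrete type through `bind()`/`unbind()` chains and the old `type: ignore[return-value]` casts go away. A short sketch, assuming the canonical stdlib configuration:

import structlog

structlog.configure(
    wrapper_class=structlog.stdlib.BoundLogger,
    logger_factory=structlog.stdlib.LoggerFactory(),
)

log = structlog.get_logger(__name__)
log = log.bind(request_id="abc123")   # returns the same (sub)class
log.info("request.started")           # event carries request_id
log = log.try_unbind("nonexistent")   # best effort: missing keys ignored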
def debug(self, event: str | None = None, *args: Any, **kw: Any) -> Any:
"""
@@ -205,16 +227,21 @@ class BoundLogger(BoundLoggerBase):
"""
return self._proxy_to_logger("critical", event, *args, **kw)
def fatal(self, event: str | None = None, *args: Any, **kw: Any) -> Any:
"""
Process event and call `logging.Logger.critical` with the result.
"""
return self._proxy_to_logger("critical", event, *args, **kw)
def exception(
self, event: str | None = None, *args: Any, **kw: Any
) -> Any:
"""
Process event and call `logging.Logger.error` with the result,
after setting ``exc_info`` to `True`.
Process event and call `logging.Logger.exception` with the result,
after setting ``exc_info`` to `True` if it's not already set.
"""
kw.setdefault("exc_info", True)
return self.error(event, *args, **kw)
return self._proxy_to_logger("exception", event, *args, **kw)
def log(
self, level: int, event: str | None = None, *args: Any, **kw: Any
@@ -223,9 +250,7 @@ class BoundLogger(BoundLoggerBase):
Process *event* and call the appropriate logging method depending on
*level*.
"""
return self._proxy_to_logger(_LEVEL_TO_NAME[level], event, *args, **kw)
fatal = critical
return self._proxy_to_logger(LEVEL_TO_NAME[level], event, *args, **kw)
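`log()` resolves the numeric *level* through the now-public `LEVEL_TO_NAME` mapping, so the two calls in this small sketch behave identically:

import logging

import structlog

log = structlog.get_logger(__name__)

log.log(logging.WARNING, "disk.nearly_full", used_pct=91)
log.warning("disk.nearly_full", used_pct=91)  # equivalent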
def _proxy_to_logger(
self,
@@ -386,12 +411,16 @@ class BoundLogger(BoundLoggerBase):
"""
Merge contextvars and log using the sync logger in a thread pool.
"""
scs_token = _ASYNC_CALLING_STACK.set(sys._getframe().f_back.f_back) # type: ignore[union-attr, arg-type, unused-ignore]
ctx = contextvars.copy_context()
await asyncio.get_running_loop().run_in_executor(
None,
lambda: ctx.run(lambda: meth(event, *args, **kw)),
)
try:
await asyncio.get_running_loop().run_in_executor(
None,
lambda: ctx.run(lambda: meth(event, *args, **kw)),
)
finally:
_ASYNC_CALLING_STACK.reset(scs_token)
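The surrounding try/finally is the standard `contextvars` token pattern; a standalone sketch with a hypothetical variable name:

import contextvars

# Hypothetical context variable, mirroring _ASYNC_CALLING_STACK above.
_CALLER: contextvars.ContextVar[str] = contextvars.ContextVar("caller")

def run_with_caller(value: str) -> None:
    token = _CALLER.set(value)   # remember the previous state
    try:
        ...                      # work that may call _CALLER.get()
    finally:
        _CALLER.reset(token)     # restore it even if the work raises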
async def adebug(self, event: str, *args: Any, **kw: Any) -> None:
"""
@@ -433,7 +462,13 @@ class BoundLogger(BoundLoggerBase):
"""
await self._dispatch_to_sync(self.critical, event, args, kw)
afatal = acritical
async def afatal(self, event: str, *args: Any, **kw: Any) -> None:
"""
Log using `critical()`, but asynchronously in a separate thread.
.. versionadded:: 23.1.0
"""
await self._dispatch_to_sync(self.critical, event, args, kw)
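A minimal sketch of the a-prefixed methods in use, assuming the stdlib wrapper class so `afatal()` is available as added above:

import asyncio

import structlog

structlog.configure(
    wrapper_class=structlog.stdlib.BoundLogger,
    logger_factory=structlog.stdlib.LoggerFactory(),
)

async def main() -> None:
    log = structlog.get_logger(__name__)
    # Each a-prefixed method runs the sync pipeline in a thread pool.
    await log.ainfo("task.started", task_id=42)
    await log.afatal("task.doomed", task_id=42)

asyncio.run(main())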
async def aexception(self, event: str, *args: Any, **kw: Any) -> None:
"""
@@ -492,9 +527,11 @@ class AsyncBoundLogger:
.. versionchanged:: 20.2.0 fix _dispatch_to_sync contextvars usage
.. deprecated:: 23.1.0
Use the regular `BoundLogger` with its a-prefixed methods instead.
.. versionchanged:: 23.3.0
Callsite parameters are now also collected for async log methods.
"""
__slots__ = ("sync_bl", "_loop")
__slots__ = ("_loop", "sync_bl")
#: The wrapped synchronous logger. It is useful to be able to log
#: synchronously occasionally.
@@ -588,12 +625,16 @@ class AsyncBoundLogger:
"""
Merge contextvars and log using the sync logger in a thread pool.
"""
scs_token = _ASYNC_CALLING_STACK.set(sys._getframe().f_back.f_back) # type: ignore[union-attr, arg-type, unused-ignore]
ctx = contextvars.copy_context()
await asyncio.get_running_loop().run_in_executor(
self._executor,
lambda: ctx.run(lambda: meth(event, *args, **kw)),
)
try:
await asyncio.get_running_loop().run_in_executor(
self._executor,
lambda: ctx.run(lambda: meth(event, *args, **kw)),
)
finally:
_ASYNC_CALLING_STACK.reset(scs_token)
async def debug(self, event: str, *args: Any, **kw: Any) -> None:
await self._dispatch_to_sync(self.sync_bl.debug, event, args, kw)
@@ -604,7 +645,8 @@ class AsyncBoundLogger:
async def warning(self, event: str, *args: Any, **kw: Any) -> None:
await self._dispatch_to_sync(self.sync_bl.warning, event, args, kw)
warn = warning
async def warn(self, event: str, *args: Any, **kw: Any) -> None:
await self._dispatch_to_sync(self.sync_bl.warning, event, args, kw)
async def error(self, event: str, *args: Any, **kw: Any) -> None:
await self._dispatch_to_sync(self.sync_bl.error, event, args, kw)
@@ -612,7 +654,8 @@ class AsyncBoundLogger:
async def critical(self, event: str, *args: Any, **kw: Any) -> None:
await self._dispatch_to_sync(self.sync_bl.critical, event, args, kw)
fatal = critical
async def fatal(self, event: str, *args: Any, **kw: Any) -> None:
await self._dispatch_to_sync(self.sync_bl.critical, event, args, kw)
async def exception(self, event: str, *args: Any, **kw: Any) -> None:
# To make `log.exception("foo")` work, we have to check if the user
@@ -642,8 +685,7 @@ class LoggerFactory:
>>> from structlog.stdlib import LoggerFactory
>>> configure(logger_factory=LoggerFactory())
Arguments:
Args:
ignore_frame_names:
When guessing the name of a logger, skip frames whose names *start*
with one of these. For example, in pyramid applications you'll
@@ -745,7 +787,12 @@ def filter_by_level(
...
DropEvent
"""
if logger.isEnabledFor(_NAME_TO_LEVEL[method_name]):
if (
# We can't use logger.isEnabledFor() because it's always disabled when
# a log entry is in flight on Python 3.14 and later.
not logger.disabled
and NAME_TO_LEVEL[method_name] >= logger.getEffectiveLevel()
):
return event_dict
raise DropEvent
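A sketch of the new check's behavior, using only documented `logging` APIs; the logger name is illustrative:

import logging

from structlog.exceptions import DropEvent
from structlog.stdlib import filter_by_level

logger = logging.getLogger("demo")
logger.setLevel(logging.INFO)

filter_by_level(logger, "warning", {"event": "kept"})  # passes through
try:
    filter_by_level(logger, "debug", {"event": "dropped"})
except DropEvent:
    pass  # DEBUG < INFO, so the entry is dropped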
@@ -769,7 +816,7 @@ def add_log_level_number(
.. versionadded:: 18.2.0
"""
event_dict["level_number"] = _NAME_TO_LEVEL[method_name]
event_dict["level_number"] = NAME_TO_LEVEL[method_name]
return event_dict
@@ -801,8 +848,7 @@ class ExtraAdder:
This processor can be used for adding data passed in the ``extra``
parameter of the `logging` module's log methods to the event dictionary.
Arguments:
Args:
allow:
An optional collection of attributes that, if present in
`logging.LogRecord` objects, will be copied to event dictionaries.
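A small sketch of the allow-list in action; the request_id attribute is hypothetical and stands in for what logging's extra={...} would set on the record:

import logging

from structlog.stdlib import ExtraAdder

record = logging.LogRecord(
    "demo", logging.INFO, __file__, 1, "hi", (), None
)
record.request_id = "abc123"

adder = ExtraAdder(allow=["request_id"])
event_dict = adder(None, "info", {"event": "hi", "_record": record})
# event_dict now also contains {"request_id": "abc123"}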
@@ -854,28 +900,66 @@ class ExtraAdder:
event_dict[key] = record.__dict__[key]
LOG_KWARG_NAMES = ("exc_info", "stack_info", "stacklevel")
def render_to_log_args_and_kwargs(
_: logging.Logger, __: str, event_dict: EventDict
) -> tuple[tuple[Any, ...], dict[str, Any]]:
"""
Render ``event_dict`` into positional and keyword arguments for
`logging.Logger` logging methods.
See `logging.Logger.debug` method for keyword arguments reference.
The ``event`` field is passed as the first positional argument, positional
arguments from the ``positional_args`` field are passed as subsequent
positional arguments, keyword arguments are extracted from the *event_dict*,
and the rest of the *event_dict* is added as ``extra``.
This allows you to defer formatting to `logging`.
.. versionadded:: 25.1.0
"""
args = (event_dict.pop("event"), *event_dict.pop("positional_args", ()))
kwargs = {
kwarg_name: event_dict.pop(kwarg_name)
for kwarg_name in LOG_KWARG_NAMES
if kwarg_name in event_dict
}
if event_dict:
kwargs["extra"] = event_dict
return args, kwargs
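What the split looks like for a concrete event dict; the field values are illustrative:

import logging

from structlog.stdlib import render_to_log_args_and_kwargs

logger = logging.getLogger("demo")
args, kwargs = render_to_log_args_and_kwargs(
    logger,
    "info",
    {
        "event": "user %s logged in",
        "positional_args": ("alice",),
        "ip": "10.0.0.1",
    },
)
# args   == ("user %s logged in", "alice")
# kwargs == {"extra": {"ip": "10.0.0.1"}}
logger.info(*args, **kwargs)  # %-formatting is deferred to logging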
def render_to_log_kwargs(
_: logging.Logger, __: str, event_dict: EventDict
) -> EventDict:
"""
Render ``event_dict`` into keyword arguments for `logging.log`.
Render ``event_dict`` into keyword arguments for `logging.Logger` logging
methods.
See `logging.Logger.debug` method for keyword arguments reference.
The ``event`` field is translated into ``msg`` and the rest of the
*event_dict* is added as ``extra``.
The ``event`` field is translated into ``msg``, keyword arguments are
extracted from the *event_dict*, and the rest of the *event_dict* is added
as ``extra``.
This allows you to defer formatting to `logging`.
.. versionadded:: 17.1.0
.. versionchanged:: 22.1.0
``exc_info``, ``stack_info``, and ``stackLevel`` are passed as proper
``exc_info``, ``stack_info``, and ``stacklevel`` are passed as proper
kwargs and not put into ``extra``.
.. versionchanged:: 24.2.0
``stackLevel`` corrected to ``stacklevel``.
"""
return {
"msg": event_dict.pop("event"),
"extra": event_dict,
**{
kw: event_dict.pop(kw)
for kw in ("exc_info", "stack_info", "stackLevel")
for kw in LOG_KWARG_NAMES
if kw in event_dict
},
}
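And the keyword-only counterpart on the same illustrative data:

import logging

from structlog.stdlib import render_to_log_kwargs

logger = logging.getLogger("demo")
kwargs = render_to_log_kwargs(
    logger,
    "info",
    {"event": "payment.settled", "amount": 42, "stacklevel": 2},
)
# kwargs == {"msg": "payment.settled", "extra": {"amount": 42},
#            "stacklevel": 2}
logger.info(**kwargs)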
@@ -893,8 +977,7 @@ class ProcessorFormatter(logging.Formatter):
Please refer to :ref:`processor-formatter` for examples.
Arguments:
Args:
foreign_pre_chain:
If not `None`, it is used as a processor chain that is applied to
**non**-*structlog* log entries before the event dictionary is
@@ -951,8 +1034,11 @@ class ProcessorFormatter(logging.Formatter):
This parameter exists for historic reasons. Please use *processors*
instead.
Raises:
use_get_message:
If True, use ``record.getMessage`` to get a fully rendered log
message, otherwise use ``str(record.msg)``. (default: True)
Raises:
TypeError: If both or neither *processor* and *processors* are passed.
.. versionadded:: 17.1.0
@@ -963,6 +1049,7 @@ class ProcessorFormatter(logging.Formatter):
.. deprecated:: 21.3.0
*processor* (singular) in favor of *processors* (plural). Removal is not
planned.
.. versionadded:: 23.3.0 *use_get_message*
"""
def __init__(
@@ -974,6 +1061,7 @@ class ProcessorFormatter(logging.Formatter):
keep_stack_info: bool = False,
logger: logging.Logger | None = None,
pass_foreign_args: bool = False,
use_get_message: bool = True,
*args: Any,
**kwargs: Any,
) -> None:
@@ -981,7 +1069,10 @@ class ProcessorFormatter(logging.Formatter):
super().__init__(*args, fmt=fmt, **kwargs) # type: ignore[misc]
if processor and processors:
msg = "The `processor` and `processors` arguments are mutually exclusive."
msg = (
"The `processor` and `processors` arguments are mutually"
" exclusive."
)
raise TypeError(msg)
self.processors: Sequence[Processor]
@@ -998,6 +1089,7 @@ class ProcessorFormatter(logging.Formatter):
self.keep_stack_info = keep_stack_info
self.logger = logger
self.pass_foreign_args = pass_foreign_args
self.use_get_message = use_get_message
def format(self, record: logging.LogRecord) -> str:
"""
@@ -1013,23 +1105,28 @@ class ProcessorFormatter(logging.Formatter):
logger = getattr(record, "_logger", _SENTINEL)
meth_name = getattr(record, "_name", "__structlog_sentinel__")
ed: ProcessorReturnValue
if logger is not _SENTINEL and meth_name != "__structlog_sentinel__":
# Both attached by wrap_for_formatter
if self.logger is not None:
logger = self.logger
meth_name = record._name # type: ignore[attr-defined]
meth_name = cast(str, record._name) # type:ignore[attr-defined]
# We need to copy because it's possible that the same record gets
# processed by multiple logging formatters. LogRecord.getMessage
# processed by multiple logging formatters. LogRecord.getMessage
# would transform our dict into a str.
ed = record.msg.copy() # type: ignore[union-attr]
ed = cast(Dict[str, Any], record.msg).copy()
ed["_record"] = record
ed["_from_structlog"] = True
else:
logger = self.logger
meth_name = record.levelname.lower()
ed = {
"event": record.getMessage(),
"event": (
record.getMessage()
if self.use_get_message
else str(record.msg)
),
"_record": record,
"_from_structlog": False,
}
@@ -1039,27 +1136,38 @@ class ProcessorFormatter(logging.Formatter):
record.args = ()
# Add stack-related attributes to event_dict and unset them
# on the record copy so that the base implementation wouldn't
# append stacktraces to the output.
# Add stack-related attributes to the event dict
if record.exc_info:
ed["exc_info"] = record.exc_info
if record.stack_info:
ed["stack_info"] = record.stack_info
if not self.keep_exc_info:
record.exc_text = None
record.exc_info = None
if not self.keep_stack_info:
record.stack_info = None
# Non-structlog entries are run through a chain to prepare them for
# the final processor (e.g. adding timestamps and log levels).
for proc in self.foreign_pre_chain or ():
ed = proc(logger, meth_name, ed)
ed = cast(EventDict, proc(logger, meth_name, ed))
# If required, unset stack-related attributes on the record copy so
# that the base implementation doesn't append stacktraces to the
# output.
if not self.keep_exc_info:
record.exc_text = None
record.exc_info = None
if not self.keep_stack_info:
record.stack_info = None
for p in self.processors:
ed = p(logger, meth_name, ed)
ed = p(logger, meth_name, ed) # type: ignore[arg-type]
if not isinstance(ed, str):
warnings.warn(
"The last processor in ProcessorFormatter.processors must "
f"return a string, but {self.processors[-1]} returned a "
f"{type(ed)} instead.",
category=RuntimeWarning,
stacklevel=1,
)
ed = cast(str, ed)
record.msg = ed
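What *use_get_message* toggles for a foreign record carrying %-style args; the record values are illustrative:

import logging

record = logging.LogRecord(
    "demo", logging.INFO, __file__, 1,
    "user %s logged in", ("alice",), None,
)
record.getMessage()  # "user alice logged in"  -- use_get_message=True
str(record.msg)      # "user %s logged in"     -- use_get_message=False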
@@ -1072,11 +1180,12 @@ class ProcessorFormatter(logging.Formatter):
"""
Wrap *logger*, *name*, and *event_dict*.
The result is later unpacked by `ProcessorFormatter` when
formatting log entries.
The result is later unpacked by `ProcessorFormatter` when formatting
log entries.
Use this static method as the renderer (i.e. final processor) if you
want to use `ProcessorFormatter` in your `logging` configuration.
Use this static method as the renderer (in other words, final
processor) if you want to use `ProcessorFormatter` in your `logging`
configuration.
"""
return (event_dict,), {"extra": {"_logger": logger, "_name": name}}
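A minimal end-to-end sketch of the pattern the docstring points at, along the lines of the :ref:`processor-formatter` docs; the handler wiring is illustrative:

import logging

import structlog

structlog.configure(
    processors=[
        structlog.stdlib.add_log_level,
        # Must be the final processor: packs the event dict for logging.
        structlog.stdlib.ProcessorFormatter.wrap_for_formatter,
    ],
    logger_factory=structlog.stdlib.LoggerFactory(),
)

formatter = structlog.stdlib.ProcessorFormatter(
    processors=[
        structlog.stdlib.ProcessorFormatter.remove_processors_meta,
        structlog.dev.ConsoleRenderer(),
    ],
)

handler = logging.StreamHandler()
handler.setFormatter(formatter)
logging.getLogger().addHandler(handler)

structlog.get_logger("demo").info("hello", key="value")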