Major fixes and new features
All checks were successful
continuous-integration/drone/push Build is passing
venv/lib/python3.12/site-packages/coverage/__init__.py (new file, 40 lines)
@@ -0,0 +1,40 @@
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt

"""
Code coverage measurement for Python.

Ned Batchelder
https://coverage.readthedocs.io

"""

from __future__ import annotations

# isort: skip_file

# mypy's convention is that "import as" names are public from the module.
# We import names as themselves to indicate that. Pylint sees it as pointless,
# so disable its warning.
# pylint: disable=useless-import-alias

from coverage.version import (
    __version__ as __version__,
    version_info as version_info,
)

from coverage.control import (
    Coverage as Coverage,
    process_startup as process_startup,
)
from coverage.data import CoverageData as CoverageData
from coverage.exceptions import CoverageException as CoverageException
from coverage.plugin import (
    CodeRegion as CodeRegion,
    CoveragePlugin as CoveragePlugin,
    FileReporter as FileReporter,
    FileTracer as FileTracer,
)

# Backward compatibility.
coverage = Coverage
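The module above only re-exports the public names; the `coverage = Coverage` alias keeps the old `coverage.coverage()` spelling working. A minimal sketch of the re-exported API (the measured module name `mymod` is hypothetical):

    import coverage

    cov = coverage.Coverage(branch=True)  # the class re-exported from coverage.control
    cov.start()
    import mymod                          # hypothetical module; code run here is measured
    cov.stop()
    cov.save()    # writes the .coverage data file
    cov.report()  # prints a line/branch summary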
venv/lib/python3.12/site-packages/coverage/__main__.py (new file, 12 lines)
@@ -0,0 +1,12 @@
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt

"""Coverage.py's main entry point."""

from __future__ import annotations

import sys

from coverage.cmdline import main

sys.exit(main())
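Because `__main__.py` ends with `sys.exit(main())`, `python -m coverage` should behave exactly like the installed `coverage` script, with `main()`'s return value becoming the process exit status. A quick sanity check (a sketch, assuming coverage is installed in the running interpreter):

    import subprocess
    import sys

    result = subprocess.run(
        [sys.executable, "-m", "coverage", "--version"],
        capture_output=True,
        text=True,
    )
    print(result.stdout.strip())
    print("exit status:", result.returncode)  # main()'s return value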
venv/lib/python3.12/site-packages/coverage/annotate.py (new file, 114 lines)
@@ -0,0 +1,114 @@
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt

"""Source file annotation for coverage.py."""

from __future__ import annotations

import os
import re
from collections.abc import Iterable
from typing import TYPE_CHECKING

from coverage.files import flat_rootname
from coverage.misc import ensure_dir, isolate_module
from coverage.plugin import FileReporter
from coverage.report_core import get_analysis_to_report
from coverage.results import Analysis
from coverage.types import TMorf

if TYPE_CHECKING:
    from coverage import Coverage

os = isolate_module(os)


class AnnotateReporter:
    """Generate annotated source files showing line coverage.

    This reporter creates annotated copies of the measured source files. Each
    .py file is copied as a .py,cover file, with a left-hand margin annotating
    each line::

        > def h(x):
        -     if 0:  #pragma: no cover
        -         pass
        >     if x == 1:
        !         a = 1
        >     else:
        >         a = 2

        > h(2)

    Executed lines use ">", lines not executed use "!", lines excluded from
    consideration use "-".

    """

    def __init__(self, coverage: Coverage) -> None:
        self.coverage = coverage
        self.config = self.coverage.config
        self.directory: str | None = None

    blank_re = re.compile(r"\s*(#|$)")
    else_re = re.compile(r"\s*else\s*:\s*(#|$)")

    def report(self, morfs: Iterable[TMorf] | None, directory: str | None = None) -> None:
        """Run the report.

        See `coverage.report()` for arguments.

        """
        self.directory = directory
        self.coverage.get_data()
        for fr, analysis in get_analysis_to_report(self.coverage, morfs):
            self.annotate_file(fr, analysis)

    def annotate_file(self, fr: FileReporter, analysis: Analysis) -> None:
        """Annotate a single file.

        `fr` is the FileReporter for the file to annotate.

        """
        statements = sorted(analysis.statements)
        missing = sorted(analysis.missing)
        excluded = sorted(analysis.excluded)

        if self.directory:
            ensure_dir(self.directory)
            dest_file = os.path.join(self.directory, flat_rootname(fr.relative_filename()))
            assert dest_file.endswith("_py")
            dest_file = dest_file[:-3] + ".py"
        else:
            dest_file = fr.filename
        dest_file += ",cover"

        with open(dest_file, "w", encoding="utf-8") as dest:
            i = j = 0
            covered = True
            source = fr.source()
            for lineno, line in enumerate(source.splitlines(True), start=1):
                while i < len(statements) and statements[i] < lineno:
                    i += 1
                while j < len(missing) and missing[j] < lineno:
                    j += 1
                if i < len(statements) and statements[i] == lineno:
                    covered = j >= len(missing) or missing[j] > lineno
                if self.blank_re.match(line):
                    dest.write("  ")
                elif self.else_re.match(line):
                    # Special logic for lines containing only "else:".
                    if j >= len(missing):
                        dest.write("> ")
                    elif statements[i] == missing[j]:
                        dest.write("! ")
                    else:
                        dest.write("> ")
                elif lineno in excluded:
                    dest.write("- ")
                elif covered:
                    dest.write("> ")
                else:
                    dest.write("! ")

                dest.write(line)
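To see the ">"/"!"/"-" margins that AnnotateReporter writes, the public Coverage.annotate() entry point drives this reporter. A small end-to-end sketch (the sample.py file name is made up for illustration):

    import coverage

    SRC = (
        "def h(x):\n"
        "    if x == 1:\n"
        "        a = 1\n"
        "    else:\n"
        "        a = 2\n"
        "\n"
        "h(2)\n"
    )
    with open("sample.py", "w", encoding="utf-8") as f:
        f.write(SRC)

    cov = coverage.Coverage()
    cov.start()
    exec(compile(SRC, "sample.py", "exec"), {"__name__": "sample"})
    cov.stop()
    cov.annotate(morfs=["sample.py"])
    # sample.py,cover should now mark "a = 2" with "> " and "a = 1" with "! ".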
venv/lib/python3.12/site-packages/coverage/bytecode.py (new file, 185 lines)
@@ -0,0 +1,185 @@
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt

"""Bytecode analysis for coverage.py"""

from __future__ import annotations

import collections
import dis
from collections.abc import Iterator
from types import CodeType
from typing import Iterable, Optional

from coverage.types import TArc, TOffset


def code_objects(code: CodeType) -> Iterator[CodeType]:
    """Iterate over all the code objects in `code`."""
    stack = [code]
    while stack:
        # We're going to return the code object on the stack, but first
        # push its children for later returning.
        code = stack.pop()
        for c in code.co_consts:
            if isinstance(c, CodeType):
                stack.append(c)
        yield code


def op_set(*op_names: str) -> set[int]:
    """Make a set of opcodes from instruction names.

    The names might not exist in this version of Python, skip those if not.
    """
    return {op for name in op_names if (op := dis.opmap.get(name))}


# Opcodes that are unconditional jumps elsewhere.
ALWAYS_JUMPS = op_set(
    "JUMP_BACKWARD",
    "JUMP_BACKWARD_NO_INTERRUPT",
    "JUMP_FORWARD",
)

# Opcodes that exit from a function.
RETURNS = op_set(
    "RETURN_VALUE",
    "RETURN_GENERATOR",
)

# Opcodes that do nothing.
NOPS = op_set(
    "NOP",
    "NOT_TAKEN",
)


class InstructionWalker:
    """Utility to step through trails of instructions.

    We have two reasons to need sequences of instructions from a code object:
    First, in strict sequence to visit all the instructions in the object.
    This is `walk(follow_jumps=False)`.  Second, we want to follow jumps to
    understand how execution will flow: `walk(follow_jumps=True)`.
    """

    def __init__(self, code: CodeType) -> None:
        self.code = code
        self.insts: dict[TOffset, dis.Instruction] = {}

        inst = None
        for inst in dis.get_instructions(code):
            self.insts[inst.offset] = inst

        assert inst is not None
        self.max_offset = inst.offset

    def walk(
        self, *, start_at: TOffset = 0, follow_jumps: bool = True
    ) -> Iterable[dis.Instruction]:
        """
        Yield instructions starting from `start_at`.  Follow unconditional
        jumps if `follow_jumps` is true.
        """
        seen = set()
        offset = start_at
        while offset < self.max_offset + 1:
            if offset in seen:
                break
            seen.add(offset)
            if inst := self.insts.get(offset):
                yield inst
                if follow_jumps and inst.opcode in ALWAYS_JUMPS:
                    offset = inst.jump_target
                    continue
            offset += 2


TBranchTrailsOneSource = dict[Optional[TArc], set[TOffset]]
TBranchTrails = dict[TOffset, TBranchTrailsOneSource]


def branch_trails(code: CodeType) -> TBranchTrails:
    """
    Calculate branch trails for `code`.

    Instructions can have a jump_target, where they might jump to next.  Some
    instructions with a jump_target are unconditional jumps (ALWAYS_JUMPS), so
    they aren't interesting to us, since they aren't the start of a branch
    possibility.

    Instructions that might or might not jump somewhere else are branch
    possibilities.  For each of those, we track a trail of instructions.  These
    are lists of instruction offsets, the next instructions that can execute.
    We follow the trail until we get to a new source line.  That gives us the
    arc from the original instruction's line to the new source line.

    """
    the_trails: TBranchTrails = collections.defaultdict(lambda: collections.defaultdict(set))
    iwalker = InstructionWalker(code)
    for inst in iwalker.walk(follow_jumps=False):
        if not inst.jump_target:
            # We only care about instructions with jump targets.
            continue
        if inst.opcode in ALWAYS_JUMPS:
            # We don't care about unconditional jumps.
            continue

        from_line = inst.line_number
        if from_line is None:
            continue

        def add_one_branch_trail(
            trails: TBranchTrailsOneSource,
            start_at: TOffset,
        ) -> None:
            # pylint: disable=cell-var-from-loop
            inst_offsets: set[TOffset] = set()
            to_line = None
            for inst2 in iwalker.walk(start_at=start_at, follow_jumps=True):
                inst_offsets.add(inst2.offset)
                if inst2.line_number and inst2.line_number != from_line:
                    to_line = inst2.line_number
                    break
                elif inst2.jump_target and (inst2.opcode not in ALWAYS_JUMPS):
                    break
                elif inst2.opcode in RETURNS:
                    to_line = -code.co_firstlineno
                    break
            if to_line is not None:
                trails[(from_line, to_line)].update(inst_offsets)
            else:
                trails[None] = set()

        # Calculate two trails: one from the next instruction, and one from the
        # jump_target instruction.
        trails: TBranchTrailsOneSource = collections.defaultdict(set)
        add_one_branch_trail(trails, start_at=inst.offset + 2)
        add_one_branch_trail(trails, start_at=inst.jump_target)
        the_trails[inst.offset] = trails

        # Sometimes we get BRANCH_RIGHT or BRANCH_LEFT events from instructions
        # other than the original jump possibility instruction.  Register each
        # trail under all of their offsets so we can pick up in the middle of a
        # trail if need be.
        for arc, offsets in trails.items():
            for offset in offsets:
                the_trails[offset][arc].update(offsets)

    return the_trails


def always_jumps(code: CodeType) -> dict[TOffset, TOffset]:
    """Make a map of unconditional bytecodes jumping to others.

    Only include bytecodes that do no work and go to another bytecode.
    """
    jumps = {}
    iwalker = InstructionWalker(code)
    for inst in iwalker.walk(follow_jumps=False):
        if inst.opcode in ALWAYS_JUMPS:
            jumps[inst.offset] = inst.jump_target
        elif inst.opcode in NOPS:
            jumps[inst.offset] = inst.offset + 2
    return jumps
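The stack-based traversal in code_objects() is easy to check against the stdlib alone; this standalone sketch mirrors it (no coverage imports needed, and the function and module names are made up):

    import dis
    from types import CodeType

    def walk_code(code):
        # Same traversal as code_objects() above: pop one code object,
        # push any code objects found in its constants, and yield it.
        stack = [code]
        while stack:
            code = stack.pop()
            for const in code.co_consts:
                if isinstance(const, CodeType):
                    stack.append(const)  # nested defs, lambdas, class bodies
            yield code

    def outer():
        def inner(x):
            return x + 1
        return inner

    for co in walk_code(outer.__code__):
        print(co.co_name, "at line", co.co_firstlineno)
        # dis.get_instructions(co) is what InstructionWalker builds its
        # offset-to-instruction map from.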
venv/lib/python3.12/site-packages/coverage/cmdline.py (new file, 1178 lines)
File diff suppressed because it is too large.
venv/lib/python3.12/site-packages/coverage/collector.py (new file, 501 lines)
@@ -0,0 +1,501 @@
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt

"""Raw data collector for coverage.py."""

from __future__ import annotations

import contextlib
import functools
import os
import sys
from collections.abc import Mapping
from types import FrameType
from typing import Any, Callable, TypeVar, cast

from coverage import env
from coverage.config import CoverageConfig
from coverage.core import Core
from coverage.data import CoverageData
from coverage.debug import short_stack
from coverage.exceptions import ConfigError
from coverage.misc import human_sorted_items, isolate_module
from coverage.plugin import CoveragePlugin
from coverage.types import (
    TArc,
    TCheckIncludeFn,
    TFileDisposition,
    Tracer,
    TShouldStartContextFn,
    TShouldTraceFn,
    TTraceData,
    TTraceFn,
    TWarnFn,
)

os = isolate_module(os)


T = TypeVar("T")


class Collector:
    """Collects trace data.

    Creates a Tracer object for each thread, since they track stack
    information.  Each Tracer points to the same shared data, contributing
    traced data points.

    When the Collector is started, it creates a Tracer for the current thread,
    and installs a function to create Tracers for each new thread started.
    When the Collector is stopped, all active Tracers are stopped.

    Threads started while the Collector is stopped will never have Tracers
    associated with them.

    """

    # The stack of active Collectors.  Collectors are added here when started,
    # and popped when stopped.  Collectors on the stack are paused when not
    # the top, and resumed when they become the top again.
    _collectors: list[Collector] = []

    # The concurrency settings we support here.
    LIGHT_THREADS = {"greenlet", "eventlet", "gevent"}

    def __init__(
        self,
        core: Core,
        should_trace: TShouldTraceFn,
        check_include: TCheckIncludeFn,
        should_start_context: TShouldStartContextFn | None,
        file_mapper: Callable[[str], str],
        branch: bool,
        warn: TWarnFn,
        concurrency: list[str],
    ) -> None:
        """Create a collector.

        `should_trace` is a function, taking a file name and a frame, and
        returning a `coverage.FileDisposition object`.

        `check_include` is a function taking a file name and a frame. It returns
        a boolean: True if the file should be traced, False if not.

        `should_start_context` is a function taking a frame, and returning a
        string.  If the frame should be the start of a new context, the string
        is the new context.  If the frame should not be the start of a new
        context, return None.

        `file_mapper` is a function taking a filename, and returning a Unicode
        filename.  The result is the name that will be recorded in the data
        file.

        If `branch` is true, then branches will be measured.  This involves
        collecting data on which statements followed each other (arcs).  Use
        `get_arc_data` to get the arc data.

        `warn` is a warning function, taking a single string message argument
        and an optional slug argument which will be a string or None, to be
        used if a warning needs to be issued.

        `concurrency` is a list of strings indicating the concurrency libraries
        in use.  Valid values are "greenlet", "eventlet", "gevent", or "thread"
        (the default).  "thread" can be combined with one of the other three.
        Other values are ignored.

        """
        self.core = core
        self.should_trace = should_trace
        self.check_include = check_include
        self.should_start_context = should_start_context
        self.file_mapper = file_mapper
        self.branch = branch
        self.warn = warn
        self.concurrency = concurrency
        assert isinstance(self.concurrency, list), f"Expected a list: {self.concurrency!r}"

        self.pid = os.getpid()

        self.covdata: CoverageData
        self.threading = None
        self.static_context: str | None = None

        self.origin = short_stack()

        self.concur_id_func = None

        # We can handle a few concurrency options here, but only one at a time.
        concurrencies = set(self.concurrency)
        unknown = concurrencies - CoverageConfig.CONCURRENCY_CHOICES
        if unknown:
            show = ", ".join(sorted(unknown))
            raise ConfigError(f"Unknown concurrency choices: {show}")
        light_threads = concurrencies & self.LIGHT_THREADS
        if len(light_threads) > 1:
            show = ", ".join(sorted(light_threads))
            raise ConfigError(f"Conflicting concurrency settings: {show}")
        do_threading = False

        tried = "nothing"  # to satisfy pylint
        try:
            if "greenlet" in concurrencies:
                tried = "greenlet"
                import greenlet

                self.concur_id_func = greenlet.getcurrent
            elif "eventlet" in concurrencies:
                tried = "eventlet"
                import eventlet.greenthread  # pylint: disable=import-error,useless-suppression

                self.concur_id_func = eventlet.greenthread.getcurrent
            elif "gevent" in concurrencies:
                tried = "gevent"
                import gevent  # pylint: disable=import-error,useless-suppression

                self.concur_id_func = gevent.getcurrent

            if "thread" in concurrencies:
                do_threading = True
        except ImportError as ex:
            msg = f"Couldn't trace with concurrency={tried}, the module isn't installed."
            raise ConfigError(msg) from ex

        if self.concur_id_func and not hasattr(core.tracer_class, "concur_id_func"):
            raise ConfigError(
                "Can't support concurrency={} with {}, only threads are supported.".format(
                    tried,
                    self.tracer_name(),
                ),
            )

        if do_threading or not concurrencies:
            # It's important to import threading only if we need it.  If
            # it's imported early, and the program being measured uses
            # gevent, then gevent's monkey-patching won't work properly.
            import threading

            self.threading = threading

        self.reset()

    def __repr__(self) -> str:
        return f"<Collector at {id(self):#x}: {self.tracer_name()}>"

    def use_data(self, covdata: CoverageData, context: str | None) -> None:
        """Use `covdata` for recording data."""
        self.covdata = covdata
        self.static_context = context
        self.covdata.set_context(self.static_context)

    def tracer_name(self) -> str:
        """Return the class name of the tracer we're using."""
        return self.core.tracer_class.__name__

    def _clear_data(self) -> None:
        """Clear out existing data, but stay ready for more collection."""
        # We used to use self.data.clear(), but that would remove filename
        # keys and data values that were still in use higher up the stack
        # when we are called as part of switch_context.
        with self.data_lock or contextlib.nullcontext():
            for d in self.data.values():
                d.clear()

        for tracer in self.tracers:
            tracer.reset_activity()

    def reset(self) -> None:
        """Clear collected data, and prepare to collect more."""
        self.data_lock = self.threading.Lock() if self.threading else None

        # The trace data we are collecting.
        self.data: TTraceData = {}

        # A dictionary mapping file names to file tracer plugin names that will
        # handle them.
        self.file_tracers: dict[str, str] = {}

        self.disabled_plugins: set[str] = set()

        # The .should_trace_cache attribute is a cache from file names to
        # coverage.FileDisposition objects, or None.  When a file is first
        # considered for tracing, a FileDisposition is obtained from
        # Coverage.should_trace.  Its .trace attribute indicates whether the
        # file should be traced or not.  If it should be, a plugin with dynamic
        # file names can decide not to trace it based on the dynamic file name
        # being excluded by the inclusion rules, in which case the
        # FileDisposition will be replaced by None in the cache.
        if env.PYPY:
            import __pypy__  # pylint: disable=import-error

            # Alex Gaynor said:
            # should_trace_cache is a strictly growing key: once a key is in
            # it, it never changes.  Further, the keys used to access it are
            # generally constant, given sufficient context. That is to say, at
            # any given point _trace() is called, pypy is able to know the key.
            # This is because the key is determined by the physical source code
            # line, and that's invariant with the call site.
            #
            # This property of a dict with immutable keys, combined with
            # call-site-constant keys is a match for PyPy's module dict,
            # which is optimized for such workloads.
            #
            # This gives a 20% benefit on the workload described at
            # https://bitbucket.org/pypy/pypy/issue/1871/10x-slower-than-cpython-under-coverage
            self.should_trace_cache = __pypy__.newdict("module")
        else:
            self.should_trace_cache = {}

        # Our active Tracers.
        self.tracers: list[Tracer] = []

        self._clear_data()

    def lock_data(self) -> None:
        """Lock self.data_lock, for use by the C tracer."""
        if self.data_lock is not None:
            self.data_lock.acquire()

    def unlock_data(self) -> None:
        """Unlock self.data_lock, for use by the C tracer."""
        if self.data_lock is not None:
            self.data_lock.release()

    def _start_tracer(self) -> TTraceFn | None:
        """Start a new Tracer object, and store it in self.tracers."""
        tracer = self.core.tracer_class(**self.core.tracer_kwargs)
        tracer.data = self.data
        tracer.lock_data = self.lock_data
        tracer.unlock_data = self.unlock_data
        tracer.trace_arcs = self.branch
        tracer.should_trace = self.should_trace
        tracer.should_trace_cache = self.should_trace_cache
        tracer.warn = self.warn

        if hasattr(tracer, "concur_id_func"):
            tracer.concur_id_func = self.concur_id_func
        if hasattr(tracer, "file_tracers"):
            tracer.file_tracers = self.file_tracers
        if hasattr(tracer, "threading"):
            tracer.threading = self.threading
        if hasattr(tracer, "check_include"):
            tracer.check_include = self.check_include
        if hasattr(tracer, "should_start_context"):
            tracer.should_start_context = self.should_start_context
        if hasattr(tracer, "switch_context"):
            tracer.switch_context = self.switch_context
        if hasattr(tracer, "disable_plugin"):
            tracer.disable_plugin = self.disable_plugin

        fn = tracer.start()
        self.tracers.append(tracer)

        return fn

    # The trace function has to be set individually on each thread before
    # execution begins.  Ironically, the only support the threading module has
    # for running code before the thread main is the tracing function.  So we
    # install this as a trace function, and the first time it's called, it does
    # the real trace installation.
    #
    # New in 3.12: threading.settrace_all_threads: https://github.com/python/cpython/pull/96681

    def _installation_trace(self, frame: FrameType, event: str, arg: Any) -> TTraceFn | None:
        """Called on new threads, installs the real tracer."""
        # Remove ourselves as the trace function.
        sys.settrace(None)
        # Install the real tracer.
        fn: TTraceFn | None = self._start_tracer()
        # Invoke the real trace function with the current event, to be sure
        # not to lose an event.
        if fn:
            fn = fn(frame, event, arg)
        # Return the new trace function to continue tracing in this scope.
        return fn

    def start(self) -> None:
        """Start collecting trace information."""
        # We may be a new collector in a forked process.  The old process'
        # collectors will be in self._collectors, but they won't be usable.
        # Find them and discard them.
        keep_collectors = []
        for c in self._collectors:
            if c.pid == self.pid:
                keep_collectors.append(c)
            else:
                c.post_fork()
        self._collectors[:] = keep_collectors

        if self._collectors:
            self._collectors[-1].pause()

        self.tracers = []

        try:
            # Install the tracer on this thread.
            self._start_tracer()
        except:
            if self._collectors:
                self._collectors[-1].resume()
            raise

        # If _start_tracer succeeded, then we add ourselves to the global
        # stack of collectors.
        self._collectors.append(self)

        # Install our installation tracer in threading, to jump-start other
        # threads.
        if self.core.systrace and self.threading:
            self.threading.settrace(self._installation_trace)

    def stop(self) -> None:
        """Stop collecting trace information."""
        assert self._collectors
        if self._collectors[-1] is not self:
            print("self._collectors:")
            for c in self._collectors:
                print(f" {c!r}\n{c.origin}")
        assert self._collectors[-1] is self, (
            f"Expected current collector to be {self!r}, but it's {self._collectors[-1]!r}"
        )

        self.pause()

        # Remove this Collector from the stack, and resume the one underneath (if any).
        self._collectors.pop()
        if self._collectors:
            self._collectors[-1].resume()

    def pause(self) -> None:
        """Pause tracing, but be prepared to `resume`."""
        for tracer in self.tracers:
            tracer.stop()
            stats = tracer.get_stats()
            if stats:
                print(f"\nCoverage.py {tracer.__class__.__name__} stats:")
                for k, v in human_sorted_items(stats.items()):
                    print(f"{k:>20}: {v}")
        if self.threading:
            self.threading.settrace(None)

    def resume(self) -> None:
        """Resume tracing after a `pause`."""
        for tracer in self.tracers:
            tracer.start()
        if self.core.systrace:
            if self.threading:
                self.threading.settrace(self._installation_trace)
            else:
                self._start_tracer()

    def post_fork(self) -> None:
        """After a fork, tracers might need to adjust."""
        for tracer in self.tracers:
            if hasattr(tracer, "post_fork"):
                tracer.post_fork()

    def _activity(self) -> bool:
        """Has any activity been traced?

        Returns a boolean, True if any trace function was invoked.

        """
        return any(tracer.activity() for tracer in self.tracers)

    def switch_context(self, new_context: str | None) -> None:
        """Switch to a new dynamic context."""
        context: str | None
        self.flush_data()
        if self.static_context:
            context = self.static_context
            if new_context:
                context += "|" + new_context
        else:
            context = new_context
        self.covdata.set_context(context)

    def disable_plugin(self, disposition: TFileDisposition) -> None:
        """Disable the plugin mentioned in `disposition`."""
        file_tracer = disposition.file_tracer
        assert file_tracer is not None
        plugin = file_tracer._coverage_plugin
        plugin_name = plugin._coverage_plugin_name
        self.warn(f"Disabling plug-in {plugin_name!r} due to previous exception")
        plugin._coverage_enabled = False
        disposition.trace = False

    @functools.cache  # pylint: disable=method-cache-max-size-none
    def cached_mapped_file(self, filename: str) -> str:
        """A locally cached version of file names mapped through file_mapper."""
        return self.file_mapper(filename)

    def mapped_file_dict(self, d: Mapping[str, T]) -> dict[str, T]:
        """Return a dict like d, but with keys modified by file_mapper."""
        # The call to list(items()) ensures that the GIL protects the dictionary
        # iterator against concurrent modifications by tracers running
        # in other threads.  We try three times in case of concurrent
        # access, hoping to get a clean copy.
        runtime_err = None
        for _ in range(3):  # pragma: part covered
            try:
                items = list(d.items())
            except RuntimeError as ex:  # pragma: cant happen
                runtime_err = ex
            else:
                break
        else:  # pragma: cant happen
            assert isinstance(runtime_err, Exception)
            raise runtime_err

        return {self.cached_mapped_file(k): v for k, v in items if v}

    def plugin_was_disabled(self, plugin: CoveragePlugin) -> None:
        """Record that `plugin` was disabled during the run."""
        self.disabled_plugins.add(plugin._coverage_plugin_name)

    def flush_data(self) -> bool:
        """Save the collected data to our associated `CoverageData`.

        Data may have also been saved along the way.  This forces the
        last of the data to be saved.

        Returns True if there was data to save, False if not.
        """
        if not self._activity():
            return False

        if self.branch:
            if self.core.packed_arcs:
                # Unpack the line number pairs packed into integers.  See
                # tracer.c:CTracer_record_pair for the C code that creates
                # these packed ints.
                arc_data: dict[str, list[TArc]] = {}
                packed_data = cast(dict[str, set[int]], self.data)

                # The list() here and in the inner loop are to get a clean copy
                # even as tracers are continuing to add data.
                for fname, packeds in list(packed_data.items()):
                    tuples = []
                    for packed in list(packeds):
                        l1 = packed & 0xFFFFF
                        l2 = (packed & (0xFFFFF << 20)) >> 20
                        if packed & (1 << 40):
                            l1 *= -1
                        if packed & (1 << 41):
                            l2 *= -1
                        tuples.append((l1, l2))
                    arc_data[fname] = tuples
            else:
                arc_data = cast(dict[str, list[TArc]], self.data)
            self.covdata.add_arcs(self.mapped_file_dict(arc_data))
        else:
            line_data = cast(dict[str, set[int]], self.data)
            self.covdata.add_lines(self.mapped_file_dict(line_data))

        file_tracers = {
            k: v for k, v in self.file_tracers.items() if v not in self.disabled_plugins
        }
        self.covdata.add_file_tracers(self.mapped_file_dict(file_tracers))

        self._clear_data()
        return True
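The bit layout unpacked in flush_data() packs an arc into one integer: bits 0-19 hold the magnitude of the "from" line, bits 20-39 the "to" line, and bits 40/41 flag negative values (negative line numbers mark entry/exit pseudo-arcs, as in bytecode.py's -code.co_firstlineno). A worked round-trip; pack_arc is a hypothetical inverse written for illustration, since the real packing happens in tracer.c:CTracer_record_pair:

    def pack_arc(l1: int, l2: int) -> int:
        # Hypothetical inverse of the unpacking loop in flush_data().
        packed = (abs(l1) & 0xFFFFF) | ((abs(l2) & 0xFFFFF) << 20)
        if l1 < 0:
            packed |= 1 << 40
        if l2 < 0:
            packed |= 1 << 41
        return packed

    def unpack_arc(packed: int) -> tuple[int, int]:
        # Same arithmetic as flush_data().
        l1 = packed & 0xFFFFF
        l2 = (packed & (0xFFFFF << 20)) >> 20
        if packed & (1 << 40):
            l1 *= -1
        if packed & (1 << 41):
            l2 *= -1
        return (l1, l2)

    assert unpack_arc(pack_arc(12, 17)) == (12, 17)
    assert unpack_arc(pack_arc(-1, 5)) == (-1, 5)  # entry arc into line 5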
venv/lib/python3.12/site-packages/coverage/config.py (new file, 717 lines)
@@ -0,0 +1,717 @@
|
||||
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
|
||||
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
|
||||
|
||||
"""Config file for coverage.py"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import base64
|
||||
import collections
|
||||
import configparser
|
||||
import copy
|
||||
import json
|
||||
import os
|
||||
import os.path
|
||||
import re
|
||||
from collections.abc import Iterable
|
||||
from typing import Any, Callable, Final, Mapping, Union
|
||||
|
||||
from coverage.exceptions import ConfigError
|
||||
from coverage.misc import human_sorted_items, isolate_module, substitute_variables
|
||||
from coverage.tomlconfig import TomlConfigParser, TomlDecodeError
|
||||
from coverage.types import (
|
||||
TConfigSectionIn,
|
||||
TConfigSectionOut,
|
||||
TConfigurable,
|
||||
TConfigValueIn,
|
||||
TConfigValueOut,
|
||||
TPluginConfig,
|
||||
)
|
||||
|
||||
os = isolate_module(os)
|
||||
|
||||
|
||||
class HandyConfigParser(configparser.ConfigParser):
|
||||
"""Our specialization of ConfigParser."""
|
||||
|
||||
def __init__(self, our_file: bool) -> None:
|
||||
"""Create the HandyConfigParser.
|
||||
|
||||
`our_file` is True if this config file is specifically for coverage,
|
||||
False if we are examining another config file (tox.ini, setup.cfg)
|
||||
for possible settings.
|
||||
"""
|
||||
|
||||
super().__init__(interpolation=None)
|
||||
self.section_prefixes = ["coverage:"]
|
||||
if our_file:
|
||||
self.section_prefixes.append("")
|
||||
|
||||
def read( # type: ignore[override]
|
||||
self,
|
||||
filenames: Iterable[str],
|
||||
encoding_unused: str | None = None,
|
||||
) -> list[str]:
|
||||
"""Read a file name as UTF-8 configuration data."""
|
||||
return super().read(filenames, encoding="utf-8")
|
||||
|
||||
def real_section(self, section: str) -> str | None:
|
||||
"""Get the actual name of a section."""
|
||||
for section_prefix in self.section_prefixes:
|
||||
real_section = section_prefix + section
|
||||
has = super().has_section(real_section)
|
||||
if has:
|
||||
return real_section
|
||||
return None
|
||||
|
||||
def has_option(self, section: str, option: str) -> bool: # type: ignore[override]
|
||||
real_section = self.real_section(section)
|
||||
if real_section is not None:
|
||||
return super().has_option(real_section, option)
|
||||
return False
|
||||
|
||||
def has_section(self, section: str) -> bool: # type: ignore[override]
|
||||
return bool(self.real_section(section))
|
||||
|
||||
def options(self, section: str) -> list[str]: # type: ignore[override]
|
||||
real_section = self.real_section(section)
|
||||
if real_section is not None:
|
||||
return super().options(real_section)
|
||||
raise ConfigError(f"No section: {section!r}")
|
||||
|
||||
def get_section(self, section: str) -> TConfigSectionOut:
|
||||
"""Get the contents of a section, as a dictionary."""
|
||||
d: dict[str, TConfigValueOut] = {}
|
||||
for opt in self.options(section):
|
||||
d[opt] = self.get(section, opt)
|
||||
return d
|
||||
|
||||
def get(self, section: str, option: str, *args: Any, **kwargs: Any) -> str: # type: ignore
|
||||
"""Get a value, replacing environment variables also.
|
||||
|
||||
The arguments are the same as `ConfigParser.get`, but in the found
|
||||
value, ``$WORD`` or ``${WORD}`` are replaced by the value of the
|
||||
environment variable ``WORD``.
|
||||
|
||||
Returns the finished value.
|
||||
|
||||
"""
|
||||
for section_prefix in self.section_prefixes:
|
||||
real_section = section_prefix + section
|
||||
if super().has_option(real_section, option):
|
||||
break
|
||||
else:
|
||||
raise ConfigError(f"No option {option!r} in section: {section!r}")
|
||||
|
||||
v: str = super().get(real_section, option, *args, **kwargs)
|
||||
v = substitute_variables(v, os.environ)
|
||||
return v
|
||||
|
||||
def getfile(self, section: str, option: str) -> str:
|
||||
"""Fix up a file path setting."""
|
||||
path = self.get(section, option)
|
||||
return process_file_value(path)
|
||||
|
||||
def getlist(self, section: str, option: str) -> list[str]:
|
||||
"""Read a list of strings.
|
||||
|
||||
The value of `section` and `option` is treated as a comma- and newline-
|
||||
separated list of strings. Each value is stripped of white space.
|
||||
|
||||
Returns the list of strings.
|
||||
|
||||
"""
|
||||
value_list = self.get(section, option)
|
||||
values = []
|
||||
for value_line in value_list.split("\n"):
|
||||
for value in value_line.split(","):
|
||||
value = value.strip()
|
||||
if value:
|
||||
values.append(value)
|
||||
return values
|
||||
|
||||
def getregexlist(self, section: str, option: str) -> list[str]:
|
||||
"""Read a list of full-line regexes.
|
||||
|
||||
The value of `section` and `option` is treated as a newline-separated
|
||||
list of regexes. Each value is stripped of white space.
|
||||
|
||||
Returns the list of strings.
|
||||
|
||||
"""
|
||||
line_list = self.get(section, option)
|
||||
return process_regexlist(section, option, line_list.splitlines())
|
||||
|
||||
|
||||
TConfigParser = Union[HandyConfigParser, TomlConfigParser]
|
||||
|
||||
|
||||
# The default line exclusion regexes.
|
||||
DEFAULT_EXCLUDE = [
|
||||
r"#\s*(pragma|PRAGMA)[:\s]?\s*(no|NO)\s*(cover|COVER)",
|
||||
r"^\s*(((async )?def .*?)?\)(\s*->.*?)?:\s*)?\.\.\.\s*(#|$)",
|
||||
r"if (typing\.)?TYPE_CHECKING:",
|
||||
]
|
||||
|
||||
# The default partial branch regexes, to be modified by the user.
|
||||
DEFAULT_PARTIAL = [
|
||||
r"#\s*(pragma|PRAGMA)[:\s]?\s*(no|NO)\s*(branch|BRANCH)",
|
||||
]
|
||||
|
||||
# The default partial branch regexes, based on Python semantics.
|
||||
# These are any Python branching constructs that can't actually execute all
|
||||
# their branches.
|
||||
DEFAULT_PARTIAL_ALWAYS = [
|
||||
"while (True|1|False|0):",
|
||||
"if (True|1|False|0):",
|
||||
]
|
||||
|
||||
|
||||
class CoverageConfig(TConfigurable, TPluginConfig):
|
||||
"""Coverage.py configuration.
|
||||
|
||||
The attributes of this class are the various settings that control the
|
||||
operation of coverage.py.
|
||||
|
||||
"""
|
||||
|
||||
# pylint: disable=too-many-instance-attributes
|
||||
|
||||
def __init__(self) -> None:
|
||||
"""Initialize the configuration attributes to their defaults."""
|
||||
# Metadata about the config.
|
||||
# We tried to read these config files.
|
||||
self.config_files_attempted: list[str] = []
|
||||
# We did read these config files, but maybe didn't find any content for us.
|
||||
self.config_files_read: list[str] = []
|
||||
# The file that gave us our configuration.
|
||||
self.config_file: str | None = None
|
||||
self._config_contents: bytes | None = None
|
||||
|
||||
# Defaults for [run] and [report]
|
||||
self._include = None
|
||||
self._omit = None
|
||||
|
||||
# Defaults for [run]
|
||||
self.branch = False
|
||||
self.command_line: str | None = None
|
||||
self.concurrency: list[str] = []
|
||||
self.context: str | None = None
|
||||
self.core: str | None = None
|
||||
self.cover_pylib = False
|
||||
self.data_file = ".coverage"
|
||||
self.debug: list[str] = []
|
||||
self.debug_file: str | None = None
|
||||
self.disable_warnings: list[str] = []
|
||||
self.dynamic_context: str | None = None
|
||||
self.parallel = False
|
||||
self.patch: list[str] = []
|
||||
self.plugins: list[str] = []
|
||||
self.relative_files = False
|
||||
self.run_include: list[str] = []
|
||||
self.run_omit: list[str] = []
|
||||
self.sigterm = False
|
||||
self.source: list[str] | None = None
|
||||
self.source_pkgs: list[str] = []
|
||||
self.source_dirs: list[str] = []
|
||||
self.timid = False
|
||||
self._crash: str | None = None
|
||||
|
||||
# Defaults for [report]
|
||||
self.exclude_list = DEFAULT_EXCLUDE[:]
|
||||
self.exclude_also: list[str] = []
|
||||
self.fail_under = 0.0
|
||||
self.format: str | None = None
|
||||
self.ignore_errors = False
|
||||
self.include_namespace_packages = False
|
||||
self.report_include: list[str] | None = None
|
||||
self.report_omit: list[str] | None = None
|
||||
self.partial_always_list = DEFAULT_PARTIAL_ALWAYS[:]
|
||||
self.partial_list = DEFAULT_PARTIAL[:]
|
||||
self.partial_also: list[str] = []
|
||||
self.precision = 0
|
||||
self.report_contexts: list[str] | None = None
|
||||
self.show_missing = False
|
||||
self.skip_covered = False
|
||||
self.skip_empty = False
|
||||
self.sort: str | None = None
|
||||
|
||||
# Defaults for [html]
|
||||
self.extra_css: str | None = None
|
||||
self.html_dir = "htmlcov"
|
||||
self.html_skip_covered: bool | None = None
|
||||
self.html_skip_empty: bool | None = None
|
||||
self.html_title = "Coverage report"
|
||||
self.show_contexts = False
|
||||
|
||||
# Defaults for [xml]
|
||||
self.xml_output = "coverage.xml"
|
||||
self.xml_package_depth = 99
|
||||
|
||||
# Defaults for [json]
|
||||
self.json_output = "coverage.json"
|
||||
self.json_pretty_print = False
|
||||
self.json_show_contexts = False
|
||||
|
||||
# Defaults for [lcov]
|
||||
self.lcov_output = "coverage.lcov"
|
||||
self.lcov_line_checksums = False
|
||||
|
||||
# Defaults for [paths]
|
||||
self.paths: dict[str, list[str]] = {}
|
||||
|
||||
# Options for plugins
|
||||
self.plugin_options: dict[str, TConfigSectionOut] = {}
|
||||
|
||||
MUST_BE_LIST = {
|
||||
"debug",
|
||||
"concurrency",
|
||||
"plugins",
|
||||
"report_omit",
|
||||
"report_include",
|
||||
"run_omit",
|
||||
"run_include",
|
||||
"patch",
|
||||
}
|
||||
|
||||
# File paths to make absolute during serialization.
|
||||
# The pairs are (config_key, must_exist).
|
||||
SERIALIZE_ABSPATH = {
|
||||
("data_file", False),
|
||||
("debug_file", False),
|
||||
# `source` can be directories or modules, so don't abspath it if it
|
||||
# doesn't exist.
|
||||
("source", True),
|
||||
("source_dirs", False),
|
||||
}
|
||||
|
||||
def from_args(self, **kwargs: TConfigValueIn) -> None:
|
||||
"""Read config values from `kwargs`."""
|
||||
for k, v in kwargs.items():
|
||||
if v is not None:
|
||||
if k in self.MUST_BE_LIST and isinstance(v, str):
|
||||
v = [v]
|
||||
setattr(self, k, v)
|
||||
|
||||
def from_file(self, filename: str, warn: Callable[[str], None], our_file: bool) -> bool:
|
||||
"""Read configuration from a .rc file.
|
||||
|
||||
`filename` is a file name to read.
|
||||
|
||||
`our_file` is True if this config file is specifically for coverage,
|
||||
False if we are examining another config file (tox.ini, setup.cfg)
|
||||
for possible settings.
|
||||
|
||||
Returns True or False, whether the file could be read, and it had some
|
||||
coverage.py settings in it.
|
||||
|
||||
"""
|
||||
_, ext = os.path.splitext(filename)
|
||||
cp: TConfigParser
|
||||
if ext == ".toml":
|
||||
cp = TomlConfigParser(our_file)
|
||||
else:
|
||||
cp = HandyConfigParser(our_file)
|
||||
|
||||
self.config_files_attempted.append(os.path.abspath(filename))
|
||||
|
||||
try:
|
||||
files_read = cp.read(filename)
|
||||
except (configparser.Error, TomlDecodeError) as err:
|
||||
raise ConfigError(f"Couldn't read config file {filename}: {err}") from err
|
||||
if not files_read:
|
||||
return False
|
||||
|
||||
self.config_files_read.extend(map(os.path.abspath, files_read))
|
||||
|
||||
any_set = False
|
||||
try:
|
||||
for option_spec in self.CONFIG_FILE_OPTIONS:
|
||||
was_set = self._set_attr_from_config_option(cp, *option_spec)
|
||||
if was_set:
|
||||
any_set = True
|
||||
except ValueError as err:
|
||||
raise ConfigError(f"Couldn't read config file {filename}: {err}") from err
|
||||
|
||||
# Check that there are no unrecognized options.
|
||||
all_options = collections.defaultdict(set)
|
||||
for option_spec in self.CONFIG_FILE_OPTIONS:
|
||||
section, option = option_spec[1].split(":")
|
||||
all_options[section].add(option)
|
||||
|
||||
for section, options in all_options.items():
|
||||
real_section = cp.real_section(section)
|
||||
if real_section:
|
||||
for unknown in set(cp.options(section)) - options:
|
||||
warn(
|
||||
"Unrecognized option '[{}] {}=' in config file {}".format(
|
||||
real_section,
|
||||
unknown,
|
||||
filename,
|
||||
),
|
||||
)
|
||||
|
||||
# [paths] is special
|
||||
if cp.has_section("paths"):
|
||||
for option in cp.options("paths"):
|
||||
self.paths[option] = cp.getlist("paths", option)
|
||||
any_set = True
|
||||
|
||||
# plugins can have options
|
||||
for plugin in self.plugins:
|
||||
if cp.has_section(plugin):
|
||||
self.plugin_options[plugin] = cp.get_section(plugin)
|
||||
any_set = True
|
||||
|
||||
# Was this file used as a config file? If it's specifically our file,
|
||||
# then it was used. If we're piggybacking on someone else's file,
|
||||
# then it was only used if we found some settings in it.
|
||||
if our_file:
|
||||
used = True
|
||||
else:
|
||||
used = any_set
|
||||
|
||||
if used:
|
||||
self.config_file = os.path.abspath(filename)
|
||||
with open(filename, "rb") as f:
|
||||
self._config_contents = f.read()
|
||||
|
||||
return used
|
||||
|
||||
def copy(self) -> CoverageConfig:
|
||||
"""Return a copy of the configuration."""
|
||||
return copy.deepcopy(self)
|
||||
|
||||
CONCURRENCY_CHOICES: Final[set[str]] = {
|
||||
"thread",
|
||||
"gevent",
|
||||
"greenlet",
|
||||
"eventlet",
|
||||
"multiprocessing",
|
||||
}
|
||||
|
||||
CONFIG_FILE_OPTIONS = [
|
||||
# These are *args for _set_attr_from_config_option:
|
||||
# (attr, where, type_="")
|
||||
#
|
||||
# attr is the attribute to set on the CoverageConfig object.
|
||||
# where is the section:name to read from the configuration file.
|
||||
# type_ is the optional type to apply, by using .getTYPE to read the
|
||||
# configuration value from the file.
|
||||
#
|
||||
# [run]
|
||||
("branch", "run:branch", "boolean"),
|
||||
("command_line", "run:command_line"),
|
||||
("concurrency", "run:concurrency", "list"),
|
||||
("context", "run:context"),
|
||||
("core", "run:core"),
|
||||
("cover_pylib", "run:cover_pylib", "boolean"),
|
||||
("data_file", "run:data_file", "file"),
|
||||
("debug", "run:debug", "list"),
|
||||
("debug_file", "run:debug_file", "file"),
|
||||
("disable_warnings", "run:disable_warnings", "list"),
|
||||
("dynamic_context", "run:dynamic_context"),
|
||||
("parallel", "run:parallel", "boolean"),
|
||||
("patch", "run:patch", "list"),
|
||||
("plugins", "run:plugins", "list"),
|
||||
("relative_files", "run:relative_files", "boolean"),
|
||||
("run_include", "run:include", "list"),
|
||||
("run_omit", "run:omit", "list"),
|
||||
("sigterm", "run:sigterm", "boolean"),
|
||||
("source", "run:source", "list"),
|
||||
("source_pkgs", "run:source_pkgs", "list"),
|
||||
("source_dirs", "run:source_dirs", "list"),
|
||||
("timid", "run:timid", "boolean"),
|
||||
("_crash", "run:_crash"),
|
||||
#
|
||||
# [report]
|
||||
("exclude_list", "report:exclude_lines", "regexlist"),
|
||||
("exclude_also", "report:exclude_also", "regexlist"),
|
||||
("fail_under", "report:fail_under", "float"),
|
||||
("format", "report:format"),
|
||||
("ignore_errors", "report:ignore_errors", "boolean"),
|
||||
("include_namespace_packages", "report:include_namespace_packages", "boolean"),
|
||||
("partial_always_list", "report:partial_branches_always", "regexlist"),
|
||||
("partial_list", "report:partial_branches", "regexlist"),
|
||||
("partial_also", "report:partial_also", "regexlist"),
|
||||
("precision", "report:precision", "int"),
|
||||
("report_contexts", "report:contexts", "list"),
|
||||
("report_include", "report:include", "list"),
|
||||
("report_omit", "report:omit", "list"),
|
||||
("show_missing", "report:show_missing", "boolean"),
|
||||
("skip_covered", "report:skip_covered", "boolean"),
|
||||
("skip_empty", "report:skip_empty", "boolean"),
|
||||
("sort", "report:sort"),
|
||||
#
|
||||
# [html]
|
||||
("extra_css", "html:extra_css"),
|
||||
("html_dir", "html:directory", "file"),
|
||||
("html_skip_covered", "html:skip_covered", "boolean"),
|
||||
("html_skip_empty", "html:skip_empty", "boolean"),
|
||||
("html_title", "html:title"),
|
||||
("show_contexts", "html:show_contexts", "boolean"),
|
||||
#
|
||||
# [xml]
|
||||
("xml_output", "xml:output", "file"),
|
||||
("xml_package_depth", "xml:package_depth", "int"),
|
||||
#
|
||||
# [json]
|
||||
("json_output", "json:output", "file"),
|
||||
("json_pretty_print", "json:pretty_print", "boolean"),
|
||||
("json_show_contexts", "json:show_contexts", "boolean"),
|
||||
#
|
||||
# [lcov]
|
||||
("lcov_output", "lcov:output", "file"),
|
||||
("lcov_line_checksums", "lcov:line_checksums", "boolean"),
|
||||
]
|
||||
|
||||
def _set_attr_from_config_option(
|
||||
self,
|
||||
cp: TConfigParser,
|
||||
attr: str,
|
||||
where: str,
|
||||
type_: str = "",
|
||||
) -> bool:
|
||||
"""Set an attribute on self if it exists in the ConfigParser.
|
||||
|
||||
Returns True if the attribute was set.
|
||||
|
||||
"""
|
||||
section, option = where.split(":")
|
||||
if cp.has_option(section, option):
|
||||
method = getattr(cp, f"get{type_}")
|
||||
setattr(self, attr, method(section, option))
|
||||
return True
|
||||
return False
|
||||
|
||||
def get_plugin_options(self, plugin: str) -> TConfigSectionOut:
|
||||
"""Get a dictionary of options for the plugin named `plugin`."""
|
||||
return self.plugin_options.get(plugin, {})
|
||||
|
||||
def set_option(self, option_name: str, value: TConfigValueIn | TConfigSectionIn) -> None:
|
||||
"""Set an option in the configuration.
|
||||
|
||||
`option_name` is a colon-separated string indicating the section and
|
||||
option name. For example, the ``branch`` option in the ``[run]``
|
||||
section of the config file would be indicated with `"run:branch"`.
|
||||
|
||||
`value` is the new value for the option.
|
||||
|
||||
"""
|
||||
# Special-cased options.
|
||||
if option_name == "paths":
|
||||
# This is ugly, but type-checks and ensures the values are close
|
||||
# to right.
|
||||
self.paths = {}
|
||||
assert isinstance(value, Mapping)
|
||||
for k, v in value.items():
|
||||
assert isinstance(v, Iterable)
|
||||
self.paths[k] = list(v)
|
||||
return
|
||||
|
||||
# Check all the hard-coded options.
|
||||
for option_spec in self.CONFIG_FILE_OPTIONS:
|
||||
attr, where = option_spec[:2]
|
||||
if where == option_name:
|
||||
setattr(self, attr, value)
|
||||
return
|
||||
|
||||
# See if it's a plugin option.
|
||||
plugin_name, _, key = option_name.partition(":")
|
||||
if key and plugin_name in self.plugins:
|
||||
self.plugin_options.setdefault(plugin_name, {})[key] = value # type: ignore[index]
|
||||
return
|
||||
|
||||
# If we get here, we didn't find the option.
|
||||
raise ConfigError(f"No such option: {option_name!r}")
|
||||
|
||||
def get_option(self, option_name: str) -> TConfigValueOut | None:
|
||||
"""Get an option from the configuration.
|
||||
|
||||
`option_name` is a colon-separated string indicating the section and
|
||||
option name. For example, the ``branch`` option in the ``[run]``
|
||||
section of the config file would be indicated with `"run:branch"`.
|
||||
|
||||
Returns the value of the option.
|
||||
|
||||
"""
|
||||
# Special-cased options.
|
||||
if option_name == "paths":
|
||||
return self.paths
|
||||
|
||||
# Check all the hard-coded options.
|
||||
for option_spec in self.CONFIG_FILE_OPTIONS:
|
||||
attr, where = option_spec[:2]
|
||||
if where == option_name:
|
||||
return getattr(self, attr) # type: ignore[no-any-return]
|
||||
|
||||
# See if it's a plugin option.
|
||||
plugin_name, _, key = option_name.partition(":")
|
||||
if key and plugin_name in self.plugins:
|
||||
return self.plugin_options.get(plugin_name, {}).get(key)
|
||||
|
||||
# If we get here, we didn't find the option.
|
||||
raise ConfigError(f"No such option: {option_name!r}")
|
||||
|
||||
def post_process(self) -> None:
|
||||
"""Make final adjustments to settings to make them usable."""
|
||||
self.paths = {k: [process_file_value(f) for f in v] for k, v in self.paths.items()}
|
||||
|
||||
self.exclude_list += self.exclude_also
|
||||
self.partial_list += self.partial_also
|
||||
|
||||
if "subprocess" in self.patch:
|
||||
self.parallel = True
|
||||
|
||||
def debug_info(self) -> list[tuple[str, Any]]:
|
||||
"""Make a list of (name, value) pairs for writing debug info."""
|
||||
return human_sorted_items((k, v) for k, v in self.__dict__.items() if not k.startswith("_"))
|
||||
|
||||
def serialize(self) -> str:
|
||||
"""Convert to a string that can be ingested with `deserialize`.
|
||||
|
||||
File paths used by `coverage run` are made absolute to ensure the
|
||||
deserialized config will refer to the same files.
|
||||
"""
|
||||
data = {k: v for k, v in self.__dict__.items() if not k.startswith("_")}
|
||||
for k, must_exist in self.SERIALIZE_ABSPATH:
|
||||
abs_fn = abs_path_if_exists if must_exist else os.path.abspath
|
||||
v = data[k]
|
||||
if isinstance(v, list):
|
||||
v = list(map(abs_fn, v))
|
||||
elif isinstance(v, str):
|
||||
v = abs_fn(v)
|
||||
data[k] = v
|
        return base64.b64encode(json.dumps(data).encode()).decode()

    @classmethod
    def deserialize(cls, config_str: str) -> CoverageConfig:
        """Take a string from `serialize`, and make a CoverageConfig."""
        data = json.loads(base64.b64decode(config_str.encode()).decode())
        config = cls()
        config.__dict__.update(data)
        return config


def process_file_value(path: str) -> str:
    """Make adjustments to a file path to make it usable."""
    return os.path.expanduser(path)


def abs_path_if_exists(path: str) -> str:
    """os.path.abspath, but only if the path exists."""
    if os.path.exists(path):
        return os.path.abspath(path)
    else:
        return path


def process_regexlist(name: str, option: str, values: list[str]) -> list[str]:
    """Check the values in a regex list and keep the non-blank ones."""
    value_list = []
    for value in values:
        value = value.strip()
        try:
            re.compile(value)
        except re.error as e:
            raise ConfigError(f"Invalid [{name}].{option} value {value!r}: {e}") from e
        if value:
            value_list.append(value)
    return value_list


def config_files_to_try(config_file: bool | str) -> list[tuple[str, bool, bool]]:
    """What config files should we try to read?

    Returns a list of tuples:
        (filename, is_our_file, was_file_specified)
    """

    # Some API users were specifying ".coveragerc" to mean the same as
    # True, so make it so.
    if config_file == ".coveragerc":
        config_file = True
    specified_file = config_file is not True
    if not specified_file:
        # No file was specified. Check COVERAGE_RCFILE.
        rcfile = os.getenv("COVERAGE_RCFILE")
        if rcfile:
            config_file = rcfile
            specified_file = True
    if not specified_file:
        # Still no file specified. Default to .coveragerc
        config_file = ".coveragerc"
    assert isinstance(config_file, str)
    files_to_try = [
        (config_file, True, specified_file),
        ("setup.cfg", False, False),
        ("tox.ini", False, False),
        ("pyproject.toml", False, False),
    ]
    return files_to_try


def read_coverage_config(
    config_file: bool | str,
    warn: Callable[[str], None],
    **kwargs: TConfigValueIn,
) -> CoverageConfig:
    """Read the coverage.py configuration.

    Arguments:
        config_file: a boolean or string, see the `Coverage` class for the
            tricky details.
        warn: a function to issue warnings.
        all others: keyword arguments from the `Coverage` class, used for
            setting values in the configuration.

    Returns:
        config:
            config is a CoverageConfig object read from the appropriate
            configuration file.

    """
    # Build the configuration from a number of sources:
    # 1) defaults:
    config = CoverageConfig()

    # 2) from a file:
    if config_file:
        files_to_try = config_files_to_try(config_file)

        for fname, our_file, specified_file in files_to_try:
            config_read = config.from_file(fname, warn, our_file=our_file)
            if config_read:
                break
            if specified_file:
                raise ConfigError(f"Couldn't read {fname!r} as a config file")

    # 3) from environment variables:
    env_data_file = os.getenv("COVERAGE_FILE")
    if env_data_file:
        config.data_file = env_data_file

    # $set_env.py: COVERAGE_DEBUG - Debug options: https://coverage.rtfd.io/cmd.html#debug
    debugs = os.getenv("COVERAGE_DEBUG")
    if debugs:
        config.debug.extend(d.strip() for d in debugs.split(","))

    # Read the COVERAGE_CORE environment variable for backward compatibility,
    # and because we use it in the test suite to pick a specific core.
    env_core = os.getenv("COVERAGE_CORE")
    if env_core:
        config.core = env_core

    # 4) from constructor arguments:
    config.from_args(**kwargs)

    # 5) for our benchmark, force settings using a secret environment variable:
    force_file = os.getenv("COVERAGE_FORCE_CONFIG")
    if force_file:
        config.from_file(force_file, warn, our_file=True)

    # Once all the config has been collected, there's a little post-processing
    # to do.
    config.post_process()

    return config
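Editor's note: the five-step precedence above (defaults, file, environment, constructor arguments, forced file) can be demonstrated end to end. A minimal sketch, assuming an otherwise empty working directory; `warn=print` stands in for Coverage's real warning callback, and the file names are illustrative only:

    import os
    from coverage.config import read_coverage_config

    os.environ["COVERAGE_FILE"] = ".coverage.env"   # step 3 overrides any file setting
    config = read_coverage_config(
        config_file=True,            # search .coveragerc, setup.cfg, tox.ini, pyproject.toml
        warn=print,                  # any callable taking a message string
        data_file=".coverage.arg",   # step 4: constructor arguments win over step 3
    )
    print(config.data_file)          # ".coverage.arg"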
74
venv/lib/python3.12/site-packages/coverage/context.py
Normal file
@@ -0,0 +1,74 @@
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt

"""Determine contexts for coverage.py"""

from __future__ import annotations

from collections.abc import Sequence
from types import FrameType

from coverage.types import TShouldStartContextFn


def combine_context_switchers(
    context_switchers: Sequence[TShouldStartContextFn],
) -> TShouldStartContextFn | None:
    """Create a single context switcher from multiple switchers.

    `context_switchers` is a list of functions that take a frame as an
    argument and return a string to use as the new context label.

    Returns a function that composites `context_switchers` functions, or None
    if `context_switchers` is an empty list.

    When invoked, the combined switcher calls `context_switchers` one-by-one
    until a string is returned. The combined switcher returns None if all
    `context_switchers` return None.
    """
    if not context_switchers:
        return None

    if len(context_switchers) == 1:
        return context_switchers[0]

    def should_start_context(frame: FrameType) -> str | None:
        """The combiner for multiple context switchers."""
        for switcher in context_switchers:
            new_context = switcher(frame)
            if new_context is not None:
                return new_context
        return None

    return should_start_context


def should_start_context_test_function(frame: FrameType) -> str | None:
    """Is this frame calling a test_* function?"""
    co_name = frame.f_code.co_name
    if co_name.startswith("test") or co_name == "runTest":
        return qualname_from_frame(frame)
    return None


def qualname_from_frame(frame: FrameType) -> str | None:
    """Get a qualified name for the code running in `frame`."""
    co = frame.f_code
    fname = co.co_name
    method = None
    if co.co_argcount and co.co_varnames[0] == "self":
        self = frame.f_locals.get("self", None)
        method = getattr(self, fname, None)

    if method is None:
        func = frame.f_globals.get(fname)
        if func is None:
            return None
        return f"{func.__module__}.{fname}"

    func = getattr(method, "__func__", None)
    if func is None:
        cls = self.__class__
        return f"{cls.__module__}.{cls.__name__}.{fname}"

    return f"{func.__module__}.{func.__qualname__}"
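Editor's note: the combiner above is easy to exercise by hand. A small sketch; `by_marker` and `checkpoint` are illustrative inventions, only the coverage.context imports are real:

    import sys
    from types import FrameType
    from coverage.context import (
        combine_context_switchers,
        should_start_context_test_function,
    )

    def by_marker(frame: FrameType) -> str | None:
        """Hypothetical switcher: label frames running a function named 'checkpoint'."""
        if frame.f_code.co_name == "checkpoint":
            return "marker"
        return None

    switcher = combine_context_switchers([by_marker, should_start_context_test_function])
    assert switcher is not None   # two switchers in, one combined function out

    def checkpoint():
        frame = sys._getframe()
        print(switcher(frame))    # "marker": the first switcher matched, so the second never ran

    checkpoint()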
1478
venv/lib/python3.12/site-packages/coverage/control.py
Normal file
File diff suppressed because it is too large
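Editor's note: the suppressed control.py defines the `Coverage` class re-exported from `coverage/__init__.py`. For orientation, its canonical measurement loop is the documented public API (a sketch, not part of this diff):

    import coverage

    cov = coverage.Coverage()
    cov.start()
    # ... run the code to be measured ...
    cov.stop()
    cov.save()
    cov.report()   # prints a summary table to stdout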
118
venv/lib/python3.12/site-packages/coverage/core.py
Normal file
@@ -0,0 +1,118 @@
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt

"""Management of core choices."""

from __future__ import annotations

import os
import sys
from typing import Any

from coverage import env
from coverage.config import CoverageConfig
from coverage.disposition import FileDisposition
from coverage.exceptions import ConfigError
from coverage.misc import isolate_module
from coverage.pytracer import PyTracer
from coverage.sysmon import SysMonitor
from coverage.types import TFileDisposition, Tracer, TWarnFn

os = isolate_module(os)

IMPORT_ERROR: str = ""

try:
    # Use the C extension code when we can, for speed.
    import coverage.tracer

    CTRACER_FILE: str | None = getattr(coverage.tracer, "__file__", "unknown")
except ImportError as imp_err:
    # Couldn't import the C extension, maybe it isn't built.
    # We still need to check the environment variable directly here,
    # as this code runs before configuration is loaded.
    if os.getenv("COVERAGE_CORE") == "ctrace":  # pragma: part covered
        # During testing, we use the COVERAGE_CORE environment variable
        # to indicate that we've fiddled with the environment to test this
        # fallback code. If we thought we had a C tracer, but couldn't import
        # it, then exit quickly and clearly instead of dribbling confusing
        # errors. I'm using sys.exit here instead of an exception because an
        # exception here causes all sorts of other noise in unittest.
        sys.stderr.write("*** COVERAGE_CORE is 'ctrace' but can't import CTracer!\n")
        sys.exit(1)
    IMPORT_ERROR = str(imp_err)
    CTRACER_FILE = None


class Core:
    """Information about the central technology enabling execution measurement."""

    tracer_class: type[Tracer]
    tracer_kwargs: dict[str, Any]
    file_disposition_class: type[TFileDisposition]
    supports_plugins: bool
    packed_arcs: bool
    systrace: bool

    def __init__(
        self,
        warn: TWarnFn,
        config: CoverageConfig,
        dynamic_contexts: bool,
        metacov: bool,
    ) -> None:
        # Check the conditions that preclude us from using sys.monitoring.
        reason_no_sysmon = ""
        if not env.PYBEHAVIOR.pep669:
            reason_no_sysmon = "isn't available in this version"
        elif config.branch and not env.PYBEHAVIOR.branch_right_left:
            reason_no_sysmon = "can't measure branches in this version"
        elif dynamic_contexts:
            reason_no_sysmon = "doesn't yet support dynamic contexts"

        core_name: str | None = None
        if config.timid:
            core_name = "pytrace"

        if core_name is None:
            core_name = config.core

        if core_name == "sysmon" and reason_no_sysmon:
            warn(f"sys.monitoring {reason_no_sysmon}, using default core", slug="no-sysmon")
            core_name = None

        if core_name is None:
            if env.SYSMON_DEFAULT and not reason_no_sysmon:
                core_name = "sysmon"
            else:
                core_name = "ctrace"

        if core_name == "ctrace":
            if not CTRACER_FILE:
                if IMPORT_ERROR and env.SHIPPING_WHEELS:
                    warn(f"Couldn't import C tracer: {IMPORT_ERROR}", slug="no-ctracer", once=True)
                core_name = "pytrace"

        self.tracer_kwargs = {}

        if core_name == "sysmon":
            self.tracer_class = SysMonitor
            self.tracer_kwargs["tool_id"] = 3 if metacov else 1
            self.file_disposition_class = FileDisposition
            self.supports_plugins = False
            self.packed_arcs = False
            self.systrace = False
        elif core_name == "ctrace":
            self.tracer_class = coverage.tracer.CTracer
            self.file_disposition_class = coverage.tracer.CFileDisposition
            self.supports_plugins = True
            self.packed_arcs = True
            self.systrace = True
        elif core_name == "pytrace":
            self.tracer_class = PyTracer
            self.file_disposition_class = FileDisposition
            self.supports_plugins = False
            self.packed_arcs = False
            self.systrace = True
        else:
            raise ConfigError(f"Unknown core value: {core_name!r}")
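Editor's note: `Core` is built inside `Coverage`, so the `if core_name == ...` ladder above is normally driven by configuration rather than direct construction. A sketch relying on the COVERAGE_CORE handling shown in config.py earlier:

    import os

    os.environ["COVERAGE_CORE"] = "pytrace"   # "ctrace" and "sysmon" are the other cores

    import coverage

    cov = coverage.Coverage()   # read_coverage_config picks up COVERAGE_CORE into config.core
    cov.start()
    total = sum(n * n for n in range(100))    # measured by the pure-Python tracer
    cov.stop()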
227
venv/lib/python3.12/site-packages/coverage/data.py
Normal file
@@ -0,0 +1,227 @@
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt

"""Coverage data for coverage.py.

This file had the 4.x JSON data support, which is now gone. This file still
has storage-agnostic helpers, and is kept to avoid changing too many imports.
CoverageData is now defined in sqldata.py, and imported here to keep the
imports working.

"""

from __future__ import annotations

import functools
import glob
import hashlib
import os.path
from collections.abc import Iterable
from typing import Callable

from coverage.exceptions import CoverageException, NoDataError
from coverage.files import PathAliases
from coverage.misc import Hasher, file_be_gone, human_sorted, plural
from coverage.sqldata import CoverageData as CoverageData  # pylint: disable=useless-import-alias


def line_counts(data: CoverageData, fullpath: bool = False) -> dict[str, int]:
    """Return a dict summarizing the line coverage data.

    Keys are based on the file names, and values are the number of executed
    lines. If `fullpath` is true, then the keys are the full pathnames of
    the files, otherwise they are the basenames of the files.

    Returns a dict mapping file names to counts of lines.

    """
    summ = {}
    filename_fn: Callable[[str], str]
    if fullpath:
        # pylint: disable=unnecessary-lambda-assignment
        filename_fn = lambda f: f
    else:
        filename_fn = os.path.basename
    for filename in data.measured_files():
        lines = data.lines(filename)
        assert lines is not None
        summ[filename_fn(filename)] = len(lines)
    return summ


def add_data_to_hash(data: CoverageData, filename: str, hasher: Hasher) -> None:
    """Contribute `filename`'s data to the `hasher`.

    `hasher` is a `coverage.misc.Hasher` instance to be updated with
    the file's data. It should only get the results data, not the run
    data.

    """
    if data.has_arcs():
        hasher.update(sorted(data.arcs(filename) or []))
    else:
        hasher.update(sorted_lines(data, filename))
    hasher.update(data.file_tracer(filename))


def combinable_files(data_file: str, data_paths: Iterable[str] | None = None) -> list[str]:
    """Make a list of data files to be combined.

    `data_file` is a path to a data file. `data_paths` is a list of files or
    directories of files.

    Returns a list of absolute file paths.
    """
    data_dir, local = os.path.split(os.path.abspath(data_file))

    data_paths = data_paths or [data_dir]
    files_to_combine = []
    for p in data_paths:
        if os.path.isfile(p):
            files_to_combine.append(os.path.abspath(p))
        elif os.path.isdir(p):
            pattern = glob.escape(os.path.join(os.path.abspath(p), local)) + ".*"
            files_to_combine.extend(glob.glob(pattern))
        else:
            raise NoDataError(f"Couldn't combine from non-existent path '{p}'")

    # SQLite might have made journal files alongside our database files.
    # We never want to combine those.
    files_to_combine = [fnm for fnm in files_to_combine if not fnm.endswith("-journal")]

    # Sorting isn't usually needed, since it shouldn't matter what order files
    # are combined, but sorting makes tests more predictable, and makes
    # debugging more understandable when things go wrong.
    return sorted(files_to_combine)


def combine_parallel_data(
    data: CoverageData,
    aliases: PathAliases | None = None,
    data_paths: Iterable[str] | None = None,
    strict: bool = False,
    keep: bool = False,
    message: Callable[[str], None] | None = None,
) -> None:
    """Combine a number of data files together.

    `data` is a CoverageData.

    Treat `data.filename` as a file prefix, and combine the data from all
    of the data files starting with that prefix plus a dot.

    If `aliases` is provided, it's a `PathAliases` object that is used to
    re-map paths to match the local machine's.

    If `data_paths` is provided, it is a list of directories or files to
    combine. Directories are searched for files that start with
    `data.filename` plus dot as a prefix, and those files are combined.

    If `data_paths` is not provided, then the directory portion of
    `data.filename` is used as the directory to search for data files.

    Unless `keep` is True every data file found and combined is then deleted
    from disk. If a file cannot be read, a warning will be issued, and the
    file will not be deleted.

    If `strict` is true, and no files are found to combine, an error is
    raised.

    `message` is a function to use for printing messages to the user.

    """
    files_to_combine = combinable_files(data.base_filename(), data_paths)

    if strict and not files_to_combine:
        raise NoDataError("No data to combine")

    if aliases is None:
        map_path = None
    else:
        map_path = functools.cache(aliases.map)

    file_hashes = set()
    combined_any = False

    for f in files_to_combine:
        if f == data.data_filename():
            # Sometimes we are combining into a file which is one of the
            # parallel files. Skip that file.
            if data._debug.should("dataio"):
                data._debug.write(f"Skipping combining ourself: {f!r}")
            continue

        try:
            rel_file_name = os.path.relpath(f)
        except ValueError:
            # ValueError can be raised under Windows when os.getcwd() returns a
            # folder from a different drive than the drive of f, in which case
            # we print the original value of f instead of its relative path.
            rel_file_name = f

        with open(f, "rb") as fobj:
            hasher = hashlib.new("sha3_256", usedforsecurity=False)
            hasher.update(fobj.read())
            sha = hasher.digest()
            combine_this_one = sha not in file_hashes

        delete_this_one = not keep
        if combine_this_one:
            if data._debug.should("dataio"):
                data._debug.write(f"Combining data file {f!r}")
            file_hashes.add(sha)
            try:
                new_data = CoverageData(f, debug=data._debug)
                new_data.read()
            except CoverageException as exc:
                if data._warn:
                    # The CoverageException has the file name in it, so just
                    # use the message as the warning.
                    data._warn(str(exc))
                if message:
                    message(f"Couldn't combine data file {rel_file_name}: {exc}")
                delete_this_one = False
            else:
                data.update(new_data, map_path=map_path)
                combined_any = True
                if message:
                    message(f"Combined data file {rel_file_name}")
        else:
            if message:
                message(f"Skipping duplicate data {rel_file_name}")

        if delete_this_one:
            if data._debug.should("dataio"):
                data._debug.write(f"Deleting data file {f!r}")
            file_be_gone(f)

    if strict and not combined_any:
        raise NoDataError("No usable data files")


def debug_data_file(filename: str) -> None:
    """Implementation of 'coverage debug data'."""
    data = CoverageData(filename)
    filename = data.data_filename()
    print(f"path: {filename}")
    if not os.path.exists(filename):
        print("No data collected: file doesn't exist")
        return
    data.read()
    print(f"has_arcs: {data.has_arcs()!r}")
    summary = line_counts(data, fullpath=True)
    filenames = human_sorted(summary.keys())
    nfiles = len(filenames)
    print(f"{nfiles} file{plural(nfiles)}:")
    for f in filenames:
        line = f"{f}: {summary[f]} line{plural(summary[f])}"
        plugin = data.file_tracer(f)
        if plugin:
            line += f" [{plugin}]"
        print(line)


def sorted_lines(data: CoverageData, filename: str) -> list[int]:
    """Get the sorted lines for a file, for tests."""
    lines = data.lines(filename)
    return sorted(lines or [])
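Editor's note: tying the helpers above together, a sketch that combines parallel data files and summarizes them. It assumes `.coverage.*` files from parallel runs already exist in the current directory; with none present it simply combines nothing:

    import coverage
    from coverage.data import combine_parallel_data, line_counts

    data = coverage.CoverageData(".coverage")
    combine_parallel_data(data, message=print)   # reads .coverage.*, deletes them afterwards
    data.write()
    print(line_counts(data))                     # {basename: count of executed lines}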
664
venv/lib/python3.12/site-packages/coverage/debug.py
Normal file
@@ -0,0 +1,664 @@
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt

"""Control of and utilities for debugging."""

from __future__ import annotations

import _thread
import atexit
import contextlib
import datetime
import functools
import inspect
import itertools
import os
import pprint
import re
import reprlib
import sys
import traceback
import types
from collections.abc import Iterable, Iterator, Mapping
from typing import IO, Any, Callable, Final, overload

from coverage.misc import human_sorted_items, isolate_module
from coverage.types import AnyCallable, TWritable

os = isolate_module(os)


# When debugging, it can be helpful to force some options, especially when
# debugging the configuration mechanisms you usually use to control debugging!
# This is a list of forced debugging options.
FORCED_DEBUG: list[str] = []
FORCED_DEBUG_FILE = None


class DebugControl:
    """Control and output for debugging."""

    show_repr_attr = False  # For auto_repr

    def __init__(
        self,
        options: Iterable[str],
        output: IO[str] | None,
        file_name: str | None = None,
    ) -> None:
        """Configure the options and output file for debugging."""
        self.options = list(options) + FORCED_DEBUG
        self.suppress_callers = False

        filters = []
        if self.should("process"):
            filters.append(CwdTracker().filter)
            filters.append(ProcessTracker().filter)
        if self.should("pytest"):
            filters.append(PytestTracker().filter)
        if self.should("pid"):
            filters.append(add_pid_and_tid)

        self.output = DebugOutputFile.get_one(
            output,
            file_name=file_name,
            filters=filters,
        )
        self.raw_output = self.output.outfile

    def __repr__(self) -> str:
        return f"<DebugControl options={self.options!r} raw_output={self.raw_output!r}>"

    def should(self, option: str) -> bool:
        """Decide whether to output debug information in category `option`."""
        if option == "callers" and self.suppress_callers:
            return False
        return option in self.options

    @contextlib.contextmanager
    def without_callers(self) -> Iterator[None]:
        """A context manager to prevent call stacks from being logged."""
        old = self.suppress_callers
        self.suppress_callers = True
        try:
            yield
        finally:
            self.suppress_callers = old

    def write(self, msg: str, *, exc: BaseException | None = None) -> None:
        """Write a line of debug output.

        `msg` is the line to write. A newline will be appended.

        If `exc` is provided, a stack trace of the exception will be written
        after the message.

        """
        self.output.write(msg + "\n")
        if exc is not None:
            self.output.write("".join(traceback.format_exception(None, exc, exc.__traceback__)))
        if self.should("self"):
            caller_self = inspect.stack()[1][0].f_locals.get("self")
            if caller_self is not None:
                self.output.write(f"self: {caller_self!r}\n")
        if self.should("callers"):
            dump_stack_frames(out=self.output, skip=1)
        self.output.flush()


class NoDebugging(DebugControl):
    """A replacement for DebugControl that will never try to do anything."""

    def __init__(self) -> None:
        # pylint: disable=super-init-not-called
        pass

    def should(self, option: str) -> bool:
        """Should we write debug messages? Never."""
        return False

    @contextlib.contextmanager
    def without_callers(self) -> Iterator[None]:
        """A dummy context manager to satisfy the api."""
        yield  # pragma: never called

    def write(self, msg: str, *, exc: BaseException | None = None) -> None:
        """This will never be called."""
        raise AssertionError("NoDebugging.write should never be called.")


class DevNullDebug(NoDebugging):
    """A DebugControl that won't write anywhere."""

    def write(self, msg: str, *, exc: BaseException | None = None) -> None:
        pass


def info_header(label: str) -> str:
    """Make a nice header string."""
    return "--{:-<60s}".format(" " + label + " ")


def info_formatter(info: Iterable[tuple[str, Any]]) -> Iterator[str]:
    """Produce a sequence of formatted lines from info.

    `info` is a sequence of pairs (label, data). The produced lines are
    nicely formatted, ready to print.

    """
    info = list(info)
    if not info:
        return
    LABEL_LEN = 30
    assert all(len(l) < LABEL_LEN for l, _ in info)
    for label, data in info:
        if data == []:
            data = "-none-"
        prefix = f"{label:>{LABEL_LEN}}: "
        if isinstance(data, tuple) and len(str(data)) < 30:
            yield f"{prefix}{data}"
        elif isinstance(data, (list, set, tuple)):
            for e in data:
                yield f"{prefix}{e}"
                prefix = " " * (LABEL_LEN + 2)
        else:
            yield f"{prefix}{data}"


def write_formatted_info(
    write: Callable[[str], None],
    header: str,
    info: Iterable[tuple[str, Any]],
) -> None:
    """Write a sequence of (label,data) pairs nicely.

    `write` is a function write(str) that accepts each line of output.
    `header` is a string to start the section. `info` is a sequence of
    (label, data) pairs, where label is a str, and data can be a single
    value, or a list/set/tuple.

    """
    write(info_header(header))
    for line in info_formatter(info):
        write(f" {line}")


def exc_one_line(exc: Exception) -> str:
    """Get a one-line summary of an exception, including class name and message."""
    lines = traceback.format_exception_only(type(exc), exc)
    return "|".join(l.rstrip() for l in lines)


_FILENAME_REGEXES: list[tuple[str, str]] = [
    (r".*[/\\]pytest-of-.*[/\\]pytest-\d+([/\\]popen-gw\d+)?", "tmp:"),
]
_FILENAME_SUBS: list[tuple[str, str]] = []


@overload
def short_filename(filename: str) -> str:
    pass


@overload
def short_filename(filename: None) -> None:
    pass


def short_filename(filename: str | None) -> str | None:
    """Shorten a file name. Directories are replaced by prefixes like 'syspath:'"""
    if not _FILENAME_SUBS:
        for pathdir in sys.path:
            _FILENAME_SUBS.append((pathdir, "syspath:"))
        import coverage

        _FILENAME_SUBS.append((os.path.dirname(coverage.__file__), "cov:"))
        _FILENAME_SUBS.sort(key=(lambda pair: len(pair[0])), reverse=True)
    if filename is not None:
        for pat, sub in _FILENAME_REGEXES:
            filename = re.sub(pat, sub, filename)
        for before, after in _FILENAME_SUBS:
            filename = filename.replace(before, after)
    return filename


def file_summary(filename: str) -> str:
    """A one-line summary of a file, for log messages."""
    try:
        s = os.stat(filename)
    except FileNotFoundError:
        summary = "does not exist"
    except Exception as e:
        summary = f"error: {e}"
    else:
        mod = datetime.datetime.fromtimestamp(s.st_mtime)
        summary = f"{s.st_size} bytes, modified {mod}"
    return summary


def short_stack(
    skip: int = 0,
    full: bool = False,
    frame_ids: bool = False,
    short_filenames: bool = False,
) -> str:
    """Return a string summarizing the call stack.

    The string is multi-line, with one line per stack frame. Each line shows
    the function name, the file name, and the line number:

        ...
        start_import_stop : /Users/ned/coverage/trunk/tests/coveragetest.py:95
        import_local_file : /Users/ned/coverage/trunk/tests/coveragetest.py:81
        import_local_file : /Users/ned/coverage/trunk/coverage/backward.py:159
        ...

    `skip` is the number of closest immediate frames to skip, so that debugging
    functions can call this and not be included in the result.

    If `full` is true, then include all frames. Otherwise, initial "boring"
    frames (ones in site-packages and earlier) are omitted.

    `short_filenames` will shorten filenames using `short_filename`, to reduce
    the amount of repetitive noise in stack traces.

    """
    # Regexes in initial frames that we don't care about.
    # fmt: off
    BORING_PRELUDE = [
        "<string>",             # pytest-xdist has string execution.
        r"\bigor.py$",          # Our test runner.
        r"\bsite-packages\b",   # pytest etc getting to our tests.
    ]
    # fmt: on

    stack: Iterable[inspect.FrameInfo] = inspect.stack()[:skip:-1]
    if not full:
        for pat in BORING_PRELUDE:
            stack = itertools.dropwhile(
                (lambda fi, pat=pat: re.search(pat, fi.filename)),  # type: ignore[misc]
                stack,
            )
    lines = []
    for frame_info in stack:
        line = f"{frame_info.function:>30s} : "
        if frame_ids:
            line += f"{id(frame_info.frame):#x} "
        filename = frame_info.filename
        if short_filenames:
            filename = short_filename(filename)
        line += f"{filename}:{frame_info.lineno}"
        lines.append(line)
    return "\n".join(lines)


def dump_stack_frames(out: TWritable, skip: int = 0) -> None:
    """Print a summary of the stack to `out`."""
    out.write(short_stack(skip=skip + 1) + "\n")


def clipped_repr(text: str, numchars: int = 50) -> str:
    """`repr(text)`, but limited to `numchars`."""
    r = reprlib.Repr()
    r.maxstring = numchars
    return r.repr(text)


def short_id(id64: int) -> int:
    """Given a 64-bit id, make a shorter 16-bit one."""
    id16 = 0
    for offset in range(0, 64, 16):
        id16 ^= id64 >> offset
    return id16 & 0xFFFF


def add_pid_and_tid(text: str) -> str:
    """A filter to add pid and tid to debug messages."""
    # Thread ids are useful, but too long. Make a shorter one.
    tid = f"{short_id(_thread.get_ident()):04x}"
    text = f"{os.getpid():5d}.{tid}: {text}"
    return text


AUTO_REPR_IGNORE = {"$coverage.object_id"}


def auto_repr(self: Any) -> str:
    """A function implementing an automatic __repr__ for debugging."""
    show_attrs = (
        (k, v)
        for k, v in self.__dict__.items()
        if getattr(v, "show_repr_attr", True)
        and not inspect.ismethod(v)
        and k not in AUTO_REPR_IGNORE
    )
    return "<{klass} @{id:#x}{attrs}>".format(
        klass=self.__class__.__name__,
        id=id(self),
        attrs="".join(f" {k}={v!r}" for k, v in show_attrs),
    )


def simplify(v: Any) -> Any:  # pragma: debugging
    """Turn things which are nearly dict/list/etc into dict/list/etc."""
    if isinstance(v, dict):
        return {k: simplify(vv) for k, vv in v.items()}
    elif isinstance(v, (list, tuple)):
        return type(v)(simplify(vv) for vv in v)
    elif hasattr(v, "__dict__"):
        return simplify({"." + k: v for k, v in v.__dict__.items()})
    else:
        return v


def pp(v: Any) -> None:  # pragma: debugging
    """Debug helper to pretty-print data, including SimpleNamespace objects."""
    # Might not be needed in 3.9+
    pprint.pprint(simplify(v))


def filter_text(text: str, filters: Iterable[Callable[[str], str]]) -> str:
    """Run `text` through a series of filters.

    `filters` is a list of functions. Each takes a string and returns a
    string. Each is run in turn. After each filter, the text is split into
    lines, and each line is passed through the next filter.

    Returns: the final string that results after all of the filters have
    run.

    """
    clean_text = text.rstrip()
    ending = text[len(clean_text) :]
    text = clean_text
    for filter_fn in filters:
        lines = []
        for line in text.splitlines():
            lines.extend(filter_fn(line).splitlines())
        text = "\n".join(lines)
    return text + ending


class CwdTracker:
    """A class to add cwd info to debug messages."""

    def __init__(self) -> None:
        self.cwd: str | None = None

    def filter(self, text: str) -> str:
        """Add a cwd message for each new cwd."""
        cwd = os.getcwd()
        if cwd != self.cwd:
            text = f"cwd is now {cwd!r}\n{text}"
            self.cwd = cwd
        return text


class ProcessTracker:
    """Track process creation for debug logging."""

    def __init__(self) -> None:
        self.pid: int = os.getpid()
        self.did_welcome = False

    def filter(self, text: str) -> str:
        """Add a message about how new processes came to be."""
        welcome = ""
        pid = os.getpid()
        if self.pid != pid:
            welcome = f"New process: forked {self.pid} -> {pid}\n"
            self.pid = pid
        elif not self.did_welcome:
            argv = getattr(sys, "argv", None)
            welcome = (
                f"New process: {pid=}, executable: {sys.executable!r}\n"
                + f"New process: cmd: {argv!r}\n"
                + f"New process parent pid: {os.getppid()!r}\n"
            )

        if welcome:
            self.did_welcome = True
            return welcome + text
        else:
            return text


class PytestTracker:
    """Track the current pytest test name to add to debug messages."""

    def __init__(self) -> None:
        self.test_name: str | None = None

    def filter(self, text: str) -> str:
        """Add a message when the pytest test changes."""
        test_name = os.getenv("PYTEST_CURRENT_TEST")
        if test_name != self.test_name:
            text = f"Pytest context: {test_name}\n{text}"
            self.test_name = test_name
        return text


class DebugOutputFile:
    """A file-like object that includes pid and cwd information."""

    def __init__(
        self,
        outfile: IO[str] | None,
        filters: Iterable[Callable[[str], str]],
    ):
        self.outfile = outfile
        self.filters = list(filters)
        self.pid = os.getpid()

    @classmethod
    def get_one(
        cls,
        fileobj: IO[str] | None = None,
        file_name: str | None = None,
        filters: Iterable[Callable[[str], str]] = (),
        interim: bool = False,
    ) -> DebugOutputFile:
        """Get a DebugOutputFile.

        If `fileobj` is provided, then a new DebugOutputFile is made with it.

        If `fileobj` isn't provided, then a file is chosen (`file_name` if
        provided, or COVERAGE_DEBUG_FILE, or stderr), and a process-wide
        singleton DebugOutputFile is made.

        `filters` are the text filters to apply to the stream to annotate with
        pids, etc.

        If `interim` is true, then a future `get_one` can replace this one.

        """
        if fileobj is not None:
            # Make DebugOutputFile around the fileobj passed.
            return cls(fileobj, filters)

        the_one, is_interim = cls._get_singleton_data()
        if the_one is None or is_interim:
            if file_name is not None:
                fileobj = open(file_name, "a", encoding="utf-8")
            else:
                # $set_env.py: COVERAGE_DEBUG_FILE - Where to write debug output
                file_name = os.getenv("COVERAGE_DEBUG_FILE", FORCED_DEBUG_FILE)
                if file_name in ["stdout", "stderr"]:
                    fileobj = getattr(sys, file_name)
                elif file_name:
                    fileobj = open(file_name, "a", encoding="utf-8")
                    atexit.register(fileobj.close)
                else:
                    fileobj = sys.stderr
            the_one = cls(fileobj, filters)
            cls._set_singleton_data(the_one, interim)

        if not (the_one.filters):
            the_one.filters = list(filters)
        return the_one

    # Because of the way igor.py deletes and re-imports modules,
    # this class can be defined more than once. But we really want
    # a process-wide singleton. So stash it in sys.modules instead of
    # on a class attribute. Yes, this is aggressively gross.

    SYS_MOD_NAME: Final[str] = "$coverage.debug.DebugOutputFile.the_one"
    SINGLETON_ATTR: Final[str] = "the_one_and_is_interim"

    @classmethod
    def _set_singleton_data(cls, the_one: DebugOutputFile, interim: bool) -> None:
        """Set the one DebugOutputFile to rule them all."""
        singleton_module = types.ModuleType(cls.SYS_MOD_NAME)
        setattr(singleton_module, cls.SINGLETON_ATTR, (the_one, interim))
        sys.modules[cls.SYS_MOD_NAME] = singleton_module

    @classmethod
    def _get_singleton_data(cls) -> tuple[DebugOutputFile | None, bool]:
        """Get the one DebugOutputFile."""
        singleton_module = sys.modules.get(cls.SYS_MOD_NAME)
        return getattr(singleton_module, cls.SINGLETON_ATTR, (None, True))

    @classmethod
    def _del_singleton_data(cls) -> None:
        """Delete the one DebugOutputFile, just for tests to use."""
        if cls.SYS_MOD_NAME in sys.modules:
            del sys.modules[cls.SYS_MOD_NAME]

    def write(self, text: str) -> None:
        """Just like file.write, but filter through all our filters."""
        assert self.outfile is not None
        if not self.outfile.closed:
            self.outfile.write(filter_text(text, self.filters))
            self.outfile.flush()

    def flush(self) -> None:
        """Flush our file."""
        assert self.outfile is not None
        if not self.outfile.closed:
            self.outfile.flush()


def log(msg: str, stack: bool = False) -> None:  # pragma: debugging
    """Write a log message as forcefully as possible."""
    out = DebugOutputFile.get_one(interim=True)
    out.write(msg + "\n")
    if stack:
        dump_stack_frames(out=out, skip=1)


def decorate_methods(
    decorator: Callable[..., Any],
    butnot: Iterable[str] = (),
    private: bool = False,
) -> Callable[..., Any]:  # pragma: debugging
    """A class decorator to apply a decorator to methods."""

    def _decorator(cls):  # type: ignore[no-untyped-def]
        for name, meth in inspect.getmembers(cls, inspect.isroutine):
            if name not in cls.__dict__:
                continue
            if name != "__init__":
                if not private and name.startswith("_"):
                    continue
            if name in butnot:
                continue
            setattr(cls, name, decorator(meth))
        return cls

    return _decorator


def break_in_pudb(func: AnyCallable) -> AnyCallable:  # pragma: debugging
    """A function decorator to stop in the debugger for each call."""

    @functools.wraps(func)
    def _wrapper(*args: Any, **kwargs: Any) -> Any:
        import pudb

        sys.stdout = sys.__stdout__
        pudb.set_trace()
        return func(*args, **kwargs)

    return _wrapper


OBJ_IDS = itertools.count()
CALLS = itertools.count()
OBJ_ID_ATTR = "$coverage.object_id"


def show_calls(
    show_args: bool = True,
    show_stack: bool = False,
    show_return: bool = False,
) -> Callable[..., Any]:  # pragma: debugging
    """A method decorator to debug-log each call to the function."""

    def _decorator(func: AnyCallable) -> AnyCallable:
        @functools.wraps(func)
        def _wrapper(self: Any, *args: Any, **kwargs: Any) -> Any:
            oid = getattr(self, OBJ_ID_ATTR, None)
            if oid is None:
                oid = f"{os.getpid():08d} {next(OBJ_IDS):04d}"
                setattr(self, OBJ_ID_ATTR, oid)
            extra = ""
            if show_args:
                eargs = ", ".join(map(repr, args))
                ekwargs = ", ".join("{}={!r}".format(*item) for item in kwargs.items())
                extra += "("
                extra += eargs
                if eargs and ekwargs:
                    extra += ", "
                extra += ekwargs
                extra += ")"
            if show_stack:
                extra += " @ "
                extra += "; ".join(short_stack(short_filenames=True).splitlines())
            callid = next(CALLS)
            msg = f"{oid} {callid:04d} {func.__name__}{extra}\n"
            DebugOutputFile.get_one(interim=True).write(msg)
            ret = func(self, *args, **kwargs)
            if show_return:
                msg = f"{oid} {callid:04d} {func.__name__} return {ret!r}\n"
                DebugOutputFile.get_one(interim=True).write(msg)
            return ret

        return _wrapper

    return _decorator


def relevant_environment_display(env: Mapping[str, str]) -> list[tuple[str, str]]:
    """Filter environment variables for a debug display.

    Select variables to display (with COV or PY in the name, or HOME, TEMP, or
    TMP), and also cloak sensitive values with asterisks.

    Arguments:
        env: a dict of environment variable names and values.

    Returns:
        A list of pairs (name, value) to show.

    """
    SLUGS = {"COV", "PY"}
    INCLUDE = {"HOME", "TEMP", "TMP"}
    CLOAK = {"API", "TOKEN", "KEY", "SECRET", "PASS", "SIGNATURE"}
    TRUNCATE = {"COVERAGE_PROCESS_CONFIG"}
    TRUNCATE_LEN = 60

    to_show = []
    for name, val in env.items():
        show = False
        if name in INCLUDE:
            show = True
        elif any(slug in name for slug in SLUGS):
            show = True
        if show:
            if any(slug in name for slug in CLOAK):
                val = re.sub(r"\w", "*", val)
            if name in TRUNCATE:
                if len(val) > TRUNCATE_LEN:
                    val = val[: TRUNCATE_LEN - 3] + "..."
            to_show.append((name, val))
    return human_sorted_items(to_show)
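Editor's note: the filter pipeline above composes line by line, so a multi-line message gets annotated on every line. A quick demonstration using only functions defined in this file:

    from coverage.debug import add_pid_and_tid, filter_text, short_id

    # short_id folds a 64-bit id into 16 bits by XOR-ing its four 16-bit chunks.
    print(f"{short_id(0x1234_5678_9ABC_DEF0):04x}")

    # Each filter runs per line, so both lines get the "  pid.tid: " prefix.
    print(filter_text("first line\nsecond line\n", [add_pid_and_tid]))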
59
venv/lib/python3.12/site-packages/coverage/disposition.py
Normal file
@@ -0,0 +1,59 @@
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt

"""Simple value objects for tracking what to do with files."""

from __future__ import annotations

from typing import TYPE_CHECKING

from coverage.types import TFileDisposition

if TYPE_CHECKING:
    from coverage.plugin import FileTracer


class FileDisposition:
    """A simple value type for recording what to do with a file."""

    original_filename: str
    canonical_filename: str
    source_filename: str | None
    trace: bool
    reason: str
    file_tracer: FileTracer | None
    has_dynamic_filename: bool

    def __repr__(self) -> str:
        return f"<FileDisposition {self.canonical_filename!r}: trace={self.trace}>"


# FileDisposition "methods": FileDisposition is a pure value object, so it can
# be implemented in either C or Python. Acting on them is done with these
# functions.


def disposition_init(cls: type[TFileDisposition], original_filename: str) -> TFileDisposition:
    """Construct and initialize a new FileDisposition object."""
    disp = cls()
    disp.original_filename = original_filename
    disp.canonical_filename = original_filename
    disp.source_filename = None
    disp.trace = False
    disp.reason = ""
    disp.file_tracer = None
    disp.has_dynamic_filename = False
    return disp


def disposition_debug_msg(disp: TFileDisposition) -> str:
    """Make a nice debug message of what the FileDisposition is doing."""
    if disp.trace:
        msg = f"Tracing {disp.original_filename!r}"
        if disp.original_filename != disp.source_filename:
            msg += f" as {disp.source_filename!r}"
        if disp.file_tracer:
            msg += f": will be traced by {disp.file_tracer!r}"
    else:
        msg = f"Not tracing {disp.original_filename!r}: {disp.reason}"
    return msg
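Editor's note: a small sketch of the value-object functions above — create a disposition, mark it traced, and format the debug message. The file name is illustrative:

    from coverage.disposition import (
        FileDisposition,
        disposition_debug_msg,
        disposition_init,
    )

    disp = disposition_init(FileDisposition, "example.py")
    print(disposition_debug_msg(disp))   # "Not tracing 'example.py': "

    disp.trace = True
    disp.source_filename = "example.py"  # same name, so no " as ..." suffix
    print(disposition_debug_msg(disp))   # "Tracing 'example.py'"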
198
venv/lib/python3.12/site-packages/coverage/env.py
Normal file
@@ -0,0 +1,198 @@
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt

"""Determine facts about the environment."""

from __future__ import annotations

import os
import platform
import sys
from collections.abc import Iterable
from typing import Any, Final

# debug_info() at the bottom wants to show all the globals, but not imports.
# Grab the global names here to know which names to not show. Nothing defined
# above this line will be in the output.
_UNINTERESTING_GLOBALS = list(globals())
# These names also shouldn't be shown.
_UNINTERESTING_GLOBALS += ["PYBEHAVIOR", "debug_info"]

# Operating systems.
WINDOWS = sys.platform == "win32"
LINUX = sys.platform.startswith("linux")
MACOS = sys.platform == "darwin"

# Python implementations.
CPYTHON = (platform.python_implementation() == "CPython")  # fmt: skip
PYPY = (platform.python_implementation() == "PyPy")  # fmt: skip

# Python versions. We amend version_info with one more value, a zero if an
# official version, or 1 if built from source beyond an official version.
# Only use sys.version_info directly where tools like mypy need it to understand
# version-specific code, otherwise use PYVERSION.
PYVERSION = sys.version_info + (int(platform.python_version()[-1] == "+"),)

if PYPY:
    # Minimum now is 7.3.16
    PYPYVERSION = tuple(sys.pypy_version_info)  # type: ignore[attr-defined]
else:
    PYPYVERSION = (0,)

# Do we have a GIL?
GIL = getattr(sys, "_is_gil_enabled", lambda: True)()

# Do we ship compiled coveragepy wheels for this version?
SHIPPING_WHEELS = CPYTHON and PYVERSION[:2] <= (3, 13)

# Should we default to sys.monitoring?
SYSMON_DEFAULT = CPYTHON and PYVERSION >= (3, 14)


# Python behavior.
class PYBEHAVIOR:
    """Flags indicating this Python's behavior."""

    # Does Python conform to PEP626, Precise line numbers for debugging and other tools.
    # https://www.python.org/dev/peps/pep-0626
    pep626 = (PYVERSION > (3, 10, 0, "alpha", 4))  # fmt: skip

    # Is "if __debug__" optimized away?
    optimize_if_debug = not pep626

    # Is "if not __debug__" optimized away? The exact details have changed
    # across versions.
    optimize_if_not_debug = 1 if pep626 else 2

    # 3.7 changed how functions with only docstrings are numbered.
    docstring_only_function = (not PYPY) and (PYVERSION <= (3, 10))

    # Lines after break/continue/return/raise are no longer compiled into the
    # bytecode. They used to be marked as missing, now they aren't executable.
    omit_after_jump = pep626 or PYPY

    # PyPy has always omitted statements after return.
    omit_after_return = omit_after_jump or PYPY

    # Optimize away unreachable try-else clauses.
    optimize_unreachable_try_else = pep626

    # Modules used to have firstlineno equal to the line number of the first
    # real line of code. Now they always start at 1.
    module_firstline_1 = pep626

    # Are "if 0:" lines (and similar) kept in the compiled code?
    keep_constant_test = pep626

    # When leaving a with-block, do we visit the with-line again for the exit?
    # For example, wwith.py:
    #
    #     with open("/tmp/test", "w") as f1:
    #         a = 2
    #         with open("/tmp/test2", "w") as f3:
    #             print(4)
    #
    #   % python3.9 -m trace -t wwith.py | grep wwith
    #    --- modulename: wwith, funcname: <module>
    #    wwith.py(1): with open("/tmp/test", "w") as f1:
    #    wwith.py(2):     a = 2
    #    wwith.py(3):     with open("/tmp/test2", "w") as f3:
    #    wwith.py(4):         print(4)
    #
    #   % python3.10 -m trace -t wwith.py | grep wwith
    #    --- modulename: wwith, funcname: <module>
    #    wwith.py(1): with open("/tmp/test", "w") as f1:
    #    wwith.py(2):     a = 2
    #    wwith.py(3):     with open("/tmp/test2", "w") as f3:
    #    wwith.py(4):         print(4)
    #    wwith.py(3):     with open("/tmp/test2", "w") as f3:
    #    wwith.py(1): with open("/tmp/test", "w") as f1:
    #
    exit_through_with = (PYVERSION >= (3, 10, 0, "beta"))  # fmt: skip

    # When leaving a with-block, do we visit the with-line exactly,
    # or the context managers in inner-out order?
    #
    # mwith.py:
    #     with (
    #         open("/tmp/one", "w") as f2,
    #         open("/tmp/two", "w") as f3,
    #         open("/tmp/three", "w") as f4,
    #     ):
    #         print("hello 6")
    #
    #   % python3.11 -m trace -t mwith.py | grep mwith
    #    --- modulename: mwith, funcname: <module>
    #    mwith.py(2):     open("/tmp/one", "w") as f2,
    #    mwith.py(1): with (
    #    mwith.py(2):     open("/tmp/one", "w") as f2,
    #    mwith.py(3):     open("/tmp/two", "w") as f3,
    #    mwith.py(1): with (
    #    mwith.py(3):     open("/tmp/two", "w") as f3,
    #    mwith.py(4):     open("/tmp/three", "w") as f4,
    #    mwith.py(1): with (
    #    mwith.py(4):     open("/tmp/three", "w") as f4,
    #    mwith.py(6):     print("hello 6")
    #    mwith.py(1): with (
    #
    #   % python3.12 -m trace -t mwith.py | grep mwith
    #    --- modulename: mwith, funcname: <module>
    #    mwith.py(2):     open("/tmp/one", "w") as f2,
    #    mwith.py(3):     open("/tmp/two", "w") as f3,
    #    mwith.py(4):     open("/tmp/three", "w") as f4,
    #    mwith.py(6):     print("hello 6")
    #    mwith.py(4):     open("/tmp/three", "w") as f4,
    #    mwith.py(3):     open("/tmp/two", "w") as f3,
    #    mwith.py(2):     open("/tmp/one", "w") as f2,

    exit_with_through_ctxmgr = (PYVERSION >= (3, 12, 6))  # fmt: skip

    # Match-case construct.
    match_case = (PYVERSION >= (3, 10))  # fmt: skip

    # Some words are keywords in some places, identifiers in other places.
    soft_keywords = (PYVERSION >= (3, 10))  # fmt: skip

    # f-strings are parsed as code, pep 701
    fstring_syntax = (PYVERSION >= (3, 12))  # fmt: skip

    # PEP669 Low Impact Monitoring: https://peps.python.org/pep-0669/
    pep669: Final[bool] = bool(getattr(sys, "monitoring", None))

    # Where does frame.f_lasti point when yielding from a generator?
    # It used to point at the YIELD, in 3.13 it points at the RESUME,
    # then it went back to the YIELD.
    # https://github.com/python/cpython/issues/113728
    lasti_is_yield = (PYVERSION[:2] != (3, 13))  # fmt: skip

    # PEP649 and PEP749: Deferred annotations
    deferred_annotations = (PYVERSION >= (3, 14))  # fmt: skip

    # Does sys.monitoring support BRANCH_RIGHT and BRANCH_LEFT? The names
    # were added in early 3.14 alphas, but didn't work entirely correctly until
    # after 3.14.0a5.
    branch_right_left = pep669 and (PYVERSION > (3, 14, 0, "alpha", 5, 0))


# Coverage.py specifics, about testing scenarios. See tests/testenv.py also.

# Are we coverage-measuring ourselves?
METACOV = os.getenv("COVERAGE_COVERAGE") is not None

# Are we running our test suite?
# Even when running tests, you can use COVERAGE_TESTING=0 to disable the
# test-specific behavior like AST checking.
TESTING = os.getenv("COVERAGE_TESTING") == "True"


def debug_info() -> Iterable[tuple[str, Any]]:
    """Return a list of (name, value) pairs for printing debug information."""
    info = [
        (name, value)
        for name, value in globals().items()
        if not name.startswith("_") and name not in _UNINTERESTING_GLOBALS
    ]
    info += [
        (name, value) for name, value in PYBEHAVIOR.__dict__.items() if not name.startswith("_")
    ]
    return sorted(info)
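Editor's note: the module is designed for quick feature tests like these; `debug_info()` supplies the same facts in bulk for "coverage debug sys"-style output. A sketch:

    from coverage import env

    if env.PYBEHAVIOR.pep669:
        print("sys.monitoring is available; the 'sysmon' core is possible")
    print(f"CPython: {env.CPYTHON}, version tuple: {env.PYVERSION}")

    for name, value in env.debug_info():
        print(f"{name} = {value}")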
80
venv/lib/python3.12/site-packages/coverage/exceptions.py
Normal file
@@ -0,0 +1,80 @@
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt

"""Exceptions coverage.py can raise."""

from __future__ import annotations

from typing import Any


class _BaseCoverageException(Exception):
    """The base-base of all Coverage exceptions."""

    def __init__(self, *args: Any, slug: str | None = None) -> None:
        super().__init__(*args)
        self.slug = slug


class CoverageException(_BaseCoverageException):
    """The base class of all exceptions raised by Coverage.py."""

    pass


class ConfigError(_BaseCoverageException):
    """A problem with a config file, or a value in one."""

    pass


class DataError(CoverageException):
    """An error in using a data file."""

    pass


class NoDataError(CoverageException):
    """We didn't have data to work with."""

    pass


class NoSource(CoverageException):
    """We couldn't find the source for a module."""

    pass


class NoCode(NoSource):
    """We couldn't find any code at all."""

    pass


class NotPython(CoverageException):
    """A source file turned out not to be parsable Python."""

    pass


class PluginError(CoverageException):
    """A plugin misbehaved."""

    pass


class _ExceptionDuringRun(CoverageException):
    """An exception happened while running customer code.

    Construct it with three arguments, the values from `sys.exc_info`.

    """

    pass


class CoverageWarning(Warning):
    """A warning from Coverage.py."""

    pass
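Editor's note: a sketch of how the hierarchy above is meant to be caught — `CoverageException` is the public catch-all, with narrower classes for specific failures. The data file name is illustrative:

    import coverage
    from coverage.exceptions import CoverageException, NoDataError

    cov = coverage.Coverage(data_file=".coverage.missing")
    try:
        cov.load()
        cov.report()             # raises NoDataError when nothing was measured
    except NoDataError as exc:
        print(f"no data: {exc}")
    except CoverageException as exc:
        print(f"coverage failed: {exc}")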
329
venv/lib/python3.12/site-packages/coverage/execfile.py
Normal file
329
venv/lib/python3.12/site-packages/coverage/execfile.py
Normal file
@@ -0,0 +1,329 @@
|
||||
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
|
||||
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
|
||||
|
||||
"""Execute files of Python code."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import importlib.machinery
|
||||
import importlib.util
|
||||
import inspect
|
||||
import marshal
|
||||
import os
|
||||
import struct
|
||||
import sys
|
||||
from importlib.machinery import ModuleSpec
|
||||
from types import CodeType, ModuleType
|
||||
from typing import Any
|
||||
|
||||
from coverage.exceptions import CoverageException, NoCode, NoSource, _ExceptionDuringRun
|
||||
from coverage.files import canonical_filename, python_reported_file
|
||||
from coverage.misc import isolate_module
|
||||
from coverage.python import get_python_source
|
||||
|
||||
os = isolate_module(os)
|
||||
|
||||
|
||||
PYC_MAGIC_NUMBER = importlib.util.MAGIC_NUMBER
|
||||
|
||||
|
||||
class DummyLoader:
|
||||
"""A shim for the pep302 __loader__, emulating pkgutil.ImpLoader.
|
||||
|
||||
Currently only implements the .fullname attribute
|
||||
"""
|
||||
|
||||
def __init__(self, fullname: str, *_args: Any) -> None:
|
||||
self.fullname = fullname
|
||||
|
||||
|
||||
def find_module(
|
||||
modulename: str,
|
||||
) -> tuple[str | None, str, ModuleSpec]:
|
||||
"""Find the module named `modulename`.
|
||||
|
||||
Returns the file path of the module, the name of the enclosing
|
||||
package, and the spec.
|
||||
"""
|
||||
try:
|
||||
spec = importlib.util.find_spec(modulename)
|
||||
except ImportError as err:
|
||||
raise NoSource(str(err)) from err
|
||||
if not spec:
|
||||
raise NoSource(f"No module named {modulename!r}")
|
||||
pathname = spec.origin
|
||||
packagename = spec.name
|
||||
if spec.submodule_search_locations:
|
||||
mod_main = modulename + ".__main__"
|
||||
spec = importlib.util.find_spec(mod_main)
|
||||
if not spec:
|
||||
raise NoSource(
|
||||
f"No module named {mod_main}; "
|
||||
+ f"{modulename!r} is a package and cannot be directly executed",
|
||||
)
|
||||
pathname = spec.origin
|
||||
packagename = spec.name
|
||||
packagename = packagename.rpartition(".")[0]
|
||||
return pathname, packagename, spec
|
||||
|
||||
|
||||
class PyRunner:
    """Multi-stage execution of Python code.

    This is meant to emulate real Python execution as closely as possible.

    """

    def __init__(self, args: list[str], as_module: bool = False) -> None:
        self.args = args
        self.as_module = as_module

        self.arg0 = args[0]
        self.package: str | None = None
        self.modulename: str | None = None
        self.pathname: str | None = None
        self.loader: DummyLoader | None = None
        self.spec: ModuleSpec | None = None

    def prepare(self) -> None:
        """Set sys.path properly.

        This needs to happen before any importing, and without importing anything.
        """
        path0: str | None
        if getattr(sys.flags, "safe_path", False):
            # See https://docs.python.org/3/using/cmdline.html#cmdoption-P
            path0 = None
        elif self.as_module:
            path0 = os.getcwd()
        elif os.path.isdir(self.arg0):
            # Running a directory means running the __main__.py file in that
            # directory.
            path0 = self.arg0
        else:
            path0 = os.path.abspath(os.path.dirname(self.arg0))

        if os.path.isdir(sys.path[0]):
            # sys.path fakery.  If we are being run as a command, then sys.path[0]
            # is the directory of the "coverage" script.  If this is so, replace
            # sys.path[0] with the directory of the file we're running, or the
            # current directory when running modules.  If it isn't so, then we
            # don't know what's going on, and just leave it alone.
            top_file = inspect.stack()[-1][0].f_code.co_filename
            sys_path_0_abs = os.path.abspath(sys.path[0])
            top_file_dir_abs = os.path.abspath(os.path.dirname(top_file))
            sys_path_0_abs = canonical_filename(sys_path_0_abs)
            top_file_dir_abs = canonical_filename(top_file_dir_abs)
            if sys_path_0_abs != top_file_dir_abs:
                path0 = None

        else:
            # sys.path[0] is a file. Is the next entry the directory containing
            # that file?
            if sys.path[1] == os.path.dirname(sys.path[0]):
                # Can it be right to always remove that?
                del sys.path[1]

        if path0 is not None:
            sys.path[0] = python_reported_file(path0)

    def _prepare2(self) -> None:
        """Do more preparation to run Python code.

        Includes finding the module to run and adjusting sys.argv[0].
        This method is allowed to import code.

        """
        if self.as_module:
            self.modulename = self.arg0
            pathname, self.package, self.spec = find_module(self.modulename)
            if self.spec is not None:
                self.modulename = self.spec.name
            self.loader = DummyLoader(self.modulename)
            assert pathname is not None
            self.pathname = os.path.abspath(pathname)
            self.args[0] = self.arg0 = self.pathname
        elif os.path.isdir(self.arg0):
            # Running a directory means running the __main__.py file in that
            # directory.
            for ext in [".py", ".pyc", ".pyo"]:
                try_filename = os.path.join(self.arg0, f"__main__{ext}")
                # 3.8.10 changed how files are reported when running a
                # directory.
                try_filename = os.path.abspath(try_filename)
                if os.path.exists(try_filename):
                    self.arg0 = try_filename
                    break
            else:
                raise NoSource(f"Can't find '__main__' module in '{self.arg0}'")

            # Make a spec. I don't know if this is the right way to do it.
            try_filename = python_reported_file(try_filename)
            self.spec = importlib.machinery.ModuleSpec("__main__", None, origin=try_filename)
            self.spec.has_location = True
            self.package = ""
            self.loader = DummyLoader("__main__")
        else:
            self.loader = DummyLoader("__main__")

        self.arg0 = python_reported_file(self.arg0)

    def run(self) -> None:
        """Run the Python code!"""

        self._prepare2()

        # Create a module to serve as __main__
        main_mod = ModuleType("__main__")

        from_pyc = self.arg0.endswith((".pyc", ".pyo"))
        main_mod.__file__ = self.arg0
        if from_pyc:
            main_mod.__file__ = main_mod.__file__[:-1]
        if self.package is not None:
            main_mod.__package__ = self.package
        main_mod.__loader__ = self.loader  # type: ignore[assignment]
        if self.spec is not None:
            main_mod.__spec__ = self.spec

        main_mod.__builtins__ = sys.modules["builtins"]  # type: ignore[attr-defined]

        sys.modules["__main__"] = main_mod

        # Set sys.argv properly.
        sys.argv = self.args

        try:
            # Make a code object somehow.
            if from_pyc:
                code = make_code_from_pyc(self.arg0)
            else:
                code = make_code_from_py(self.arg0)
        except CoverageException:
            raise
        except Exception as exc:
            msg = f"Couldn't run '{self.arg0}' as Python code: {exc.__class__.__name__}: {exc}"
            raise CoverageException(msg) from exc

        # Execute the code object.
        # Return to the original directory in case the test code exits in
        # a non-existent directory.
        cwd = os.getcwd()
        try:
            exec(code, main_mod.__dict__)
        except SystemExit:  # pylint: disable=try-except-raise
            # The user called sys.exit().  Just pass it along to the upper
            # layers, where it will be handled.
            raise
        except Exception:
            # Something went wrong while executing the user code.
            # Get the exc_info, and pack them into an exception that we can
            # throw up to the outer loop.  We peel one layer off the traceback
            # so that the coverage.py code doesn't appear in the final printed
            # traceback.
            typ, err, tb = sys.exc_info()
            assert typ is not None
            assert err is not None
            assert tb is not None

            # PyPy3 weirdness.  If I don't access __context__, then somehow it
            # is non-None when the exception is reported at the upper layer,
            # and a nested exception is shown to the user.  This getattr fixes
            # it somehow? https://bitbucket.org/pypy/pypy/issue/1903
            getattr(err, "__context__", None)

            # Call the excepthook.
            try:
                assert err.__traceback__ is not None
                err.__traceback__ = err.__traceback__.tb_next
                sys.excepthook(typ, err, tb.tb_next)
            except SystemExit:  # pylint: disable=try-except-raise
                raise
            except Exception as exc:
                # Getting the output right in the case of excepthook
                # shenanigans is kind of involved.
                sys.stderr.write("Error in sys.excepthook:\n")
                typ2, err2, tb2 = sys.exc_info()
                assert typ2 is not None
                assert err2 is not None
                assert tb2 is not None
                err2.__suppress_context__ = True
                assert err2.__traceback__ is not None
                err2.__traceback__ = err2.__traceback__.tb_next
                sys.__excepthook__(typ2, err2, tb2.tb_next)
                sys.stderr.write("\nOriginal exception was:\n")
                raise _ExceptionDuringRun(typ, err, tb.tb_next) from exc
            else:
                sys.exit(1)
        finally:
            os.chdir(cwd)


def run_python_module(args: list[str]) -> None:
    """Run a Python module, as though with ``python -m name args...``.

    `args` is the argument array to present as sys.argv, including the first
    element naming the module being executed.

    This is a helper for tests, to encapsulate how to use PyRunner.

    """
    runner = PyRunner(args, as_module=True)
    runner.prepare()
    runner.run()


def run_python_file(args: list[str]) -> None:
    """Run a Python file as if it were the main program on the command line.

    `args` is the argument array to present as sys.argv, including the first
    element naming the file being executed.

    This is a helper for tests, to encapsulate how to use PyRunner.

    """
    runner = PyRunner(args, as_module=False)
    runner.prepare()
    runner.run()


def make_code_from_py(filename: str) -> CodeType:
    """Get source from `filename` and make a code object of it."""
    try:
        source = get_python_source(filename)
    except (OSError, NoSource) as exc:
        raise NoSource(f"No file to run: '{filename}'") from exc

    code = compile(source, filename, mode="exec", dont_inherit=True)
    return code


def make_code_from_pyc(filename: str) -> CodeType:
    """Get a code object from a .pyc file."""
    try:
        fpyc = open(filename, "rb")
    except OSError as exc:
        raise NoCode(f"No file to run: '{filename}'") from exc

    with fpyc:
        # First four bytes are a version-specific magic number.  It has to
        # match or we won't run the file.
        magic = fpyc.read(4)
        if magic != PYC_MAGIC_NUMBER:
            raise NoCode(f"Bad magic number in .pyc file: {magic!r} != {PYC_MAGIC_NUMBER!r}")

        flags = struct.unpack("<L", fpyc.read(4))[0]
        hash_based = flags & 0x01
        if hash_based:
            fpyc.read(8)  # Skip the hash.
        else:
            # Skip the junk in the header that we don't need.
            fpyc.read(4)  # Skip the moddate.
            fpyc.read(4)  # Skip the size.

        # The rest of the file is the code object we want.
        code = marshal.load(fpyc)
        assert isinstance(code, CodeType)

        return code
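The pyc header walk in make_code_from_pyc follows the PEP 552 layout: a 4-byte
magic number, a 4-byte flags word, then either an 8-byte source hash
(hash-based pycs) or a 4-byte mtime plus a 4-byte source size, followed by the
marshalled code object. A self-contained sketch of the same parsing, added
here as an annotation (the inspect_pyc name is hypothetical):

    import importlib.util
    import marshal
    import struct

    def inspect_pyc(path: str) -> None:
        """Print the header fields of a CPython .pyc file (PEP 552 layout)."""
        with open(path, "rb") as f:
            magic = f.read(4)
            if magic != importlib.util.MAGIC_NUMBER:
                raise ValueError("pyc was built by a different Python version")
            flags = struct.unpack("<L", f.read(4))[0]
            if flags & 0x01:  # hash-based pyc
                print("source hash:", f.read(8).hex())
            else:  # timestamp-based pyc
                moddate, size = struct.unpack("<LL", f.read(8))
                print("mtime:", moddate, "source size:", size)
            code = marshal.load(f)  # the rest is the marshalled code object
            print("compiled from:", code.co_filename)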
553
venv/lib/python3.12/site-packages/coverage/files.py
Normal file
@@ -0,0 +1,553 @@
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt

"""File wrangling."""

from __future__ import annotations

import hashlib
import ntpath
import os
import os.path
import posixpath
import re
import sys
from collections.abc import Iterable
from typing import Callable

from coverage import env
from coverage.exceptions import ConfigError
from coverage.misc import human_sorted, isolate_module, join_regex

os = isolate_module(os)


RELATIVE_DIR: str = ""
CANONICAL_FILENAME_CACHE: dict[str, str] = {}


def set_relative_directory() -> None:
    """Set the directory that `relative_filename` will be relative to."""
    global RELATIVE_DIR, CANONICAL_FILENAME_CACHE

    # The current directory
    abs_curdir = abs_file(os.curdir)
    if not abs_curdir.endswith(os.sep):
        # Suffix with separator only if not at the system root
        abs_curdir = abs_curdir + os.sep

    # The absolute path to our current directory.
    RELATIVE_DIR = os.path.normcase(abs_curdir)

    # Cache of results of calling the canonical_filename() method, to
    # avoid duplicating work.
    CANONICAL_FILENAME_CACHE = {}


def relative_directory() -> str:
    """Return the directory that `relative_filename` is relative to."""
    return RELATIVE_DIR


def relative_filename(filename: str) -> str:
    """Return the relative form of `filename`.

    The file name will be relative to the directory that was current when
    `set_relative_directory` was called.

    """
    fnorm = os.path.normcase(filename)
    if fnorm.startswith(RELATIVE_DIR):
        filename = filename[len(RELATIVE_DIR) :]
    return filename


def canonical_filename(filename: str) -> str:
    """Return a canonical file name for `filename`.

    An absolute path with no redundant components and normalized case.

    """
    if filename not in CANONICAL_FILENAME_CACHE:
        cf = filename
        if not os.path.isabs(filename):
            for path in [os.curdir] + sys.path:
                if path is None:
                    continue  # type: ignore[unreachable]
                f = os.path.join(path, filename)
                try:
                    exists = os.path.exists(f)
                except UnicodeError:
                    exists = False
                if exists:
                    cf = f
                    break
        cf = abs_file(cf)
        CANONICAL_FILENAME_CACHE[filename] = cf
    return CANONICAL_FILENAME_CACHE[filename]


def flat_rootname(filename: str) -> str:
    """A base for a flat file name to correspond to this file.

    Useful for writing files about the code where you want all the files in
    the same directory, but need to differentiate same-named files from
    different directories.

    For example, the file a/b/c.py will return 'z_86bbcbe134d28fd2_c_py'

    """
    dirname, basename = ntpath.split(filename)
    if dirname:
        fp = hashlib.new(
            "sha3_256",
            dirname.encode("UTF-8"),
            usedforsecurity=False,
        ).hexdigest()[:16]
        prefix = f"z_{fp}_"
    else:
        prefix = ""
    return prefix + basename.replace(".", "_")


if env.WINDOWS:
    _ACTUAL_PATH_CACHE: dict[str, str] = {}
    _ACTUAL_PATH_LIST_CACHE: dict[str, list[str]] = {}

    def actual_path(path: str) -> str:
        """Get the actual path of `path`, including the correct case."""
        if path in _ACTUAL_PATH_CACHE:
            return _ACTUAL_PATH_CACHE[path]

        head, tail = os.path.split(path)
        if not tail:
            # This means head is the drive spec: normalize it.
            actpath = head.upper()
        elif not head:
            actpath = tail
        else:
            head = actual_path(head)
            if head in _ACTUAL_PATH_LIST_CACHE:
                files = _ACTUAL_PATH_LIST_CACHE[head]
            else:
                try:
                    files = os.listdir(head)
                except Exception:
                    # This will raise OSError, or this bizarre TypeError:
                    # https://bugs.python.org/issue1776160
                    files = []
                _ACTUAL_PATH_LIST_CACHE[head] = files
            normtail = os.path.normcase(tail)
            for f in files:
                if os.path.normcase(f) == normtail:
                    tail = f
                    break
            actpath = os.path.join(head, tail)
        _ACTUAL_PATH_CACHE[path] = actpath
        return actpath

else:

    def actual_path(path: str) -> str:
        """The actual path for non-Windows platforms."""
        return path


def abs_file(path: str) -> str:
    """Return the absolute normalized form of `path`."""
    return actual_path(os.path.abspath(os.path.realpath(path)))


def zip_location(filename: str) -> tuple[str, str] | None:
    """Split a filename into a zipfile / inner name pair.

    Only return a pair if the zipfile exists.  No check is made if the inner
    name is in the zipfile.

    """
    for ext in [".zip", ".whl", ".egg", ".pex", ".par"]:
        zipbase, extension, inner = filename.partition(ext + sep(filename))
        if extension:
            zipfile = zipbase + ext
            if os.path.exists(zipfile):
                return zipfile, inner
    return None
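
# Usage sketch (an annotation on this listing, not part of the vendored
# source): zip_location only splits when the archive file itself exists.
#
#     zip_location("deps/pkg.whl/pkg/mod.py")
#     # -> ("deps/pkg.whl", "pkg/mod.py") if deps/pkg.whl exists, else None
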
def source_exists(path: str) -> bool:
    """Determine if a source file path exists."""
    if os.path.exists(path):
        return True

    if zip_location(path):
        # If zip_location returns anything, then it's a zipfile that
        # exists.  That's good enough for us.
        return True

    return False


def python_reported_file(filename: str) -> str:
    """Return the string as Python would describe this file name."""
    return os.path.abspath(filename)


def isabs_anywhere(filename: str) -> bool:
    """Is `filename` an absolute path on any OS?"""
    return ntpath.isabs(filename) or posixpath.isabs(filename)


def prep_patterns(patterns: Iterable[str]) -> list[str]:
    """Prepare the file patterns for use in a `GlobMatcher`.

    If a pattern starts with a wildcard, it is used as a pattern
    as-is.  If it does not start with a wildcard, then it is made
    absolute with the current directory.

    If `patterns` is None, an empty list is returned.

    """
    prepped = []
    for p in patterns or []:
        prepped.append(p)
        if not p.startswith(("*", "?")):
            prepped.append(abs_file(p))
    return prepped


class TreeMatcher:
    """A matcher for files in a tree.

    Construct with a list of paths, either files or directories.  Paths match
    with the `match` method if they are one of the files, or if they are
    somewhere in a subtree rooted at one of the directories.

    """

    def __init__(self, paths: Iterable[str], name: str = "unknown") -> None:
        self.original_paths: list[str] = human_sorted(paths)
        self.paths = [os.path.normcase(p) for p in paths]
        self.name = name

    def __repr__(self) -> str:
        return f"<TreeMatcher {self.name} {self.original_paths!r}>"

    def info(self) -> list[str]:
        """A list of strings for displaying when dumping state."""
        return self.original_paths

    def match(self, fpath: str) -> bool:
        """Does `fpath` indicate a file in one of our trees?"""
        fpath = os.path.normcase(fpath)
        for p in self.paths:
            if fpath.startswith(p):
                if fpath == p:
                    # This is the same file!
                    return True
                if fpath[len(p)] == os.sep:
                    # This is a file in the directory
                    return True
        return False


class ModuleMatcher:
    """A matcher for modules in a tree."""

    def __init__(self, module_names: Iterable[str], name: str = "unknown") -> None:
        self.modules = list(module_names)
        self.name = name

    def __repr__(self) -> str:
        return f"<ModuleMatcher {self.name} {self.modules!r}>"

    def info(self) -> list[str]:
        """A list of strings for displaying when dumping state."""
        return self.modules

    def match(self, module_name: str) -> bool:
        """Does `module_name` indicate a module in one of our packages?"""
        if not module_name:
            return False

        for m in self.modules:
            if module_name.startswith(m):
                if module_name == m:
                    return True
                if module_name[len(m)] == ".":
                    # This is a module in the package
                    return True

        return False


class GlobMatcher:
    """A matcher for files by file name pattern."""

    def __init__(self, pats: Iterable[str], name: str = "unknown") -> None:
        self.pats = list(pats)
        self.re = globs_to_regex(self.pats, case_insensitive=env.WINDOWS)
        self.name = name

    def __repr__(self) -> str:
        return f"<GlobMatcher {self.name} {self.pats!r}>"

    def info(self) -> list[str]:
        """A list of strings for displaying when dumping state."""
        return self.pats

    def match(self, fpath: str) -> bool:
        """Does `fpath` match one of our file name patterns?"""
        return self.re.match(fpath) is not None


def sep(s: str) -> str:
    """Find the path separator used in this string, or os.sep if none."""
    if sep_match := re.search(r"[\\/]", s):
        the_sep = sep_match[0]
    else:
        the_sep = os.sep
    return the_sep


# Tokenizer for _glob_to_regex.
# None as a sub means disallowed.
# fmt: off
G2RX_TOKENS = [(re.compile(rx), sub) for rx, sub in [
    (r"\*\*\*+", None),             # Can't have ***
    (r"[^/]+\*\*+", None),          # Can't have x**
    (r"\*\*+[^/]+", None),          # Can't have **x
    (r"\*\*/\*\*", None),           # Can't have **/**
    (r"^\*+/", r"(.*[/\\\\])?"),    # ^*/ matches any prefix-slash, or nothing.
    (r"/\*+$", r"[/\\\\].*"),       # /*$ matches any slash-suffix.
    (r"\*\*/", r"(.*[/\\\\])?"),    # **/ matches any subdirs, including none
    (r"/", r"[/\\\\]"),             # / matches either slash or backslash
    (r"\*", r"[^/\\\\]*"),          # * matches any number of non slash-likes
    (r"\?", r"[^/\\\\]"),           # ? matches one non slash-like
    (r"\[.*?\]", r"\g<0>"),         # [a-f] matches [a-f]
    (r"[a-zA-Z0-9_-]+", r"\g<0>"),  # word chars match themselves
    (r"[\[\]]", None),              # Can't have single square brackets
    (r".", r"\\\g<0>"),             # Anything else is escaped to be safe
]]
# fmt: on


def _glob_to_regex(pattern: str) -> str:
    """Convert a file-path glob pattern into a regex."""
    # Turn all backslashes into slashes to simplify the tokenizer.
    pattern = pattern.replace("\\", "/")
    if "/" not in pattern:
        pattern = f"**/{pattern}"
    path_rx = []
    pos = 0
    while pos < len(pattern):
        for rx, sub in G2RX_TOKENS:  # pragma: always breaks
            if m := rx.match(pattern, pos=pos):
                if sub is None:
                    raise ConfigError(f"File pattern can't include {m[0]!r}")
                path_rx.append(m.expand(sub))
                pos = m.end()
                break
    return "".join(path_rx)


def globs_to_regex(
    patterns: Iterable[str],
    case_insensitive: bool = False,
    partial: bool = False,
) -> re.Pattern[str]:
    """Convert glob patterns to a compiled regex that matches any of them.

    Slashes are always converted to match either slash or backslash, for
    Windows support, even when running elsewhere.

    If the pattern has no slash or backslash, then it is interpreted as
    matching a file name anywhere it appears in the tree.  Otherwise, the glob
    pattern must match the whole file path.

    If `partial` is true, then the pattern will match if the target string
    starts with the pattern.  Otherwise, it must match the entire string.

    Returns: a compiled regex object.  Use the .match method to compare target
    strings.

    """
    flags = 0
    if case_insensitive:
        flags |= re.IGNORECASE
    rx = join_regex(map(_glob_to_regex, patterns))
    if not partial:
        rx = rf"(?:{rx})\Z"
    compiled = re.compile(rx, flags=flags)
    return compiled
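
# Usage sketch (an annotation on this listing, not part of the vendored
# source): a pattern with no slash matches anywhere in the tree, and "/"
# matches either separator style.
#
#     rx = globs_to_regex(["*/tests/*.py"])
#     bool(rx.match("src/tests/test_app.py"))    # True
#     bool(rx.match(r"src\tests\test_app.py"))   # True: backslashes too
#     bool(rx.match("src/tests/deep/test.py"))   # False: "*" stops at a slash
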
class PathAliases:
    """A collection of aliases for paths.

    When combining data files from remote machines, often the paths to source
    code are different, for example, due to OS differences, or because of
    serialized checkouts on continuous integration machines.

    A `PathAliases` object tracks a list of pattern/result pairs, and can
    map a path through those aliases to produce a unified path.

    """

    def __init__(
        self,
        debugfn: Callable[[str], None] | None = None,
        relative: bool = False,
    ) -> None:
        # A list of (original_pattern, regex, result)
        self.aliases: list[tuple[str, re.Pattern[str], str]] = []
        self.debugfn = debugfn or (lambda msg: 0)
        self.relative = relative
        self.pprinted = False

    def pprint(self) -> None:
        """Dump the important parts of the PathAliases, for debugging."""
        self.debugfn(f"Aliases (relative={self.relative}):")
        for original_pattern, regex, result in self.aliases:
            self.debugfn(f" Rule: {original_pattern!r} -> {result!r} using regex {regex.pattern!r}")

    def add(self, pattern: str, result: str) -> None:
        """Add the `pattern`/`result` pair to the list of aliases.

        `pattern` is a `glob`-style pattern.  `result` is a simple
        string.  When mapping paths, if a path starts with a match against
        `pattern`, then that match is replaced with `result`.  This models
        isomorphic source trees being rooted at different places on two
        different machines.

        `pattern` can't end with a wildcard component, since that would
        match an entire tree, and not just its root.

        """
        original_pattern = pattern
        pattern_sep = sep(pattern)

        if len(pattern) > 1:
            pattern = pattern.rstrip(r"\/")

        # The pattern can't end with a wildcard component.
        if pattern.endswith("*"):
            raise ConfigError("Pattern must not end with wildcards.")

        # The pattern is meant to match a file path.  Let's make it absolute
        # unless it already is, or is meant to match any prefix.
        if not self.relative:
            if not pattern.startswith("*") and not isabs_anywhere(pattern + pattern_sep):
                pattern = abs_file(pattern)
        if not pattern.endswith(pattern_sep):
            pattern += pattern_sep

        # Make a regex from the pattern.
        regex = globs_to_regex([pattern], case_insensitive=True, partial=True)

        # Normalize the result: it must end with a path separator.
        result_sep = sep(result)
        result = result.rstrip(r"\/") + result_sep
        self.aliases.append((original_pattern, regex, result))

    def map(self, path: str, exists: Callable[[str], bool] = source_exists) -> str:
        """Map `path` through the aliases.

        `path` is checked against all of the patterns.  The first pattern to
        match is used to replace the root of the path with the result root.
        Only one pattern is ever used.  If no patterns match, `path` is
        returned unchanged.

        The separator style in the result is made to match that of the result
        in the alias.

        `exists` is a function to determine if the resulting path actually
        exists.

        Returns the mapped path.  If a mapping has happened, this is a
        canonical path.  If no mapping has happened, it is the original value
        of `path` unchanged.

        """
        if not self.pprinted:
            self.pprint()
            self.pprinted = True

        for original_pattern, regex, result in self.aliases:
            if m := regex.match(path):
                new = path.replace(m[0], result)
                new = new.replace(sep(path), sep(result))
                if not self.relative:
                    new = canonical_filename(new)
                dot_start = result.startswith(("./", ".\\")) and len(result) > 2
                if new.startswith(("./", ".\\")) and not dot_start:
                    new = new[2:]
                if not exists(new):
                    self.debugfn(
                        f"Rule {original_pattern!r} changed {path!r} to {new!r} "
                        + "which doesn't exist, continuing",
                    )
                    continue
                self.debugfn(
                    f"Matched path {path!r} to rule {original_pattern!r} -> {result!r}, "
                    + f"producing {new!r}",
                )
                return new

        # If we get here, no pattern matched.

        if self.relative:
            path = relative_filename(path)

        if self.relative and not isabs_anywhere(path):
            # Auto-generate a pattern to implicitly match relative files
            parts = re.split(r"[/\\]", path)
            if len(parts) > 1:
                dir1 = parts[0]
                pattern = f"*/{dir1}"
                regex_pat = rf"^(.*[\\/])?{re.escape(dir1)}[\\/]"
                result = f"{dir1}{os.sep}"
                # Only add a new pattern if we don't already have this pattern.
                if not any(p == pattern for p, _, _ in self.aliases):
                    self.debugfn(
                        f"Generating rule: {pattern!r} -> {result!r} using regex {regex_pat!r}",
                    )
                    self.aliases.append((pattern, re.compile(regex_pat), result))
                    return self.map(path, exists=exists)

        self.debugfn(f"No rules match, path {path!r} is unchanged")
        return path
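
# Usage sketch (an annotation on this listing, not part of the vendored
# source): mapping CI paths back to local ones.  With relative=True the
# result keeps its relative form:
#
#     aliases = PathAliases(relative=True)
#     aliases.add("/jenkins/*/src", "./src")
#     aliases.map("/jenkins/build42/src/app.py", exists=lambda p: True)
#     # -> "./src/app.py"
#
# The exists callback is stubbed out here; by default the remapped path must
# actually exist (source_exists) or the rule is skipped.
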
def find_python_files(dirname: str, include_namespace_packages: bool) -> Iterable[str]:
    """Yield all of the importable Python files in `dirname`, recursively.

    To be importable, the files have to be in a directory with a __init__.py,
    except for `dirname` itself, which isn't required to have one.  The
    assumption is that `dirname` was specified directly, so the user knows
    best, but sub-directories are checked for a __init__.py to be sure we only
    find the importable files.

    If `include_namespace_packages` is True, then the check for __init__.py
    files is skipped.

    Files with strange characters are skipped, since they couldn't have been
    imported, and are probably editor side-files.

    """
    for i, (dirpath, dirnames, filenames) in enumerate(os.walk(dirname)):
        if not include_namespace_packages:
            if i > 0 and "__init__.py" not in filenames:
                # If a directory doesn't have __init__.py, then it isn't
                # importable and neither are its files
                del dirnames[:]
                continue
        for filename in filenames:
            # We're only interested in files that look like reasonable Python
            # files: Must end with .py or .pyw, and must not have certain funny
            # characters that probably mean they are editor junk.
            if re.match(r"^[^.#~!$@%^&*()+=,]+\.pyw?$", filename):
                yield os.path.join(dirpath, filename)


# Globally set the relative directory.
set_relative_directory()
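The matcher classes above share a single `match` interface; a quick sketch of
how they differ (an annotation on this listing, with hypothetical paths):

    from coverage.files import GlobMatcher, ModuleMatcher, TreeMatcher

    TreeMatcher(["/src/pkg"]).match("/src/pkg/core.py")      # True: file inside the tree
    ModuleMatcher(["pkg"]).match("pkg.sub.mod")              # True: dotted-name prefix
    ModuleMatcher(["pkg"]).match("pkgextra")                 # False: not a submodule
    GlobMatcher(["*/tests/*.py"]).match("src/tests/t_x.py")  # True: glob match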
856
venv/lib/python3.12/site-packages/coverage/html.py
Normal file
@@ -0,0 +1,856 @@
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt

"""HTML reporting for coverage.py."""

from __future__ import annotations

import collections
import dataclasses
import datetime
import functools
import json
import os
import re
import string
from collections.abc import Iterable
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any

import coverage
from coverage.data import CoverageData, add_data_to_hash
from coverage.exceptions import NoDataError
from coverage.files import flat_rootname
from coverage.misc import (
    Hasher,
    ensure_dir,
    file_be_gone,
    format_local_datetime,
    human_sorted,
    isolate_module,
    plural,
    stdout_link,
)
from coverage.report_core import get_analysis_to_report
from coverage.results import Analysis, AnalysisNarrower, Numbers
from coverage.templite import Templite
from coverage.types import TLineNo, TMorf
from coverage.version import __url__

if TYPE_CHECKING:
    from coverage import Coverage
    from coverage.plugin import FileReporter


os = isolate_module(os)


def data_filename(fname: str) -> str:
    """Return the path to an "htmlfiles" data file of ours."""
    static_dir = os.path.join(os.path.dirname(__file__), "htmlfiles")
    static_filename = os.path.join(static_dir, fname)
    return static_filename


def read_data(fname: str) -> str:
    """Return the contents of a data file of ours."""
    with open(data_filename(fname), encoding="utf-8") as data_file:
        return data_file.read()


def write_html(fname: str, html: str) -> None:
    """Write `html` to `fname`, properly encoded."""
    html = re.sub(r"(\A\s+)|(\s+$)", "", html, flags=re.MULTILINE) + "\n"
    with open(fname, "wb") as fout:
        fout.write(html.encode("ascii", "xmlcharrefreplace"))


@dataclass
class LineData:
    """The data for each source line of HTML output."""

    tokens: list[tuple[str, str]]
    number: TLineNo
    category: str
    contexts: list[str]
    contexts_label: str
    context_list: list[str]
    short_annotations: list[str]
    long_annotations: list[str]
    html: str = ""
    context_str: str | None = None
    annotate: str | None = None
    annotate_long: str | None = None
    css_class: str = ""


@dataclass
class FileData:
    """The data for each source file of HTML output."""

    relative_filename: str
    nums: Numbers
    lines: list[LineData]


@dataclass
class IndexItem:
    """Information for each index entry, to render an index page."""

    url: str = ""
    file: str = ""
    description: str = ""
    nums: Numbers = field(default_factory=Numbers)


@dataclass
class IndexPage:
    """Data for each index page."""

    noun: str
    plural: str
    filename: str
    summaries: list[IndexItem]
    totals: Numbers
    skipped_covered_count: int
    skipped_empty_count: int


class HtmlDataGeneration:
    """Generate structured data to be turned into HTML reports."""

    EMPTY = "(empty)"

    def __init__(self, cov: Coverage) -> None:
        self.coverage = cov
        self.config = self.coverage.config
        self.data = self.coverage.get_data()
        self.has_arcs = self.data.has_arcs()
        if self.config.show_contexts:
            if self.data.measured_contexts() == {""}:
                self.coverage._warn("No contexts were measured")
        self.data.set_query_contexts(self.config.report_contexts)

    def data_for_file(self, fr: FileReporter, analysis: Analysis) -> FileData:
        """Produce the data needed for one file's report."""
        if self.has_arcs:
            missing_branch_arcs = analysis.missing_branch_arcs()
            arcs_executed = analysis.arcs_executed
        else:
            missing_branch_arcs = {}
            arcs_executed = []

        if self.config.show_contexts:
            contexts_by_lineno = self.data.contexts_by_lineno(analysis.filename)

        lines = []
        branch_stats = analysis.branch_stats()
        multiline_map = {}
        if hasattr(fr, "multiline_map"):
            multiline_map = fr.multiline_map()

        for lineno, tokens in enumerate(fr.source_token_lines(), start=1):
            # Figure out how to mark this line.
            category = category2 = ""
            short_annotations = []
            long_annotations = []

            if lineno in analysis.excluded:
                category = "exc"
            elif lineno in analysis.missing:
                category = "mis"
            elif self.has_arcs and lineno in missing_branch_arcs:
                category = "par"
                mba = missing_branch_arcs[lineno]
                if len(mba) == branch_stats[lineno][0]:
                    # None of the branches were taken from this line.
                    short_annotations.append("anywhere")
                    long_annotations.append(
                        f"line {lineno} didn't jump anywhere: it always raised an exception."
                    )
                else:
                    for b in missing_branch_arcs[lineno]:
                        if b < 0:
                            short_annotations.append("exit")
                        else:
                            short_annotations.append(str(b))
                        long_annotations.append(
                            fr.missing_arc_description(lineno, b, arcs_executed)
                        )
            elif lineno in analysis.statements:
                category = "run"
            elif first_line := multiline_map.get(lineno):
                if first_line in analysis.excluded:
                    category2 = "exc2"
                elif first_line in analysis.missing:
                    category2 = "mis2"
                elif self.has_arcs and first_line in missing_branch_arcs:
                    category2 = "par2"
                # I don't understand why this last condition is marked as
                # partial. If I add an else with an exception, the exception
                # is raised.
                elif first_line in analysis.statements:  # pragma: part covered
                    category2 = "run2"

            contexts = []
            contexts_label = ""
            context_list = []
            if category and self.config.show_contexts:
                contexts = human_sorted(c or self.EMPTY for c in contexts_by_lineno.get(lineno, ()))
                if contexts == [self.EMPTY]:
                    contexts_label = self.EMPTY
                else:
                    contexts_label = f"{len(contexts)} ctx"
                    context_list = contexts

            lines.append(
                LineData(
                    tokens=tokens,
                    number=lineno,
                    category=category or category2,
                    contexts=contexts,
                    contexts_label=contexts_label,
                    context_list=context_list,
                    short_annotations=short_annotations,
                    long_annotations=long_annotations,
                )
            )

        file_data = FileData(
            relative_filename=fr.relative_filename(),
            nums=analysis.numbers,
            lines=lines,
        )

        return file_data


class FileToReport:
    """A file we're considering reporting."""

    def __init__(self, fr: FileReporter, analysis: Analysis) -> None:
        self.fr = fr
        self.analysis = analysis
        self.rootname = flat_rootname(fr.relative_filename())
        self.html_filename = self.rootname + ".html"
        self.prev_html = self.next_html = ""


HTML_SAFE = string.ascii_letters + string.digits + "!#$%'()*+,-./:;=?@[]^_`{|}~"


@functools.cache
def encode_int(n: int) -> str:
    """Create a short HTML-safe string from an integer, using HTML_SAFE."""
    if n == 0:
        return HTML_SAFE[0]

    r = []
    while n:
        n, t = divmod(n, len(HTML_SAFE))
        r.append(HTML_SAFE[t])
    return "".join(r)
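
# Usage sketch (an annotation on this listing, not part of the vendored
# source): encode_int is a little-endian base-N encoding, N = len(HTML_SAFE),
# used to keep the per-line context data embedded in the HTML compact.
#
#     encode_int(0)                    # -> "a"
#     encode_int(len(HTML_SAFE) - 1)   # -> "~", the last symbol
#     encode_int(len(HTML_SAFE))       # -> "ab": least-significant digit first
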
def copy_with_cache_bust(src: str, dest_dir: str) -> str:
    """Copy `src` to `dest_dir`, adding a hash to the name.

    Returns the updated destination file name with hash.
    """
    with open(src, "rb") as f:
        text = f.read()
    h = Hasher()
    h.update(text)
    cache_bust = h.hexdigest()[:8]
    src_base = os.path.basename(src)
    dest = src_base.replace(".", f"_cb_{cache_bust}.")
    with open(os.path.join(dest_dir, dest), "wb") as f:
        f.write(text)
    return dest
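
# Sketch (an annotation on this listing, not part of the vendored source):
# the cache-busting rename inserts a content hash before the extension, so
# for a file whose hash starts with "1a2b3c4d":
#
#     copy_with_cache_bust("htmlfiles/style.css", "htmlcov")
#     # writes htmlcov/style_cb_1a2b3c4d.css and returns that name
#
# Note that str.replace hits every dot, so the scheme assumes single-extension
# names like the STATIC_FILES entries below.
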
class HtmlReporter:
|
||||
"""HTML reporting."""
|
||||
|
||||
# These files will be copied from the htmlfiles directory to the output
|
||||
# directory.
|
||||
STATIC_FILES = [
|
||||
"style.css",
|
||||
"coverage_html.js",
|
||||
"keybd_closed.png",
|
||||
"favicon_32.png",
|
||||
]
|
||||
|
||||
def __init__(self, cov: Coverage) -> None:
|
||||
self.coverage = cov
|
||||
self.config = self.coverage.config
|
||||
self.directory = self.config.html_dir
|
||||
|
||||
self.skip_covered = self.config.html_skip_covered
|
||||
if self.skip_covered is None:
|
||||
self.skip_covered = self.config.skip_covered
|
||||
self.skip_empty = self.config.html_skip_empty
|
||||
if self.skip_empty is None:
|
||||
self.skip_empty = self.config.skip_empty
|
||||
|
||||
title = self.config.html_title
|
||||
|
||||
self.extra_css = bool(self.config.extra_css)
|
||||
|
||||
self.data = self.coverage.get_data()
|
||||
self.has_arcs = self.data.has_arcs()
|
||||
|
||||
self.index_pages: dict[str, IndexPage] = {
|
||||
"file": self.new_index_page("file", "files"),
|
||||
}
|
||||
self.incr = IncrementalChecker(self.directory)
|
||||
self.datagen = HtmlDataGeneration(self.coverage)
|
||||
self.directory_was_empty = False
|
||||
self.first_fr = None
|
||||
self.final_fr = None
|
||||
|
||||
self.template_globals = {
|
||||
# Functions available in the templates.
|
||||
"escape": escape,
|
||||
"pair": pair,
|
||||
"len": len,
|
||||
# Constants for this report.
|
||||
"__url__": __url__,
|
||||
"__version__": coverage.__version__,
|
||||
"title": title,
|
||||
"time_stamp": format_local_datetime(datetime.datetime.now()),
|
||||
"extra_css": self.extra_css,
|
||||
"has_arcs": self.has_arcs,
|
||||
"show_contexts": self.config.show_contexts,
|
||||
"statics": {},
|
||||
# Constants for all reports.
|
||||
# These css classes determine which lines are highlighted by default.
|
||||
"category": {
|
||||
"exc": "exc show_exc",
|
||||
"mis": "mis show_mis",
|
||||
"par": "par run show_par",
|
||||
"run": "run",
|
||||
"exc2": "exc exc2 show_exc",
|
||||
"mis2": "mis mis2 show_mis",
|
||||
"par2": "par par2 ru2 show_par",
|
||||
"run2": "run run2",
|
||||
},
|
||||
}
|
||||
self.index_tmpl = Templite(read_data("index.html"), self.template_globals)
|
||||
self.pyfile_html_source = read_data("pyfile.html")
|
||||
self.source_tmpl = Templite(self.pyfile_html_source, self.template_globals)
|
||||
|
||||
def new_index_page(self, noun: str, plural_noun: str) -> IndexPage:
|
||||
"""Create an IndexPage for a kind of region."""
|
||||
return IndexPage(
|
||||
noun=noun,
|
||||
plural=plural_noun,
|
||||
filename="index.html" if noun == "file" else f"{noun}_index.html",
|
||||
summaries=[],
|
||||
totals=Numbers(precision=self.config.precision),
|
||||
skipped_covered_count=0,
|
||||
skipped_empty_count=0,
|
||||
)
|
||||
|
||||
def report(self, morfs: Iterable[TMorf] | None) -> float:
|
||||
"""Generate an HTML report for `morfs`.
|
||||
|
||||
`morfs` is a list of modules or file names.
|
||||
|
||||
"""
|
||||
# Read the status data and check that this run used the same
|
||||
# global data as the last run.
|
||||
self.incr.read()
|
||||
self.incr.check_global_data(self.config, self.pyfile_html_source)
|
||||
|
||||
# Process all the files. For each page we need to supply a link
|
||||
# to the next and previous page.
|
||||
files_to_report = []
|
||||
|
||||
have_data = False
|
||||
for fr, analysis in get_analysis_to_report(self.coverage, morfs):
|
||||
have_data = True
|
||||
ftr = FileToReport(fr, analysis)
|
||||
if self.should_report(analysis, self.index_pages["file"]):
|
||||
files_to_report.append(ftr)
|
||||
else:
|
||||
file_be_gone(os.path.join(self.directory, ftr.html_filename))
|
||||
|
||||
if not have_data:
|
||||
raise NoDataError("No data to report.")
|
||||
|
||||
self.make_directory()
|
||||
self.make_local_static_report_files()
|
||||
|
||||
if files_to_report:
|
||||
for ftr1, ftr2 in zip(files_to_report[:-1], files_to_report[1:]):
|
||||
ftr1.next_html = ftr2.html_filename
|
||||
ftr2.prev_html = ftr1.html_filename
|
||||
files_to_report[0].prev_html = "index.html"
|
||||
files_to_report[-1].next_html = "index.html"
|
||||
|
||||
for ftr in files_to_report:
|
||||
self.write_html_page(ftr)
|
||||
for noun, plural_noun in ftr.fr.code_region_kinds():
|
||||
if noun not in self.index_pages:
|
||||
self.index_pages[noun] = self.new_index_page(noun, plural_noun)
|
||||
|
||||
# Write the index page.
|
||||
if files_to_report:
|
||||
first_html = files_to_report[0].html_filename
|
||||
final_html = files_to_report[-1].html_filename
|
||||
else:
|
||||
first_html = final_html = "index.html"
|
||||
self.write_file_index_page(first_html, final_html)
|
||||
|
||||
# Write function and class index pages.
|
||||
self.write_region_index_pages(files_to_report)
|
||||
|
||||
return (
|
||||
self.index_pages["file"].totals.n_statements
|
||||
and self.index_pages["file"].totals.pc_covered
|
||||
)
|
||||
|
||||
def make_directory(self) -> None:
|
||||
"""Make sure our htmlcov directory exists."""
|
||||
ensure_dir(self.directory)
|
||||
if not os.listdir(self.directory):
|
||||
self.directory_was_empty = True
|
||||
|
||||
def copy_static_file(self, src: str, slug: str = "") -> None:
|
||||
"""Copy a static file into the output directory with cache busting."""
|
||||
dest = copy_with_cache_bust(src, self.directory)
|
||||
if not slug:
|
||||
slug = os.path.basename(src).replace(".", "_")
|
||||
self.template_globals["statics"][slug] = dest # type: ignore
|
||||
|
||||
def make_local_static_report_files(self) -> None:
|
||||
"""Make local instances of static files for HTML report."""
|
||||
|
||||
# The files we provide must always be copied.
|
||||
for static in self.STATIC_FILES:
|
||||
self.copy_static_file(data_filename(static))
|
||||
|
||||
# The user may have extra CSS they want copied.
|
||||
if self.extra_css:
|
||||
assert self.config.extra_css is not None
|
||||
self.copy_static_file(self.config.extra_css, slug="extra_css")
|
||||
|
||||
# Only write the .gitignore file if the directory was originally empty.
|
||||
# .gitignore can't be copied from the source tree because if it was in
|
||||
# the source tree, it would stop the static files from being checked in.
|
||||
if self.directory_was_empty:
|
||||
with open(os.path.join(self.directory, ".gitignore"), "w", encoding="utf-8") as fgi:
|
||||
fgi.write("# Created by coverage.py\n*\n")
|
||||
|
||||
def should_report(self, analysis: Analysis, index_page: IndexPage) -> bool:
|
||||
"""Determine if we'll report this file or region."""
|
||||
# Get the numbers for this file.
|
||||
nums = analysis.numbers
|
||||
index_page.totals += nums
|
||||
|
||||
if self.skip_covered:
|
||||
# Don't report on 100% files.
|
||||
no_missing_lines = (nums.n_missing == 0) # fmt: skip
|
||||
no_missing_branches = (nums.n_partial_branches == 0) # fmt: skip
|
||||
if no_missing_lines and no_missing_branches:
|
||||
index_page.skipped_covered_count += 1
|
||||
return False
|
||||
|
||||
if self.skip_empty:
|
||||
# Don't report on empty files.
|
||||
if nums.n_statements == 0:
|
||||
index_page.skipped_empty_count += 1
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
def write_html_page(self, ftr: FileToReport) -> None:
|
||||
"""Generate an HTML page for one source file.
|
||||
|
||||
If the page on disk is already correct based on our incremental status
|
||||
checking, then the page doesn't have to be generated, and this function
|
||||
only does page summary bookkeeping.
|
||||
|
||||
"""
|
||||
# Find out if the page on disk is already correct.
|
||||
if self.incr.can_skip_file(self.data, ftr.fr, ftr.rootname):
|
||||
self.index_pages["file"].summaries.append(self.incr.index_info(ftr.rootname))
|
||||
return
|
||||
|
||||
# Write the HTML page for this source file.
|
||||
file_data = self.datagen.data_for_file(ftr.fr, ftr.analysis)
|
||||
|
||||
contexts = collections.Counter(c for cline in file_data.lines for c in cline.contexts)
|
||||
context_codes = {y: i for (i, y) in enumerate(x[0] for x in contexts.most_common())}
|
||||
if context_codes:
|
||||
contexts_json = json.dumps(
|
||||
{encode_int(v): k for (k, v) in context_codes.items()},
|
||||
indent=2,
|
||||
)
|
||||
else:
|
||||
contexts_json = None
|
||||
|
||||
for ldata in file_data.lines:
|
||||
# Build the HTML for the line.
|
||||
html_parts = []
|
||||
for tok_type, tok_text in ldata.tokens:
|
||||
if tok_type == "ws":
|
||||
html_parts.append(escape(tok_text))
|
||||
else:
|
||||
tok_html = escape(tok_text) or " "
|
||||
html_parts.append(f'<span class="{tok_type}">{tok_html}</span>')
|
||||
ldata.html = "".join(html_parts)
|
||||
if ldata.context_list:
|
||||
encoded_contexts = [
|
||||
encode_int(context_codes[c_context]) for c_context in ldata.context_list
|
||||
]
|
||||
code_width = max(len(ec) for ec in encoded_contexts)
|
||||
ldata.context_str = str(code_width) + "".join(
|
||||
ec.ljust(code_width) for ec in encoded_contexts
|
||||
)
|
||||
else:
|
||||
ldata.context_str = ""
|
||||
|
||||
if ldata.short_annotations:
|
||||
# 202F is NARROW NO-BREAK SPACE.
|
||||
# 219B is RIGHTWARDS ARROW WITH STROKE.
|
||||
ldata.annotate = ", ".join(
|
||||
f"{ldata.number} ↛ {d}" for d in ldata.short_annotations
|
||||
)
|
||||
else:
|
||||
ldata.annotate = None
|
||||
|
||||
if ldata.long_annotations:
|
||||
longs = ldata.long_annotations
|
||||
# A line can only have two branch destinations. If there were
|
||||
# two missing, we would have written one as "always raised."
|
||||
assert len(longs) == 1, (
|
||||
f"Had long annotations in {ftr.fr.relative_filename()}: {longs}"
|
||||
)
|
||||
ldata.annotate_long = longs[0]
|
||||
else:
|
||||
ldata.annotate_long = None
|
||||
|
||||
css_classes = []
|
||||
if ldata.category:
|
||||
css_classes.append(
|
||||
self.template_globals["category"][ldata.category], # type: ignore[index]
|
||||
)
|
||||
ldata.css_class = " ".join(css_classes) or "pln"
|
||||
|
||||
html_path = os.path.join(self.directory, ftr.html_filename)
|
||||
html = self.source_tmpl.render(
|
||||
{
|
||||
**file_data.__dict__,
|
||||
"contexts_json": contexts_json,
|
||||
"prev_html": ftr.prev_html,
|
||||
"next_html": ftr.next_html,
|
||||
}
|
||||
)
|
||||
write_html(html_path, html)
|
||||
|
||||
# Save this file's information for the index page.
|
||||
index_info = IndexItem(
|
||||
url=ftr.html_filename,
|
||||
file=escape(ftr.fr.relative_filename()),
|
||||
nums=ftr.analysis.numbers,
|
||||
)
|
||||
self.index_pages["file"].summaries.append(index_info)
|
||||
self.incr.set_index_info(ftr.rootname, index_info)
|
||||
|
||||
def write_file_index_page(self, first_html: str, final_html: str) -> None:
|
||||
"""Write the file index page for this report."""
|
||||
index_file = self.write_index_page(
|
||||
self.index_pages["file"],
|
||||
first_html=first_html,
|
||||
final_html=final_html,
|
||||
)
|
||||
|
||||
print_href = stdout_link(index_file, f"file://{os.path.abspath(index_file)}")
|
||||
self.coverage._message(f"Wrote HTML report to {print_href}")
|
||||
|
||||
# Write the latest hashes for next time.
|
||||
self.incr.write()
|
||||
|
||||
def write_region_index_pages(self, files_to_report: Iterable[FileToReport]) -> None:
|
||||
"""Write the other index pages for this report."""
|
||||
for ftr in files_to_report:
|
||||
region_nouns = [pair[0] for pair in ftr.fr.code_region_kinds()]
|
||||
num_lines = len(ftr.fr.source().splitlines())
|
||||
regions = ftr.fr.code_regions()
|
||||
|
||||
for noun in region_nouns:
|
||||
page_data = self.index_pages[noun]
|
||||
|
||||
outside_lines = set(range(1, num_lines + 1))
|
||||
for region in regions:
|
||||
if region.kind != noun:
|
||||
continue
|
||||
outside_lines -= region.lines
|
||||
|
||||
narrower = AnalysisNarrower(ftr.analysis)
|
||||
narrower.add_regions(r.lines for r in regions if r.kind == noun)
|
||||
narrower.add_regions([outside_lines])
|
||||
|
||||
for region in regions:
|
||||
if region.kind != noun:
|
||||
continue
|
||||
analysis = narrower.narrow(region.lines)
|
||||
if not self.should_report(analysis, page_data):
|
||||
continue
|
||||
sorting_name = region.name.rpartition(".")[-1].lstrip("_")
|
||||
page_data.summaries.append(
|
||||
IndexItem(
|
||||
url=f"{ftr.html_filename}#t{region.start}",
|
||||
file=escape(ftr.fr.relative_filename()),
|
||||
description=(
|
||||
f"<data value='{escape(sorting_name)}'>"
|
||||
+ escape(region.name)
|
||||
+ "</data>"
|
||||
),
|
||||
nums=analysis.numbers,
|
||||
)
|
||||
)
|
||||
|
||||
analysis = narrower.narrow(outside_lines)
|
||||
if self.should_report(analysis, page_data):
|
||||
page_data.summaries.append(
|
||||
IndexItem(
|
||||
url=ftr.html_filename,
|
||||
file=escape(ftr.fr.relative_filename()),
|
||||
description=(
|
||||
"<data value=''>"
|
||||
+ f"<span class='no-noun'>(no {escape(noun)})</span>"
|
||||
+ "</data>"
|
||||
),
|
||||
nums=analysis.numbers,
|
||||
)
|
||||
)
|
||||
|
||||
for noun, index_page in self.index_pages.items():
|
||||
if noun != "file":
|
||||
self.write_index_page(index_page)
|
||||
|
||||
def write_index_page(self, index_page: IndexPage, **kwargs: str) -> str:
|
||||
"""Write an index page specified by `index_page`.
|
||||
|
||||
Returns the filename created.
|
||||
"""
|
||||
skipped_covered_msg = skipped_empty_msg = ""
|
||||
if n := index_page.skipped_covered_count:
|
||||
word = plural(n, index_page.noun, index_page.plural)
|
||||
skipped_covered_msg = f"{n} {word} skipped due to complete coverage."
|
||||
if n := index_page.skipped_empty_count:
|
||||
word = plural(n, index_page.noun, index_page.plural)
|
||||
skipped_empty_msg = f"{n} empty {word} skipped."
|
||||
|
||||
index_buttons = [
|
||||
{
|
||||
"label": ip.plural.title(),
|
||||
"url": ip.filename if ip.noun != index_page.noun else "",
|
||||
"current": ip.noun == index_page.noun,
|
||||
}
|
||||
for ip in self.index_pages.values()
|
||||
]
|
||||
render_data = {
|
||||
"regions": index_page.summaries,
|
||||
"totals": index_page.totals,
|
||||
"noun": index_page.noun,
|
||||
"region_noun": index_page.noun if index_page.noun != "file" else "",
|
||||
"skip_covered": self.skip_covered,
|
||||
"skipped_covered_msg": skipped_covered_msg,
|
||||
"skipped_empty_msg": skipped_empty_msg,
|
||||
"first_html": "",
|
||||
"final_html": "",
|
||||
"index_buttons": index_buttons,
|
||||
}
|
||||
render_data.update(kwargs)
|
||||
html = self.index_tmpl.render(render_data)
|
||||
|
||||
index_file = os.path.join(self.directory, index_page.filename)
|
||||
write_html(index_file, html)
|
||||
return index_file
|
||||
|
||||
|
||||
@dataclass
|
||||
class FileInfo:
|
||||
"""Summary of the information from last rendering, to avoid duplicate work."""
|
||||
|
||||
hash: str = ""
|
||||
index: IndexItem = field(default_factory=IndexItem)
|
||||
|
||||
|
||||
class IncrementalChecker:
|
||||
"""Logic and data to support incremental reporting.
|
||||
|
||||
When generating an HTML report, often only a few of the source files have
|
||||
changed since the last time we made the HTML report. This means previously
|
||||
created HTML pages can be reused without generating them again, speeding
|
||||
the command.
|
||||
|
||||
This class manages a JSON data file that captures enough information to
|
||||
know whether an HTML page for a .py file needs to be regenerated or not.
|
||||
The data file also needs to store all the information needed to create the
|
||||
entry for the file on the index page so that if the HTML page is reused,
|
||||
the index page can still be created to refer to it.
|
||||
|
||||
The data looks like::
|
||||
|
||||
{
|
||||
"note": "This file is an internal implementation detail ...",
|
||||
// A fixed number indicating the data format. STATUS_FORMAT
|
||||
"format": 5,
|
||||
// The version of coverage.py
|
||||
"version": "7.4.4",
|
||||
// A hash of a number of global things, including the configuration
|
||||
// settings and the pyfile.html template itself.
|
||||
"globals": "540ee119c15d52a68a53fe6f0897346d",
|
||||
"files": {
|
||||
// An entry for each source file keyed by the flat_rootname().
|
||||
"z_7b071bdc2a35fa80___init___py": {
|
||||
// Hash of the source, the text of the .py file.
|
||||
"hash": "e45581a5b48f879f301c0f30bf77a50c",
|
||||
// Information for the index.html file.
|
||||
"index": {
|
||||
"url": "z_7b071bdc2a35fa80___init___py.html",
|
||||
"file": "cogapp/__init__.py",
|
||||
"description": "",
|
||||
// The Numbers for this file.
|
||||
"nums": { "precision": 2, "n_files": 1, "n_statements": 43, ... }
|
||||
}
|
||||
},
|
||||
...
|
||||
}
|
||||
}
|
||||
|
||||
"""
|
||||
|
||||
STATUS_FILE = "status.json"
|
||||
STATUS_FORMAT = 5
|
||||
NOTE = (
|
||||
"This file is an internal implementation detail to speed up HTML report"
|
||||
+ " generation. Its format can change at any time. You might be looking"
|
||||
+ " for the JSON report: https://coverage.rtfd.io/cmd.html#cmd-json"
|
||||
)
|
||||
|
||||
def __init__(self, directory: str) -> None:
|
||||
self.directory = directory
|
||||
self._reset()
|
||||
|
||||
def _reset(self) -> None:
|
||||
"""Initialize to empty. Causes all files to be reported."""
|
||||
self.globals = ""
|
||||
self.files: dict[str, FileInfo] = {}
|
||||
|
||||
def read(self) -> None:
|
||||
"""Read the information we stored last time."""
|
||||
try:
|
||||
status_file = os.path.join(self.directory, self.STATUS_FILE)
|
||||
with open(status_file, encoding="utf-8") as fstatus:
|
||||
status = json.load(fstatus)
|
||||
except (OSError, ValueError):
|
||||
# Status file is missing or malformed.
|
||||
usable = False
|
||||
else:
|
||||
if status["format"] != self.STATUS_FORMAT:
|
||||
usable = False
|
||||
elif status["version"] != coverage.__version__:
|
||||
usable = False
|
||||
else:
|
||||
usable = True
|
||||
|
||||
if usable:
|
||||
self.files = {}
|
||||
for filename, filedict in status["files"].items():
|
||||
indexdict = filedict["index"]
|
||||
index_item = IndexItem(**indexdict)
|
||||
index_item.nums = Numbers(**indexdict["nums"])
|
||||
fileinfo = FileInfo(
|
||||
hash=filedict["hash"],
|
||||
index=index_item,
|
||||
)
|
||||
self.files[filename] = fileinfo
|
||||
self.globals = status["globals"]
|
||||
else:
|
||||
self._reset()
|
||||
|
||||
def write(self) -> None:
|
||||
"""Write the current status."""
|
||||
status_file = os.path.join(self.directory, self.STATUS_FILE)
|
||||
status_data = {
|
||||
"note": self.NOTE,
|
||||
"format": self.STATUS_FORMAT,
|
||||
"version": coverage.__version__,
|
||||
"globals": self.globals,
|
||||
"files": {fname: dataclasses.asdict(finfo) for fname, finfo in self.files.items()},
|
||||
}
|
||||
with open(status_file, "w", encoding="utf-8") as fout:
|
||||
json.dump(status_data, fout, separators=(",", ":"))
|
||||
|
||||
def check_global_data(self, *data: Any) -> None:
|
||||
"""Check the global data that can affect incremental reporting.
|
||||
|
||||
Pass in whatever global information could affect the content of the
|
||||
HTML pages. If the global data has changed since last time, this will
|
||||
clear the data so that all files are regenerated.
|
||||
|
||||
"""
|
||||
m = Hasher()
|
||||
for d in data:
|
||||
m.update(d)
|
||||
these_globals = m.hexdigest()
|
||||
if self.globals != these_globals:
|
||||
self._reset()
|
||||
self.globals = these_globals
|
||||
|
||||
def can_skip_file(self, data: CoverageData, fr: FileReporter, rootname: str) -> bool:
|
||||
"""Can we skip reporting this file?
|
||||
|
||||
`data` is a CoverageData object, `fr` is a `FileReporter`, and
|
||||
`rootname` is the name being used for the file.
|
||||
|
||||
Returns True if the HTML page is fine as-is, False if we need to recreate
|
||||
the HTML page.
|
||||
|
||||
"""
|
||||
m = Hasher()
|
||||
m.update(fr.source().encode("utf-8"))
|
||||
add_data_to_hash(data, fr.filename, m)
|
||||
this_hash = m.hexdigest()
|
||||
|
||||
file_info = self.files.setdefault(rootname, FileInfo())
|
||||
|
||||
if this_hash == file_info.hash:
|
||||
# Nothing has changed to require the file to be reported again.
|
||||
return True
|
||||
else:
|
||||
# File has changed, record the latest hash and force regeneration.
|
||||
file_info.hash = this_hash
|
||||
return False
|
||||
|
||||
    def index_info(self, fname: str) -> IndexItem:
        """Get the information for index.html for `fname`."""
        return self.files.get(fname, FileInfo()).index

    def set_index_info(self, fname: str, info: IndexItem) -> None:
        """Set the information for index.html for `fname`."""
        self.files.setdefault(fname, FileInfo()).index = info


# Helpers for templates and generating HTML


def escape(t: str) -> str:
    """HTML-escape the text in `t`.

    This is only suitable for HTML text, not attributes.

    """
    # Convert HTML special chars into HTML entities.
    return t.replace("&", "&amp;").replace("<", "&lt;")


def pair(ratio: tuple[int, int]) -> str:
    """Format a pair of numbers so JavaScript can read them in an attribute."""
    return "{} {}".format(*ratio)
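
# The "numer denom" string from pair() lands in the data-ratio attributes of
# the report tables, where coverage_html.js splits it on the space to
# recompute filtered totals and percentages.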
@@ -0,0 +1,733 @@
// Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
// For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt

// Coverage.py HTML report browser code.
/*jslint browser: true, sloppy: true, vars: true, plusplus: true, maxerr: 50, indent: 4 */
/*global coverage: true, document, window, $ */

coverage = {};

// General helpers
function debounce(callback, wait) {
    let timeoutId = null;
    return function(...args) {
        clearTimeout(timeoutId);
        timeoutId = setTimeout(() => {
            callback.apply(this, args);
        }, wait);
    };
};
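// A minimal usage sketch (hypothetical, not part of this file): debounce
// collapses a burst of events into one trailing call after `wait` ms.
//   const onType = debounce(() => refresh(), 250);
//   searchBox.addEventListener("input", onType);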
function checkVisible(element) {
    const rect = element.getBoundingClientRect();
    const viewBottom = Math.max(document.documentElement.clientHeight, window.innerHeight);
    const viewTop = 30;
    return !(rect.bottom < viewTop || rect.top >= viewBottom);
}

function on_click(sel, fn) {
    const elt = document.querySelector(sel);
    if (elt) {
        elt.addEventListener("click", fn);
    }
}

// Helpers for table sorting
function getCellValue(row, column = 0) {
    const cell = row.cells[column]  // nosemgrep: eslint.detect-object-injection
    if (cell.childElementCount == 1) {
        var child = cell.firstElementChild;
        if (child.tagName === "A") {
            child = child.firstElementChild;
        }
        if (child instanceof HTMLDataElement && child.value) {
            return child.value;
        }
    }
    return cell.innerText || cell.textContent;
}

function rowComparator(rowA, rowB, column = 0) {
    let valueA = getCellValue(rowA, column);
    let valueB = getCellValue(rowB, column);
    if (!isNaN(valueA) && !isNaN(valueB)) {
        return valueA - valueB;
    }
    return valueA.localeCompare(valueB, undefined, {numeric: true});
}

function sortColumn(th) {
    // Get the current sorting direction of the selected header,
    // clear state on other headers and then set the new sorting direction.
    const currentSortOrder = th.getAttribute("aria-sort");
    [...th.parentElement.cells].forEach(header => header.setAttribute("aria-sort", "none"));
    var direction;
    if (currentSortOrder === "none") {
        direction = th.dataset.defaultSortOrder || "ascending";
    }
    else if (currentSortOrder === "ascending") {
        direction = "descending";
    }
    else {
        direction = "ascending";
    }
    th.setAttribute("aria-sort", direction);

    const column = [...th.parentElement.cells].indexOf(th)

    // Sort all rows and afterwards append them in order to move them in the DOM.
    Array.from(th.closest("table").querySelectorAll("tbody tr"))
        .sort((rowA, rowB) => rowComparator(rowA, rowB, column) * (direction === "ascending" ? 1 : -1))
        .forEach(tr => tr.parentElement.appendChild(tr));

    // Save the sort order for next time.
    if (th.id !== "region") {
        let th_id = "file";  // Sort by file if we don't have a column id
        let current_direction = direction;
        const stored_list = localStorage.getItem(coverage.INDEX_SORT_STORAGE);
        if (stored_list) {
            ({th_id, direction} = JSON.parse(stored_list))
        }
        localStorage.setItem(coverage.INDEX_SORT_STORAGE, JSON.stringify({
            "th_id": th.id,
            "direction": current_direction
        }));
        if (th.id !== th_id || document.getElementById("region")) {
            // Sort column has changed, unset sorting by function or class.
            localStorage.setItem(coverage.SORTED_BY_REGION, JSON.stringify({
                "by_region": false,
                "region_direction": current_direction
            }));
        }
    }
    else {
        // Sort column has changed to by function or class, remember that.
        localStorage.setItem(coverage.SORTED_BY_REGION, JSON.stringify({
            "by_region": true,
            "region_direction": direction
        }));
    }
}
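// Shape of the sort state persisted above (as serialized by sortColumn):
//   COVERAGE_INDEX_SORT_2: {"th_id": <column id>, "direction": "ascending"|"descending"}
//   COVERAGE_SORT_REGION:  {"by_region": <bool>, "region_direction": "ascending"|"descending"}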
// Find all the elements with data-shortcut attribute, and use them to assign a shortcut key.
coverage.assign_shortkeys = function () {
    document.querySelectorAll("[data-shortcut]").forEach(element => {
        document.addEventListener("keypress", event => {
            if (event.target.tagName.toLowerCase() === "input") {
                return; // ignore keypress from search filter
            }
            if (event.key === element.dataset.shortcut) {
                element.click();
            }
        });
    });
};

// Create the events for the filter box.
coverage.wire_up_filter = function () {
    // Populate the filter and hide100 inputs if there are saved values for them.
    const saved_filter_value = localStorage.getItem(coverage.FILTER_STORAGE);
    if (saved_filter_value) {
        document.getElementById("filter").value = saved_filter_value;
    }
    const saved_hide100_value = localStorage.getItem(coverage.HIDE100_STORAGE);
    if (saved_hide100_value) {
        document.getElementById("hide100").checked = JSON.parse(saved_hide100_value);
    }

    // Cache elements.
    const table = document.querySelector("table.index");
    const table_body_rows = table.querySelectorAll("tbody tr");
    const no_rows = document.getElementById("no_rows");

    // Observe filter keyevents.
    const filter_handler = (event => {
        // Keep running total of each metric, first index contains number of shown rows
        const totals = new Array(table.rows[0].cells.length).fill(0);
        // Accumulate the percentage as fraction
        totals[totals.length - 1] = { "numer": 0, "denom": 0 }; // nosemgrep: eslint.detect-object-injection

        var text = document.getElementById("filter").value;
        // Store filter value
        localStorage.setItem(coverage.FILTER_STORAGE, text);
        const casefold = (text === text.toLowerCase());
        const hide100 = document.getElementById("hide100").checked;
        // Store hide value.
        localStorage.setItem(coverage.HIDE100_STORAGE, JSON.stringify(hide100));

        // Hide / show elements.
        table_body_rows.forEach(row => {
            var show = false;
            // Check the text filter.
            for (let column = 0; column < totals.length; column++) {
                cell = row.cells[column];
                if (cell.classList.contains("name")) {
                    var celltext = cell.textContent;
                    if (casefold) {
                        celltext = celltext.toLowerCase();
                    }
                    if (celltext.includes(text)) {
                        show = true;
                    }
                }
            }

            // Check the "hide covered" filter.
            if (show && hide100) {
                const [numer, denom] = row.cells[row.cells.length - 1].dataset.ratio.split(" ");
                show = (numer !== denom);
            }

            if (!show) {
                // hide
                row.classList.add("hidden");
                return;
            }

            // show
            row.classList.remove("hidden");
            totals[0]++;

            for (let column = 0; column < totals.length; column++) {
                // Accumulate dynamic totals
                cell = row.cells[column]  // nosemgrep: eslint.detect-object-injection
                if (cell.classList.contains("name")) {
                    continue;
                }
                if (column === totals.length - 1) {
                    // Last column contains percentage
                    const [numer, denom] = cell.dataset.ratio.split(" ");
                    totals[column]["numer"] += parseInt(numer, 10); // nosemgrep: eslint.detect-object-injection
                    totals[column]["denom"] += parseInt(denom, 10); // nosemgrep: eslint.detect-object-injection
                }
                else {
                    totals[column] += parseInt(cell.textContent, 10); // nosemgrep: eslint.detect-object-injection
                }
            }
        });

        // Show placeholder if no rows will be displayed.
        if (!totals[0]) {
            // Show placeholder, hide table.
            no_rows.style.display = "block";
            table.style.display = "none";
            return;
        }

        // Hide placeholder, show table.
        no_rows.style.display = null;
        table.style.display = null;

        const footer = table.tFoot.rows[0];
        // Calculate new dynamic sum values based on visible rows.
        for (let column = 0; column < totals.length; column++) {
            // Get footer cell element.
            const cell = footer.cells[column]; // nosemgrep: eslint.detect-object-injection
            if (cell.classList.contains("name")) {
                continue;
            }

            // Set value into dynamic footer cell element.
            if (column === totals.length - 1) {
                // Percentage column uses the numerator and denominator,
                // and adapts to the number of decimal places.
                const match = /\.([0-9]+)/.exec(cell.textContent);
                const places = match ? match[1].length : 0;
                const { numer, denom } = totals[column]; // nosemgrep: eslint.detect-object-injection
                cell.dataset.ratio = `${numer} ${denom}`;
                // Check denom to prevent NaN if filtered files contain no statements
                cell.textContent = denom
                    ? `${(numer * 100 / denom).toFixed(places)}%`
                    : `${(100).toFixed(places)}%`;
            }
            else {
                cell.textContent = totals[column]; // nosemgrep: eslint.detect-object-injection
            }
        }
    });

    document.getElementById("filter").addEventListener("input", debounce(filter_handler));
    document.getElementById("hide100").addEventListener("input", debounce(filter_handler));

    // Trigger change event on setup, to force filter on page refresh
    // (filter value may still be present).
    document.getElementById("filter").dispatchEvent(new Event("input"));
    document.getElementById("hide100").dispatchEvent(new Event("input"));
};
coverage.FILTER_STORAGE = "COVERAGE_FILTER_VALUE";
coverage.HIDE100_STORAGE = "COVERAGE_HIDE100_VALUE";

// Set up the click-to-sort columns.
coverage.wire_up_sorting = function () {
    document.querySelectorAll("[data-sortable] th[aria-sort]").forEach(
        th => th.addEventListener("click", e => sortColumn(e.target))
    );

    // Look for a localStorage item containing previous sort settings:
    let th_id = "file", direction = "ascending";
    const stored_list = localStorage.getItem(coverage.INDEX_SORT_STORAGE);
    if (stored_list) {
        ({th_id, direction} = JSON.parse(stored_list));
    }
    let by_region = false, region_direction = "ascending";
    const sorted_by_region = localStorage.getItem(coverage.SORTED_BY_REGION);
    if (sorted_by_region) {
        ({
            by_region,
            region_direction
        } = JSON.parse(sorted_by_region));
    }

    const region_id = "region";
    if (by_region && document.getElementById(region_id)) {
        direction = region_direction;
    }
    // If we are in a page that has a column with id of "region", sort on
    // it if the last sort was by function or class.
    let th;
    if (document.getElementById(region_id)) {
        th = document.getElementById(by_region ? region_id : th_id);
    }
    else {
        th = document.getElementById(th_id);
    }
    th.setAttribute("aria-sort", direction === "ascending" ? "descending" : "ascending");
    th.click()
};

coverage.INDEX_SORT_STORAGE = "COVERAGE_INDEX_SORT_2";
coverage.SORTED_BY_REGION = "COVERAGE_SORT_REGION";

// Loaded on index.html
coverage.index_ready = function () {
    coverage.assign_shortkeys();
    coverage.wire_up_filter();
    coverage.wire_up_sorting();

    on_click(".button_prev_file", coverage.to_prev_file);
    on_click(".button_next_file", coverage.to_next_file);

    on_click(".button_show_hide_help", coverage.show_hide_help);
};

// -- pyfile stuff --

coverage.LINE_FILTERS_STORAGE = "COVERAGE_LINE_FILTERS";

coverage.pyfile_ready = function () {
    // If we're directed to a particular line number, highlight the line.
    var frag = location.hash;
    if (frag.length > 2 && frag[1] === "t") {
        document.querySelector(frag).closest(".n").classList.add("highlight");
        coverage.set_sel(parseInt(frag.substr(2), 10));
    }
    else {
        coverage.set_sel(0);
    }

    on_click(".button_toggle_run", coverage.toggle_lines);
    on_click(".button_toggle_mis", coverage.toggle_lines);
    on_click(".button_toggle_exc", coverage.toggle_lines);
    on_click(".button_toggle_par", coverage.toggle_lines);

    on_click(".button_next_chunk", coverage.to_next_chunk_nicely);
    on_click(".button_prev_chunk", coverage.to_prev_chunk_nicely);
    on_click(".button_top_of_page", coverage.to_top);
    on_click(".button_first_chunk", coverage.to_first_chunk);

    on_click(".button_prev_file", coverage.to_prev_file);
    on_click(".button_next_file", coverage.to_next_file);
    on_click(".button_to_index", coverage.to_index);

    on_click(".button_show_hide_help", coverage.show_hide_help);

    coverage.filters = undefined;
    try {
        coverage.filters = localStorage.getItem(coverage.LINE_FILTERS_STORAGE);
    } catch(err) {}

    if (coverage.filters) {
        coverage.filters = JSON.parse(coverage.filters);
    }
    else {
        coverage.filters = {run: false, exc: true, mis: true, par: true};
    }

    for (cls in coverage.filters) {
        coverage.set_line_visibilty(cls, coverage.filters[cls]); // nosemgrep: eslint.detect-object-injection
    }

    coverage.assign_shortkeys();
    coverage.init_scroll_markers();
    coverage.wire_up_sticky_header();

    document.querySelectorAll("[id^=ctxs]").forEach(
        cbox => cbox.addEventListener("click", coverage.expand_contexts)
    );

    // Rebuild scroll markers when the window height changes.
    window.addEventListener("resize", coverage.build_scroll_markers);
};

coverage.toggle_lines = function (event) {
    const btn = event.target.closest("button");
    const category = btn.value
    const show = !btn.classList.contains("show_" + category);
    coverage.set_line_visibilty(category, show);
    coverage.build_scroll_markers();
    coverage.filters[category] = show;
    try {
        localStorage.setItem(coverage.LINE_FILTERS_STORAGE, JSON.stringify(coverage.filters));
    } catch(err) {}
};

coverage.set_line_visibilty = function (category, should_show) {
    const cls = "show_" + category;
    const btn = document.querySelector(".button_toggle_" + category);
    if (btn) {
        if (should_show) {
            document.querySelectorAll("#source ." + category).forEach(e => e.classList.add(cls));
            btn.classList.add(cls);
        }
        else {
            document.querySelectorAll("#source ." + category).forEach(e => e.classList.remove(cls));
            btn.classList.remove(cls);
        }
    }
};

// Return the nth line div.
coverage.line_elt = function (n) {
    return document.getElementById("t" + n)?.closest("p");
};

// Set the selection.  b and e are line numbers.
coverage.set_sel = function (b, e) {
    // The first line selected.
    coverage.sel_begin = b;
    // The next line not selected.
    coverage.sel_end = (e === undefined) ? b+1 : e;
};
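// Worked example: set_sel(10, 15) selects the half-open range of lines
// 10-14, with sel_end pointing at the first unselected line; set_sel(10)
// selects line 10 alone.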
coverage.to_top = function () {
    coverage.set_sel(0, 1);
    coverage.scroll_window(0);
};

coverage.to_first_chunk = function () {
    coverage.set_sel(0, 1);
    coverage.to_next_chunk();
};

coverage.to_prev_file = function () {
    window.location = document.getElementById("prevFileLink").href;
}

coverage.to_next_file = function () {
    window.location = document.getElementById("nextFileLink").href;
}

coverage.to_index = function () {
    location.href = document.getElementById("indexLink").href;
}

coverage.show_hide_help = function () {
    const helpCheck = document.getElementById("help_panel_state")
    helpCheck.checked = !helpCheck.checked;
}

// Return a string indicating what kind of chunk this line belongs to,
// or null if not a chunk.
coverage.chunk_indicator = function (line_elt) {
    const classes = line_elt?.className;
    if (!classes) {
        return null;
    }
    const match = classes.match(/\bshow_\w+\b/);
    if (!match) {
        return null;
    }
    return match[0];
};
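// For example, a line rendered as <p class="run show_run"> yields the
// indicator "show_run"; adjacent lines with the same indicator form one
// chunk for the j/k navigation below.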
coverage.to_next_chunk = function () {
    const c = coverage;

    // Find the start of the next colored chunk.
    var probe = c.sel_end;
    var chunk_indicator, probe_line;
    while (true) {
        probe_line = c.line_elt(probe);
        if (!probe_line) {
            return;
        }
        chunk_indicator = c.chunk_indicator(probe_line);
        if (chunk_indicator) {
            break;
        }
        probe++;
    }

    // There's a next chunk, `probe` points to it.
    var begin = probe;

    // Find the end of this chunk.
    var next_indicator = chunk_indicator;
    while (next_indicator === chunk_indicator) {
        probe++;
        probe_line = c.line_elt(probe);
        next_indicator = c.chunk_indicator(probe_line);
    }
    c.set_sel(begin, probe);
    c.show_selection();
};

coverage.to_prev_chunk = function () {
    const c = coverage;

    // Find the end of the prev colored chunk.
    var probe = c.sel_begin-1;
    var probe_line = c.line_elt(probe);
    if (!probe_line) {
        return;
    }
    var chunk_indicator = c.chunk_indicator(probe_line);
    while (probe > 1 && !chunk_indicator) {
        probe--;
        probe_line = c.line_elt(probe);
        if (!probe_line) {
            return;
        }
        chunk_indicator = c.chunk_indicator(probe_line);
    }

    // There's a prev chunk, `probe` points to its last line.
    var end = probe+1;

    // Find the beginning of this chunk.
    var prev_indicator = chunk_indicator;
    while (prev_indicator === chunk_indicator) {
        probe--;
        if (probe <= 0) {
            return;
        }
        probe_line = c.line_elt(probe);
        prev_indicator = c.chunk_indicator(probe_line);
    }
    c.set_sel(probe+1, end);
    c.show_selection();
};

// Returns 0, 1, or 2: how many of the two ends of the selection are on
// the screen right now?
coverage.selection_ends_on_screen = function () {
    if (coverage.sel_begin === 0) {
        return 0;
    }

    const begin = coverage.line_elt(coverage.sel_begin);
    const end = coverage.line_elt(coverage.sel_end-1);

    return (
        (checkVisible(begin) ? 1 : 0)
        + (checkVisible(end) ? 1 : 0)
    );
};
coverage.to_next_chunk_nicely = function () {
    if (coverage.selection_ends_on_screen() === 0) {
        // The selection is entirely off the screen:
        // Set the top line on the screen as selection.

        // This will select the top-left of the viewport
        // As this is most likely the span with the line number we take the parent
        const line = document.elementFromPoint(0, 0).parentElement;
        if (line.parentElement !== document.getElementById("source")) {
            // The element is not a source line but the header or similar
            coverage.select_line_or_chunk(1);
        }
        else {
            // We extract the line number from the id
            coverage.select_line_or_chunk(parseInt(line.id.substring(1), 10));
        }
    }
    coverage.to_next_chunk();
};

coverage.to_prev_chunk_nicely = function () {
    if (coverage.selection_ends_on_screen() === 0) {
        // The selection is entirely off the screen:
        // Set the lowest line on the screen as selection.

        // This will select the bottom-left of the viewport
        // As this is most likely the span with the line number we take the parent
        // elementFromPoint takes (x, y), so probe x=0 at the bottom of the view.
        const line = document.elementFromPoint(0, document.documentElement.clientHeight-1).parentElement;
        if (line.parentElement !== document.getElementById("source")) {
            // The element is not a source line but the header or similar
            coverage.select_line_or_chunk(coverage.lines_len);
        }
        else {
            // We extract the line number from the id
            coverage.select_line_or_chunk(parseInt(line.id.substring(1), 10));
        }
    }
    coverage.to_prev_chunk();
};

// Select line number lineno, or if it is in a colored chunk, select the
// entire chunk
coverage.select_line_or_chunk = function (lineno) {
    var c = coverage;
    var probe_line = c.line_elt(lineno);
    if (!probe_line) {
        return;
    }
    var the_indicator = c.chunk_indicator(probe_line);
    if (the_indicator) {
        // The line is in a highlighted chunk.
        // Search backward for the first line.
        var probe = lineno;
        var indicator = the_indicator;
        while (probe > 0 && indicator === the_indicator) {
            probe--;
            probe_line = c.line_elt(probe);
            if (!probe_line) {
                break;
            }
            indicator = c.chunk_indicator(probe_line);
        }
        var begin = probe + 1;

        // Search forward for the last line.
        probe = lineno;
        indicator = the_indicator;
        while (indicator === the_indicator) {
            probe++;
            probe_line = c.line_elt(probe);
            indicator = c.chunk_indicator(probe_line);
        }

        coverage.set_sel(begin, probe);
    }
    else {
        coverage.set_sel(lineno);
    }
};

coverage.show_selection = function () {
    // Highlight the lines in the chunk
    document.querySelectorAll("#source .highlight").forEach(e => e.classList.remove("highlight"));
    for (let probe = coverage.sel_begin; probe < coverage.sel_end; probe++) {
        coverage.line_elt(probe).querySelector(".n").classList.add("highlight");
    }

    coverage.scroll_to_selection();
};

coverage.scroll_to_selection = function () {
    // Scroll the page if the chunk isn't fully visible.
    if (coverage.selection_ends_on_screen() < 2) {
        const element = coverage.line_elt(coverage.sel_begin);
        coverage.scroll_window(element.offsetTop - 60);
    }
};

coverage.scroll_window = function (to_pos) {
    window.scroll({top: to_pos, behavior: "smooth"});
};

coverage.init_scroll_markers = function () {
    // Init some variables
    coverage.lines_len = document.querySelectorAll("#source > p").length;

    // Build html
    coverage.build_scroll_markers();
};

coverage.build_scroll_markers = function () {
    const temp_scroll_marker = document.getElementById("scroll_marker")
    if (temp_scroll_marker) temp_scroll_marker.remove();
    // Don't build markers if the window has no scroll bar.
    if (document.body.scrollHeight <= window.innerHeight) {
        return;
    }

    const marker_scale = window.innerHeight / document.body.scrollHeight;
    const line_height = Math.min(Math.max(3, window.innerHeight / coverage.lines_len), 10);

    let previous_line = -99, last_mark, last_top;

    const scroll_marker = document.createElement("div");
    scroll_marker.id = "scroll_marker";
    document.getElementById("source").querySelectorAll(
        "p.show_run, p.show_mis, p.show_exc, p.show_par"
    ).forEach(element => {
        const line_top = Math.floor(element.offsetTop * marker_scale);
        const line_number = parseInt(element.querySelector(".n a").id.substr(1));

        if (line_number === previous_line + 1) {
            // If this solid missed block just make previous mark higher.
            last_mark.style.height = `${line_top + line_height - last_top}px`;
        }
        else {
            // Add colored line in scroll_marker block.
            last_mark = document.createElement("div");
            last_mark.id = `m${line_number}`;
            last_mark.classList.add("marker");
            last_mark.style.height = `${line_height}px`;
            last_mark.style.top = `${line_top}px`;
            scroll_marker.append(last_mark);
            last_top = line_top;
        }

        previous_line = line_number;
    });

    // Append last to prevent layout calculation
    document.body.append(scroll_marker);
};
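// Worked example of the marker math above (numbers assumed): with
// innerHeight 800 and scrollHeight 8000, marker_scale is 0.1, so a flagged
// line at offsetTop 4000 gets a marker 400px down the fixed gutter;
// consecutive flagged lines merge into one taller marker.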
coverage.wire_up_sticky_header = function () {
    const header = document.querySelector("header");
    const header_bottom = (
        header.querySelector(".content h2").getBoundingClientRect().top -
        header.getBoundingClientRect().top
    );

    function updateHeader() {
        if (window.scrollY > header_bottom) {
            header.classList.add("sticky");
        }
        else {
            header.classList.remove("sticky");
        }
    }

    window.addEventListener("scroll", updateHeader);
    updateHeader();
};

coverage.expand_contexts = function (e) {
    var ctxs = e.target.parentNode.querySelector(".ctxs");

    if (!ctxs.classList.contains("expanded")) {
        var ctxs_text = ctxs.textContent;
        var width = Number(ctxs_text[0]);
        ctxs.textContent = "";
        for (var i = 1; i < ctxs_text.length; i += width) {
            key = ctxs_text.substring(i, i + width).trim();
            ctxs.appendChild(document.createTextNode(contexts[key]));
            ctxs.appendChild(document.createElement("br"));
        }
        ctxs.classList.add("expanded");
    }
};
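// Hedged worked example of the packed format above: a .ctxs text of
// "3  1  2" means each key is 3 characters wide, so the trimmed keys "1"
// and "2" are looked up in the page-level `contexts` table and rendered
// one per line.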
document.addEventListener("DOMContentLoaded", () => {
    if (document.body.classList.contains("indexfile")) {
        coverage.index_ready();
    }
    else {
        coverage.pyfile_ready();
    }
});
Binary file not shown.
After Width: | Height: | Size: 1.7 KiB
164
venv/lib/python3.12/site-packages/coverage/htmlfiles/index.html
Normal file
@@ -0,0 +1,164 @@
{# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 #}
{# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt #}

<!DOCTYPE html>
<html lang="en">
<head>
    <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
    <title>{{ title|escape }}</title>
    <link rel="icon" sizes="32x32" href="{{ statics.favicon_32_png }}">
    <link rel="stylesheet" href="{{ statics.style_css }}" type="text/css">
    {% if extra_css %}
        <link rel="stylesheet" href="{{ statics.extra_css }}" type="text/css">
    {% endif %}
    <script src="{{ statics.coverage_html_js }}" defer></script>
</head>
<body class="indexfile">

<header>
    <div class="content">
        <h1>{{ title|escape }}:
            <span class="pc_cov">{{totals.pc_covered_str}}%</span>
        </h1>

        <aside id="help_panel_wrapper">
            <input id="help_panel_state" type="checkbox">
            <label for="help_panel_state">
                <img id="keyboard_icon" src="{{ statics.keybd_closed_png }}" alt="Show/hide keyboard shortcuts">
            </label>
            <div id="help_panel">
                <p class="legend">Shortcuts on this page</p>
                <div class="keyhelp">
                    <p>
                        <kbd>f</kbd>
                        {% if region_noun %}
                            <kbd>n</kbd>
                        {% endif %}
                        <kbd>s</kbd>
                        <kbd>m</kbd>
                        <kbd>x</kbd>
                        {% if has_arcs %}
                            <kbd>b</kbd>
                            <kbd>p</kbd>
                        {% endif %}
                        <kbd>c</kbd>
                        change column sorting
                    </p>
                    <p>
                        <kbd>[</kbd>
                        <kbd>]</kbd>
                        prev/next file
                    </p>
                    <p>
                        <kbd>?</kbd> show/hide this help
                    </p>
                </div>
            </div>
        </aside>

        <form id="filter_container">
            <input id="filter" type="text" value="" placeholder="filter...">
            <div>
                <input id="hide100" type="checkbox" {% if skip_covered %}checked disabled {% endif %}>
                <label for="hide100">hide covered</label>
            </div>
        </form>

        <h2>
            {% for ibtn in index_buttons %}
                <a class="button{% if ibtn.current %} current{% endif %}"{% if ibtn.url %} href="{{ ibtn.url }}"{% endif %}>{{ ibtn.label }}</a>{#-#}
            {% endfor %}
        </h2>

        <p class="text">
            <a class="nav" href="{{__url__}}">coverage.py v{{__version__}}</a>,
            created at {{ time_stamp }}
        </p>
    </div>
</header>

<main id="index">
    <table class="index" data-sortable>
        <thead>
            {# The title="" attr doesn't work in Safari. #}
            <tr class="tablehead" title="Click to sort">
                <th id="file" class="name left" aria-sort="none" data-shortcut="f">File<span class="arrows"></span></th>
                {% if region_noun %}
                    <th id="region" class="name left" aria-sort="none" data-default-sort-order="ascending" data-shortcut="n">{{ region_noun }}<span class="arrows"></span></th>
                {% endif %}
                <th id="statements" aria-sort="none" data-default-sort-order="descending" data-shortcut="s">statements<span class="arrows"></span></th>
                <th id="missing" aria-sort="none" data-default-sort-order="descending" data-shortcut="m">missing<span class="arrows"></span></th>
                <th id="excluded" aria-sort="none" data-default-sort-order="descending" data-shortcut="x">excluded<span class="arrows"></span></th>
                {% if has_arcs %}
                    <th id="branches" aria-sort="none" data-default-sort-order="descending" data-shortcut="b">branches<span class="arrows"></span></th>
                    <th id="partial" aria-sort="none" data-default-sort-order="descending" data-shortcut="p">partial<span class="arrows"></span></th>
                {% endif %}
                <th id="coverage" class="right" aria-sort="none" data-shortcut="c">coverage<span class="arrows"></span></th>
            </tr>
        </thead>
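        {# Each <th> above carries data-shortcut (bound to a key by
           assign_shortkeys in coverage_html.js) and data-default-sort-order
           (read by sortColumn on the first click). #}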
        <tbody>
            {% for region in regions %}
                <tr class="region">
                    <td class="name left"><a href="{{region.url}}">{{region.file}}</a></td>
                    {% if region_noun %}
                        <td class="name left"><a href="{{region.url}}">{{region.description}}</a></td>
                    {% endif %}
                    <td>{{region.nums.n_statements}}</td>
                    <td>{{region.nums.n_missing}}</td>
                    <td>{{region.nums.n_excluded}}</td>
                    {% if has_arcs %}
                        <td>{{region.nums.n_branches}}</td>
                        <td>{{region.nums.n_partial_branches}}</td>
                    {% endif %}
                    <td class="right" data-ratio="{{region.nums.ratio_covered|pair}}">{{region.nums.pc_covered_str}}%</td>
                </tr>
            {% endfor %}
        </tbody>
        <tfoot>
            <tr class="total">
                <td class="name left">Total</td>
                {% if region_noun %}
                    <td class="name left">&nbsp;</td>
                {% endif %}
                <td>{{totals.n_statements}}</td>
                <td>{{totals.n_missing}}</td>
                <td>{{totals.n_excluded}}</td>
                {% if has_arcs %}
                    <td>{{totals.n_branches}}</td>
                    <td>{{totals.n_partial_branches}}</td>
                {% endif %}
                <td class="right" data-ratio="{{totals.ratio_covered|pair}}">{{totals.pc_covered_str}}%</td>
            </tr>
        </tfoot>
    </table>

    <p id="no_rows">
        No items found using the specified filter.
    </p>

    {% if skipped_covered_msg %}
        <p>{{ skipped_covered_msg }}</p>
    {% endif %}
    {% if skipped_empty_msg %}
        <p>{{ skipped_empty_msg }}</p>
    {% endif %}
</main>

<footer>
    <div class="content">
        <p>
            <a class="nav" href="{{__url__}}">coverage.py v{{__version__}}</a>,
            created at {{ time_stamp }}
        </p>
    </div>
    <aside class="hidden">
        <a id="prevFileLink" class="nav" href="{{ final_html }}"></a>
        <a id="nextFileLink" class="nav" href="{{ first_html }}"></a>
        <button type="button" class="button_prev_file" data-shortcut="["></button>
        <button type="button" class="button_next_file" data-shortcut="]"></button>
        <button type="button" class="button_show_hide_help" data-shortcut="?"></button>
    </aside>
</footer>

</body>
</html>
Binary file not shown.
After Width: | Height: | Size: 8.8 KiB
149
venv/lib/python3.12/site-packages/coverage/htmlfiles/pyfile.html
Normal file
@@ -0,0 +1,149 @@
{# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 #}
{# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt #}

<!DOCTYPE html>
<html lang="en">
<head>
    <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
    <title>Coverage for {{relative_filename|escape}}: {{nums.pc_covered_str}}%</title>
    <link rel="icon" sizes="32x32" href="{{ statics.favicon_32_png }}">
    <link rel="stylesheet" href="{{ statics.style_css }}" type="text/css">
    {% if extra_css %}
        <link rel="stylesheet" href="{{ statics.extra_css }}" type="text/css">
    {% endif %}

    {% if contexts_json %}
        <script type="text/javascript">
            contexts = {{ contexts_json }}
        </script>
    {% endif %}

    <script src="{{ statics.coverage_html_js }}" defer></script>
</head>
<body class="pyfile">

<header>
    <div class="content">
        <h1>
            <span class="text">Coverage for </span><b>{{relative_filename|escape}}</b>:
            <span class="pc_cov">{{nums.pc_covered_str}}%</span>
        </h1>

        <aside id="help_panel_wrapper">
            <input id="help_panel_state" type="checkbox">
            <label for="help_panel_state">
                <img id="keyboard_icon" src="{{ statics.keybd_closed_png }}" alt="Show/hide keyboard shortcuts">
            </label>
            <div id="help_panel">
                <p class="legend">Shortcuts on this page</p>
                <div class="keyhelp">
                    <p>
                        <kbd>r</kbd>
                        <kbd>m</kbd>
                        <kbd>x</kbd>
                        {% if has_arcs %}
                            <kbd>p</kbd>
                        {% endif %}
                        toggle line displays
                    </p>
                    <p>
                        <kbd>j</kbd>
                        <kbd>k</kbd>
                        next/prev highlighted chunk
                    </p>
                    <p>
                        <kbd>0</kbd> (zero) top of page
                    </p>
                    <p>
                        <kbd>1</kbd> (one) first highlighted chunk
                    </p>
                    <p>
                        <kbd>[</kbd>
                        <kbd>]</kbd>
                        prev/next file
                    </p>
                    <p>
                        <kbd>u</kbd> up to the index
                    </p>
                    <p>
                        <kbd>?</kbd> show/hide this help
                    </p>
                </div>
            </div>
        </aside>

        <h2>
            <span class="text">{{nums.n_statements}} statements </span>
            <button type="button" class="{{category.run}} button_toggle_run" value="run" data-shortcut="r" title="Toggle lines run">{{nums.n_executed}}<span class="text"> run</span></button>
            <button type="button" class="{{category.mis}} button_toggle_mis" value="mis" data-shortcut="m" title="Toggle lines missing">{{nums.n_missing}}<span class="text"> missing</span></button>
            <button type="button" class="{{category.exc}} button_toggle_exc" value="exc" data-shortcut="x" title="Toggle lines excluded">{{nums.n_excluded}}<span class="text"> excluded</span></button>
            {% if has_arcs %}
                <button type="button" class="{{category.par}} button_toggle_par" value="par" data-shortcut="p" title="Toggle lines partially run">{{nums.n_partial_branches}}<span class="text"> partial</span></button>
            {% endif %}
        </h2>

        <p class="text">
            <a id="prevFileLink" class="nav" href="{{ prev_html }}">« prev</a>
            <a id="indexLink" class="nav" href="index.html">^ index</a>
            <a id="nextFileLink" class="nav" href="{{ next_html }}">» next</a>

            <a class="nav" href="{{__url__}}">coverage.py v{{__version__}}</a>,
            created at {{ time_stamp }}
        </p>

        <aside class="hidden">
            <button type="button" class="button_next_chunk" data-shortcut="j"></button>
            <button type="button" class="button_prev_chunk" data-shortcut="k"></button>
            <button type="button" class="button_top_of_page" data-shortcut="0"></button>
            <button type="button" class="button_first_chunk" data-shortcut="1"></button>
            <button type="button" class="button_prev_file" data-shortcut="["></button>
            <button type="button" class="button_next_file" data-shortcut="]"></button>
            <button type="button" class="button_to_index" data-shortcut="u"></button>
            <button type="button" class="button_show_hide_help" data-shortcut="?"></button>
        </aside>
    </div>
</header>

<main id="source">
    {% for line in lines -%}
        {% joined %}
        <p class="{{line.css_class}}">
            <span class="n"><a id="t{{line.number}}" href="#t{{line.number}}">{{line.number}}</a></span>
            <span class="t">{{line.html}} </span>
            {% if line.context_list %}
                <input type="checkbox" id="ctxs{{line.number}}">
            {% endif %}
            {# Things that should float right in the line. #}
            <span class="r">
                {% if line.annotate %}
                    <span class="annotate short">{{line.annotate}}</span>
                    <span class="annotate long">{{line.annotate_long}}</span>
                {% endif %}
                {% if line.contexts %}
                    <label for="ctxs{{line.number}}" class="ctx">{{ line.contexts_label }}</label>
                {% endif %}
            </span>
            {# Things that should appear below the line. #}
            {% if line.context_str %}
                <span class="ctxs">{{ line.context_str }}</span>
            {% endif %}
        </p>
        {% endjoined %}
    {% endfor %}
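    {# Each line's <a id="t{{line.number}}"> anchor is what coverage_html.js
       resolves in line_elt() and in the "#tNN" URL-fragment highlighting. #}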
</main>

<footer>
    <div class="content">
        <p>
            <a class="nav" href="{{ prev_html }}">« prev</a>
            <a class="nav" href="index.html">^ index</a>
            <a class="nav" href="{{ next_html }}">» next</a>

            <a class="nav" href="{{__url__}}">coverage.py v{{__version__}}</a>,
            created at {{ time_stamp }}
        </p>
    </div>
</footer>

</body>
</html>
377
venv/lib/python3.12/site-packages/coverage/htmlfiles/style.css
Normal file
@@ -0,0 +1,377 @@
@charset "UTF-8";
/* Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 */
/* For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt */
/* Don't edit this .css file. Edit the .scss file instead! */
html, body, h1, h2, h3, p, table, td, th { margin: 0; padding: 0; border: 0; font-weight: inherit; font-style: inherit; font-size: 100%; font-family: inherit; vertical-align: baseline; }

body { font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Ubuntu, Cantarell, "Helvetica Neue", sans-serif; font-size: 1em; background: #fff; color: #000; }

@media (prefers-color-scheme: dark) { body { background: #1e1e1e; } }

@media (prefers-color-scheme: dark) { body { color: #eee; } }

html > body { font-size: 16px; }

a:active, a:focus { outline: 2px dashed #007acc; }

p { font-size: .875em; line-height: 1.4em; }

table { border-collapse: collapse; }

td { vertical-align: top; }

table tr.hidden { display: none !important; }

p#no_rows { display: none; font-size: 1.15em; font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Ubuntu, Cantarell, "Helvetica Neue", sans-serif; }

a.nav { text-decoration: none; color: inherit; }

a.nav:hover { text-decoration: underline; color: inherit; }

.hidden { display: none; }

header { background: #f8f8f8; width: 100%; z-index: 2; border-bottom: 1px solid #ccc; }

@media (prefers-color-scheme: dark) { header { background: black; } }

@media (prefers-color-scheme: dark) { header { border-color: #333; } }

header .content { padding: 1rem 3.5rem; }

header h2 { margin-top: .5em; font-size: 1em; }

header h2 a.button { font-family: inherit; font-size: inherit; border: 1px solid; border-radius: .2em; background: #eee; color: inherit; text-decoration: none; padding: .1em .5em; margin: 1px calc(.1em + 1px); cursor: pointer; border-color: #ccc; }

@media (prefers-color-scheme: dark) { header h2 a.button { background: #333; } }

@media (prefers-color-scheme: dark) { header h2 a.button { border-color: #444; } }

header h2 a.button.current { border: 2px solid; background: #fff; border-color: #999; cursor: default; }

@media (prefers-color-scheme: dark) { header h2 a.button.current { background: #1e1e1e; } }

@media (prefers-color-scheme: dark) { header h2 a.button.current { border-color: #777; } }

header p.text { margin: .5em 0 -.5em; color: #666; font-style: italic; }

@media (prefers-color-scheme: dark) { header p.text { color: #aaa; } }

header.sticky { position: fixed; left: 0; right: 0; height: 2.5em; }

header.sticky .text { display: none; }

header.sticky h1, header.sticky h2 { font-size: 1em; margin-top: 0; display: inline-block; }

header.sticky .content { padding: 0.5rem 3.5rem; }

header.sticky .content p { font-size: 1em; }

header.sticky ~ #source { padding-top: 6.5em; }

main { position: relative; z-index: 1; }

footer { margin: 1rem 3.5rem; }

footer .content { padding: 0; color: #666; font-style: italic; }

@media (prefers-color-scheme: dark) { footer .content { color: #aaa; } }

#index { margin: 1rem 0 0 3.5rem; }

h1 { font-size: 1.25em; display: inline-block; }

#filter_container { float: right; margin: 0 2em 0 0; line-height: 1.66em; }

#filter_container #filter { width: 10em; padding: 0.2em 0.5em; border: 2px solid #ccc; background: #fff; color: #000; }

@media (prefers-color-scheme: dark) { #filter_container #filter { border-color: #444; } }

@media (prefers-color-scheme: dark) { #filter_container #filter { background: #1e1e1e; } }

@media (prefers-color-scheme: dark) { #filter_container #filter { color: #eee; } }

#filter_container #filter:focus { border-color: #007acc; }

#filter_container :disabled ~ label { color: #ccc; }

@media (prefers-color-scheme: dark) { #filter_container :disabled ~ label { color: #444; } }

#filter_container label { font-size: .875em; color: #666; }

@media (prefers-color-scheme: dark) { #filter_container label { color: #aaa; } }

header button { font-family: inherit; font-size: inherit; border: 1px solid; border-radius: .2em; background: #eee; color: inherit; text-decoration: none; padding: .1em .5em; margin: 1px calc(.1em + 1px); cursor: pointer; border-color: #ccc; }

@media (prefers-color-scheme: dark) { header button { background: #333; } }

@media (prefers-color-scheme: dark) { header button { border-color: #444; } }

header button:active, header button:focus { outline: 2px dashed #007acc; }

header button.run { background: #eeffee; }

@media (prefers-color-scheme: dark) { header button.run { background: #373d29; } }

header button.run.show_run { background: #dfd; border: 2px solid #00dd00; margin: 0 .1em; }

@media (prefers-color-scheme: dark) { header button.run.show_run { background: #373d29; } }

header button.mis { background: #ffeeee; }

@media (prefers-color-scheme: dark) { header button.mis { background: #4b1818; } }

header button.mis.show_mis { background: #fdd; border: 2px solid #ff0000; margin: 0 .1em; }

@media (prefers-color-scheme: dark) { header button.mis.show_mis { background: #4b1818; } }

header button.exc { background: #f7f7f7; }

@media (prefers-color-scheme: dark) { header button.exc { background: #333; } }

header button.exc.show_exc { background: #eee; border: 2px solid #808080; margin: 0 .1em; }

@media (prefers-color-scheme: dark) { header button.exc.show_exc { background: #333; } }

header button.par { background: #ffffd5; }

@media (prefers-color-scheme: dark) { header button.par { background: #650; } }

header button.par.show_par { background: #ffa; border: 2px solid #bbbb00; margin: 0 .1em; }

@media (prefers-color-scheme: dark) { header button.par.show_par { background: #650; } }

#help_panel, #source p .annotate.long { display: none; position: absolute; z-index: 999; background: #ffffcc; border: 1px solid #888; border-radius: .2em; color: #333; padding: .25em .5em; }

#source p .annotate.long { white-space: normal; float: right; top: 1.75em; right: 1em; height: auto; }

#help_panel_wrapper { float: right; position: relative; }

#keyboard_icon { margin: 5px; }

#help_panel_state { display: none; }

#help_panel { top: 25px; right: 0; padding: .75em; border: 1px solid #883; color: #333; }

#help_panel .keyhelp p { margin-top: .75em; }

#help_panel .legend { font-style: italic; margin-bottom: 1em; }

.indexfile #help_panel { width: 25em; }

.pyfile #help_panel { width: 18em; }

#help_panel_state:checked ~ #help_panel { display: block; }

kbd { border: 1px solid black; border-color: #888 #333 #333 #888; padding: .1em .35em; font-family: SFMono-Regular, Menlo, Monaco, Consolas, monospace; font-weight: bold; background: #eee; border-radius: 3px; }

#source { padding: 1em 0 1em 3.5rem; font-family: SFMono-Regular, Menlo, Monaco, Consolas, monospace; }

#source p { position: relative; white-space: pre; }

#source p * { box-sizing: border-box; }

#source p .n { float: left; text-align: right; width: 3.5rem; box-sizing: border-box; margin-left: -3.5rem; padding-right: 1em; color: #999; user-select: none; }

@media (prefers-color-scheme: dark) { #source p .n { color: #777; } }
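/* Note: the negative margin on #source p .n above hangs the 3.5rem number
   gutter in #source's left padding, and user-select: none keeps the line
   numbers out of copied text. */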
#source p .n.highlight { background: #ffdd00; }

#source p .n a { scroll-margin-top: 6em; text-decoration: none; color: #999; }

@media (prefers-color-scheme: dark) { #source p .n a { color: #777; } }

#source p .n a:hover { text-decoration: underline; color: #999; }

@media (prefers-color-scheme: dark) { #source p .n a:hover { color: #777; } }

#source p .t { display: inline-block; width: 100%; box-sizing: border-box; margin-left: -.5em; padding-left: 0.3em; border-left: 0.2em solid #fff; }

@media (prefers-color-scheme: dark) { #source p .t { border-color: #1e1e1e; } }

#source p .t:hover { background: #f2f2f2; }

@media (prefers-color-scheme: dark) { #source p .t:hover { background: #282828; } }

#source p .t:hover ~ .r .annotate.long { display: block; }

#source p .t .com { color: #008000; font-style: italic; line-height: 1px; }

@media (prefers-color-scheme: dark) { #source p .t .com { color: #6a9955; } }

#source p .t .key { font-weight: bold; line-height: 1px; }

#source p .t .str, #source p .t .fst { color: #0451a5; }

@media (prefers-color-scheme: dark) { #source p .t .str, #source p .t .fst { color: #9cdcfe; } }

#source p.mis .t { border-left: 0.2em solid #ff0000; }

#source p.mis.show_mis .t { background: #fdd; }

@media (prefers-color-scheme: dark) { #source p.mis.show_mis .t { background: #4b1818; } }

#source p.mis.show_mis .t:hover { background: #f2d2d2; }

@media (prefers-color-scheme: dark) { #source p.mis.show_mis .t:hover { background: #532323; } }

#source p.mis.mis2 .t { border-left: 0.2em dotted #ff0000; }

#source p.mis.mis2.show_mis .t { background: #ffeeee; }

@media (prefers-color-scheme: dark) { #source p.mis.mis2.show_mis .t { background: #351b1b; } }

#source p.mis.mis2.show_mis .t:hover { background: #f2d2d2; }

@media (prefers-color-scheme: dark) { #source p.mis.mis2.show_mis .t:hover { background: #532323; } }

#source p.run .t { border-left: 0.2em solid #00dd00; }

#source p.run.show_run .t { background: #dfd; }

@media (prefers-color-scheme: dark) { #source p.run.show_run .t { background: #373d29; } }

#source p.run.show_run .t:hover { background: #d2f2d2; }

@media (prefers-color-scheme: dark) { #source p.run.show_run .t:hover { background: #404633; } }

#source p.run.run2 .t { border-left: 0.2em dotted #00dd00; }

#source p.run.run2.show_run .t { background: #eeffee; }

@media (prefers-color-scheme: dark) { #source p.run.run2.show_run .t { background: #2b2e24; } }

#source p.run.run2.show_run .t:hover { background: #d2f2d2; }

@media (prefers-color-scheme: dark) { #source p.run.run2.show_run .t:hover { background: #404633; } }

#source p.exc .t { border-left: 0.2em solid #808080; }

#source p.exc.show_exc .t { background: #eee; }

@media (prefers-color-scheme: dark) { #source p.exc.show_exc .t { background: #333; } }

#source p.exc.show_exc .t:hover { background: #e2e2e2; }

@media (prefers-color-scheme: dark) { #source p.exc.show_exc .t:hover { background: #3c3c3c; } }

#source p.exc.exc2 .t { border-left: 0.2em dotted #808080; }

#source p.exc.exc2.show_exc .t { background: #f7f7f7; }

@media (prefers-color-scheme: dark) { #source p.exc.exc2.show_exc .t { background: #292929; } }

#source p.exc.exc2.show_exc .t:hover { background: #e2e2e2; }

@media (prefers-color-scheme: dark) { #source p.exc.exc2.show_exc .t:hover { background: #3c3c3c; } }

#source p.par .t { border-left: 0.2em solid #bbbb00; }

#source p.par.show_par .t { background: #ffa; }

@media (prefers-color-scheme: dark) { #source p.par.show_par .t { background: #650; } }

#source p.par.show_par .t:hover { background: #f2f2a2; }

@media (prefers-color-scheme: dark) { #source p.par.show_par .t:hover { background: #6d5d0c; } }

#source p.par.par2 .t { border-left: 0.2em dotted #bbbb00; }

#source p.par.par2.show_par .t { background: #ffffd5; }

@media (prefers-color-scheme: dark) { #source p.par.par2.show_par .t { background: #423a0f; } }

#source p.par.par2.show_par .t:hover { background: #f2f2a2; }

@media (prefers-color-scheme: dark) { #source p.par.par2.show_par .t:hover { background: #6d5d0c; } }

#source p .r { position: absolute; top: 0; right: 2.5em; font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Ubuntu, Cantarell, "Helvetica Neue", sans-serif; }

#source p .annotate { font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Ubuntu, Cantarell, "Helvetica Neue", sans-serif; color: #666; padding-right: .5em; }

@media (prefers-color-scheme: dark) { #source p .annotate { color: #ddd; } }

#source p .annotate.short:hover ~ .long { display: block; }

#source p .annotate.long { width: 30em; right: 2.5em; }

#source p input { display: none; }

#source p input ~ .r label.ctx { cursor: pointer; border-radius: .25em; }

#source p input ~ .r label.ctx::before { content: "▶ "; }

#source p input ~ .r label.ctx:hover { background: #e8f4ff; color: #666; }

@media (prefers-color-scheme: dark) { #source p input ~ .r label.ctx:hover { background: #0f3a42; } }

@media (prefers-color-scheme: dark) { #source p input ~ .r label.ctx:hover { color: #aaa; } }

#source p input:checked ~ .r label.ctx { background: #d0e8ff; color: #666; border-radius: .75em .75em 0 0; padding: 0 .5em; margin: -.25em 0; }

@media (prefers-color-scheme: dark) { #source p input:checked ~ .r label.ctx { background: #056; } }

@media (prefers-color-scheme: dark) { #source p input:checked ~ .r label.ctx { color: #aaa; } }

#source p input:checked ~ .r label.ctx::before { content: "▼ "; }

#source p input:checked ~ .ctxs { padding: .25em .5em; overflow-y: scroll; max-height: 10.5em; }

#source p label.ctx { color: #999; display: inline-block; padding: 0 .5em; font-size: .8333em; }

@media (prefers-color-scheme: dark) { #source p label.ctx { color: #777; } }

#source p .ctxs { display: block; max-height: 0; overflow-y: hidden; transition: all .2s; padding: 0 .5em; font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Ubuntu, Cantarell, "Helvetica Neue", sans-serif; white-space: nowrap; background: #d0e8ff; border-radius: .25em; margin-right: 1.75em; text-align: right; }

@media (prefers-color-scheme: dark) { #source p .ctxs { background: #056; } }

#index { font-family: SFMono-Regular, Menlo, Monaco, Consolas, monospace; font-size: 0.875em; }

#index table.index { margin-left: -.5em; }

#index td, #index th { text-align: right; padding: .25em .5em; border-bottom: 1px solid #eee; }

@media (prefers-color-scheme: dark) { #index td, #index th { border-color: #333; } }

#index td.name, #index th.name { text-align: left; width: auto; font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Ubuntu, Cantarell, "Helvetica Neue", sans-serif; min-width: 15em; }

#index th { font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Ubuntu, Cantarell, "Helvetica Neue", sans-serif; font-style: italic; color: #333; cursor: pointer; }

@media (prefers-color-scheme: dark) { #index th { color: #ddd; } }

#index th:hover { background: #eee; }

@media (prefers-color-scheme: dark) { #index th:hover { background: #333; } }

#index th .arrows { color: #666; font-size: 85%; font-family: sans-serif; font-style: normal; pointer-events: none; }

#index th[aria-sort="ascending"], #index th[aria-sort="descending"] { white-space: nowrap; background: #eee; padding-left: .5em; }

@media (prefers-color-scheme: dark) { #index th[aria-sort="ascending"], #index th[aria-sort="descending"] { background: #333; } }

#index th[aria-sort="ascending"] .arrows::after { content: " ▲"; }

#index th[aria-sort="descending"] .arrows::after { content: " ▼"; }

#index td.name { font-size: 1.15em; }

#index td.name a { text-decoration: none; color: inherit; }

#index td.name .no-noun { font-style: italic; }

#index tr.total td, #index tr.total_dynamic td { font-weight: bold; border-top: 1px solid #ccc; border-bottom: none; }

#index tr.region:hover { background: #eee; }

@media (prefers-color-scheme: dark) { #index tr.region:hover { background: #333; } }

#index tr.region:hover td.name { text-decoration: underline; color: inherit; }

#scroll_marker { position: fixed; z-index: 3; right: 0; top: 0; width: 16px; height: 100%; background: #fff; border-left: 1px solid #eee; will-change: transform; }

@media (prefers-color-scheme: dark) { #scroll_marker { background: #1e1e1e; } }

@media (prefers-color-scheme: dark) { #scroll_marker { border-color: #333; } }

#scroll_marker .marker { background: #ccc; position: absolute; min-height: 3px; width: 100%; }

@media (prefers-color-scheme: dark) { #scroll_marker .marker { background: #444; } }
824
venv/lib/python3.12/site-packages/coverage/htmlfiles/style.scss
Normal file
@@ -0,0 +1,824 @@
/* Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 */
/* For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt */

// CSS styles for coverage.py HTML reports.

// When you edit this file, you need to run "make css" to get the CSS file
// generated, and then check in both the .scss and the .css files.

// When working on the file, this command is useful:
// sass --watch --style=compact --sourcemap=none --no-cache coverage/htmlfiles/style.scss:htmlcov/style.css
//
// OR you can process sass purely in python with `pip install pysass`, then:
// pysassc --style=compact coverage/htmlfiles/style.scss coverage/htmlfiles/style.css

// Ignore this comment, it's for the CSS output file:
/* Don't edit this .css file. Edit the .scss file instead! */

// Dimensions
$left-gutter: 3.5rem;

//
// Declare colors and variables
//

$font-normal: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Ubuntu, Cantarell, "Helvetica Neue", sans-serif;
$font-code: SFMono-Regular, Menlo, Monaco, Consolas, monospace;

$off-button-lighten: 50%;
$hover-dark-amt: 95%;
$multi-dim-amt: 50%;

$focus-color: #007acc;

$mis-color: #ff0000;
$run-color: #00dd00;
$exc-color: #808080;
$par-color: #bbbb00;

$light-bg: #fff;
$light-fg: #000;
$light-gray1: #f8f8f8;
$light-gray2: #eee;
$light-gray3: #ccc;
$light-gray4: #999;
$light-gray5: #666;
$light-gray6: #333;
$light-pln-bg: $light-bg;
$light-mis-bg: #fdd;
$light-run-bg: #dfd;
$light-exc-bg: $light-gray2;
$light-par-bg: #ffa;
$light-token-com: #008000;
$light-token-str: #0451a5;
$light-context-bg-color: #d0e8ff;

$dark-bg: #1e1e1e;
$dark-fg: #eee;
$dark-gray1: #222;
$dark-gray2: #333;
$dark-gray3: #444;
$dark-gray4: #777;
$dark-gray5: #aaa;
$dark-gray6: #ddd;
$dark-pln-bg: $dark-bg;
$dark-mis-bg: #4b1818;
$dark-run-bg: #373d29;
$dark-exc-bg: $dark-gray2;
$dark-par-bg: #650;
$dark-token-com: #6a9955;
$dark-token-str: #9cdcfe;
$dark-context-bg-color: #056;

//
// Mixins and utilities
//

@mixin background-dark($color) {
    @media (prefers-color-scheme: dark) {
        background: $color;
    }
}
@mixin color-dark($color) {
    @media (prefers-color-scheme: dark) {
        color: $color;
    }
}
@mixin border-color-dark($color) {
    @media (prefers-color-scheme: dark) {
        border-color: $color;
    }
}

// Add visual outline to navigable elements on focus, to improve accessibility.
@mixin focus-border {
    &:active, &:focus {
        outline: 2px dashed $focus-color;
    }
}

@mixin button-shape {
    font-family: inherit;
    font-size: inherit;
    border: 1px solid;
    border-radius: .2em;
    background: $light-gray2;
    @include background-dark($dark-gray2);
    color: inherit;
    text-decoration: none;
    padding: .1em .5em;
    margin: 1px calc(.1em + 1px);
    cursor: pointer;
    border-color: $light-gray3;
    @include border-color-dark($dark-gray3);
}

// Page-wide styles
html, body, h1, h2, h3, p, table, td, th {
    margin: 0;
    padding: 0;
    border: 0;
    font-weight: inherit;
    font-style: inherit;
    font-size: 100%;
    font-family: inherit;
    vertical-align: baseline;
}

// Set baseline grid to 16 pt.
body {
    font-family: $font-normal;
    font-size: 1em;
    background: $light-bg;
    color: $light-fg;
    @include background-dark($dark-bg);
    @include color-dark($dark-fg);
}

html>body {
    font-size: 16px;
}

a {
    @include focus-border;
}

p {
    font-size: .875em;
    line-height: 1.4em;
}

table {
    border-collapse: collapse;
}
td {
    vertical-align: top;
}
table tr.hidden {
    display: none !important;
}

p#no_rows {
    display: none;
    font-size: 1.15em;
    font-family: $font-normal;
}

a.nav {
    text-decoration: none;
    color: inherit;

    &:hover {
        text-decoration: underline;
        color: inherit;
    }
}

.hidden {
    display: none;
}

// Page structure
header {
    background: $light-gray1;
    @include background-dark(black);
    width: 100%;
    z-index: 2;
    border-bottom: 1px solid $light-gray3;
    @include border-color-dark($dark-gray2);

    .content {
        padding: 1rem $left-gutter;
    }

    h2 {
        margin-top: .5em;
        font-size: 1em;

        a.button {
            @include button-shape;
            &.current {
                border: 2px solid;
                background: $light-bg;
                @include background-dark($dark-bg);
                border-color: $light-gray4;
                @include border-color-dark($dark-gray4);
                cursor: default;
            }
        }
    }

    p.text {
        margin: .5em 0 -.5em;
        color: $light-gray5;
        @include color-dark($dark-gray5);
        font-style: italic;
    }

    &.sticky {
        position: fixed;
        left: 0;
        right: 0;
        height: 2.5em;

        .text {
            display: none;
        }

        h1, h2 {
            font-size: 1em;
            margin-top: 0;
            display: inline-block;
        }

        .content {
            padding: .5rem $left-gutter;
            p {
                font-size: 1em;
            }
        }

        & ~ #source {
            padding-top: 6.5em;
        }
    }
}

main {
    position: relative;
    z-index: 1;
}

footer {
    margin: 1rem $left-gutter;

    .content {
        padding: 0;
        color: $light-gray5;
        @include color-dark($dark-gray5);
        font-style: italic;
    }
}

#index {
    margin: 1rem 0 0 $left-gutter;
}

// Header styles

h1 {
    font-size: 1.25em;
    display: inline-block;
}

#filter_container {
    float: right;
    margin: 0 2em 0 0;
    line-height: 1.66em;

    #filter {
        width: 10em;
        padding: 0.2em 0.5em;
        border: 2px solid $light-gray3;
        background: $light-bg;
        color: $light-fg;
        @include border-color-dark($dark-gray3);
        @include background-dark($dark-bg);
        @include color-dark($dark-fg);
        &:focus {
            border-color: $focus-color;
        }
    }

    :disabled ~ label {
        color: $light-gray3;
        @include color-dark($dark-gray3);
    }

    label {
        font-size: .875em;
        color: $light-gray5;
        @include color-dark($dark-gray5);
    }
}

header button {
    @include button-shape;
    @include focus-border;

    &.run {
        background: mix($light-run-bg, $light-bg, $off-button-lighten);
        @include background-dark($dark-run-bg);
        &.show_run {
            background: $light-run-bg;
            @include background-dark($dark-run-bg);
            border: 2px solid $run-color;
            margin: 0 .1em;
        }
    }
    &.mis {
        background: mix($light-mis-bg, $light-bg, $off-button-lighten);
        @include background-dark($dark-mis-bg);
        &.show_mis {
            background: $light-mis-bg;
            @include background-dark($dark-mis-bg);
            border: 2px solid $mis-color;
            margin: 0 .1em;
        }
    }
    &.exc {
        background: mix($light-exc-bg, $light-bg, $off-button-lighten);
        @include background-dark($dark-exc-bg);
        &.show_exc {
            background: $light-exc-bg;
            @include background-dark($dark-exc-bg);
            border: 2px solid $exc-color;
            margin: 0 .1em;
        }
    }
    &.par {
        background: mix($light-par-bg, $light-bg, $off-button-lighten);
        @include background-dark($dark-par-bg);
        &.show_par {
            background: $light-par-bg;
            @include background-dark($dark-par-bg);
            border: 2px solid $par-color;
            margin: 0 .1em;
        }
    }
}

// Yellow post-it things.
%popup {
    display: none;
    position: absolute;
    z-index: 999;
    background: #ffffcc;
    border: 1px solid #888;
    border-radius: .2em;
    color: #333;
    padding: .25em .5em;
}

// Yellow post-it's in the text listings.
%in-text-popup {
    @extend %popup;
    white-space: normal;
    float: right;
    top: 1.75em;
    right: 1em;
    height: auto;
}

// Help panel
#help_panel_wrapper {
    float: right;
    position: relative;
}

#keyboard_icon {
    margin: 5px;
}

#help_panel_state {
    display: none;
}

#help_panel {
    @extend %popup;
    top: 25px;
    right: 0;
    padding: .75em;
    border: 1px solid #883;

    color: #333;

    .keyhelp p {
        margin-top: .75em;
    }

    .legend {
        font-style: italic;
        margin-bottom: 1em;
    }

    .indexfile & {
        width: 25em;
    }

    .pyfile & {
        width: 18em;
    }

    #help_panel_state:checked ~ & {
        display: block;
    }
}

kbd {
    border: 1px solid black;
    border-color: #888 #333 #333 #888;
    padding: .1em .35em;
    font-family: $font-code;
    font-weight: bold;
    background: #eee;
    border-radius: 3px;
}

// Source file styles

// The slim bar at the left edge of the source lines, colored by coverage.
$border-indicator-width: .2em;

#source {
    padding: 1em 0 1em $left-gutter;
    font-family: $font-code;

    p {
        // position relative makes position:absolute pop-ups appear in the right place.
        position: relative;
        white-space: pre;

        * {
            box-sizing: border-box;
        }

        .n {
            float: left;
            text-align: right;
            width: $left-gutter;
            box-sizing: border-box;
            margin-left: -$left-gutter;
            padding-right: 1em;
            color: $light-gray4;
            user-select: none;
            @include color-dark($dark-gray4);

            &.highlight {
                background: #ffdd00;
            }

            a {
                // Make anchors to the line scroll the line to be
                // visible beneath the fixed-position header.
                scroll-margin-top: 6em;
                text-decoration: none;
                color: $light-gray4;
                @include color-dark($dark-gray4);
                &:hover {
                    text-decoration: underline;
                    color: $light-gray4;
                    @include color-dark($dark-gray4);
                }
            }
        }

        .t {
            display: inline-block;
            width: 100%;
            box-sizing: border-box;
            margin-left: -.5em;
            padding-left: .5em - $border-indicator-width;
            border-left: $border-indicator-width solid $light-bg;
            @include border-color-dark($dark-bg);

            &:hover {
                background: mix($light-pln-bg, $light-fg, $hover-dark-amt);
                @include background-dark(mix($dark-pln-bg, $dark-fg, $hover-dark-amt));

                & ~ .r .annotate.long {
                    display: block;
                }
            }

            // Syntax coloring
            .com {
                color: $light-token-com;
                @include color-dark($dark-token-com);
                font-style: italic;
                line-height: 1px;
            }
            .key {
                font-weight: bold;
                line-height: 1px;
            }
            .str, .fst {
                color: $light-token-str;
                @include color-dark($dark-token-str);
            }
        }

        &.mis {
            .t {
                border-left: $border-indicator-width solid $mis-color;
            }

            &.show_mis .t {
                background: $light-mis-bg;
                @include background-dark($dark-mis-bg);

                &:hover {
                    background: mix($light-mis-bg, $light-fg, $hover-dark-amt);
                    @include background-dark(mix($dark-mis-bg, $dark-fg, $hover-dark-amt));
                }
            }

            &.mis2 {
                .t {
                    border-left: $border-indicator-width dotted $mis-color;
                }

                &.show_mis .t {
                    background: mix($light-mis-bg, $light-bg, $multi-dim-amt);
                    @include background-dark(mix($dark-mis-bg, $dark-bg, $multi-dim-amt));

                    &:hover {
                        background: mix($light-mis-bg, $light-fg, $hover-dark-amt);
                        @include background-dark(mix($dark-mis-bg, $dark-fg, $hover-dark-amt));
                    }
                }
            }
        }

        &.run {
            .t {
                border-left: $border-indicator-width solid $run-color;
            }

            &.show_run .t {
                background: $light-run-bg;
                @include background-dark($dark-run-bg);

                &:hover {
                    background: mix($light-run-bg, $light-fg, $hover-dark-amt);
                    @include background-dark(mix($dark-run-bg, $dark-fg, $hover-dark-amt));
                }
            }

            &.run2 {
                .t {
                    border-left: $border-indicator-width dotted $run-color;
                }

                &.show_run .t {
                    background: mix($light-run-bg, $light-bg, $multi-dim-amt);
                    @include background-dark(mix($dark-run-bg, $dark-bg, $multi-dim-amt));

                    &:hover {
                        background: mix($light-run-bg, $light-fg, $hover-dark-amt);
                        @include background-dark(mix($dark-run-bg, $dark-fg, $hover-dark-amt));
                    }
                }
            }
        }

        &.exc {
            .t {
                border-left: $border-indicator-width solid $exc-color;
            }

            &.show_exc .t {
                background: $light-exc-bg;
                @include background-dark($dark-exc-bg);

                &:hover {
                    background: mix($light-exc-bg, $light-fg, $hover-dark-amt);
                    @include background-dark(mix($dark-exc-bg, $dark-fg, $hover-dark-amt));
                }
            }

            &.exc2 {
                .t {
                    border-left: $border-indicator-width dotted $exc-color;
                }

                &.show_exc .t {
                    background: mix($light-exc-bg, $light-bg, $multi-dim-amt);
                    @include background-dark(mix($dark-exc-bg, $dark-bg, $multi-dim-amt));

                    &:hover {
                        background: mix($light-exc-bg, $light-fg, $hover-dark-amt);
                        @include background-dark(mix($dark-exc-bg, $dark-fg, $hover-dark-amt));
                    }
                }
            }
        }

        &.par {
            .t {
                border-left: $border-indicator-width solid $par-color;
            }

            &.show_par .t {
                background: $light-par-bg;
                @include background-dark($dark-par-bg);

                &:hover {
                    background: mix($light-par-bg, $light-fg, $hover-dark-amt);
                    @include background-dark(mix($dark-par-bg, $dark-fg, $hover-dark-amt));
                }
            }

            &.par2 {
                .t {
                    border-left: $border-indicator-width dotted $par-color;
                }

                &.show_par .t {
                    background: mix($light-par-bg, $light-bg, $multi-dim-amt);
                    @include background-dark(mix($dark-par-bg, $dark-bg, $multi-dim-amt));

                    &:hover {
                        background: mix($light-par-bg, $light-fg, $hover-dark-amt);
                        @include background-dark(mix($dark-par-bg, $dark-fg, $hover-dark-amt));
                    }
                }
            }
        }

        .r {
            position: absolute;
            top: 0;
            right: 2.5em;
            font-family: $font-normal;
        }

        .annotate {
            font-family: $font-normal;
            color: $light-gray5;
            @include color-dark($dark-gray6);
            padding-right: .5em;

            &.short:hover ~ .long {
                display: block;
            }

            &.long {
                @extend %in-text-popup;
                width: 30em;
                right: 2.5em;
            }
        }

        input {
            display: none;

            & ~ .r label.ctx {
                cursor: pointer;
                border-radius: .25em;
                &::before {
                    content: "▶ ";
                }
                &:hover {
                    background: mix($light-context-bg-color, $light-bg, $off-button-lighten);
                    @include background-dark(mix($dark-context-bg-color, $dark-bg, $off-button-lighten));
                    color: $light-gray5;
                    @include color-dark($dark-gray5);
                }
            }

            &:checked ~ .r label.ctx {
                background: $light-context-bg-color;
                @include background-dark($dark-context-bg-color);
                color: $light-gray5;
                @include color-dark($dark-gray5);
                border-radius: .75em .75em 0 0;
                padding: 0 .5em;
                margin: -.25em 0;
                &::before {
                    content: "▼ ";
                }
            }

            &:checked ~ .ctxs {
                padding: .25em .5em;
                overflow-y: scroll;
                max-height: 10.5em;
            }
        }

        label.ctx {
            color: $light-gray4;
            @include color-dark($dark-gray4);
            display: inline-block;
            padding: 0 .5em;
            font-size: .8333em; // 10/12
        }

        .ctxs {
            display: block;
            max-height: 0;
            overflow-y: hidden;
            transition: all .2s;
            padding: 0 .5em;
            font-family: $font-normal;
            white-space: nowrap;
            background: $light-context-bg-color;
            @include background-dark($dark-context-bg-color);
            border-radius: .25em;
            margin-right: 1.75em;
            text-align: right;
        }
    }
}


// index styles
#index {
    font-family: $font-code;
    font-size: 0.875em;

    table.index {
        margin-left: -.5em;
    }
    td, th {
        text-align: right;
        padding: .25em .5em;
        border-bottom: 1px solid $light-gray2;
        @include border-color-dark($dark-gray2);
        &.name {
            text-align: left;
            width: auto;
            font-family: $font-normal;
            min-width: 15em;
        }
    }
    th {
        font-family: $font-normal;
        font-style: italic;
        color: $light-gray6;
        @include color-dark($dark-gray6);
        cursor: pointer;
        &:hover {
            background: $light-gray2;
            @include background-dark($dark-gray2);
        }
        .arrows {
            color: #666;
            font-size: 85%;
            font-family: sans-serif;
            font-style: normal;
            pointer-events: none;
        }
        &[aria-sort="ascending"], &[aria-sort="descending"] {
            white-space: nowrap;
            background: $light-gray2;
            @include background-dark($dark-gray2);
            padding-left: .5em;
        }
        &[aria-sort="ascending"] .arrows::after {
            content: " ▲";
        }
        &[aria-sort="descending"] .arrows::after {
            content: " ▼";
        }
    }
    td.name {
        font-size: 1.15em;
        a {
            text-decoration: none;
            color: inherit;
        }
        & .no-noun {
            font-style: italic;
        }
    }

    tr.total td,
    tr.total_dynamic td {
        font-weight: bold;
        border-top: 1px solid #ccc;
        border-bottom: none;
    }
    tr.region:hover {
        background: $light-gray2;
        @include background-dark($dark-gray2);
        td.name {
            text-decoration: underline;
            color: inherit;
        }
    }
}

// scroll marker styles
#scroll_marker {
    position: fixed;
    z-index: 3;
    right: 0;
    top: 0;
    width: 16px;
    height: 100%;
    background: $light-bg;
    border-left: 1px solid $light-gray2;
    @include background-dark($dark-bg);
    @include border-color-dark($dark-gray2);
    will-change: transform; // for faster scrolling of fixed element in Chrome

    .marker {
        background: $light-gray3;
        @include background-dark($dark-gray3);
        position: absolute;
        min-height: 3px;
        width: 100%;
    }
}
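The context toggle styles above (label.ctx, .ctxs) only come into play when the HTML report is generated from data that recorded measurement contexts. A minimal sketch of producing such a report through coverage.py's public API, assuming a hypothetical measured module my_program:

import coverage

cov = coverage.Coverage()
cov.set_option("run:dynamic_context", "test_function")  # record per-test contexts
cov.start()
import my_program  # hypothetical module, stands in for the real code under test
cov.stop()
cov.save()
cov.html_report(directory="htmlcov", show_contexts=True)  # emits the ctx toggles styled above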
614
venv/lib/python3.12/site-packages/coverage/inorout.py
Normal file
@@ -0,0 +1,614 @@
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt

"""Determining whether files are being measured/reported or not."""

from __future__ import annotations

import importlib.util
import inspect
import itertools
import os
import os.path
import platform
import re
import sys
import sysconfig
import traceback
from collections.abc import Iterable
from types import FrameType, ModuleType
from typing import TYPE_CHECKING, Any, cast

from coverage import env
from coverage.disposition import FileDisposition, disposition_init
from coverage.exceptions import ConfigError, CoverageException, PluginError
from coverage.files import (
    GlobMatcher,
    ModuleMatcher,
    TreeMatcher,
    canonical_filename,
    find_python_files,
    prep_patterns,
)
from coverage.misc import isolate_module, sys_modules_saved
from coverage.python import source_for_file, source_for_morf
from coverage.types import TDebugCtl, TFileDisposition, TMorf, TWarnFn

if TYPE_CHECKING:
    from coverage.config import CoverageConfig
    from coverage.plugin_support import Plugins


modules_we_happen_to_have: list[ModuleType] = [
    inspect,
    itertools,
    os,
    platform,
    re,
    sysconfig,
    traceback,
]

if env.PYPY:
    # Pypy has some unusual stuff in the "stdlib". Consider those locations
    # when deciding where the stdlib is. These modules are not used for anything,
    # they are modules importable from the pypy lib directories, so that we can
    # find those directories.
    import _pypy_irc_topic  # pylint: disable=import-error
    import _structseq  # pylint: disable=import-error

    modules_we_happen_to_have.extend([_structseq, _pypy_irc_topic])


os = isolate_module(os)


def canonical_path(morf: TMorf, directory: bool = False) -> str:
    """Return the canonical path of the module or file `morf`.

    If the module is a package, then return its directory. If it is a
    module, then return its file, unless `directory` is True, in which
    case return its enclosing directory.

    """
    morf_path = canonical_filename(source_for_morf(morf))
    if morf_path.endswith("__init__.py") or directory:
        morf_path = os.path.split(morf_path)[0]
    return morf_path


def name_for_module(filename: str, frame: FrameType | None) -> str | None:
    """Get the name of the module for a filename and frame.

    For configurability's sake, we allow __main__ modules to be matched by
    their importable name.

    If loaded via runpy (aka -m), we can usually recover the "original"
    full dotted module name, otherwise, we resort to interpreting the
    file name to get the module's name. In the case that the module name
    can't be determined, None is returned.

    """
    module_globals = frame.f_globals if frame is not None else {}
    dunder_name: str | None = module_globals.get("__name__", None)

    if isinstance(dunder_name, str) and dunder_name != "__main__":
        # This is the usual case: an imported module.
        return dunder_name

    spec = module_globals.get("__spec__", None)
    if spec:
        fullname = spec.name
        if isinstance(fullname, str) and fullname != "__main__":
            # Module loaded via: runpy -m
            return fullname

    # Script as first argument to Python command line.
    inspectedname = inspect.getmodulename(filename)
    if inspectedname is not None:
        return inspectedname
    else:
        return dunder_name


def module_is_namespace(mod: ModuleType) -> bool:
    """Is the module object `mod` a PEP420 namespace module?"""
    return hasattr(mod, "__path__") and getattr(mod, "__file__", None) is None


def module_has_file(mod: ModuleType) -> bool:
    """Does the module object `mod` have an existing __file__ ?"""
    mod__file__ = getattr(mod, "__file__", None)
    if mod__file__ is None:
        return False
    return os.path.exists(mod__file__)


def file_and_path_for_module(modulename: str) -> tuple[str | None, list[str]]:
    """Find the file and search path for `modulename`.

    Returns:
        filename: The filename of the module, or None.
        path: A list (possibly empty) of directories to find submodules in.

    """
    filename = None
    path = []
    try:
        spec = importlib.util.find_spec(modulename)
    except Exception:
        pass
    else:
        if spec is not None:
            filename = spec.origin
            path = list(spec.submodule_search_locations or ())
    return filename, path


def add_stdlib_paths(paths: set[str]) -> None:
    """Add paths where the stdlib can be found to the set `paths`."""
    # Look at where some standard modules are located. That's the
    # indication for "installed with the interpreter". In some
    # environments (virtualenv, for example), these modules may be
    # spread across a few locations. Look at all the candidate modules
    # we've imported, and take all the different ones.
    for m in modules_we_happen_to_have:
        if hasattr(m, "__file__"):
            paths.add(canonical_path(m, directory=True))


def add_third_party_paths(paths: set[str]) -> None:
    """Add locations for third-party packages to the set `paths`."""
    # Get the paths that sysconfig knows about.
    scheme_names = set(sysconfig.get_scheme_names())

    for scheme in scheme_names:
        # https://foss.heptapod.net/pypy/pypy/-/issues/3433
        better_scheme = "pypy_posix" if scheme == "pypy" else scheme
        if os.name in better_scheme.split("_"):
            config_paths = sysconfig.get_paths(scheme)
            for path_name in ["platlib", "purelib", "scripts"]:
                paths.add(config_paths[path_name])


def add_coverage_paths(paths: set[str]) -> None:
    """Add paths where coverage.py code can be found to the set `paths`."""
    cover_path = canonical_path(__file__, directory=True)
    paths.add(cover_path)
    if env.TESTING:
        # Don't include our own test code.
        paths.add(os.path.join(cover_path, "tests"))


class InOrOut:
    """Machinery for determining what files to measure."""

    def __init__(
        self,
        config: CoverageConfig,
        warn: TWarnFn,
        debug: TDebugCtl | None,
        include_namespace_packages: bool,
    ) -> None:
        self.warn = warn
        self.debug = debug
        self.include_namespace_packages = include_namespace_packages

        self.source_pkgs: list[str] = []
        self.source_pkgs.extend(config.source_pkgs)
        self.source_dirs: list[str] = []
        self.source_dirs.extend(config.source_dirs)
        for src in config.source or []:
            if os.path.isdir(src):
                self.source_dirs.append(src)
            else:
                self.source_pkgs.append(src)

        # Canonicalize everything in `source_dirs`.
        # Also confirm that they actually are directories.
        for i, src in enumerate(self.source_dirs):
            if not os.path.isdir(src):
                raise ConfigError(f"Source dir is not a directory: {src!r}")
            self.source_dirs[i] = canonical_filename(src)

        self.source_pkgs_unmatched = self.source_pkgs[:]

        self.include = prep_patterns(config.run_include)
        self.omit = prep_patterns(config.run_omit)

        # The directories for files considered "installed with the interpreter".
        self.pylib_paths: set[str] = set()
        if not config.cover_pylib:
            add_stdlib_paths(self.pylib_paths)

        # To avoid tracing the coverage.py code itself, we skip anything
        # located where we are.
        self.cover_paths: set[str] = set()
        add_coverage_paths(self.cover_paths)

        # Find where third-party packages are installed.
        self.third_paths: set[str] = set()
        add_third_party_paths(self.third_paths)

        def _debug(msg: str) -> None:
            if self.debug:
                self.debug.write(msg)

        # The matchers for should_trace.

        # Generally useful information
        _debug("sys.path:" + "".join(f"\n    {p}" for p in sys.path))

        # Create the matchers we need for should_trace
        self.source_match = None
        self.source_pkgs_match = None
        self.pylib_match = None
        self.include_match = self.omit_match = None

        if self.source_dirs or self.source_pkgs:
            against = []
            if self.source_dirs:
                self.source_match = TreeMatcher(self.source_dirs, "source")
                against.append(f"trees {self.source_match!r}")
            if self.source_pkgs:
                self.source_pkgs_match = ModuleMatcher(self.source_pkgs, "source_pkgs")
                against.append(f"modules {self.source_pkgs_match!r}")
            _debug("Source matching against " + " and ".join(against))
        else:
            if self.pylib_paths:
                self.pylib_match = TreeMatcher(self.pylib_paths, "pylib")
                _debug(f"Python stdlib matching: {self.pylib_match!r}")
        if self.include:
            self.include_match = GlobMatcher(self.include, "include")
            _debug(f"Include matching: {self.include_match!r}")
        if self.omit:
            self.omit_match = GlobMatcher(self.omit, "omit")
            _debug(f"Omit matching: {self.omit_match!r}")

        self.cover_match = TreeMatcher(self.cover_paths, "coverage")
        _debug(f"Coverage code matching: {self.cover_match!r}")

        self.third_match = TreeMatcher(self.third_paths, "third")
        _debug(f"Third-party lib matching: {self.third_match!r}")

        # Check if the source we want to measure has been installed as a
        # third-party package.
        # Is the source inside a third-party area?
        self.source_in_third_paths = set()
        with sys_modules_saved():
            for pkg in self.source_pkgs:
                try:
                    modfile, path = file_and_path_for_module(pkg)
                    _debug(f"Imported source package {pkg!r} as {modfile!r}")
                except CoverageException as exc:
                    _debug(f"Couldn't import source package {pkg!r}: {exc}")
                    continue
                if modfile:
                    if self.third_match.match(modfile):
                        _debug(
                            f"Source in third-party: source_pkg {pkg!r} at {modfile!r}",
                        )
                        self.source_in_third_paths.add(canonical_path(source_for_file(modfile)))
                else:
                    for pathdir in path:
                        if self.third_match.match(pathdir):
                            _debug(
                                f"Source in third-party: {pkg!r} path directory at {pathdir!r}",
                            )
                            self.source_in_third_paths.add(pathdir)

        for src in self.source_dirs:
            if self.third_match.match(src):
                _debug(f"Source in third-party: source directory {src!r}")
                self.source_in_third_paths.add(src)
        self.source_in_third_match = TreeMatcher(self.source_in_third_paths, "source_in_third")
        _debug(f"Source in third-party matching: {self.source_in_third_match}")

        self.plugins: Plugins
        self.disp_class: type[TFileDisposition] = FileDisposition

    def should_trace(self, filename: str, frame: FrameType | None = None) -> TFileDisposition:
        """Decide whether to trace execution in `filename`, with a reason.

        This function is called from the trace function. As each new file name
        is encountered, this function determines whether it is traced or not.

        Returns a FileDisposition object.

        """
        original_filename = filename
        disp = disposition_init(self.disp_class, filename)

        def nope(disp: TFileDisposition, reason: str) -> TFileDisposition:
            """Simple helper to make it easy to return NO."""
            disp.trace = False
            disp.reason = reason
            return disp

        if original_filename.startswith("<"):
            return nope(disp, "original file name is not real")

        if frame is not None:
            # Compiled Python files have two file names: frame.f_code.co_filename is
            # the file name at the time the .pyc was compiled. The second name is
            # __file__, which is where the .pyc was actually loaded from. Since
            # .pyc files can be moved after compilation (for example, by being
            # installed), we look for __file__ in the frame and prefer it to the
            # co_filename value.
            dunder_file = frame.f_globals and frame.f_globals.get("__file__")
            if dunder_file:
                # Danger: __file__ can (rarely?) be of type Path.
                filename = source_for_file(str(dunder_file))
                if original_filename and not original_filename.startswith("<"):
                    orig = os.path.basename(original_filename)
                    if orig != os.path.basename(filename):
                        # Files shouldn't be renamed when moved. This happens when
                        # exec'ing code. If it seems like something is wrong with
                        # the frame's file name, then just use the original.
                        filename = original_filename

        if not filename:
            # Empty string is pretty useless.
            return nope(disp, "empty string isn't a file name")

        if filename.startswith("memory:"):
            return nope(disp, "memory isn't traceable")

        if filename.startswith("<"):
            # Lots of non-file execution is represented with artificial
            # file names like "<string>", "<doctest readme.txt[0]>", or
            # "<exec_function>". Don't ever trace these executions, since we
            # can't do anything with the data later anyway.
            return nope(disp, "file name is not real")

        canonical = canonical_filename(filename)
        disp.canonical_filename = canonical

        # Try the plugins, see if they have an opinion about the file.
        plugin = None
        for plugin in self.plugins.file_tracers:
            if not plugin._coverage_enabled:
                continue

            try:
                file_tracer = plugin.file_tracer(canonical)
                if file_tracer is not None:
                    file_tracer._coverage_plugin = plugin
                    disp.trace = True
                    disp.file_tracer = file_tracer
                    if file_tracer.has_dynamic_source_filename():
                        disp.has_dynamic_filename = True
                    else:
                        disp.source_filename = canonical_filename(
                            file_tracer.source_filename(),
                        )
                    break
            except Exception:
                plugin_name = plugin._coverage_plugin_name
                tb = traceback.format_exc()
                self.warn(f"Disabling plug-in {plugin_name!r} due to an exception:\n{tb}")
                plugin._coverage_enabled = False
                continue
        else:
            # No plugin wanted it: it's Python.
            disp.trace = True
            disp.source_filename = canonical

        if not disp.has_dynamic_filename:
            if not disp.source_filename:
                raise PluginError(
                    f"Plugin {plugin!r} didn't set source_filename for '{disp.original_filename}'",
                )
            reason = self.check_include_omit_etc(disp.source_filename, frame)
            if reason:
                nope(disp, reason)

        return disp

    def check_include_omit_etc(self, filename: str, frame: FrameType | None) -> str | None:
        """Check a file name against the include, omit, etc, rules.

        Returns a string or None. String means, don't trace, and is the reason
        why. None means no reason found to not trace.

        """
        modulename = name_for_module(filename, frame)

        # If the user specified source or include, then that's authoritative
        # about the outer bound of what to measure and we don't have to apply
        # any canned exclusions. If they didn't, then we have to exclude the
        # stdlib and coverage.py directories.
        if self.source_match or self.source_pkgs_match:
            extra = ""
            ok = False
            if self.source_pkgs_match:
                if isinstance(modulename, str) and self.source_pkgs_match.match(modulename):
                    ok = True
                    if modulename in self.source_pkgs_unmatched:
                        self.source_pkgs_unmatched.remove(modulename)
                else:
                    extra = f"module {modulename!r} "
            if not ok and self.source_match:
                if self.source_match.match(filename):
                    ok = True
            if not ok:
                return extra + "falls outside the --source spec"
            if self.third_match.match(filename) and not self.source_in_third_match.match(filename):
                return "inside --source, but is third-party"
        elif self.include_match:
            if not self.include_match.match(filename):
                return "falls outside the --include trees"
        else:
            # We exclude the coverage.py code itself, since a little of it
            # will be measured otherwise.
            if self.cover_match.match(filename):
                return "is part of coverage.py"

            # If we aren't supposed to trace installed code, then check if this
            # is near the Python standard library and skip it if so.
            if self.pylib_match and self.pylib_match.match(filename):
                return "is in the stdlib"

            # Exclude anything in the third-party installation areas.
            if self.third_match.match(filename):
                return "is a third-party module"

        # Check the file against the omit pattern.
        if self.omit_match and self.omit_match.match(filename):
            return "is inside an --omit pattern"

        # No point tracing a file we can't later write to SQLite.
        try:
            filename.encode("utf-8")
        except UnicodeEncodeError:
            return "non-encodable filename"

        # No reason found to skip this file.
        return None

    def warn_conflicting_settings(self) -> None:
        """Warn if there are settings that conflict."""
        if self.include:
            if self.source_dirs or self.source_pkgs:
                self.warn("--include is ignored because --source is set", slug="include-ignored")

    def warn_already_imported_files(self) -> None:
        """Warn if files have already been imported that we will be measuring."""
        if self.include or self.source_dirs or self.source_pkgs:
            warned = set()
            for mod in list(sys.modules.values()):
                filename = getattr(mod, "__file__", None)
                if filename is None:
                    continue
                if filename in warned:
                    continue

                if len(getattr(mod, "__path__", ())) > 1:
                    # A namespace package, which confuses this code, so ignore it.
                    continue

                disp = self.should_trace(filename)
                if disp.has_dynamic_filename:
                    # A plugin with dynamic filenames: the Python file
                    # shouldn't cause a warning, since it won't be the subject
                    # of tracing anyway.
                    continue
                if disp.trace:
                    msg = f"Already imported a file that will be measured: {filename}"
                    self.warn(msg, slug="already-imported")
                    warned.add(filename)
                elif self.debug and self.debug.should("trace"):
                    self.debug.write(
                        "Didn't trace already imported file {!r}: {}".format(
                            disp.original_filename,
                            disp.reason,
                        ),
                    )

    def warn_unimported_source(self) -> None:
        """Warn about source packages that were of interest, but never traced."""
        for pkg in self.source_pkgs_unmatched:
            self._warn_about_unmeasured_code(pkg)

    def _warn_about_unmeasured_code(self, pkg: str) -> None:
        """Warn about a package or module that we never traced.

        `pkg` is a string, the name of the package or module.

        """
        mod = sys.modules.get(pkg)
        if mod is None:
            self.warn(f"Module {pkg} was never imported.", slug="module-not-imported")
            return

        if module_is_namespace(mod):
            # A namespace package. It's OK for this not to have been traced,
            # since there is no code directly in it.
            return

        if not module_has_file(mod):
            self.warn(f"Module {pkg} has no Python source.", slug="module-not-python")
            return

        # The module was in sys.modules, and seems like a module with code, but
        # we never measured it. I guess that means it was imported before
        # coverage even started.
        msg = f"Module {pkg} was previously imported, but not measured"
        self.warn(msg, slug="module-not-measured")

    def find_possibly_unexecuted_files(self) -> Iterable[tuple[str, str | None]]:
        """Find files in the areas of interest that might be untraced.

        Yields pairs: file path, and responsible plug-in name.
        """
        for pkg in self.source_pkgs:
            if pkg not in sys.modules or not module_has_file(sys.modules[pkg]):
                continue
            pkg_file = source_for_file(cast(str, sys.modules[pkg].__file__))
            yield from self._find_executable_files(canonical_path(pkg_file))

        for src in self.source_dirs:
            yield from self._find_executable_files(src)

    def _find_plugin_files(self, src_dir: str) -> Iterable[tuple[str, str]]:
        """Get executable files from the plugins."""
        for plugin in self.plugins.file_tracers:
            for x_file in plugin.find_executable_files(src_dir):
                yield x_file, plugin._coverage_plugin_name

    def _find_executable_files(self, src_dir: str) -> Iterable[tuple[str, str | None]]:
        """Find executable files in `src_dir`.

        Search for files in `src_dir` that can be executed because they
        are probably importable. Don't include ones that have been omitted
        by the configuration.

        Yield the file path, and the plugin name that handles the file.

        """
        py_files = (
            (py_file, None)
            for py_file in find_python_files(src_dir, self.include_namespace_packages)
        )
        plugin_files = self._find_plugin_files(src_dir)

        for file_path, plugin_name in itertools.chain(py_files, plugin_files):
            file_path = canonical_filename(file_path)
            if self.omit_match and self.omit_match.match(file_path):
                # Turns out this file was omitted, so don't pull it back
                # in as un-executed.
                continue
            yield file_path, plugin_name

    def sys_info(self) -> Iterable[tuple[str, Any]]:
        """Our information for Coverage.sys_info.

        Returns a list of (key, value) pairs.
        """
        info = [
            ("coverage_paths", self.cover_paths),
            ("stdlib_paths", self.pylib_paths),
            ("third_party_paths", self.third_paths),
            ("source_in_third_party_paths", self.source_in_third_paths),
        ]

        matcher_names = [
            "source_match",
            "source_pkgs_match",
            "include_match",
            "omit_match",
            "cover_match",
            "pylib_match",
            "third_match",
            "source_in_third_match",
        ]

        for matcher_name in matcher_names:
            matcher = getattr(self, matcher_name)
            if matcher:
                matcher_info = matcher.info()
            else:
                matcher_info = "-none-"
            info.append((matcher_name, matcher_info))

        return info
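A minimal sketch of how these matchers get exercised from the public API, with hypothetical paths: the source option feeds source_dirs/source_pkgs (the authoritative outer bound), while omit is applied afterwards by check_include_omit_etc():

import coverage

cov = coverage.Coverage(
    source=["pkg"],               # becomes source_dirs or source_pkgs above
    omit=["pkg/_vendored/*"],     # checked by omit_match after the source match
)
cov.start()
import pkg.main  # hypothetical entry point; files imported now go through should_trace()
cov.stop()
cov.save()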
188
venv/lib/python3.12/site-packages/coverage/jsonreport.py
Normal file
@@ -0,0 +1,188 @@
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt

"""Json reporting for coverage.py"""

from __future__ import annotations

import datetime
import json
import sys
from collections.abc import Iterable
from typing import IO, TYPE_CHECKING, Any

from coverage import __version__
from coverage.report_core import get_analysis_to_report
from coverage.results import Analysis, AnalysisNarrower, Numbers
from coverage.types import TLineNo, TMorf

if TYPE_CHECKING:
    from coverage import Coverage
    from coverage.data import CoverageData
    from coverage.plugin import FileReporter


# A type for data that can be JSON-serialized.
JsonObj = dict[str, Any]

# "Version 1" had no format number at all.
# 2: add the meta.format field.
# 3: add region information (functions, classes)
FORMAT_VERSION = 3


class JsonReporter:
    """A reporter for writing JSON coverage results."""

    report_type = "JSON report"

    def __init__(self, coverage: Coverage) -> None:
        self.coverage = coverage
        self.config = self.coverage.config
        self.total = Numbers(self.config.precision)
        self.report_data: JsonObj = {}

    def make_summary(self, nums: Numbers) -> JsonObj:
        """Create a dict summarizing `nums`."""
        return {
            "covered_lines": nums.n_executed,
            "num_statements": nums.n_statements,
            "percent_covered": nums.pc_covered,
            "percent_covered_display": nums.pc_covered_str,
            "missing_lines": nums.n_missing,
            "excluded_lines": nums.n_excluded,
        }

    def make_branch_summary(self, nums: Numbers) -> JsonObj:
        """Create a dict summarizing the branch info in `nums`."""
        return {
            "num_branches": nums.n_branches,
            "num_partial_branches": nums.n_partial_branches,
            "covered_branches": nums.n_executed_branches,
            "missing_branches": nums.n_missing_branches,
        }

    def report(self, morfs: Iterable[TMorf] | None, outfile: IO[str]) -> float:
        """Generate a json report for `morfs`.

        `morfs` is a list of modules or file names.

        `outfile` is a file object to write the json to.

        """
        outfile = outfile or sys.stdout
        coverage_data = self.coverage.get_data()
        coverage_data.set_query_contexts(self.config.report_contexts)
        self.report_data["meta"] = {
            "format": FORMAT_VERSION,
            "version": __version__,
            "timestamp": datetime.datetime.now().isoformat(),
            "branch_coverage": coverage_data.has_arcs(),
            "show_contexts": self.config.json_show_contexts,
        }

        measured_files = {}
        for file_reporter, analysis in get_analysis_to_report(self.coverage, morfs):
            measured_files[file_reporter.relative_filename()] = self.report_one_file(
                coverage_data,
                analysis,
                file_reporter,
            )

        self.report_data["files"] = measured_files
        self.report_data["totals"] = self.make_summary(self.total)

        if coverage_data.has_arcs():
            self.report_data["totals"].update(self.make_branch_summary(self.total))

        json.dump(
            self.report_data,
            outfile,
            indent=(4 if self.config.json_pretty_print else None),
        )

        return self.total.n_statements and self.total.pc_covered

    def report_one_file(
        self, coverage_data: CoverageData, analysis: Analysis, file_reporter: FileReporter
    ) -> JsonObj:
        """Extract the relevant report data for a single file."""
        nums = analysis.numbers
        self.total += nums
        summary = self.make_summary(nums)
        reported_file: JsonObj = {
            "executed_lines": sorted(analysis.executed),
            "summary": summary,
            "missing_lines": sorted(analysis.missing),
            "excluded_lines": sorted(analysis.excluded),
        }
        if self.config.json_show_contexts:
            reported_file["contexts"] = coverage_data.contexts_by_lineno(analysis.filename)
        if coverage_data.has_arcs():
            summary.update(self.make_branch_summary(nums))
            reported_file["executed_branches"] = list(
                _convert_branch_arcs(analysis.executed_branch_arcs()),
            )
            reported_file["missing_branches"] = list(
                _convert_branch_arcs(analysis.missing_branch_arcs()),
            )

        num_lines = len(file_reporter.source().splitlines())
        regions = file_reporter.code_regions()
        for noun, plural in file_reporter.code_region_kinds():
            outside_lines = set(range(1, num_lines + 1))
            for region in regions:
                if region.kind != noun:
                    continue
                outside_lines -= region.lines

            narrower = AnalysisNarrower(analysis)
            narrower.add_regions(r.lines for r in regions if r.kind == noun)
            narrower.add_regions([outside_lines])

            reported_file[plural] = region_data = {}
            for region in regions:
                if region.kind != noun:
                    continue
                region_data[region.name] = self.make_region_data(
                    coverage_data,
                    narrower.narrow(region.lines),
                )

            region_data[""] = self.make_region_data(
                coverage_data,
                narrower.narrow(outside_lines),
            )
        return reported_file

    def make_region_data(self, coverage_data: CoverageData, narrowed_analysis: Analysis) -> JsonObj:
        """Create the data object for one region of a file."""
        narrowed_nums = narrowed_analysis.numbers
        narrowed_summary = self.make_summary(narrowed_nums)
        this_region = {
            "executed_lines": sorted(narrowed_analysis.executed),
            "summary": narrowed_summary,
            "missing_lines": sorted(narrowed_analysis.missing),
            "excluded_lines": sorted(narrowed_analysis.excluded),
        }
        if self.config.json_show_contexts:
            contexts = coverage_data.contexts_by_lineno(narrowed_analysis.filename)
            this_region["contexts"] = contexts
        if coverage_data.has_arcs():
            narrowed_summary.update(self.make_branch_summary(narrowed_nums))
            this_region["executed_branches"] = list(
                _convert_branch_arcs(narrowed_analysis.executed_branch_arcs()),
            )
            this_region["missing_branches"] = list(
                _convert_branch_arcs(narrowed_analysis.missing_branch_arcs()),
            )
        return this_region


def _convert_branch_arcs(
    branch_arcs: dict[TLineNo, list[TLineNo]],
) -> Iterable[tuple[TLineNo, TLineNo]]:
    """Convert branch arcs to a list of two-element tuples."""
    for source, targets in branch_arcs.items():
        for target in targets:
            yield source, target
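A short sketch of producing and consuming the format-3 JSON described above, assuming a .coverage data file already exists in the working directory:

import json

import coverage

cov = coverage.Coverage()
cov.load()  # assumes an existing .coverage data file
cov.json_report(outfile="coverage.json", pretty_print=True)

with open("coverage.json") as f:
    data = json.load(f)
assert data["meta"]["format"] == 3  # FORMAT_VERSION above
print(data["totals"]["percent_covered_display"])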
219
venv/lib/python3.12/site-packages/coverage/lcovreport.py
Normal file
@@ -0,0 +1,219 @@
|
||||
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
|
||||
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
|
||||
|
||||
"""LCOV reporting for coverage.py."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import base64
|
||||
import hashlib
|
||||
import sys
|
||||
from collections.abc import Iterable
|
||||
from typing import IO, TYPE_CHECKING
|
||||
|
||||
from coverage.plugin import FileReporter
|
||||
from coverage.report_core import get_analysis_to_report
|
||||
from coverage.results import Analysis, AnalysisNarrower, Numbers
|
||||
from coverage.types import TMorf
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from coverage import Coverage
|
||||
|
||||
|
||||
def line_hash(line: str) -> str:
|
||||
"""Produce a hash of a source line for use in the LCOV file."""
|
||||
# The LCOV file format optionally allows each line to be MD5ed as a
|
||||
# fingerprint of the file. This is not a security use. Some security
|
||||
# scanners raise alarms about the use of MD5 here, but it is a false
|
||||
# positive. This is not a security concern.
|
||||
# The unusual encoding of the MD5 hash, as a base64 sequence with the
|
||||
# trailing = signs stripped, is specified by the LCOV file format.
|
||||
hashed = hashlib.md5(line.encode("utf-8"), usedforsecurity=False).digest()
|
||||
return base64.b64encode(hashed).decode("ascii").rstrip("=")
|
||||
|
||||
|
||||
def lcov_lines(
|
||||
analysis: Analysis,
|
||||
lines: list[int],
|
||||
source_lines: list[str],
|
||||
outfile: IO[str],
|
||||
) -> None:
|
||||
"""Emit line coverage records for an analyzed file."""
|
||||
hash_suffix = ""
|
||||
for line in lines:
|
||||
if source_lines:
|
||||
hash_suffix = "," + line_hash(source_lines[line - 1])
|
||||
# Q: can we get info about the number of times a statement is
|
||||
# executed? If so, that should be recorded here.
|
||||
hit = int(line not in analysis.missing)
|
||||
outfile.write(f"DA:{line},{hit}{hash_suffix}\n")
|
||||
|
||||
if analysis.numbers.n_statements > 0:
|
||||
outfile.write(f"LF:{analysis.numbers.n_statements}\n")
|
||||
outfile.write(f"LH:{analysis.numbers.n_executed}\n")
|
||||
|
||||
|
||||
def lcov_functions(
|
||||
fr: FileReporter,
|
||||
file_analysis: Analysis,
|
||||
outfile: IO[str],
|
||||
) -> None:
|
||||
"""Emit function coverage records for an analyzed file."""
|
||||
# lcov 2.2 introduces a new format for function coverage records.
|
||||
# We continue to generate the old format because we don't know what
|
||||
# version of the lcov tools will be used to read this report.
|
||||
|
||||
# "and region.lines" below avoids a crash due to a bug in PyPy 3.8
|
||||
# where, for whatever reason, when collecting data in --branch mode,
|
||||
# top-level functions have an empty lines array. Instead we just don't
|
||||
# emit function records for those.
|
||||
|
||||
# suppressions because of https://github.com/pylint-dev/pylint/issues/9923
|
||||
functions = [
|
||||
(
|
||||
min(region.start, min(region.lines)), # pylint: disable=nested-min-max
|
||||
max(region.start, max(region.lines)), # pylint: disable=nested-min-max
|
||||
region,
|
||||
)
|
||||
for region in fr.code_regions()
|
||||
if region.kind == "function" and region.lines
|
||||
]
|
||||
if not functions:
|
||||
return
|
||||
|
||||
narrower = AnalysisNarrower(file_analysis)
|
||||
narrower.add_regions(r.lines for _, _, r in functions)
|
||||
|
||||
functions.sort()
|
||||
functions_hit = 0
|
||||
for first_line, last_line, region in functions:
|
||||
# A function counts as having been executed if any of it has been
|
||||
# executed.
|
||||
analysis = narrower.narrow(region.lines)
|
||||
hit = int(analysis.numbers.n_executed > 0)
|
||||
functions_hit += hit
|
||||
|
||||
outfile.write(f"FN:{first_line},{last_line},{region.name}\n")
|
||||
outfile.write(f"FNDA:{hit},{region.name}\n")
|
||||
|
||||
outfile.write(f"FNF:{len(functions)}\n")
|
||||
outfile.write(f"FNH:{functions_hit}\n")
|
||||
|
||||
|
||||
def lcov_arcs(
    fr: FileReporter,
    analysis: Analysis,
    lines: list[int],
    outfile: IO[str],
) -> None:
    """Emit branch coverage records for an analyzed file."""
    branch_stats = analysis.branch_stats()
    executed_arcs = analysis.executed_branch_arcs()
    missing_arcs = analysis.missing_branch_arcs()

    for line in lines:
        if line not in branch_stats:
            continue

        # This is only one of several possible ways to map our sets of executed
        # and not-executed arcs to BRDA codes. It seems to produce reasonable
        # results when fed through genhtml.
        _, taken = branch_stats[line]

        if taken == 0:
            # When _none_ of the out arcs from 'line' were executed,
            # it can mean the line always raised an exception.
            assert len(executed_arcs[line]) == 0
            destinations = [(dst, "-") for dst in missing_arcs[line]]
        else:
            # Q: can we get counts of the number of times each arc was executed?
            # branch_stats has "total" and "taken" counts for each branch,
            # but it doesn't have "taken" broken down by destination.
            destinations = [(dst, "1") for dst in executed_arcs[line]]
            destinations.extend((dst, "0") for dst in missing_arcs[line])

        # Sort exit arcs after normal arcs. Exit arcs typically come from
        # an if statement, at the end of a function, with no else clause.
        # This structure reads like you're jumping to the end of the function
        # when the conditional expression is false, so it should be presented
        # as the second alternative for the branch, after the alternative that
        # enters the if clause.
        destinations.sort(key=lambda d: (d[0] < 0, d))

        for dst, hit in destinations:
            branch = fr.arc_description(line, dst)
            outfile.write(f"BRDA:{line},0,{branch},{hit}\n")

    # Summary of the branch coverage.
    brf = sum(t for t, k in branch_stats.values())
    brh = brf - sum(t - k for t, k in branch_stats.values())
    if brf > 0:
        outfile.write(f"BRF:{brf}\n")
        outfile.write(f"BRH:{brh}\n")


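# For illustration (not part of coverage.py): the branch records written above
# take the form
#
#     BRDA:<line>,0,<branch description>,<"-" | "0" | "1">
#     BRF:<number of branches found>
#     BRH:<number of branches hit>
#
# where "-" marks a branch on a line whose out-arcs were never evaluated.

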
class LcovReporter:
    """A reporter for writing LCOV coverage reports."""

    report_type = "LCOV report"

    def __init__(self, coverage: Coverage) -> None:
        self.coverage = coverage
        self.config = coverage.config
        self.total = Numbers(self.coverage.config.precision)

    def report(self, morfs: Iterable[TMorf] | None, outfile: IO[str]) -> float:
        """Render the full LCOV report.

        `morfs` is a list of modules or file names.

        `outfile` is the file object to write the report into.
        """
        self.coverage.get_data()
        outfile = outfile or sys.stdout

        # Ensure file records are sorted by the _relative_ file name, not the
        # full path.
        to_report = [
            (fr.relative_filename(), fr, analysis)
            for fr, analysis in get_analysis_to_report(self.coverage, morfs)
        ]
        to_report.sort()

        for fname, fr, analysis in to_report:
            self.total += analysis.numbers
            self.lcov_file(fname, fr, analysis, outfile)

        return self.total.n_statements and self.total.pc_covered

    def lcov_file(
        self,
        rel_fname: str,
        fr: FileReporter,
        analysis: Analysis,
        outfile: IO[str],
    ) -> None:
        """Produce the LCOV data for a single file.

        This writes line and function coverage records, and branch coverage
        records when branch data has been measured.
        """
        if analysis.numbers.n_statements == 0:
            if self.config.skip_empty:
                return

        outfile.write(f"SF:{rel_fname}\n")

        lines = sorted(analysis.statements)
        if self.config.lcov_line_checksums:
            source_lines = fr.source().splitlines()
        else:
            source_lines = []

        lcov_lines(analysis, lines, source_lines, outfile)
        lcov_functions(fr, analysis, outfile)
        if analysis.has_arcs:
            lcov_arcs(fr, analysis, lines, outfile)

        outfile.write("end_of_record\n")
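

# A hedged usage sketch (an illustration, not part of this module): the
# reporter above is normally reached through the public API or the CLI.
# The module and output names here are hypothetical.
#
#     import coverage
#
#     cov = coverage.Coverage(branch=True)
#     cov.start()
#     import mymodule  # hypothetical code under measurement
#     cov.stop()
#     cov.save()
#     cov.lcov_report(outfile="coverage.lcov")  # CLI equivalent: coverage lcov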
372
venv/lib/python3.12/site-packages/coverage/misc.py
Normal file
@@ -0,0 +1,372 @@
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt

"""Miscellaneous stuff for coverage.py."""

from __future__ import annotations

import contextlib
import datetime
import errno
import functools
import hashlib
import importlib
import importlib.util
import inspect
import os
import os.path
import re
import sys
import types
from collections.abc import Iterable, Iterator, Mapping, Sequence
from types import ModuleType
from typing import Any, NoReturn, TypeVar

# In 6.0, the exceptions moved from misc.py to exceptions.py. But a number of
# other packages were importing the exceptions from misc, so import them here.
# pylint: disable=unused-wildcard-import
from coverage.exceptions import *  # pylint: disable=wildcard-import
from coverage.exceptions import CoverageException
from coverage.types import TArc


ISOLATED_MODULES: dict[ModuleType, ModuleType] = {}


def isolate_module(mod: ModuleType) -> ModuleType:
    """Copy a module so that we are isolated from aggressive mocking.

    If a test suite mocks os.path.exists (for example), and then we need to use
    it during the test, everything will get tangled up if we use their mock.
    Making a copy of the module when we import it will isolate coverage.py from
    those complications.
    """
    if mod not in ISOLATED_MODULES:
        new_mod = types.ModuleType(mod.__name__)
        ISOLATED_MODULES[mod] = new_mod
        for name in dir(mod):
            value = getattr(mod, name)
            if isinstance(value, types.ModuleType):
                value = isolate_module(value)
            setattr(new_mod, name, value)
    return ISOLATED_MODULES[mod]


os = isolate_module(os)


class SysModuleSaver:
    """Saves the contents of sys.modules, and removes new modules later."""

    def __init__(self) -> None:
        self.old_modules = set(sys.modules)

    def restore(self) -> None:
        """Remove any modules imported since this object started."""
        new_modules = set(sys.modules) - self.old_modules
        for m in new_modules:
            del sys.modules[m]


@contextlib.contextmanager
def sys_modules_saved() -> Iterator[None]:
    """A context manager to remove any modules imported during a block."""
    saver = SysModuleSaver()
    try:
        yield
    finally:
        saver.restore()


def import_third_party(modname: str) -> tuple[ModuleType, bool]:
    """Import a third-party module we need, but might not be installed.

    This also cleans out the module after the import, so that coverage won't
    appear to have imported it. This lets the third party use coverage for
    their own tests.

    Arguments:
        modname (str): the name of the module to import.

    Returns:
        The imported module, and a boolean indicating if the module could be imported.

        If the boolean is False, the module returned is not the one you want: don't use it.

    """
    with sys_modules_saved():
        try:
            return importlib.import_module(modname), True
        except ImportError:
            return sys, False


def nice_pair(pair: TArc) -> str:
    """Make a nice string representation of a pair of numbers.

    If the numbers are equal, just return the number, otherwise return the pair
    with a dash between them, indicating the range.

    """
    start, end = pair
    if start == end:
        return f"{start}"
    else:
        return f"{start}-{end}"


def bool_or_none(b: Any) -> bool | None:
    """Return bool(b), but preserve None."""
    if b is None:
        return None
    else:
        return bool(b)


def join_regex(regexes: Iterable[str]) -> str:
    """Combine a series of regex strings into one that matches any of them."""
    regexes = list(regexes)
    if len(regexes) == 1:
        return regexes[0]
    else:
        return "|".join(f"(?:{r})" for r in regexes)


def file_be_gone(path: str) -> None:
    """Remove a file, and don't get annoyed if it doesn't exist."""
    try:
        os.remove(path)
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise


def ensure_dir(directory: str) -> None:
    """Make sure the directory exists.

    If `directory` is None or empty, do nothing.
    """
    if directory:
        os.makedirs(directory, exist_ok=True)


def ensure_dir_for_file(path: str) -> None:
    """Make sure the directory for the path exists."""
    ensure_dir(os.path.dirname(path))


class Hasher:
    """Hashes Python data for fingerprinting."""

    def __init__(self) -> None:
        self.hash = hashlib.new("sha3_256", usedforsecurity=False)

    def update(self, v: Any) -> None:
        """Add `v` to the hash, recursively if needed."""
        self.hash.update(str(type(v)).encode("utf-8"))
        if isinstance(v, str):
            self.hash.update(v.encode("utf-8"))
        elif isinstance(v, bytes):
            self.hash.update(v)
        elif v is None:
            pass
        elif isinstance(v, (int, float)):
            self.hash.update(str(v).encode("utf-8"))
        elif isinstance(v, (tuple, list)):
            for e in v:
                self.update(e)
        elif isinstance(v, dict):
            keys = v.keys()
            for k in sorted(keys):
                self.update(k)
                self.update(v[k])
        else:
            for k in dir(v):
                if k.startswith("__"):
                    continue
                a = getattr(v, k)
                if inspect.isroutine(a):
                    continue
                self.update(k)
                self.update(a)
        self.hash.update(b".")

    def hexdigest(self) -> str:
        """Retrieve the hex digest of the hash."""
        return self.hash.hexdigest()[:32]


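# Illustrative use of Hasher (not part of the original module): fingerprint a
# small structure; equal data yields equal digests, truncated to 32 hex chars.
#
#     h = Hasher()
#     h.update({"name": "demo", "lines": [1, 2, 3]})
#     print(h.hexdigest())  # 32 hexadecimal characters

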
def _needs_to_implement(that: Any, func_name: str) -> NoReturn:
    """Helper to raise NotImplementedError in interface stubs."""
    if hasattr(that, "_coverage_plugin_name"):
        thing = "Plugin"
        name = that._coverage_plugin_name
    else:
        thing = "Class"
        klass = that.__class__
        name = f"{klass.__module__}.{klass.__name__}"

    raise NotImplementedError(
        f"{thing} {name!r} needs to implement {func_name}()",
    )


class DefaultValue:
    """A sentinel object to use for unusual default-value needs.

    Construct with a string that will be used as the repr, for display in help
    and Sphinx output.

    """

    def __init__(self, display_as: str) -> None:
        self.display_as = display_as

    def __repr__(self) -> str:
        return self.display_as


def substitute_variables(text: str, variables: Mapping[str, str]) -> str:
    """Substitute ``${VAR}`` variables in `text` with their values.

    Variables in the text can take a number of shell-inspired forms::

        $VAR
        ${VAR}
        ${VAR?}             strict: an error if VAR isn't defined.
        ${VAR-missing}      defaulted: "missing" if VAR isn't defined.
        $$                  just a dollar sign.

    `variables` is a dictionary of variable values.

    Returns the resulting text with values substituted.

    """
    dollar_pattern = r"""(?x)   # Use extended regex syntax
        \$                      # A dollar sign,
        (?:                     # then
            (?P<dollar> \$ ) |      # a dollar sign, or
            (?P<word1> \w+ ) |      # a plain word, or
            \{                      # a {-wrapped
                (?P<word2> \w+ )        # word,
                (?:                         # either
                    (?P<strict> \? ) |      # with a strict marker
                    -(?P<defval> [^}]* )    # or a default value
                )?                      # maybe.
            \}
        )
        """

    dollar_groups = ("dollar", "word1", "word2")

    def dollar_replace(match: re.Match[str]) -> str:
        """Called for each $replacement."""
        # Only one of the dollar_groups will have matched, just get its text.
        word = next(g for g in match.group(*dollar_groups) if g)  # pragma: always breaks
        if word == "$":
            return "$"
        elif word in variables:
            return variables[word]
        elif match["strict"]:
            msg = f"Variable {word} is undefined: {text!r}"
            raise CoverageException(msg)
        else:
            return match["defval"]

    text = re.sub(dollar_pattern, dollar_replace, text)
    return text


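# Illustrative behavior of substitute_variables() (not part of the module),
# following the forms documented in its docstring:
#
#     >>> substitute_variables("hi $NAME, logs in ${LOGDIR-/tmp}", {"NAME": "Ned"})
#     'hi Ned, logs in /tmp'
#     >>> substitute_variables("costs $$5", {})
#     'costs $5'

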
def format_local_datetime(dt: datetime.datetime) -> str:
    """Return a string with local timezone representing the date."""
    return dt.astimezone().strftime("%Y-%m-%d %H:%M %z")


def import_local_file(modname: str, modfile: str | None = None) -> ModuleType:
    """Import a local file as a module.

    Opens a file in the current directory named `modname`.py, imports it
    as `modname`, and returns the module object. `modfile` is the file to
    import if it isn't in the current directory.

    """
    if modfile is None:
        modfile = modname + ".py"
    spec = importlib.util.spec_from_file_location(modname, modfile)
    assert spec is not None
    mod = importlib.util.module_from_spec(spec)
    sys.modules[modname] = mod
    assert spec.loader is not None
    spec.loader.exec_module(mod)

    return mod


@functools.cache
def _human_key(s: str) -> tuple[list[str | int], str]:
    """Turn a string into a list of string and number chunks.

    "z23a" -> (["z", 23, "a"], "z23a")

    The original string is appended as a last value to ensure the
    key is unique enough so that "x1y" and "x001y" can be distinguished.
    """

    def tryint(s: str) -> str | int:
        """If `s` is a number, return an int, else `s` unchanged."""
        try:
            return int(s)
        except ValueError:
            return s

    return ([tryint(c) for c in re.split(r"(\d+)", s)], s)


def human_sorted(strings: Iterable[str]) -> list[str]:
    """Sort the given iterable of strings the way that humans expect.

    Numeric components in the strings are sorted as numbers.

    Returns the sorted list.

    """
    return sorted(strings, key=_human_key)


SortableItem = TypeVar("SortableItem", bound=Sequence[Any])


def human_sorted_items(
    items: Iterable[SortableItem],
    reverse: bool = False,
) -> list[SortableItem]:
    """Sort (string, ...) items the way humans expect.

    The elements of `items` can be any tuple/list. They'll be sorted by the
    first element (a string), with ties broken by the remaining elements.

    Returns the sorted list of items.
    """
    return sorted(items, key=lambda item: (_human_key(item[0]), *item[1:]), reverse=reverse)


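# For illustration (not part of the module): numeric chunks compare as
# numbers, so file names sort the way people expect.
#
#     >>> human_sorted(["file10.py", "file2.py", "file1.py"])
#     ['file1.py', 'file2.py', 'file10.py']

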
def plural(n: int, thing: str = "", things: str = "") -> str:
    """Pluralize a word.

    If n is 1, return thing. Otherwise return things, or thing+s.
    """
    if n == 1:
        return thing
    else:
        return things or (thing + "s")


def stdout_link(text: str, url: str) -> str:
    """Format text+url as a clickable link for stdout.

    If attached to a terminal, use escape sequences. Otherwise, just return
    the text.
    """
    if hasattr(sys.stdout, "isatty") and sys.stdout.isatty():
        return f"\033]8;;{url}\a{text}\033]8;;\a"
    else:
        return text
120
venv/lib/python3.12/site-packages/coverage/multiproc.py
Normal file
@@ -0,0 +1,120 @@
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt

"""Monkey-patching to add multiprocessing support for coverage.py"""

from __future__ import annotations

import multiprocessing
import multiprocessing.process
import os
import os.path
import sys
import traceback
from typing import Any

from coverage.debug import DebugControl

# An attribute that will be set on the module to indicate that it has been
# monkey-patched.
PATCHED_MARKER = "_coverage$patched"


OriginalProcess = multiprocessing.process.BaseProcess
original_bootstrap = OriginalProcess._bootstrap  # type: ignore[attr-defined]


class ProcessWithCoverage(OriginalProcess):  # pylint: disable=abstract-method
    """A replacement for multiprocessing.Process that starts coverage."""

    def _bootstrap(self, *args, **kwargs):  # type: ignore[no-untyped-def]
        """Wrapper around _bootstrap to start coverage."""
        debug: DebugControl | None = None
        try:
            from coverage import Coverage  # avoid circular import

            cov = Coverage(data_suffix=True, auto_data=True)
            cov._warn_preimported_source = False
            cov.start()
            _debug = cov._debug
            assert _debug is not None
            if _debug.should("multiproc"):
                debug = _debug
            if debug:
                debug.write("Calling multiprocessing bootstrap")
        except Exception:
            print("Exception during multiprocessing bootstrap init:", file=sys.stderr)
            traceback.print_exc(file=sys.stderr)
            sys.stderr.flush()
            raise
        try:
            return original_bootstrap(self, *args, **kwargs)
        finally:
            if debug:
                debug.write("Finished multiprocessing bootstrap")
            try:
                cov.stop()
                cov.save()
            except Exception as exc:
                if debug:
                    debug.write("Exception during multiprocessing bootstrap cleanup", exc=exc)
                raise
            if debug:
                debug.write("Saved multiprocessing data")


class Stowaway:
    """An object to pickle, so when it is unpickled, it can apply the monkey-patch."""

    def __init__(self, rcfile: str) -> None:
        self.rcfile = rcfile

    def __getstate__(self) -> dict[str, str]:
        return {"rcfile": self.rcfile}

    def __setstate__(self, state: dict[str, str]) -> None:
        patch_multiprocessing(state["rcfile"])


def patch_multiprocessing(rcfile: str) -> None:
    """Monkey-patch the multiprocessing module.

    This enables coverage measurement of processes started by multiprocessing.
    This involves aggressive monkey-patching.

    `rcfile` is the path to the rcfile being used.

    """

    if hasattr(multiprocessing, PATCHED_MARKER):
        return

    OriginalProcess._bootstrap = ProcessWithCoverage._bootstrap  # type: ignore[attr-defined]

    # Record the config file path in the environment, so the child process can
    # find it when it starts coverage.
    os.environ["COVERAGE_RCFILE"] = os.path.abspath(rcfile)

    # When spawning processes rather than forking them, we have no state in the
    # new process. We sneak in there with a Stowaway: we stuff one of our own
    # objects into the data that gets pickled and sent to the subprocess. When
    # the Stowaway is unpickled, its __setstate__ method is called, which
    # re-applies the monkey-patch.
    # Windows only spawns, so this is needed to keep Windows working.
    try:
        from multiprocessing import spawn

        original_get_preparation_data = spawn.get_preparation_data
    except (ImportError, AttributeError):
        pass
    else:

        def get_preparation_data_with_stowaway(name: str) -> dict[str, Any]:
            """Get the original preparation data, and also insert our stowaway."""
            d = original_get_preparation_data(name)
            d["stowaway"] = Stowaway(rcfile)
            return d

        spawn.get_preparation_data = get_preparation_data_with_stowaway

    setattr(multiprocessing, PATCHED_MARKER, True)
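

# For context (not part of the module): this patching is normally enabled by
# configuration rather than by calling patch_multiprocessing() directly, e.g.:
#
#     # .coveragerc
#     [run]
#     concurrency = multiprocessing
#     parallel = True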
146
venv/lib/python3.12/site-packages/coverage/numbits.py
Normal file
@@ -0,0 +1,146 @@
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt

"""
Functions to manipulate packed binary representations of number sets.

To save space, coverage stores sets of line numbers in SQLite using a packed
binary representation called a numbits. A numbits is a set of positive
integers.

A numbits is stored as a blob in the database. The exact meaning of the bytes
in the blobs should be considered an implementation detail that might change in
the future. Use these functions to work with those binary blobs of data.

"""

from __future__ import annotations

import json
import sqlite3
from collections.abc import Iterable
from itertools import zip_longest


def nums_to_numbits(nums: Iterable[int]) -> bytes:
    """Convert `nums` into a numbits.

    Arguments:
        nums: a reusable iterable of integers, the line numbers to store.

    Returns:
        A binary blob.
    """
    try:
        nbytes = max(nums) // 8 + 1
    except ValueError:
        # nums was empty.
        return b""
    b = bytearray(nbytes)
    for num in nums:
        b[num // 8] |= 1 << num % 8
    return bytes(b)


def numbits_to_nums(numbits: bytes) -> list[int]:
    """Convert a numbits into a list of numbers.

    Arguments:
        numbits: a binary blob, the packed number set.

    Returns:
        A list of ints.

    When registered as a SQLite function by :func:`register_sqlite_functions`,
    this returns a string, a JSON-encoded list of ints.

    """
    nums = []
    for byte_i, byte in enumerate(numbits):
        for bit_i in range(8):
            if byte & (1 << bit_i):
                nums.append(byte_i * 8 + bit_i)
    return nums


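# Round-trip illustration (not part of the module): packing and unpacking
# preserves the set of numbers.
#
#     >>> numbits_to_nums(nums_to_numbits([1, 5, 100]))
#     [1, 5, 100]

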
def numbits_union(numbits1: bytes, numbits2: bytes) -> bytes:
    """Compute the union of two numbits.

    Returns:
        A new numbits, the union of `numbits1` and `numbits2`.
    """
    byte_pairs = zip_longest(numbits1, numbits2, fillvalue=0)
    return bytes(b1 | b2 for b1, b2 in byte_pairs)


def numbits_intersection(numbits1: bytes, numbits2: bytes) -> bytes:
    """Compute the intersection of two numbits.

    Returns:
        A new numbits, the intersection of `numbits1` and `numbits2`.
    """
    byte_pairs = zip_longest(numbits1, numbits2, fillvalue=0)
    intersection_bytes = bytes(b1 & b2 for b1, b2 in byte_pairs)
    return intersection_bytes.rstrip(b"\0")


def numbits_any_intersection(numbits1: bytes, numbits2: bytes) -> bool:
    """Is there any number that appears in both numbits?

    Determine whether two number sets have a non-empty intersection. This is
    faster than computing the intersection.

    Returns:
        A bool, True if there is any number in both `numbits1` and `numbits2`.
    """
    byte_pairs = zip_longest(numbits1, numbits2, fillvalue=0)
    return any(b1 & b2 for b1, b2 in byte_pairs)


def num_in_numbits(num: int, numbits: bytes) -> bool:
    """Does the integer `num` appear in `numbits`?

    Returns:
        A bool, True if `num` is a member of `numbits`.
    """
    nbyte, nbit = divmod(num, 8)
    if nbyte >= len(numbits):
        return False
    return bool(numbits[nbyte] & (1 << nbit))


def register_sqlite_functions(connection: sqlite3.Connection) -> None:
    """
    Define numbits functions in a SQLite connection.

    This defines these functions for use in SQLite statements:

    * :func:`numbits_union`
    * :func:`numbits_intersection`
    * :func:`numbits_any_intersection`
    * :func:`num_in_numbits`
    * :func:`numbits_to_nums`

    `connection` is a :class:`sqlite3.Connection <python:sqlite3.Connection>`
    object. After creating the connection, pass it to this function to
    register the numbits functions. Then you can use numbits functions in your
    queries::

        import sqlite3
        from coverage.numbits import register_sqlite_functions

        conn = sqlite3.connect("example.db")
        register_sqlite_functions(conn)
        c = conn.cursor()
        # Kind of a nonsense query:
        # Find all the files and contexts that executed line 47 in any file:
        c.execute(
            "select file_id, context_id from line_bits where num_in_numbits(?, numbits)",
            (47,)
        )
    """
    connection.create_function("numbits_union", 2, numbits_union)
    connection.create_function("numbits_intersection", 2, numbits_intersection)
    connection.create_function("numbits_any_intersection", 2, numbits_any_intersection)
    connection.create_function("num_in_numbits", 2, num_in_numbits)
    connection.create_function("numbits_to_nums", 1, lambda b: json.dumps(numbits_to_nums(b)))
1354
venv/lib/python3.12/site-packages/coverage/parser.py
Normal file
File diff suppressed because it is too large
165
venv/lib/python3.12/site-packages/coverage/patch.py
Normal file
@@ -0,0 +1,165 @@
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt

"""Invasive patches for coverage.py."""

from __future__ import annotations

import atexit
import contextlib
import os
import site
from pathlib import Path
from typing import TYPE_CHECKING, Any, NoReturn

from coverage import env
from coverage.debug import DevNullDebug, NoDebugging
from coverage.exceptions import ConfigError, CoverageException

if TYPE_CHECKING:
    from coverage import Coverage
    from coverage.config import CoverageConfig
    from coverage.types import TDebugCtl


def apply_patches(
    cov: Coverage,
    config: CoverageConfig,
    debug: TDebugCtl,
    *,
    make_pth_file: bool = True,
) -> None:
    """Apply invasive patches requested by `[run] patch=`."""
    debug = debug if debug.should("patch") else DevNullDebug()
    for patch in sorted(set(config.patch)):
        if patch == "_exit":
            _patch__exit(cov, debug)

        elif patch == "execv":
            _patch_execv(cov, config, debug)

        elif patch == "fork":
            _patch_fork(debug)

        elif patch == "subprocess":
            _patch_subprocess(config, debug, make_pth_file)

        else:
            raise ConfigError(f"Unknown patch {patch!r}")


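# For context (not part of the module): these patches are requested through
# the `[run] patch=` setting the docstring above mentions, for example:
#
#     # .coveragerc
#     [run]
#     patch = _exit, subprocess

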
def _patch__exit(cov: Coverage, debug: TDebugCtl) -> None:
    """Patch os._exit."""
    debug.write("Patching _exit")

    old_exit = os._exit

    def coverage_os_exit_patch(status: int) -> NoReturn:
        with contextlib.suppress(Exception):
            debug.write(f"Using _exit patch with {cov = }")
        with contextlib.suppress(Exception):
            cov.save()
        old_exit(status)

    os._exit = coverage_os_exit_patch


def _patch_execv(cov: Coverage, config: CoverageConfig, debug: TDebugCtl) -> None:
    """Patch the execv family of functions."""
    if env.WINDOWS:
        raise CoverageException("patch=execv isn't supported yet on Windows.")

    debug.write("Patching execv")

    def make_execv_patch(fname: str, old_execv: Any) -> Any:
        def coverage_execv_patch(*args: Any, **kwargs: Any) -> Any:
            with contextlib.suppress(Exception):
                debug.write(f"Using execv patch for {fname} with {cov = }")
            with contextlib.suppress(Exception):
                cov.save()

            if fname.endswith("e"):
                # Assume the `env` argument is passed positionally.
                new_env = args[-1]
                # Pass our configuration in the new environment.
                new_env["COVERAGE_PROCESS_CONFIG"] = config.serialize()
                if env.TESTING:
                    # The subprocesses need to use the same core as the main process.
                    new_env["COVERAGE_CORE"] = os.getenv("COVERAGE_CORE")

                    # When testing locally, we need to honor the pyc file location
                    # or they get written to the .tox directories and pollute the
                    # next run with a different core.
                    if (cache_prefix := os.getenv("PYTHONPYCACHEPREFIX")) is not None:
                        new_env["PYTHONPYCACHEPREFIX"] = cache_prefix

                    # Without this, it fails on PyPy and Ubuntu.
                    new_env["PATH"] = os.getenv("PATH")
            old_execv(*args, **kwargs)

        return coverage_execv_patch

    # All the exec* and spawn* functions eventually call execv or execve.
    os.execv = make_execv_patch("execv", os.execv)
    os.execve = make_execv_patch("execve", os.execve)


def _patch_fork(debug: TDebugCtl) -> None:
    """Ensure Coverage is properly reset after a fork."""
    from coverage.control import _after_fork_in_child

    if env.WINDOWS:
        raise CoverageException("patch=fork isn't supported yet on Windows.")

    debug.write("Patching fork")
    os.register_at_fork(after_in_child=_after_fork_in_child)


def _patch_subprocess(config: CoverageConfig, debug: TDebugCtl, make_pth_file: bool) -> None:
    """Write .pth files and set environment vars to measure subprocesses."""
    debug.write("Patching subprocess")

    if make_pth_file:
        pth_files = create_pth_files(debug)

        def delete_pth_files() -> None:
            for p in pth_files:
                debug.write(f"Deleting subprocess .pth file: {str(p)!r}")
                p.unlink(missing_ok=True)

        atexit.register(delete_pth_files)
    assert config.config_file is not None
    os.environ["COVERAGE_PROCESS_CONFIG"] = config.serialize()


# Writing .pth files is not obvious. On Windows, getsitepackages() returns two
# directories. A .pth file in the first will be run, but coverage isn't
# importable yet. We write into all the places we can, but with defensive
# import code.

PTH_CODE = """\
try:
    import coverage
except:
    pass
else:
    coverage.process_startup()
"""

PTH_TEXT = f"import sys; exec({PTH_CODE!r})\n"


def create_pth_files(debug: TDebugCtl = NoDebugging()) -> list[Path]:
    """Create .pth files for measuring subprocesses."""
    pth_files = []
    for pth_dir in site.getsitepackages():
        pth_file = Path(pth_dir) / f"subcover_{os.getpid()}.pth"
        try:
            if debug.should("patch"):
                debug.write(f"Writing subprocess .pth file: {str(pth_file)!r}")
            pth_file.write_text(PTH_TEXT, encoding="utf-8")
        except OSError:  # pragma: cant happen
            continue
        else:
            pth_files.append(pth_file)
    return pth_files
200
venv/lib/python3.12/site-packages/coverage/phystokens.py
Normal file
@@ -0,0 +1,200 @@
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt

"""Better tokenizing for coverage.py."""

from __future__ import annotations

import ast
import io
import keyword
import re
import sys
import token
import tokenize
from collections.abc import Iterable

from coverage import env
from coverage.types import TLineNo, TSourceTokenLines

TokenInfos = Iterable[tokenize.TokenInfo]


def _phys_tokens(toks: TokenInfos) -> TokenInfos:
    """Return all physical tokens, even line continuations.

    tokenize.generate_tokens() doesn't return a token for the backslash that
    continues lines. This wrapper provides those tokens so that we can
    re-create a faithful representation of the original source.

    Returns the same values as generate_tokens()

    """
    last_line: str | None = None
    last_lineno = -1
    last_ttext: str = ""
    for ttype, ttext, (slineno, scol), (elineno, ecol), ltext in toks:
        if last_lineno != elineno:
            if last_line and last_line.endswith("\\\n"):
                # We are at the beginning of a new line, and the last line
                # ended with a backslash. We probably have to inject a
                # backslash token into the stream. Unfortunately, there's more
                # to figure out. This code::
                #
                #   usage = """\
                #   HEY THERE
                #   """
                #
                # triggers this condition, but the token text is::
                #
                #   '"""\\\nHEY THERE\n"""'
                #
                # so we need to figure out if the backslash is already in the
                # string token or not.
                inject_backslash = True
                if last_ttext.endswith("\\"):
                    inject_backslash = False
                elif ttype == token.STRING:
                    if (  # pylint: disable=simplifiable-if-statement
                        last_line.endswith("\\\n")
                        and last_line.rstrip(" \\\n").endswith(last_ttext)
                    ):
                        # Deal with special cases like such code::
                        #
                        #   a = ["aaa",\  # there may be zero or more blanks between "," and "\".
                        #   "bbb \
                        #   ccc"]
                        #
                        inject_backslash = True
                    else:
                        # It's a multi-line string and the first line ends with
                        # a backslash, so we don't need to inject another.
                        inject_backslash = False
                elif env.PYBEHAVIOR.fstring_syntax and ttype == token.FSTRING_MIDDLE:
                    inject_backslash = False
                if inject_backslash:
                    # Figure out what column the backslash is in.
                    ccol = len(last_line.split("\n")[-2]) - 1
                    # Yield the token, with a fake token type.
                    yield tokenize.TokenInfo(
                        99999,
                        "\\\n",
                        (slineno, ccol),
                        (slineno, ccol + 2),
                        last_line,
                    )
        last_line = ltext
        if ttype not in (tokenize.NEWLINE, tokenize.NL):
            last_ttext = ttext
        yield tokenize.TokenInfo(ttype, ttext, (slineno, scol), (elineno, ecol), ltext)
        last_lineno = elineno


def find_soft_key_lines(source: str) -> set[TLineNo]:
    """Helper for finding lines with soft keywords, like match/case lines."""
    soft_key_lines: set[TLineNo] = set()

    for node in ast.walk(ast.parse(source)):
        if sys.version_info >= (3, 10) and isinstance(node, ast.Match):
            soft_key_lines.add(node.lineno)
            for case in node.cases:
                soft_key_lines.add(case.pattern.lineno)
        elif sys.version_info >= (3, 12) and isinstance(node, ast.TypeAlias):
            soft_key_lines.add(node.lineno)

    return soft_key_lines


def source_token_lines(source: str) -> TSourceTokenLines:
    """Generate a series of lines, one for each line in `source`.

    Each line is a list of pairs, each pair is a token::

        [('key', 'def'), ('ws', ' '), ('nam', 'hello'), ('op', '('), ... ]

    Each pair has a token class, and the token text.

    If you concatenate all the token texts, and then join them with newlines,
    you should have your original `source` back, with two differences:
    trailing white space is not preserved, and a final line with no newline
    is indistinguishable from a final line with a newline.

    """

    ws_tokens = {token.INDENT, token.DEDENT, token.NEWLINE, tokenize.NL}
    line: list[tuple[str, str]] = []
    col = 0

    source = source.expandtabs(8).replace("\r\n", "\n")
    tokgen = generate_tokens(source)

    if env.PYBEHAVIOR.soft_keywords:
        soft_key_lines = find_soft_key_lines(source)
    else:
        soft_key_lines = set()

    for ttype, ttext, (sline, scol), (_, ecol), _ in _phys_tokens(tokgen):
        mark_start = True
        for part in re.split("(\n)", ttext):
            if part == "\n":
                yield line
                line = []
                col = 0
                mark_end = False
            elif part == "":
                mark_end = False
            elif ttype in ws_tokens:
                mark_end = False
            else:
                if env.PYBEHAVIOR.fstring_syntax and ttype == token.FSTRING_MIDDLE:
                    part = part.replace("{", "{{").replace("}", "}}")
                    ecol = scol + len(part)
                if mark_start and scol > col:
                    line.append(("ws", " " * (scol - col)))
                    mark_start = False
                tok_class = tokenize.tok_name.get(ttype, "xx").lower()[:3]
                if ttype == token.NAME:
                    if keyword.iskeyword(ttext):
                        # Hard keywords are always keywords.
                        tok_class = "key"
                    elif env.PYBEHAVIOR.soft_keywords and keyword.issoftkeyword(ttext):
                        # Soft keywords appear at the start of their line.
                        if len(line) == 0:
                            is_start_of_line = True
                        elif (len(line) == 1) and line[0][0] == "ws":
                            is_start_of_line = True
                        else:
                            is_start_of_line = False
                        if is_start_of_line and sline in soft_key_lines:
                            tok_class = "key"
                line.append((tok_class, part))
                mark_end = True
            scol = 0
        if mark_end:
            col = ecol

    if line:
        yield line


def generate_tokens(text: str) -> TokenInfos:
    """A helper around `tokenize.generate_tokens`.

    Originally this was used to cache the results, but it didn't seem to make
    reporting go faster, and caused issues with using too much memory.

    """
    readline = io.StringIO(text).readline
    return tokenize.generate_tokens(readline)


def source_encoding(source: bytes) -> str:
    """Determine the encoding for `source`, according to PEP 263.

    `source` is a byte string: the text of the program.

    Returns a string, the name of the encoding.

    """
    readline = iter(source.splitlines(True)).__next__
    return tokenize.detect_encoding(readline)[0]
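

# Illustration (not part of the module): PEP 263 coding declarations are
# detected in the first two lines; without one, UTF-8 is the default.
#
#     >>> source_encoding(b"# -*- coding: iso-8859-1 -*-\nx = 1\n")
#     'iso-8859-1'
#     >>> source_encoding(b"x = 1\n")
#     'utf-8'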
617
venv/lib/python3.12/site-packages/coverage/plugin.py
Normal file
@@ -0,0 +1,617 @@
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt

"""
.. versionadded:: 4.0

Plug-in interfaces for coverage.py.

Coverage.py supports a few different kinds of plug-ins that change its
behavior:

* File tracers implement tracing of non-Python file types.

* Configurers add custom configuration, using Python code to change the
  configuration.

* Dynamic context switchers decide when the dynamic context has changed, for
  example, to record what test function produced the coverage.

To write a coverage.py plug-in, create a module with a subclass of
:class:`~coverage.CoveragePlugin`. You will override methods in your class to
participate in various aspects of coverage.py's processing.
Different types of plug-ins have to override different methods.

Any plug-in can optionally implement :meth:`~coverage.CoveragePlugin.sys_info`
to provide debugging information about their operation.

Your module must also contain a ``coverage_init`` function that registers an
instance of your plug-in class::

    import coverage

    class MyPlugin(coverage.CoveragePlugin):
        ...

    def coverage_init(reg, options):
        reg.add_file_tracer(MyPlugin())

You use the `reg` parameter passed to your ``coverage_init`` function to
register your plug-in object. The registration method you call depends on
what kind of plug-in it is.

If your plug-in takes options, the `options` parameter is a dictionary of your
plug-in's options from the coverage.py configuration file. Use them however
you want to configure your object before registering it.

Coverage.py will store its own information on your plug-in object, using
attributes whose names start with ``_coverage_``. Don't be startled.

.. warning::
    Plug-ins are imported by coverage.py before it begins measuring code.
    If you write a plugin in your own project, it might import your product
    code before coverage.py can start measuring. This can result in your
    own code being reported as missing.

    One solution is to put your plugins in your project tree, but not in
    your importable Python package.


.. _file_tracer_plugins:

File Tracers
============

File tracers implement measurement support for non-Python files. File tracers
implement the :meth:`~coverage.CoveragePlugin.file_tracer` method to claim
files and the :meth:`~coverage.CoveragePlugin.file_reporter` method to report
on those files.

In your ``coverage_init`` function, use the ``add_file_tracer`` method to
register your file tracer.


.. _configurer_plugins:

Configurers
===========

.. versionadded:: 4.5

Configurers modify the configuration of coverage.py during start-up.
Configurers implement the :meth:`~coverage.CoveragePlugin.configure` method to
change the configuration.

In your ``coverage_init`` function, use the ``add_configurer`` method to
register your configurer.


.. _dynamic_context_plugins:

Dynamic Context Switchers
=========================

.. versionadded:: 5.0

Dynamic context switcher plugins implement the
:meth:`~coverage.CoveragePlugin.dynamic_context` method to dynamically compute
the context label for each measured frame.

Computed context labels are useful when you want to group measured data without
modifying the source code.

For example, you could write a plugin that checks `frame.f_code` to inspect
the currently executed method, and set the context label to a fully qualified
method name if it's an instance method of `unittest.TestCase` and the method
name starts with 'test'. Such a plugin would provide basic coverage grouping
by test and could be used with test runners that have no built-in coverage.py
support. A minimal sketch of such a plugin appears after this docstring.

In your ``coverage_init`` function, use the ``add_dynamic_context`` method to
register your dynamic context switcher.

"""

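# A minimal sketch of the dynamic context switcher described in the docstring
# above (an illustration, not part of this module; the class name is
# hypothetical):
#
#     import unittest
#
#     import coverage
#
#     class TestMethodContexts(coverage.CoveragePlugin):
#         def dynamic_context(self, frame):
#             code = frame.f_code
#             if code.co_name.startswith("test"):
#                 slf = frame.f_locals.get("self")
#                 if isinstance(slf, unittest.TestCase):
#                     cls = type(slf)
#                     return f"{cls.__module__}.{cls.__qualname__}.{code.co_name}"
#             return None
#
#     def coverage_init(reg, options):
#         reg.add_dynamic_context(TestMethodContexts())
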
from __future__ import annotations

import dataclasses
import functools
from collections.abc import Iterable
from types import FrameType
from typing import Any

from coverage import files
from coverage.misc import _needs_to_implement
from coverage.types import TArc, TConfigurable, TLineNo, TSourceTokenLines


class CoveragePlugin:
    """Base class for coverage.py plug-ins."""

    _coverage_plugin_name: str
    _coverage_enabled: bool

    def file_tracer(self, filename: str) -> FileTracer | None:  # pylint: disable=unused-argument
        """Get a :class:`FileTracer` object for a file.

        Plug-in type: file tracer.

        Every Python source file is offered to your plug-in to give it a chance
        to take responsibility for tracing the file. If your plug-in can
        handle the file, it should return a :class:`FileTracer` object.
        Otherwise return None.

        There is no way to register your plug-in for particular files.
        Instead, this method is invoked for all files as they are executed,
        and the plug-in decides whether it can trace the file or not.
        Be prepared for `filename` to refer to all kinds of files that have
        nothing to do with your plug-in.

        The file name will be a Python file being executed. There are two
        broad categories of behavior for a plug-in, depending on the kind of
        files your plug-in supports:

        * Static file names: each of your original source files has been
          converted into a distinct Python file. Your plug-in is invoked with
          the Python file name, and it maps it back to its original source
          file.

        * Dynamic file names: all of your source files are executed by the same
          Python file. In this case, your plug-in implements
          :meth:`FileTracer.dynamic_source_filename` to provide the actual
          source file for each execution frame.

        `filename` is a string, the path to the file being considered. This is
        the absolute real path to the file. If you are comparing to other
        paths, be sure to take this into account.

        Returns a :class:`FileTracer` object to use to trace `filename`, or
        None if this plug-in cannot trace this file.

        """
        return None

    def file_reporter(
        self,
        filename: str,  # pylint: disable=unused-argument
    ) -> FileReporter | str:  # str should be Literal["python"]
        """Get the :class:`FileReporter` class to use for a file.

        Plug-in type: file tracer.

        This will only be invoked if :meth:`file_tracer` returned non-None for
        `filename`. It's an error to return None from this method.

        Returns a :class:`FileReporter` object to use to report on `filename`,
        or the string `"python"` to have coverage.py treat the file as Python.

        """
        _needs_to_implement(self, "file_reporter")

    def dynamic_context(
        self,
        frame: FrameType,  # pylint: disable=unused-argument
    ) -> str | None:
        """Get the dynamically computed context label for `frame`.

        Plug-in type: dynamic context.

        This method is invoked for each frame when outside of a dynamic
        context, to see if a new dynamic context should be started. If it
        returns a string, a new context label is set for this and deeper
        frames. The dynamic context ends when this frame returns.

        Returns a string to start a new dynamic context, or None if no new
        context should be started.

        """
        return None

    def find_executable_files(
        self,
        src_dir: str,  # pylint: disable=unused-argument
    ) -> Iterable[str]:
        """Yield all of the executable files in `src_dir`, recursively.

        Plug-in type: file tracer.

        Executability is a plug-in-specific property, but generally means files
        which would have been considered for coverage analysis, had they been
        included automatically.

        Returns or yields a sequence of strings, the paths to files that could
        have been executed, including files that had been executed.

        """
        return []

    def configure(self, config: TConfigurable) -> None:
        """Modify the configuration of coverage.py.

        Plug-in type: configurer.

        This method is called during coverage.py start-up, to give your plug-in
        a chance to change the configuration. The `config` parameter is an
        object with :meth:`~coverage.Coverage.get_option` and
        :meth:`~coverage.Coverage.set_option` methods. Do not call any other
        methods on the `config` object.

        """
        pass

    def sys_info(self) -> Iterable[tuple[str, Any]]:
        """Get a list of information useful for debugging.

        Plug-in type: any.

        This method will be invoked for ``--debug=sys``. Your
        plug-in can return any information it wants to be displayed.

        Returns a list of pairs: `[(name, value), ...]`.

        """
        return []


class CoveragePluginBase:
    """Plugins produce specialized objects, which point back to the original plugin."""

    _coverage_plugin: CoveragePlugin


class FileTracer(CoveragePluginBase):
    """Support needed for files during the execution phase.

    File tracer plug-ins implement subclasses of FileTracer to return from
    their :meth:`~CoveragePlugin.file_tracer` method.

    You may construct this object from :meth:`CoveragePlugin.file_tracer` any
    way you like. A natural choice would be to pass the file name given to
    `file_tracer`.

    `FileTracer` objects should only be created in the
    :meth:`CoveragePlugin.file_tracer` method.

    See :ref:`howitworks` for details of the different coverage.py phases.

    """

    def source_filename(self) -> str:
        """The source file name for this file.

        This may be any file name you like. A key responsibility of a plug-in
        is to own the mapping from Python execution back to whatever source
        file name was originally the source of the code.

        See :meth:`CoveragePlugin.file_tracer` for details about static and
        dynamic file names.

        Returns the file name to credit with this execution.

        """
        _needs_to_implement(self, "source_filename")

    def has_dynamic_source_filename(self) -> bool:
        """Does this FileTracer have dynamic source file names?

        FileTracers can provide dynamically determined file names by
        implementing :meth:`dynamic_source_filename`. Invoking that function
        is expensive. To determine whether to invoke it, coverage.py uses the
        result of this function to know if it needs to bother invoking
        :meth:`dynamic_source_filename`.

        See :meth:`CoveragePlugin.file_tracer` for details about static and
        dynamic file names.

        Returns True if :meth:`dynamic_source_filename` should be called to get
        dynamic source file names.

        """
        return False

    def dynamic_source_filename(
        self,
        filename: str,  # pylint: disable=unused-argument
        frame: FrameType,  # pylint: disable=unused-argument
    ) -> str | None:
        """Get a dynamically computed source file name.

        Some plug-ins need to compute the source file name dynamically for each
        frame.

        This function will not be invoked if
        :meth:`has_dynamic_source_filename` returns False.

        Returns the source file name for this frame, or None if this frame
        shouldn't be measured.

        """
        return None

    def line_number_range(self, frame: FrameType) -> tuple[TLineNo, TLineNo]:
        """Get the range of source line numbers for a given call frame.

        The call frame is examined, and the source line number in the original
        file is returned. The return value is a pair of numbers, the starting
        line number and the ending line number, both inclusive. For example,
        returning (5, 7) means that lines 5, 6, and 7 should be considered
        executed.

        This function might decide that the frame doesn't indicate any lines
        from the source file were executed. Return (-1, -1) in this case to
        tell coverage.py that no lines should be recorded for this frame.

        """
        lineno = frame.f_lineno
        return lineno, lineno


@dataclasses.dataclass
class CodeRegion:
    """Data for a region of code found by :meth:`FileReporter.code_regions`."""

    #: The kind of region, like `"function"` or `"class"`. Must be one of the
    #: singular values returned by :meth:`FileReporter.code_region_kinds`.
    kind: str

    #: The name of the region. For example, a function or class name.
    name: str

    #: The line in the source file to link to when navigating to the region.
    #: Can be a line not mentioned in `lines`.
    start: int

    #: The lines in the region. Should be lines that could be executed in the
    #: region. For example, a class region includes all of the lines in the
    #: methods of the class, but not the lines defining class attributes, since
    #: they are executed on import, not as part of exercising the class. The
    #: set can include non-executable lines like blanks and comments.
    lines: set[int]

    def __lt__(self, other: CodeRegion) -> bool:
        """To support sorting to make test-writing easier."""
        if self.name == other.name:
            return min(self.lines) < min(other.lines)
        return self.name < other.name


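# Illustrative construction (not part of the module; the values are made up):
#
#     region = CodeRegion(kind="function", name="parse", start=10, lines={11, 12, 13})
#
# Regions sort by name, with ties broken by their smallest line number.

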
@functools.total_ordering
class FileReporter(CoveragePluginBase):
    """Support needed for files during the analysis and reporting phases.

    File tracer plug-ins implement a subclass of `FileReporter`, and return
    instances from their :meth:`CoveragePlugin.file_reporter` method.

    There are many methods here, but only :meth:`lines` is required, to provide
    the set of executable lines in the file.

    See :ref:`howitworks` for details of the different coverage.py phases.

    """

    def __init__(self, filename: str) -> None:
        """Simple initialization of a `FileReporter`.

        The `filename` argument is the path to the file being reported. This
        will be available as the `.filename` attribute on the object. Other
        method implementations on this base class rely on this attribute.

        """
        self.filename = filename

    def __repr__(self) -> str:
        return f"<{self.__class__.__name__} filename={self.filename!r}>"

    def relative_filename(self) -> str:
        """Get the relative file name for this file.

        This file path will be displayed in reports. The default
        implementation will supply the actual project-relative file path. You
        only need to supply this method if you have an unusual syntax for file
        paths.

        """
        return files.relative_filename(self.filename)

    def source(self) -> str:
        """Get the source for the file.

        Returns a Unicode string.

        The base implementation simply reads the `self.filename` file and
        decodes it as UTF-8. Override this method if your file isn't readable
        as a text file, or if you need other encoding support.

        """
        with open(self.filename, encoding="utf-8") as f:
            return f.read()

    def lines(self) -> set[TLineNo]:
        """Get the executable lines in this file.

        Your plug-in must determine which lines in the file were possibly
        executable. This method returns a set of those line numbers.

        Returns a set of line numbers.

        """
        _needs_to_implement(self, "lines")

    def excluded_lines(self) -> set[TLineNo]:
        """Get the excluded executable lines in this file.

        Your plug-in can use any method it likes to allow the user to exclude
        executable lines from consideration.

        Returns a set of line numbers.

        The base implementation returns the empty set.

        """
        return set()

    def translate_lines(self, lines: Iterable[TLineNo]) -> set[TLineNo]:
        """Translate recorded lines into reported lines.

        Some file formats will want to report lines slightly differently than
        they are recorded. For example, Python records the last line of a
        multi-line statement, but reports are nicer if they mention the first
        line.

        Your plug-in can optionally define this method to perform these kinds
        of adjustment.

        `lines` is a sequence of integers, the recorded line numbers.

        Returns a set of integers, the adjusted line numbers.

        The base implementation returns the numbers unchanged.

        """
        return set(lines)

    def arcs(self) -> set[TArc]:
        """Get the executable arcs in this file.

        To support branch coverage, your plug-in needs to be able to indicate
        possible execution paths, as a set of line number pairs. Each pair is
        a `(prev, next)` pair indicating that execution can transition from the
        `prev` line number to the `next` line number.

        Returns a set of pairs of line numbers. The default implementation
        returns an empty set.

        """
        return set()

    def no_branch_lines(self) -> set[TLineNo]:
        """Get the lines excused from branch coverage in this file.

        Your plug-in can use any method it likes to allow the user to exclude
        lines from consideration of branch coverage.

        Returns a set of line numbers.

        The base implementation returns the empty set.

        """
        return set()

    def translate_arcs(self, arcs: Iterable[TArc]) -> set[TArc]:
        """Translate recorded arcs into reported arcs.

        Similar to :meth:`translate_lines`, but for arcs. `arcs` is a set of
        line number pairs.

        Returns a set of line number pairs.

        The default implementation returns `arcs` unchanged.

        """
        return set(arcs)

    def exit_counts(self) -> dict[TLineNo, int]:
        """Get a count of exits from each line.

        To determine which lines are branches, coverage.py looks for lines that
        have more than one exit. This function creates a dict mapping each
        executable line number to a count of how many exits it has.

        To be honest, this feels wrong, and should be refactored. Let me know
        if you attempt to implement this method in your plug-in...

        """
        return {}

    def missing_arc_description(
        self,
        start: TLineNo,
        end: TLineNo,
        executed_arcs: Iterable[TArc] | None = None,  # pylint: disable=unused-argument
    ) -> str:
        """Provide an English sentence describing a missing arc.

        The `start` and `end` arguments are the line numbers of the missing
        arc. Negative numbers indicate entering or exiting code objects.

        The `executed_arcs` argument is a set of line number pairs, the arcs
        that were executed in this file.

        By default, this simply returns the string "Line {start} didn't jump
        to {end}".

        """
        return f"Line {start} didn't jump to line {end}"

    def arc_description(
        self,
        start: TLineNo,  # pylint: disable=unused-argument
        end: TLineNo,
    ) -> str:
        """Provide an English description of an arc's effect."""
        return f"jump to line {end}"

    def source_token_lines(self) -> TSourceTokenLines:
        """Generate a series of tokenized lines, one for each line in `source`.

        These tokens are used for syntax-colored reports.

        Each line is a list of pairs, each pair is a token::

            [("key", "def"), ("ws", " "), ("nam", "hello"), ("op", "("), ... ]

        Each pair has a token class, and the token text. The token classes
        are:

        * ``"com"``: a comment
        * ``"key"``: a keyword
        * ``"nam"``: a name, or identifier
|
||||
* ``"num"``: a number
|
||||
* ``"op"``: an operator
|
||||
* ``"str"``: a string literal
|
||||
* ``"ws"``: some white space
|
||||
* ``"txt"``: some other kind of text
|
||||
|
||||
If you concatenate all the token texts, and then join them with
|
||||
newlines, you should have your original source back.
|
||||
|
||||
The default implementation simply returns each line tagged as
|
||||
``"txt"``.
|
||||
|
||||
"""
|
||||
for line in self.source().splitlines():
|
||||
yield [("txt", line)]
|
||||
|
||||
def code_regions(self) -> Iterable[CodeRegion]:
|
||||
"""Identify regions in the source file for finer reporting than by file.
|
||||
|
||||
Returns an iterable of :class:`CodeRegion` objects. The kinds reported
|
||||
should be in the possibilities returned by :meth:`code_region_kinds`.
|
||||
|
||||
"""
|
||||
return []
|
||||
|
||||
def code_region_kinds(self) -> Iterable[tuple[str, str]]:
|
||||
"""Return the kinds of code regions this plugin can find.
|
||||
|
||||
The returned pairs are the singular and plural forms of the kinds::
|
||||
|
||||
[
|
||||
("function", "functions"),
|
||||
("class", "classes"),
|
||||
]
|
||||
|
||||
This will usually be hard-coded, but could also differ by the specific
|
||||
source file involved.
|
||||
|
||||
"""
|
||||
return []
|
||||
|
||||
def __eq__(self, other: Any) -> bool:
|
||||
return isinstance(other, FileReporter) and self.filename == other.filename
|
||||
|
||||
def __lt__(self, other: Any) -> bool:
|
||||
return isinstance(other, FileReporter) and self.filename < other.filename
|
||||
|
||||
# This object doesn't need to be hashed.
|
||||
__hash__ = None # type: ignore[assignment]
|
||||
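# Example: a minimal sketch of a third-party FileReporter (hypothetical,
# not part of coverage.py). Only lines() is required; this one treats every
# non-blank, non-"#" line of a toy file format as executable.

from coverage.plugin import FileReporter


class ToyFileReporter(FileReporter):
    """A FileReporter for a hypothetical toy line-oriented format."""

    def lines(self) -> set[int]:
        executable = set()
        for lineno, text in enumerate(self.source().splitlines(), start=1):
            stripped = text.strip()
            if stripped and not stripped.startswith("#"):
                executable.add(lineno)
        return executable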
299
venv/lib/python3.12/site-packages/coverage/plugin_support.py
Normal file
@@ -0,0 +1,299 @@
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt

"""Support for plugins."""

from __future__ import annotations

import os
import os.path
import sys
from collections.abc import Iterable, Iterator
from types import FrameType
from typing import Any, Callable

from coverage.exceptions import PluginError
from coverage.misc import isolate_module
from coverage.plugin import CoveragePlugin, FileReporter, FileTracer
from coverage.types import TArc, TConfigurable, TDebugCtl, TLineNo, TPluginConfig, TSourceTokenLines

os = isolate_module(os)


class Plugins:
    """The currently loaded collection of coverage.py plugins."""

    def __init__(self, debug: TDebugCtl | None = None) -> None:
        self.order: list[CoveragePlugin] = []
        self.names: dict[str, CoveragePlugin] = {}
        self.file_tracers: list[CoveragePlugin] = []
        self.configurers: list[CoveragePlugin] = []
        self.context_switchers: list[CoveragePlugin] = []

        self.current_module: str | None = None
        self.debug = debug

    def load_from_config(
        self,
        modules: Iterable[str],
        config: TPluginConfig,
    ) -> None:
        """Load plugin modules, and read their settings from configuration."""

        for module in modules:
            self.current_module = module
            __import__(module)
            mod = sys.modules[module]

            coverage_init = getattr(mod, "coverage_init", None)
            if not coverage_init:
                raise PluginError(
                    f"Plugin module {module!r} didn't define a coverage_init function",
                )

            options = config.get_plugin_options(module)
            coverage_init(self, options)

        self.current_module = None

    def load_from_callables(
        self,
        plugin_inits: Iterable[TCoverageInit],
    ) -> None:
        """Load plugins from callables provided."""
        for fn in plugin_inits:
            fn(self)

    def add_file_tracer(self, plugin: CoveragePlugin) -> None:
        """Add a file tracer plugin.

        `plugin` is an instance of a third-party plugin class. It must
        implement the :meth:`CoveragePlugin.file_tracer` method.

        """
        self._add_plugin(plugin, self.file_tracers)

    def add_configurer(self, plugin: CoveragePlugin) -> None:
        """Add a configuring plugin.

        `plugin` is an instance of a third-party plugin class. It must
        implement the :meth:`CoveragePlugin.configure` method.

        """
        self._add_plugin(plugin, self.configurers)

    def add_dynamic_context(self, plugin: CoveragePlugin) -> None:
        """Add a dynamic context plugin.

        `plugin` is an instance of a third-party plugin class. It must
        implement the :meth:`CoveragePlugin.dynamic_context` method.

        """
        self._add_plugin(plugin, self.context_switchers)

    def add_noop(self, plugin: CoveragePlugin) -> None:
        """Add a plugin that does nothing.

        This is only useful for testing the plugin support.

        """
        self._add_plugin(plugin, None)

    def _add_plugin(
        self,
        plugin: CoveragePlugin,
        specialized: list[CoveragePlugin] | None,
    ) -> None:
        """Add a plugin object.

        `plugin` is a :class:`CoveragePlugin` instance to add. `specialized`
        is a list to append the plugin to.

        """
        plugin_name = f"{self.current_module}.{plugin.__class__.__name__}"
        if self.debug and self.debug.should("plugin"):
            self.debug.write(f"Loaded plugin {self.current_module!r}: {plugin!r}")
            labelled = LabelledDebug(f"plugin {self.current_module!r}", self.debug)
            plugin = DebugPluginWrapper(plugin, labelled)

        plugin._coverage_plugin_name = plugin_name
        plugin._coverage_enabled = True
        self.order.append(plugin)
        self.names[plugin_name] = plugin
        if specialized is not None:
            specialized.append(plugin)

    def __bool__(self) -> bool:
        return bool(self.order)

    def __iter__(self) -> Iterator[CoveragePlugin]:
        return iter(self.order)

    def get(self, plugin_name: str) -> CoveragePlugin:
        """Return a plugin by name."""
        return self.names[plugin_name]

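# Example: the shape of a plugin module that load_from_config() above can
# import (hypothetical module name "toy_plugin"; in real use this code would
# live in its own module). The module-level coverage_init function is the
# required entry point; `reg` is the Plugins collection defined above.

from coverage.plugin import CoveragePlugin


class ToyPlugin(CoveragePlugin):
    """A do-nothing plugin, just to show the registration shape."""

    def __init__(self, options: dict[str, str]) -> None:
        super().__init__()
        self.options = options


def coverage_init(reg: "Plugins", options: dict[str, str]) -> None:
    reg.add_file_tracer(ToyPlugin(options))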
TCoverageInit = Callable[[Plugins], None]


class LabelledDebug:
    """A Debug writer, but with labels for prepending to the messages."""

    def __init__(self, label: str, debug: TDebugCtl, prev_labels: Iterable[str] = ()):
        self.labels = list(prev_labels) + [label]
        self.debug = debug

    def add_label(self, label: str) -> LabelledDebug:
        """Add a label to the writer, and return a new `LabelledDebug`."""
        return LabelledDebug(label, self.debug, self.labels)

    def message_prefix(self) -> str:
        """The prefix to use on messages, combining the labels."""
        prefixes = self.labels + [""]
        return ":\n".join(" " * i + label for i, label in enumerate(prefixes))

    def write(self, message: str) -> None:
        """Write `message`, but with the labels prepended."""
        self.debug.write(f"{self.message_prefix()}{message}")

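# Illustration (hypothetical labels): message_prefix() indents one extra
# space per label level, so
#     ld = LabelledDebug("plugin 'p'", debug).add_label("file 'f.py'")
#     ld.write("lines() --> {1, 2}")
# writes:
#     plugin 'p':
#      file 'f.py':
#       lines() --> {1, 2}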
class DebugPluginWrapper(CoveragePlugin):
    """Wrap a plugin, and use debug to report on what it's doing."""

    def __init__(self, plugin: CoveragePlugin, debug: LabelledDebug) -> None:
        super().__init__()
        self.plugin = plugin
        self.debug = debug

    def file_tracer(self, filename: str) -> FileTracer | None:
        tracer = self.plugin.file_tracer(filename)
        self.debug.write(f"file_tracer({filename!r}) --> {tracer!r}")
        if tracer:
            debug = self.debug.add_label(f"file {filename!r}")
            tracer = DebugFileTracerWrapper(tracer, debug)
        return tracer

    def file_reporter(self, filename: str) -> FileReporter | str:
        reporter = self.plugin.file_reporter(filename)
        assert isinstance(reporter, FileReporter)
        self.debug.write(f"file_reporter({filename!r}) --> {reporter!r}")
        if reporter:
            debug = self.debug.add_label(f"file {filename!r}")
            reporter = DebugFileReporterWrapper(filename, reporter, debug)
        return reporter

    def dynamic_context(self, frame: FrameType) -> str | None:
        context = self.plugin.dynamic_context(frame)
        self.debug.write(f"dynamic_context({frame!r}) --> {context!r}")
        return context

    def find_executable_files(self, src_dir: str) -> Iterable[str]:
        executable_files = self.plugin.find_executable_files(src_dir)
        self.debug.write(f"find_executable_files({src_dir!r}) --> {executable_files!r}")
        return executable_files

    def configure(self, config: TConfigurable) -> None:
        self.debug.write(f"configure({config!r})")
        self.plugin.configure(config)

    def sys_info(self) -> Iterable[tuple[str, Any]]:
        return self.plugin.sys_info()


class DebugFileTracerWrapper(FileTracer):
    """A debugging `FileTracer`."""

    def __init__(self, tracer: FileTracer, debug: LabelledDebug) -> None:
        self.tracer = tracer
        self.debug = debug

    def _show_frame(self, frame: FrameType) -> str:
        """A short string identifying a frame, for debug messages."""
        filename = os.path.basename(frame.f_code.co_filename)
        return f"{filename}@{frame.f_lineno}"

    def source_filename(self) -> str:
        sfilename = self.tracer.source_filename()
        self.debug.write(f"source_filename() --> {sfilename!r}")
        return sfilename

    def has_dynamic_source_filename(self) -> bool:
        has = self.tracer.has_dynamic_source_filename()
        self.debug.write(f"has_dynamic_source_filename() --> {has!r}")
        return has

    def dynamic_source_filename(self, filename: str, frame: FrameType) -> str | None:
        dyn = self.tracer.dynamic_source_filename(filename, frame)
        self.debug.write(
            "dynamic_source_filename({!r}, {}) --> {!r}".format(
                filename,
                self._show_frame(frame),
                dyn,
            )
        )
        return dyn

    def line_number_range(self, frame: FrameType) -> tuple[TLineNo, TLineNo]:
        pair = self.tracer.line_number_range(frame)
        self.debug.write(f"line_number_range({self._show_frame(frame)}) --> {pair!r}")
        return pair


class DebugFileReporterWrapper(FileReporter):
    """A debugging `FileReporter`."""

    def __init__(self, filename: str, reporter: FileReporter, debug: LabelledDebug) -> None:
        super().__init__(filename)
        self.reporter = reporter
        self.debug = debug

    def relative_filename(self) -> str:
        ret = self.reporter.relative_filename()
        self.debug.write(f"relative_filename() --> {ret!r}")
        return ret

    def lines(self) -> set[TLineNo]:
        ret = self.reporter.lines()
        self.debug.write(f"lines() --> {ret!r}")
        return ret

    def excluded_lines(self) -> set[TLineNo]:
        ret = self.reporter.excluded_lines()
        self.debug.write(f"excluded_lines() --> {ret!r}")
        return ret

    def translate_lines(self, lines: Iterable[TLineNo]) -> set[TLineNo]:
        ret = self.reporter.translate_lines(lines)
        self.debug.write(f"translate_lines({lines!r}) --> {ret!r}")
        return ret

    def translate_arcs(self, arcs: Iterable[TArc]) -> set[TArc]:
        ret = self.reporter.translate_arcs(arcs)
        self.debug.write(f"translate_arcs({arcs!r}) --> {ret!r}")
        return ret

    def no_branch_lines(self) -> set[TLineNo]:
        ret = self.reporter.no_branch_lines()
        self.debug.write(f"no_branch_lines() --> {ret!r}")
        return ret

    def exit_counts(self) -> dict[TLineNo, int]:
        ret = self.reporter.exit_counts()
        self.debug.write(f"exit_counts() --> {ret!r}")
        return ret

    def arcs(self) -> set[TArc]:
        ret = self.reporter.arcs()
        self.debug.write(f"arcs() --> {ret!r}")
        return ret

    def source(self) -> str:
        ret = self.reporter.source()
        self.debug.write(f"source() --> {len(ret)} chars")
        return ret

    def source_token_lines(self) -> TSourceTokenLines:
        ret = list(self.reporter.source_token_lines())
        self.debug.write(f"source_token_lines() --> {len(ret)} tokens")
        return ret
1
venv/lib/python3.12/site-packages/coverage/py.typed
Normal file
@@ -0,0 +1 @@
# Marker file for PEP 561 to indicate that this package has type hints.
269
venv/lib/python3.12/site-packages/coverage/python.py
Normal file
@@ -0,0 +1,269 @@
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt

"""Python source expertise for coverage.py"""

from __future__ import annotations

import os.path
import types
import zipimport
from collections.abc import Iterable
from typing import TYPE_CHECKING

from coverage import env
from coverage.exceptions import CoverageException, NoSource
from coverage.files import canonical_filename, relative_filename, zip_location
from coverage.misc import isolate_module, join_regex
from coverage.parser import PythonParser
from coverage.phystokens import source_encoding, source_token_lines
from coverage.plugin import CodeRegion, FileReporter
from coverage.regions import code_regions
from coverage.types import TArc, TLineNo, TMorf, TSourceTokenLines

if TYPE_CHECKING:
    from coverage import Coverage

os = isolate_module(os)


def read_python_source(filename: str) -> bytes:
    """Read the Python source text from `filename`.

    Returns bytes.

    """
    with open(filename, "rb") as f:
        source = f.read()

    return source.replace(b"\r\n", b"\n").replace(b"\r", b"\n")


def get_python_source(filename: str) -> str:
    """Return the source code, as unicode."""
    base, ext = os.path.splitext(filename)
    if ext == ".py" and env.WINDOWS:
        exts = [".py", ".pyw"]
    else:
        exts = [ext]

    source_bytes: bytes | None
    for ext in exts:
        try_filename = base + ext
        if os.path.exists(try_filename):
            # A regular text file: open it.
            source_bytes = read_python_source(try_filename)
            break

        # Maybe it's in a zip file?
        source_bytes = get_zip_bytes(try_filename)
        if source_bytes is not None:
            break
    else:
        # Couldn't find source.
        raise NoSource(f"No source for code: '{filename}'.", slug="no-source")

    # Replace \f because of http://bugs.python.org/issue19035
    source_bytes = source_bytes.replace(b"\f", b" ")
    source = source_bytes.decode(source_encoding(source_bytes), "replace")

    # Python code should always end with a line with a newline.
    if source and source[-1] != "\n":
        source += "\n"

    return source


def get_zip_bytes(filename: str) -> bytes | None:
    """Get data from `filename` if it is a zip file path.

    Returns the bytestring data read from the zip file, or None if no zip file
    could be found or `filename` isn't in it. The data returned will be
    an empty string if the file is empty.

    """
    zipfile_inner = zip_location(filename)
    if zipfile_inner is not None:
        zipfile, inner = zipfile_inner
        try:
            zi = zipimport.zipimporter(zipfile)
        except zipimport.ZipImportError:
            return None
        try:
            data = zi.get_data(inner)
        except OSError:
            return None
        return data
    return None


def source_for_file(filename: str) -> str:
    """Return the source filename for `filename`.

    Given a file name being traced, return the best guess as to the source
    file to attribute it to.

    """
    if filename.endswith(".py"):
        # .py files are themselves source files.
        return filename

    elif filename.endswith((".pyc", ".pyo")):
        # Bytecode files probably have source files near them.
        py_filename = filename[:-1]
        if os.path.exists(py_filename):
            # Found a .py file, use that.
            return py_filename
        if env.WINDOWS:
            # On Windows, it could be a .pyw file.
            pyw_filename = py_filename + "w"
            if os.path.exists(pyw_filename):
                return pyw_filename
        # Didn't find source, but it's probably the .py file we want.
        return py_filename

    # No idea, just use the file name as-is.
    return filename


def source_for_morf(morf: TMorf) -> str:
    """Get the source filename for the module-or-file `morf`."""
    if hasattr(morf, "__file__") and morf.__file__:
        filename = morf.__file__
    elif isinstance(morf, types.ModuleType):
        # A module should have had .__file__, otherwise we can't use it.
        # This could be a PEP-420 namespace package.
        raise CoverageException(f"Module {morf} has no file")
    else:
        filename = morf

    filename = source_for_file(filename)
    return filename

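# Example (illustrative, hypothetical paths): how the helpers above map a
# traced file back to its source.
#
#   source_for_file("pkg/mod.py")   -> "pkg/mod.py"
#   source_for_file("pkg/mod.pyc")  -> "pkg/mod.py" (even if only the .pyc
#                                      exists; on Windows a .pyw is tried too)
#   get_python_source("pkg/mod.py") -> decoded text with line endings
#                                      normalized to "\n" and a final newline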
class PythonFileReporter(FileReporter):
    """Report support for a Python file."""

    def __init__(self, morf: TMorf, coverage: Coverage | None = None) -> None:
        self.coverage = coverage

        filename = source_for_morf(morf)

        fname = filename
        canonicalize = True
        if self.coverage is not None:
            if self.coverage.config.relative_files:
                canonicalize = False
        if canonicalize:
            fname = canonical_filename(filename)
        super().__init__(fname)

        if hasattr(morf, "__name__"):
            name = morf.__name__.replace(".", os.sep)
            if os.path.basename(filename).startswith("__init__."):
                name += os.sep + "__init__"
            name += ".py"
        else:
            name = relative_filename(filename)
        self.relname = name

        self._source: str | None = None
        self._parser: PythonParser | None = None
        self._excluded = None

    def __repr__(self) -> str:
        return f"<PythonFileReporter {self.filename!r}>"

    def relative_filename(self) -> str:
        return self.relname

    @property
    def parser(self) -> PythonParser:
        """Lazily create a :class:`PythonParser`."""
        assert self.coverage is not None
        if self._parser is None:
            self._parser = PythonParser(
                filename=self.filename,
                exclude=self.coverage._exclude_regex("exclude"),
            )
            self._parser.parse_source()
        return self._parser

    def lines(self) -> set[TLineNo]:
        """Return the line numbers of statements in the file."""
        return self.parser.statements

    def multiline_map(self) -> dict[TLineNo, TLineNo]:
        """A map of line numbers to the first line in a multi-line statement."""
        return self.parser._multiline

    def excluded_lines(self) -> set[TLineNo]:
        """Return the line numbers of excluded statements in the file."""
        return self.parser.excluded

    def translate_lines(self, lines: Iterable[TLineNo]) -> set[TLineNo]:
        return self.parser.translate_lines(lines)

    def translate_arcs(self, arcs: Iterable[TArc]) -> set[TArc]:
        return self.parser.translate_arcs(arcs)

    def no_branch_lines(self) -> set[TLineNo]:
        assert self.coverage is not None
        no_branch = self.parser.lines_matching(
            join_regex(self.coverage.config.partial_list + self.coverage.config.partial_always_list)
        )
        return no_branch

    def arcs(self) -> set[TArc]:
        return self.parser.arcs()

    def exit_counts(self) -> dict[TLineNo, int]:
        return self.parser.exit_counts()

    def missing_arc_description(
        self,
        start: TLineNo,
        end: TLineNo,
        executed_arcs: Iterable[TArc] | None = None,
    ) -> str:
        return self.parser.missing_arc_description(start, end)

    def arc_description(self, start: TLineNo, end: TLineNo) -> str:
        return self.parser.arc_description(start, end)

    def source(self) -> str:
        if self._source is None:
            self._source = get_python_source(self.filename)
        return self._source

    def should_be_python(self) -> bool:
        """Does it seem like this file should contain Python?

        This is used to decide if a file reported as part of the execution of
        a program was really likely to have contained Python in the first
        place.

        """
        # Get the file extension.
        _, ext = os.path.splitext(self.filename)

        # Anything named *.py* should be Python.
        if ext.startswith(".py"):
            return True
        # A file with no extension should be Python.
        if not ext:
            return True
        # Everything else is probably not Python.
        return False

    def source_token_lines(self) -> TSourceTokenLines:
        return source_token_lines(self.source())

    def code_regions(self) -> Iterable[CodeRegion]:
        return code_regions(self.source())

    def code_region_kinds(self) -> Iterable[tuple[str, str]]:
        return [
            ("function", "functions"),
            ("class", "classes"),
        ]
369
venv/lib/python3.12/site-packages/coverage/pytracer.py
Normal file
@@ -0,0 +1,369 @@
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt

"""Raw data collector for coverage.py."""

from __future__ import annotations

import atexit
import dis
import itertools
import sys
import threading
from types import FrameType, ModuleType
from typing import Any, Callable, cast

from coverage import env
from coverage.types import (
    TArc,
    TFileDisposition,
    TLineNo,
    Tracer,
    TShouldStartContextFn,
    TShouldTraceFn,
    TTraceData,
    TTraceFileData,
    TTraceFn,
    TWarnFn,
)

# I don't understand why, but if we use `cast(set[TLineNo], ...)` inside
# the _trace() function, we get some strange behavior on PyPy 3.10.
# Assigning these names here and using them below fixes the problem.
# See https://github.com/nedbat/coveragepy/issues/1902
set_TLineNo = set[TLineNo]
set_TArc = set[TArc]


# We need the YIELD_VALUE opcode below, in a comparison-friendly form.
# PYVERSIONS: RESUME is new in Python 3.11
RESUME = dis.opmap.get("RESUME")
RETURN_VALUE = dis.opmap["RETURN_VALUE"]
if RESUME is None:
    YIELD_VALUE = dis.opmap["YIELD_VALUE"]
    YIELD_FROM = dis.opmap["YIELD_FROM"]
    YIELD_FROM_OFFSET = 0 if env.PYPY else 2
else:
    YIELD_VALUE = YIELD_FROM = YIELD_FROM_OFFSET = -1

# When running meta-coverage, this file can try to trace itself, which confuses
# everything. Don't trace ourselves.

THIS_FILE = __file__.rstrip("co")


class PyTracer(Tracer):
    """Python implementation of the raw data tracer."""

    # Because of poor implementations of trace-function-manipulating tools,
    # the Python trace function must be kept very simple. In particular, there
    # must be only one function ever set as the trace function, both through
    # sys.settrace, and as the return value from the trace function. Put
    # another way, the trace function must always return itself. It cannot
    # swap in other functions, or return None to avoid tracing a particular
    # frame.
    #
    # The trace manipulator that introduced this restriction is DecoratorTools,
    # which sets a trace function, and then later restores the pre-existing one
    # by calling sys.settrace with a function it found in the current frame.
    #
    # Systems that use DecoratorTools (or similar trace manipulations) must use
    # PyTracer to get accurate results. The command-line --timid argument is
    # used to force the use of this tracer.

    tracer_ids = itertools.count()

    def __init__(self) -> None:
        # Which tracer are we?
        self.id = next(self.tracer_ids)

        # Attributes set from the collector:
        self.data: TTraceData
        self.trace_arcs = False
        self.should_trace: TShouldTraceFn
        self.should_trace_cache: dict[str, TFileDisposition | None]
        self.should_start_context: TShouldStartContextFn | None = None
        self.switch_context: Callable[[str | None], None] | None = None
        self.lock_data: Callable[[], None]
        self.unlock_data: Callable[[], None]
        self.warn: TWarnFn

        # The threading module to use, if any.
        self.threading: ModuleType | None = None

        self.cur_file_data: TTraceFileData | None = None
        self.last_line: TLineNo = 0
        self.cur_file_name: str | None = None
        self.context: str | None = None
        self.started_context = False

        # The data_stack parallels the Python call stack. Each entry is
        # information about an active frame, a four-element tuple:
        #   [0] The TTraceData for this frame's file. Could be None if we
        #       aren't tracing this frame.
        #   [1] The current file name for the frame. None if we aren't tracing
        #       this frame.
        #   [2] The last line number executed in this frame.
        #   [3] Boolean: did this frame start a new context?
        self.data_stack: list[tuple[TTraceFileData | None, str | None, TLineNo, bool]] = []
        self.thread: threading.Thread | None = None
        self.stopped = False
        self._activity = False

        self.in_atexit = False
        # On exit, self.in_atexit = True
        atexit.register(setattr, self, "in_atexit", True)

        # Cache a bound method on the instance, so that we don't have to
        # re-create a bound method object all the time.
        self._cached_bound_method_trace: TTraceFn = self._trace

    def __repr__(self) -> str:
        points = sum(len(v) for v in self.data.values())
        files = len(self.data)
        return f"<PyTracer at {id(self):#x}: {points} data points in {files} files>"

    def log(self, marker: str, *args: Any) -> None:
        """For hard-core logging of what this tracer is doing."""
        with open("/tmp/debug_trace.txt", "a", encoding="utf-8") as f:
            f.write(f"{marker} {self.id}[{len(self.data_stack)}]")
            if 0:  # if you want thread ids..
                f.write(  # type: ignore[unreachable]
                    ".{:x}.{:x}".format(
                        self.thread.ident,
                        self.threading.current_thread().ident,
                    )
                )
            f.write(" {}".format(" ".join(map(str, args))))
            if 0:  # if you want callers..
                f.write(" | ")  # type: ignore[unreachable]
                stack = " / ".join(
                    (fname or "???").rpartition("/")[-1] for _, fname, _, _ in self.data_stack
                )
                f.write(stack)
            f.write("\n")

    def _trace(
        self,
        frame: FrameType,
        event: str,
        arg: Any,  # pylint: disable=unused-argument
        lineno: TLineNo | None = None,  # pylint: disable=unused-argument
    ) -> TTraceFn | None:
        """The trace function passed to sys.settrace."""

        if THIS_FILE in frame.f_code.co_filename:
            return None

        # f = frame; code = f.f_code
        # self.log(":", f"{code.co_filename} {f.f_lineno} {code.co_name}()", event)

        if self.stopped and sys.gettrace() == self._cached_bound_method_trace:  # pylint: disable=comparison-with-callable
            # The PyTrace.stop() method has been called, possibly by another
            # thread, let's deactivate ourselves now.
            if 0:
                f = frame  # type: ignore[unreachable]
                self.log("---\nX", f.f_code.co_filename, f.f_lineno)
                while f:
                    self.log(">", f.f_code.co_filename, f.f_lineno, f.f_code.co_name, f.f_trace)
                    f = f.f_back
            sys.settrace(None)
            try:
                self.cur_file_data, self.cur_file_name, self.last_line, self.started_context = (
                    self.data_stack.pop()
                )
            except IndexError:
                self.log(
                    "Empty stack!",
                    frame.f_code.co_filename,
                    frame.f_lineno,
                    frame.f_code.co_name,
                )
            return None

        # if event != "call" and frame.f_code.co_filename != self.cur_file_name:
        #     self.log("---\n*", frame.f_code.co_filename, self.cur_file_name, frame.f_lineno)

        if event == "call":
            # Should we start a new context?
            if self.should_start_context and self.context is None:
                context_maybe = self.should_start_context(frame)  # pylint: disable=not-callable
                if context_maybe is not None:
                    self.context = context_maybe
                    started_context = True
                    assert self.switch_context is not None
                    self.switch_context(self.context)  # pylint: disable=not-callable
                else:
                    started_context = False
            else:
                started_context = False
            self.started_context = started_context

            # Entering a new frame. Decide if we should trace in this file.
            self._activity = True
            self.data_stack.append(
                (
                    self.cur_file_data,
                    self.cur_file_name,
                    self.last_line,
                    started_context,
                ),
            )

            # Improve tracing performance: when calling a function, both caller
            # and callee are often within the same file. If that's the case, we
            # don't have to re-check whether to trace the corresponding
            # function (which is a little bit expensive since it involves
            # dictionary lookups). This optimization is only correct if we
            # didn't start a context.
            filename = frame.f_code.co_filename
            if filename != self.cur_file_name or started_context:
                self.cur_file_name = filename
                disp = self.should_trace_cache.get(filename)
                if disp is None:
                    disp = self.should_trace(filename, frame)
                    self.should_trace_cache[filename] = disp

                self.cur_file_data = None
                if disp.trace:
                    tracename = disp.source_filename
                    assert tracename is not None
                    self.lock_data()
                    try:
                        if tracename not in self.data:
                            self.data[tracename] = set()
                    finally:
                        self.unlock_data()
                    self.cur_file_data = self.data[tracename]
                else:
                    frame.f_trace_lines = False
            elif not self.cur_file_data:
                frame.f_trace_lines = False

            # The call event is really a "start frame" event, and happens for
            # function calls and re-entering generators. The f_lasti field is
            # -1 for calls, and a real offset for generators. Use <0 as the
            # line number for calls, and the real line number for generators.
            if RESUME is not None:
                # The current opcode is guaranteed to be RESUME. The argument
                # determines what kind of resume it is.
                oparg = frame.f_code.co_code[frame.f_lasti + 1]
                real_call = (oparg == 0)  # fmt: skip
            else:
                real_call = (getattr(frame, "f_lasti", -1) < 0)  # fmt: skip
            if real_call:
                self.last_line = -frame.f_code.co_firstlineno
            else:
                self.last_line = frame.f_lineno

        elif event == "line":
            # Record an executed line.
            if self.cur_file_data is not None:
                flineno: TLineNo = frame.f_lineno

                if self.trace_arcs:
                    cast(set_TArc, self.cur_file_data).add((self.last_line, flineno))
                else:
                    cast(set_TLineNo, self.cur_file_data).add(flineno)
                self.last_line = flineno

        elif event == "return":
            if self.trace_arcs and self.cur_file_data:
                # Record an arc leaving the function, but beware that a
                # "return" event might just mean yielding from a generator.
                code = frame.f_code.co_code
                lasti = frame.f_lasti
                if RESUME is not None:
                    if len(code) == lasti + 2:
                        # A return from the end of a code object is a real return.
                        real_return = True
                    else:
                        # It is a real return if we aren't going to resume next.
                        if env.PYBEHAVIOR.lasti_is_yield:
                            lasti += 2
                        real_return = code[lasti] != RESUME
                else:
                    if code[lasti] == RETURN_VALUE:
                        real_return = True
                    elif code[lasti] == YIELD_VALUE:
                        real_return = False
                    elif len(code) <= lasti + YIELD_FROM_OFFSET:
                        real_return = True
                    elif code[lasti + YIELD_FROM_OFFSET] == YIELD_FROM:
                        real_return = False
                    else:
                        real_return = True
                if real_return:
                    first = frame.f_code.co_firstlineno
                    cast(set_TArc, self.cur_file_data).add((self.last_line, -first))

            # Leaving this function, pop the filename stack.
            self.cur_file_data, self.cur_file_name, self.last_line, self.started_context = (
                self.data_stack.pop()
            )
            # Leaving a context?
            if self.started_context:
                assert self.switch_context is not None
                self.context = None
                self.switch_context(None)  # pylint: disable=not-callable

        return self._cached_bound_method_trace

    def start(self) -> TTraceFn:
        """Start this Tracer.

        Return a Python function suitable for use with sys.settrace().

        """
        self.stopped = False
        if self.threading:
            if self.thread is None:
                self.thread = self.threading.current_thread()

        sys.settrace(self._cached_bound_method_trace)
        return self._cached_bound_method_trace

    def stop(self) -> None:
        """Stop this Tracer."""
        # Get the active tracer callback before setting the stop flag to be
        # able to detect if the tracer was changed prior to stopping it.
        tf = sys.gettrace()

        # Set the stop flag. The actual call to sys.settrace(None) will happen
        # in the self._trace callback itself to make sure to call it from the
        # right thread.
        self.stopped = True

        if self.threading:
            assert self.thread is not None
            if self.thread.ident != self.threading.current_thread().ident:
                # Called on a different thread than started us: we can't unhook
                # ourselves, but we've set the flag that we should stop, so we
                # won't do any more tracing.
                # self.log("~", "stopping on different threads")
                return

        # PyPy clears the trace function before running atexit functions,
        # so don't warn if we are in atexit on PyPy and the trace function
        # has changed to None. Metacoverage also messes this up, so don't
        # warn if we are measuring ourselves.
        suppress_warning = (env.PYPY and self.in_atexit and tf is None) or env.METACOV
        if self.warn and not suppress_warning:
            if tf != self._cached_bound_method_trace:  # pylint: disable=comparison-with-callable
                self.warn(
                    "Trace function changed, data is likely wrong: "
                    + f"{tf!r} != {self._cached_bound_method_trace!r}",
                    slug="trace-changed",
                )

    def activity(self) -> bool:
        """Has there been any activity?"""
        return self._activity

    def reset_activity(self) -> None:
        """Reset the activity() flag."""
        self._activity = False

    def get_stats(self) -> dict[str, int] | None:
        """Return a dictionary of statistics, or None."""
        return None
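# A sketch of wiring PyTracer by hand (hypothetical harness; in real use the
# collector sets these attributes, as the comments in __init__ above note).
# A disposition only needs the .trace and .source_filename fields that
# _trace() reads.

import threading
from types import SimpleNamespace

from coverage.pytracer import PyTracer

tracer = PyTracer()
tracer.data = {}
tracer.trace_arcs = False
tracer.should_trace_cache = {}
tracer.should_trace = lambda filename, frame: SimpleNamespace(
    trace=filename.endswith("myscript.py"),  # hypothetical filter
    source_filename=filename,
)
_lock = threading.Lock()
tracer.lock_data = _lock.acquire
tracer.unlock_data = _lock.release
tracer.warn = lambda msg, slug=None: print("coverage warning:", msg)

tracer.start()
...  # run the code to measure
tracer.stop()
print(tracer.data)  # {source_filename: set of executed line numbers}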
127
venv/lib/python3.12/site-packages/coverage/regions.py
Normal file
@@ -0,0 +1,127 @@
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt

"""Find functions and classes in Python code."""

from __future__ import annotations

import ast
import dataclasses
from typing import cast

from coverage.plugin import CodeRegion


@dataclasses.dataclass
class Context:
    """The nested named context of a function or class."""

    name: str
    kind: str
    lines: set[int]


class RegionFinder:
    """An ast visitor that will find and track regions of code.

    Functions and classes are tracked by name. Results are in the .regions
    attribute.

    """

    def __init__(self) -> None:
        self.regions: list[CodeRegion] = []
        self.context: list[Context] = []

    def parse_source(self, source: str) -> None:
        """Parse `source` and walk the ast to populate the .regions attribute."""
        self.handle_node(ast.parse(source))

    def fq_node_name(self) -> str:
        """Get the current fully qualified name we're processing."""
        return ".".join(c.name for c in self.context)

    def handle_node(self, node: ast.AST) -> None:
        """Recursively handle any node."""
        if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)):
            self.handle_FunctionDef(node)
        elif isinstance(node, ast.ClassDef):
            self.handle_ClassDef(node)
        else:
            self.handle_node_body(node)

    def handle_node_body(self, node: ast.AST) -> None:
        """Recursively handle the nodes in this node's body, if any."""
        for body_node in getattr(node, "body", ()):
            self.handle_node(body_node)

    def handle_FunctionDef(self, node: ast.FunctionDef | ast.AsyncFunctionDef) -> None:
        """Called for `def` or `async def`."""
        lines = set(range(node.body[0].lineno, cast(int, node.body[-1].end_lineno) + 1))
        if self.context and self.context[-1].kind == "class":
            # Function bodies are part of their enclosing class.
            self.context[-1].lines |= lines
        # Function bodies should be excluded from the nearest enclosing function.
        for ancestor in reversed(self.context):
            if ancestor.kind == "function":
                ancestor.lines -= lines
                break
        self.context.append(Context(node.name, "function", lines))
        self.regions.append(
            CodeRegion(
                kind="function",
                name=self.fq_node_name(),
                start=node.lineno,
                lines=lines,
            )
        )
        self.handle_node_body(node)
        self.context.pop()

    def handle_ClassDef(self, node: ast.ClassDef) -> None:
        """Called for `class`."""
        # The lines for a class are the lines in the methods of the class.
        # We start empty, and count on handle_FunctionDef to add the lines it
        # finds.
        lines: set[int] = set()
        self.context.append(Context(node.name, "class", lines))
        self.regions.append(
            CodeRegion(
                kind="class",
                name=self.fq_node_name(),
                start=node.lineno,
                lines=lines,
            )
        )
        self.handle_node_body(node)
        self.context.pop()
        # Class bodies should be excluded from the enclosing classes.
        for ancestor in reversed(self.context):
            if ancestor.kind == "class":
                ancestor.lines -= lines


def code_regions(source: str) -> list[CodeRegion]:
    """Find function and class regions in source code.

    Analyzes the code in `source`, and returns a list of :class:`CodeRegion`
    objects describing functions and classes as regions of the code::

        [
            CodeRegion(kind="function", name="func1", start=8, lines={10, 11, 12}),
            CodeRegion(kind="function", name="MyClass.method", start=30, lines={34, 35, 36}),
            CodeRegion(kind="class", name="MyClass", start=25, lines={34, 35, 36}),
        ]

    The line numbers will include comments and blank lines. Later processing
    will need to ignore those lines as needed.

    Nested functions and classes are excluded from their enclosing region. No
    line should be reported as being part of more than one function, or more
    than one class. Lines in methods are reported as being in a function and
    in a class.

    """
    rf = RegionFinder()
    rf.parse_source(source)
    return rf.regions
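# Example of running the region finder on a small source string (output
# shown as comments, derived from the rules documented above).

from coverage.regions import code_regions

src = (
    "class C:\n"          # line 1
    "    def m(self):\n"  # line 2
    "        pass\n"      # line 3
)
for region in code_regions(src):
    print(region.kind, region.name, region.start, sorted(region.lines))
# class C 1 [3]        -- a class's lines are its methods' body lines
# function C.m 2 [3]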
298
venv/lib/python3.12/site-packages/coverage/report.py
Normal file
@@ -0,0 +1,298 @@
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt

"""Summary reporting"""

from __future__ import annotations

import sys
from collections.abc import Iterable
from typing import IO, TYPE_CHECKING, Any

from coverage.exceptions import ConfigError, NoDataError
from coverage.misc import human_sorted_items, plural
from coverage.plugin import FileReporter
from coverage.report_core import get_analysis_to_report
from coverage.results import Analysis, Numbers
from coverage.types import TMorf

if TYPE_CHECKING:
    from coverage import Coverage


class SummaryReporter:
    """A reporter for writing the summary report."""

    def __init__(self, coverage: Coverage) -> None:
        self.coverage = coverage
        self.config = self.coverage.config
        self.branches = coverage.get_data().has_arcs()
        self.outfile: IO[str] | None = None
        self.output_format = self.config.format or "text"
        if self.output_format not in {"text", "markdown", "total"}:
            raise ConfigError(f"Unknown report format choice: {self.output_format!r}")
        self.fr_analyses: list[tuple[FileReporter, Analysis]] = []
        self.skipped_count = 0
        self.empty_count = 0
        self.total = Numbers(precision=self.config.precision)

    def write(self, line: str) -> None:
        """Write a line to the output, adding a newline."""
        assert self.outfile is not None
        self.outfile.write(line.rstrip())
        self.outfile.write("\n")

    def write_items(self, items: Iterable[str]) -> None:
        """Write a list of strings, joined together."""
        self.write("".join(items))

    def report_text(
        self,
        header: list[str],
        lines_values: list[list[Any]],
        total_line: list[Any],
        end_lines: list[str],
    ) -> None:
        """Internal method that prints report data in text format.

        `header` is a list with captions.
        `lines_values` is a list of lists of sortable values.
        `total_line` is a list with values of the total line.
        `end_lines` is a list of ending lines with information about skipped files.

        """
        # Prepare the formatting strings, header, and column sorting.
        max_name = max([len(line[0]) for line in lines_values] + [5]) + 1
        max_n = max(len(total_line[header.index("Cover")]) + 2, len(" Cover")) + 1
        max_n = max([max_n] + [len(line[header.index("Cover")]) + 2 for line in lines_values])
        formats = dict(
            Name="{:{name_len}}",
            Stmts="{:>7}",
            Miss="{:>7}",
            Branch="{:>7}",
            BrPart="{:>7}",
            Cover="{:>{n}}",
            Missing="{:>10}",
        )
        header_items = [formats[item].format(item, name_len=max_name, n=max_n) for item in header]
        header_str = "".join(header_items)
        rule = "-" * len(header_str)

        # Write the header
        self.write(header_str)
        self.write(rule)

        # Write the data lines
        formats.update(
            dict(
                Cover="{:>{n}}%",
                Missing=" {:9}",
            )
        )
        for values in lines_values:
            self.write_items(
                (
                    formats[item].format(str(value), name_len=max_name, n=max_n - 1)
                    for item, value in zip(header, values)
                )
            )

        # Write a TOTAL line
        if lines_values:
            self.write(rule)

        self.write_items(
            (
                formats[item].format(str(value), name_len=max_name, n=max_n - 1)
                for item, value in zip(header, total_line)
            )
        )

        for end_line in end_lines:
            self.write(end_line)

    def report_markdown(
        self,
        header: list[str],
        lines_values: list[list[Any]],
        total_line: list[Any],
        end_lines: list[str],
    ) -> None:
        """Internal method that prints report data in markdown format.

        `header` is a list with captions.
        `lines_values` is a sorted list of lists containing coverage information.
        `total_line` is a list with values of the total line.
        `end_lines` is a list of ending lines with information about skipped files.

        """
        # Prepare the formatting strings, header, and column sorting.
        max_name = max((len(line[0].replace("_", "\\_")) for line in lines_values), default=0)
        max_name = max(max_name, len("**TOTAL**")) + 1
        formats = dict(
            Name="| {:{name_len}}|",
            Stmts="{:>9} |",
            Miss="{:>9} |",
            Branch="{:>9} |",
            BrPart="{:>9} |",
            Cover="{:>{n}} |",
            Missing="{:>10} |",
        )
        max_n = max(len(total_line[header.index("Cover")]) + 6, len(" Cover "))
        header_items = [formats[item].format(item, name_len=max_name, n=max_n) for item in header]
        header_str = "".join(header_items)
        rule_str = "|" + " ".join(
            ["- |".rjust(len(header_items[0]) - 1, "-")]
            + ["-: |".rjust(len(item) - 1, "-") for item in header_items[1:]],
        )

        # Write the header
        self.write(header_str)
        self.write(rule_str)

        # Write the data lines
        for values in lines_values:
            formats.update(
                dict(
                    Cover="{:>{n}}% |",
                )
            )
            self.write_items(
                (
                    formats[item].format(
                        str(value).replace("_", "\\_"), name_len=max_name, n=max_n - 1
                    )
                    for item, value in zip(header, values)
                )
            )

        # Write the TOTAL line
        formats.update(
            dict(
                Name="|{:>{name_len}} |",
                Cover="{:>{n}} |",
            ),
        )
        total_line_items: list[str] = []
        for item, value in zip(header, total_line):
            if value == "":
                insert = value
            elif item == "Cover":
                insert = f" **{value}%**"
            else:
                insert = f" **{value}**"
            total_line_items += formats[item].format(insert, name_len=max_name, n=max_n)
        self.write_items(total_line_items)

        for end_line in end_lines:
            self.write(end_line)

    def report(self, morfs: Iterable[TMorf] | None, outfile: IO[str] | None = None) -> float:
        """Writes a report summarizing coverage statistics per module.

        `outfile` is a text-mode file object to write the summary to.

        """
        self.outfile = outfile or sys.stdout

        self.coverage.get_data().set_query_contexts(self.config.report_contexts)
        for fr, analysis in get_analysis_to_report(self.coverage, morfs):
            self.report_one_file(fr, analysis)

        if not self.total.n_files and not self.skipped_count:
            raise NoDataError("No data to report.")

        if self.output_format == "total":
            self.write(self.total.pc_covered_str)
        else:
            self.tabular_report()

        return self.total.pc_covered

    def tabular_report(self) -> None:
        """Writes tabular report formats."""
        # Prepare the header line and column sorting.
        header = ["Name", "Stmts", "Miss"]
        if self.branches:
            header += ["Branch", "BrPart"]
        header += ["Cover"]
        if self.config.show_missing:
            header += ["Missing"]

        column_order = dict(name=0, stmts=1, miss=2, cover=-1)
        if self.branches:
            column_order.update(dict(branch=3, brpart=4))

        # `lines_values` is a list of lists of sortable values.
        lines_values = []

        for fr, analysis in self.fr_analyses:
            nums = analysis.numbers
            args = [fr.relative_filename(), nums.n_statements, nums.n_missing]
            if self.branches:
                args += [nums.n_branches, nums.n_partial_branches]
            args += [nums.pc_covered_str]
            if self.config.show_missing:
                args += [analysis.missing_formatted(branches=True)]
            args += [nums.pc_covered]
            lines_values.append(args)

        # Line sorting.
        sort_option = (self.config.sort or "name").lower()
        reverse = False
        if sort_option[0] == "-":
            reverse = True
            sort_option = sort_option[1:]
        elif sort_option[0] == "+":
            sort_option = sort_option[1:]
        sort_idx = column_order.get(sort_option)
        if sort_idx is None:
            raise ConfigError(f"Invalid sorting option: {self.config.sort!r}")
        if sort_option == "name":
            lines_values = human_sorted_items(lines_values, reverse=reverse)
        else:
            lines_values.sort(
                key=lambda line: (line[sort_idx], line[0]),
                reverse=reverse,
            )

        # Calculate total if we had at least one file.
        total_line = ["TOTAL", self.total.n_statements, self.total.n_missing]
        if self.branches:
            total_line += [self.total.n_branches, self.total.n_partial_branches]
        total_line += [self.total.pc_covered_str]
        if self.config.show_missing:
            total_line += [""]

        # Create other final lines.
        end_lines = []
        if self.config.skip_covered and self.skipped_count:
            files = plural(self.skipped_count, "file")
            end_lines.append(
                f"\n{self.skipped_count} {files} skipped due to complete coverage.",
            )
        if self.config.skip_empty and self.empty_count:
            files = plural(self.empty_count, "file")
            end_lines.append(f"\n{self.empty_count} empty {files} skipped.")

        if self.output_format == "markdown":
            formatter = self.report_markdown
        else:
            formatter = self.report_text
        formatter(header, lines_values, total_line, end_lines)

    def report_one_file(self, fr: FileReporter, analysis: Analysis) -> None:
        """Report on just one file, the callback from report()."""
        nums = analysis.numbers
        self.total += nums

        no_missing_lines = (nums.n_missing == 0)  # fmt: skip
        no_missing_branches = (nums.n_partial_branches == 0)  # fmt: skip
        if self.config.skip_covered and no_missing_lines and no_missing_branches:
            # Don't report on 100% files.
            self.skipped_count += 1
        elif self.config.skip_empty and nums.n_statements == 0:
            # Don't report on empty files.
            self.empty_count += 1
        else:
            self.fr_analyses.append((fr, analysis))
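# Example of driving this reporter through the public API (a sketch; the
# format choice is validated in __init__ above, and report()'s output_format
# argument is assumed to map onto it).

import coverage

cov = coverage.Coverage(branch=True)
cov.start()
...  # run the code to measure
cov.stop()
total = cov.report(show_missing=True, output_format="markdown")
print(f"total: {total:.1f}%")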
117
venv/lib/python3.12/site-packages/coverage/report_core.py
Normal file
@@ -0,0 +1,117 @@
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt

"""Reporter foundation for coverage.py."""

from __future__ import annotations

import sys
from collections.abc import Iterable, Iterator
from typing import IO, TYPE_CHECKING, Callable, Protocol

from coverage.exceptions import NoDataError, NotPython
from coverage.files import GlobMatcher, prep_patterns
from coverage.misc import ensure_dir_for_file, file_be_gone
from coverage.plugin import FileReporter
from coverage.results import Analysis
from coverage.types import TMorf

if TYPE_CHECKING:
    from coverage import Coverage


class Reporter(Protocol):
    """What we expect of reporters."""

    report_type: str

    def report(self, morfs: Iterable[TMorf] | None, outfile: IO[str]) -> float:
        """Generate a report of `morfs`, written to `outfile`."""


def render_report(
    output_path: str,
    reporter: Reporter,
    morfs: Iterable[TMorf] | None,
    msgfn: Callable[[str], None],
) -> float:
    """Run a one-file report generator, managing the output file.

    This function ensures the output file is ready to be written, writes the
    report to it, then closes the file and cleans up.

    """
    file_to_close = None
    delete_file = False

    if output_path == "-":
        outfile = sys.stdout
    else:
        # Ensure that the output directory is created; done here because this
        # report pre-opens the output file. HtmlReporter does this on its own
        # because its task is more complex, being multiple files.
        ensure_dir_for_file(output_path)
        outfile = open(output_path, "w", encoding="utf-8")
        file_to_close = outfile
        delete_file = True

    try:
        ret = reporter.report(morfs, outfile=outfile)
        if file_to_close is not None:
            msgfn(f"Wrote {reporter.report_type} to {output_path}")
            delete_file = False
        return ret
    finally:
        if file_to_close is not None:
            file_to_close.close()
            if delete_file:
                file_be_gone(output_path)  # pragma: part covered (doesn't return)

def get_analysis_to_report(
|
||||
coverage: Coverage,
|
||||
morfs: Iterable[TMorf] | None,
|
||||
) -> Iterator[tuple[FileReporter, Analysis]]:
|
||||
"""Get the files to report on.
|
||||
|
||||
For each morf in `morfs`, if it should be reported on (based on the omit
|
||||
and include configuration options), yield a pair, the `FileReporter` and
|
||||
`Analysis` for the morf.
|
||||
|
||||
"""
|
||||
fr_morfs = coverage._get_file_reporters(morfs)
|
||||
config = coverage.config
|
||||
|
||||
if config.report_include:
|
||||
matcher = GlobMatcher(prep_patterns(config.report_include), "report_include")
|
||||
fr_morfs = [(fr, morf) for (fr, morf) in fr_morfs if matcher.match(fr.filename)]
|
||||
|
||||
if config.report_omit:
|
||||
matcher = GlobMatcher(prep_patterns(config.report_omit), "report_omit")
|
||||
fr_morfs = [(fr, morf) for (fr, morf) in fr_morfs if not matcher.match(fr.filename)]
|
||||
|
||||
if not fr_morfs:
|
||||
raise NoDataError("No data to report.")
|
||||
|
||||
for fr, morf in sorted(fr_morfs):
|
||||
try:
|
||||
analysis = coverage._analyze(morf)
|
||||
except NotPython:
|
||||
# Only report errors for .py files, and only if we didn't
|
||||
# explicitly suppress those errors.
|
||||
# NotPython is only raised by PythonFileReporter, which has a
|
||||
# should_be_python() method.
|
||||
if fr.should_be_python(): # type: ignore[attr-defined]
|
||||
if config.ignore_errors:
|
||||
msg = f"Couldn't parse Python file '{fr.filename}'"
|
||||
coverage._warn(msg, slug="couldnt-parse")
|
||||
else:
|
||||
raise
|
||||
except Exception as exc:
|
||||
if config.ignore_errors:
|
||||
msg = f"Couldn't parse '{fr.filename}': {exc}".rstrip()
|
||||
coverage._warn(msg, slug="couldnt-parse")
|
||||
else:
|
||||
raise
|
||||
else:
|
||||
yield (fr, analysis)
|
||||
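A condensed sketch (an illustration, not coverage.py's public API) of the
pattern `render_report` implements: "-" means stdout, and a file opened for a
report is deleted if the reporter raises, so a failed run can't leave a
half-written report behind::

    import os
    import sys

    def render(output_path, write_fn):
        file_to_close = None
        delete_file = False
        if output_path == "-":
            outfile = sys.stdout
        else:
            outfile = open(output_path, "w", encoding="utf-8")
            file_to_close = outfile
            delete_file = True     # assume failure until the report succeeds
        try:
            write_fn(outfile)
            delete_file = False
        finally:
            if file_to_close is not None:
                file_to_close.close()
                if delete_file:
                    os.remove(output_path)

    render("out.txt", lambda f: f.write("hello\n"))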
471
venv/lib/python3.12/site-packages/coverage/results.py
Normal file
@@ -0,0 +1,471 @@
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt

"""Results of coverage measurement."""

from __future__ import annotations

import collections
import dataclasses
from collections.abc import Iterable
from typing import TYPE_CHECKING

from coverage.exceptions import ConfigError
from coverage.misc import nice_pair
from coverage.types import TArc, TLineNo

if TYPE_CHECKING:
    from coverage.data import CoverageData
    from coverage.plugin import FileReporter


def analysis_from_file_reporter(
    data: CoverageData,
    precision: int,
    file_reporter: FileReporter,
    filename: str,
) -> Analysis:
    """Create an Analysis from a FileReporter."""
    has_arcs = data.has_arcs()
    statements = file_reporter.lines()
    excluded = file_reporter.excluded_lines()
    executed = file_reporter.translate_lines(data.lines(filename) or [])

    if has_arcs:
        arc_possibilities_set = file_reporter.arcs()
        arcs: Iterable[TArc] = data.arcs(filename) or []
        arcs = file_reporter.translate_arcs(arcs)

        # Reduce the set of arcs to the ones that could be branches.
        dests = collections.defaultdict(set)
        for fromno, tono in arc_possibilities_set:
            dests[fromno].add(tono)
        single_dests = {
            fromno: list(tonos)[0] for fromno, tonos in dests.items() if len(tonos) == 1
        }
        new_arcs = set()
        for fromno, tono in arcs:
            if fromno != tono:
                new_arcs.add((fromno, tono))
            else:
                if fromno in single_dests:
                    new_arcs.add((fromno, single_dests[fromno]))

        arcs_executed_set = file_reporter.translate_arcs(new_arcs)
        exit_counts = file_reporter.exit_counts()
        no_branch = file_reporter.no_branch_lines()
    else:
        arc_possibilities_set = set()
        arcs_executed_set = set()
        exit_counts = {}
        no_branch = set()

    return Analysis(
        precision=precision,
        filename=filename,
        has_arcs=has_arcs,
        statements=statements,
        excluded=excluded,
        executed=executed,
        arc_possibilities_set=arc_possibilities_set,
        arcs_executed_set=arcs_executed_set,
        exit_counts=exit_counts,
        no_branch=no_branch,
    )


@dataclasses.dataclass
class Analysis:
    """The results of analyzing a FileReporter."""

    precision: int
    filename: str
    has_arcs: bool
    statements: set[TLineNo]
    excluded: set[TLineNo]
    executed: set[TLineNo]
    arc_possibilities_set: set[TArc]
    arcs_executed_set: set[TArc]
    exit_counts: dict[TLineNo, int]
    no_branch: set[TLineNo]

    def __post_init__(self) -> None:
        self.arc_possibilities = sorted(self.arc_possibilities_set)
        self.arcs_executed = sorted(self.arcs_executed_set)
        self.missing = self.statements - self.executed

        if self.has_arcs:
            n_branches = self._total_branches()
            mba = self.missing_branch_arcs()
            n_partial_branches = sum(len(v) for k, v in mba.items() if k not in self.missing)
            n_missing_branches = sum(len(v) for k, v in mba.items())
        else:
            n_branches = n_partial_branches = n_missing_branches = 0

        self.numbers = Numbers(
            precision=self.precision,
            n_files=1,
            n_statements=len(self.statements),
            n_excluded=len(self.excluded),
            n_missing=len(self.missing),
            n_branches=n_branches,
            n_partial_branches=n_partial_branches,
            n_missing_branches=n_missing_branches,
        )

    def missing_formatted(self, branches: bool = False) -> str:
        """The missing line numbers, formatted nicely.

        Returns a string like "1-2, 5-11, 13-14".

        If `branches` is true, includes the missing branch arcs also.

        """
        if branches and self.has_arcs:
            arcs = self.missing_branch_arcs().items()
        else:
            arcs = None

        return format_lines(self.statements, self.missing, arcs=arcs)

    def arcs_missing(self) -> list[TArc]:
        """Returns a sorted list of the un-executed arcs in the code."""
        missing = (
            p
            for p in self.arc_possibilities
            if p not in self.arcs_executed_set
            and p[0] not in self.no_branch
            and p[1] not in self.excluded
        )
        return sorted(missing)

    def _branch_lines(self) -> list[TLineNo]:
        """Returns a list of line numbers that have more than one exit."""
        return [l1 for l1, count in self.exit_counts.items() if count > 1]

    def _total_branches(self) -> int:
        """How many total branches are there?"""
        return sum(count for count in self.exit_counts.values() if count > 1)

    def missing_branch_arcs(self) -> dict[TLineNo, list[TLineNo]]:
        """Return arcs that weren't executed from branch lines.

        Returns {l1:[l2a,l2b,...], ...}

        """
        missing = self.arcs_missing()
        branch_lines = set(self._branch_lines())
        mba = collections.defaultdict(list)
        for l1, l2 in missing:
            assert l1 != l2, f"In {self.filename}, didn't expect {l1} == {l2}"
            if l1 in branch_lines:
                mba[l1].append(l2)
        return mba

    def executed_branch_arcs(self) -> dict[TLineNo, list[TLineNo]]:
        """Return arcs that were executed from branch lines.

        Only include ones that we considered possible.

        Returns {l1:[l2a,l2b,...], ...}

        """
        branch_lines = set(self._branch_lines())
        eba = collections.defaultdict(list)
        for l1, l2 in self.arcs_executed:
            assert l1 != l2, f"Oops: Didn't think this could happen: {l1 = }, {l2 = }"
            if (l1, l2) not in self.arc_possibilities_set:
                continue
            if l1 in branch_lines:
                eba[l1].append(l2)
        return eba

    def branch_stats(self) -> dict[TLineNo, tuple[int, int]]:
        """Get stats about branches.

        Returns a dict mapping line numbers to a tuple:
        (total_exits, taken_exits).

        """
        missing_arcs = self.missing_branch_arcs()
        stats = {}
        for lnum in self._branch_lines():
            exits = self.exit_counts[lnum]
            missing = len(missing_arcs[lnum])
            stats[lnum] = (exits, exits - missing)
        return stats
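A worked example (hypothetical numbers) of what `branch_stats` computes: a
line with two exits and one never-taken arc reports (total_exits, taken_exits)::

    exit_counts = {12: 2, 20: 1}   # line 12 is a branch line: two exits
    missing_arcs = {12: [13]}      # the 12->13 arc never executed
    stats = {}
    for lnum in (l for l, c in exit_counts.items() if c > 1):
        exits = exit_counts[lnum]
        missing = len(missing_arcs.get(lnum, []))
        stats[lnum] = (exits, exits - missing)
    print(stats)                   # {12: (2, 1)}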
TRegionLines = frozenset[TLineNo]


class AnalysisNarrower:
    """
    For reducing an `Analysis` to a subset of its lines.

    Originally this was a simpler method on Analysis, but that led to quadratic
    behavior. This class does the bulk of the work up-front to provide the
    same results in linear time.

    Create an AnalysisNarrower from an Analysis, bulk-add region lines to it
    with `add_regions`, then individually request new narrowed Analysis objects
    for each region with `narrow`. Doing most of the work in limited calls to
    `add_regions` lets us avoid poor performance.
    """

    # In this class, regions are represented by a frozenset of their lines.

    def __init__(self, analysis: Analysis) -> None:
        self.analysis = analysis
        self.region2arc_possibilities: dict[TRegionLines, set[TArc]] = collections.defaultdict(set)
        self.region2arc_executed: dict[TRegionLines, set[TArc]] = collections.defaultdict(set)
        self.region2exit_counts: dict[TRegionLines, dict[TLineNo, int]] = collections.defaultdict(
            dict
        )

    def add_regions(self, liness: Iterable[set[TLineNo]]) -> None:
        """
        Pre-process a number of sets of line numbers. Later calls to `narrow`
        with one of these sets will provide a narrowed Analysis.
        """
        if self.analysis.has_arcs:
            line2region: dict[TLineNo, TRegionLines] = {}

            for lines in liness:
                fzlines = frozenset(lines)
                for line in lines:
                    line2region[line] = fzlines

            def collect_arcs(
                arc_set: set[TArc],
                region2arcs: dict[TRegionLines, set[TArc]],
            ) -> None:
                for a, b in arc_set:
                    if r := line2region.get(a):
                        region2arcs[r].add((a, b))
                    if r := line2region.get(b):
                        region2arcs[r].add((a, b))

            collect_arcs(self.analysis.arc_possibilities_set, self.region2arc_possibilities)
            collect_arcs(self.analysis.arcs_executed_set, self.region2arc_executed)

            for lno, num in self.analysis.exit_counts.items():
                if r := line2region.get(lno):
                    self.region2exit_counts[r][lno] = num

    def narrow(self, lines: set[TLineNo]) -> Analysis:
        """Create a narrowed Analysis.

        The current analysis is copied to make a new one that only considers
        the lines in `lines`.
        """

        # Technically, the set intersections in this method are still O(N**2)
        # since this method is called N times, but they're very fast and moving
        # them to `add_regions` won't avoid the quadratic time.

        statements = self.analysis.statements & lines
        excluded = self.analysis.excluded & lines
        executed = self.analysis.executed & lines

        if self.analysis.has_arcs:
            fzlines = frozenset(lines)
            arc_possibilities_set = self.region2arc_possibilities[fzlines]
            arcs_executed_set = self.region2arc_executed[fzlines]
            exit_counts = self.region2exit_counts[fzlines]
            no_branch = self.analysis.no_branch & lines
        else:
            arc_possibilities_set = set()
            arcs_executed_set = set()
            exit_counts = {}
            no_branch = set()

        return Analysis(
            precision=self.analysis.precision,
            filename=self.analysis.filename,
            has_arcs=self.analysis.has_arcs,
            statements=statements,
            excluded=excluded,
            executed=executed,
            arc_possibilities_set=arc_possibilities_set,
            arcs_executed_set=arcs_executed_set,
            exit_counts=exit_counts,
            no_branch=no_branch,
        )


@dataclasses.dataclass
class Numbers:
    """The numerical results of measuring coverage.

    This holds the basic statistics from `Analysis`, and is used to roll
    up statistics across files.

    """

    precision: int = 0
    n_files: int = 0
    n_statements: int = 0
    n_excluded: int = 0
    n_missing: int = 0
    n_branches: int = 0
    n_partial_branches: int = 0
    n_missing_branches: int = 0

    @property
    def n_executed(self) -> int:
        """Returns the number of executed statements."""
        return self.n_statements - self.n_missing

    @property
    def n_executed_branches(self) -> int:
        """Returns the number of executed branches."""
        return self.n_branches - self.n_missing_branches

    @property
    def pc_covered(self) -> float:
        """Returns a single percentage value for coverage."""
        if self.n_statements > 0:
            numerator, denominator = self.ratio_covered
            pc_cov = (100.0 * numerator) / denominator
        else:
            pc_cov = 100.0
        return pc_cov

    @property
    def pc_covered_str(self) -> str:
        """Returns the percent covered, as a string, without a percent sign.

        Note that "0" is only returned when the value is truly zero, and "100"
        is only returned when the value is truly 100. Rounding can never
        result in either "0" or "100".

        """
        return display_covered(self.pc_covered, self.precision)

    @property
    def ratio_covered(self) -> tuple[int, int]:
        """Return a numerator and denominator for the coverage ratio."""
        numerator = self.n_executed + self.n_executed_branches
        denominator = self.n_statements + self.n_branches
        return numerator, denominator

    def __add__(self, other: Numbers) -> Numbers:
        return Numbers(
            self.precision,
            self.n_files + other.n_files,
            self.n_statements + other.n_statements,
            self.n_excluded + other.n_excluded,
            self.n_missing + other.n_missing,
            self.n_branches + other.n_branches,
            self.n_partial_branches + other.n_partial_branches,
            self.n_missing_branches + other.n_missing_branches,
        )

    def __radd__(self, other: int) -> Numbers:
        # Implementing 0+Numbers allows us to sum() a list of Numbers.
        assert other == 0  # we only ever call it this way.
        return self


def display_covered(pc: float, precision: int) -> str:
    """Return a displayable total percentage, as a string.

    Note that "0" is only returned when the value is truly zero, and "100"
    is only returned when the value is truly 100. Rounding can never
    result in either "0" or "100".

    """
    near0 = 1.0 / 10**precision
    if 0 < pc < near0:
        pc = near0
    elif (100.0 - near0) < pc < 100:
        pc = 100.0 - near0
    else:
        pc = round(pc, precision)
    return f"{pc:.{precision}f}"


def _line_ranges(
    statements: Iterable[TLineNo],
    lines: Iterable[TLineNo],
) -> list[tuple[TLineNo, TLineNo]]:
    """Produce a list of ranges for `format_lines`."""
    statements = sorted(statements)
    lines = sorted(lines)

    pairs = []
    start: TLineNo | None = None
    lidx = 0
    for stmt in statements:
        if lidx >= len(lines):
            break
        if stmt == lines[lidx]:
            lidx += 1
            if not start:
                start = stmt
            end = stmt
        elif start:
            pairs.append((start, end))
            start = None
    if start:
        pairs.append((start, end))
    return pairs


def format_lines(
    statements: Iterable[TLineNo],
    lines: Iterable[TLineNo],
    arcs: Iterable[tuple[TLineNo, list[TLineNo]]] | None = None,
) -> str:
    """Nicely format a list of line numbers.

    Format a list of line numbers for printing by coalescing groups of lines as
    long as the lines represent consecutive statements. This will coalesce
    even if there are gaps between statements.

    For example, if `statements` is [1,2,3,4,5,10,11,12,13,14] and
    `lines` is [1,2,5,10,11,13,14] then the result will be "1-2, 5-11, 13-14".

    Both `lines` and `statements` can be any iterable. All of the elements of
    `lines` must be in `statements`, and all of the values must be positive
    integers.

    If `arcs` is provided, they are (start,[end,end,end]) pairs that will be
    included in the output as long as start isn't in `lines`.

    """
    line_items = [(pair[0], nice_pair(pair)) for pair in _line_ranges(statements, lines)]
    if arcs is not None:
        line_exits = sorted(arcs)
        for line, exits in line_exits:
            for ex in sorted(exits):
                if line not in lines and ex not in lines:
                    dest = ex if ex > 0 else "exit"
                    line_items.append((line, f"{line}->{dest}"))

    ret = ", ".join(t[-1] for t in sorted(line_items))
    return ret


def should_fail_under(total: float, fail_under: float, precision: int) -> bool:
    """Determine if a total should fail due to fail-under.

    `total` is a float, the coverage measurement total. `fail_under` is the
    fail_under setting to compare with. `precision` is the number of digits
    to consider after the decimal point.

    Returns True if the total should fail.

    """
    # We can never achieve higher than 100% coverage, or less than zero.
    if not (0 <= fail_under <= 100.0):
        msg = f"fail_under={fail_under} is invalid. Must be between 0 and 100."
        raise ConfigError(msg)

    # Special case for fail_under=100, it must really be 100.
    if fail_under == 100.0 and total != 100.0:
        return True

    return round(total, precision) < fail_under
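A short usage sketch of the helpers above, assuming the coverage package from
this commit is importable::

    from coverage.results import format_lines, should_fail_under

    statements = [1, 2, 3, 4, 5, 10, 11, 12, 13, 14]
    missing = [1, 2, 5, 10, 11, 13, 14]
    print(format_lines(statements, missing))   # 1-2, 5-11, 13-14

    print(should_fail_under(99.99, 100.0, 2))  # True: 100 must really be 100
    print(should_fail_under(84.504, 84.5, 2))  # False: rounds to 84.50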
1153
venv/lib/python3.12/site-packages/coverage/sqldata.py
Normal file
File diff suppressed because it is too large
239
venv/lib/python3.12/site-packages/coverage/sqlitedb.py
Normal file
@@ -0,0 +1,239 @@
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt

"""SQLite abstraction for coverage.py"""

from __future__ import annotations

import contextlib
import re
import sqlite3
from collections.abc import Iterable, Iterator
from typing import Any, cast

from coverage.debug import auto_repr, clipped_repr, exc_one_line
from coverage.exceptions import DataError
from coverage.types import TDebugCtl


class SqliteDb:
    """A simple abstraction over a SQLite database.

    Use as a context manager, then you can use it like a
    :class:`python:sqlite3.Connection` object::

        with SqliteDb(filename, debug_control) as db:
            with db.execute("select a, b from some_table") as cur:
                for a, b in cur:
                    etc(a, b)

    """

    def __init__(self, filename: str, debug: TDebugCtl, no_disk: bool = False) -> None:
        self.debug = debug
        self.filename = filename
        self.no_disk = no_disk
        self.nest = 0
        self.con: sqlite3.Connection | None = None

    __repr__ = auto_repr

    def _connect(self) -> None:
        """Connect to the db and do universal initialization."""
        if self.con is not None:
            return

        # It can happen that Python switches threads while the tracer writes
        # data. The second thread will also try to write to the data,
        # effectively causing a nested context. However, given the idempotent
        # nature of the tracer operations, sharing a connection among threads
        # is not a problem.
        if self.debug.should("sql"):
            self.debug.write(f"Connecting to {self.filename!r}")
        try:
            # Use uri=True when connecting to memory URIs
            if self.filename.startswith("file:"):
                self.con = sqlite3.connect(self.filename, check_same_thread=False, uri=True)
            else:
                self.con = sqlite3.connect(self.filename, check_same_thread=False)
        except sqlite3.Error as exc:
            raise DataError(f"Couldn't use data file {self.filename!r}: {exc}") from exc

        if self.debug.should("sql"):
            self.debug.write(f"Connected to {self.filename!r} as {self.con!r}")

        self.con.create_function("REGEXP", 2, lambda txt, pat: re.search(txt, pat) is not None)

        # Turning off journal_mode can speed up writing. It can't always be
        # disabled, so we have to be prepared for *-journal files elsewhere.
        # In Python 3.12+, we can change the config to allow journal_mode=off.
        if hasattr(sqlite3, "SQLITE_DBCONFIG_DEFENSIVE"):
            # Turn off defensive mode, so that journal_mode=off can succeed.
            self.con.setconfig(  # type: ignore[attr-defined, unused-ignore]
                sqlite3.SQLITE_DBCONFIG_DEFENSIVE,
                False,
            )

        # This pragma makes writing faster. It disables rollbacks, but we never need them.
        self.execute_void("pragma journal_mode=off")

        # This pragma makes writing faster. It can fail in unusual situations
        # (https://github.com/nedbat/coveragepy/issues/1646), so use fail_ok=True
        # to keep things going.
        self.execute_void("pragma synchronous=off", fail_ok=True)

    def close(self, force: bool = False) -> None:
        """If needed, close the connection."""
        if self.con is not None:
            if force or not self.no_disk:
                if self.debug.should("sql"):
                    self.debug.write(f"Closing {self.con!r} on {self.filename!r}")
                self.con.close()
                self.con = None

    def __enter__(self) -> SqliteDb:
        if self.nest == 0:
            self._connect()
            assert self.con is not None
            self.con.__enter__()
        self.nest += 1
        return self

    def __exit__(self, exc_type, exc_value, traceback) -> None:  # type: ignore[no-untyped-def]
        self.nest -= 1
        if self.nest == 0:
            try:
                assert self.con is not None
                self.con.__exit__(exc_type, exc_value, traceback)
                self.close()
            except Exception as exc:
                if self.debug.should("sql"):
                    self.debug.write(f"EXCEPTION from __exit__: {exc_one_line(exc)}")
                raise DataError(f"Couldn't end data file {self.filename!r}: {exc}") from exc

    def _execute(self, sql: str, parameters: Iterable[Any]) -> sqlite3.Cursor:
        """Same as :meth:`python:sqlite3.Connection.execute`."""
        if self.debug.should("sql"):
            tail = f" with {parameters!r}" if parameters else ""
            self.debug.write(f"Executing {sql!r}{tail}")
        try:
            assert self.con is not None
            try:
                return self.con.execute(sql, parameters)  # type: ignore[arg-type]
            except Exception:
                # In some cases, an error might happen that isn't really an
                # error. Try again immediately.
                # https://github.com/nedbat/coveragepy/issues/1010
                return self.con.execute(sql, parameters)  # type: ignore[arg-type]
        except sqlite3.Error as exc:
            msg = str(exc)
            if not self.no_disk:
                try:
                    # `execute` is the first thing we do with the database, so try
                    # hard to provide useful hints if something goes wrong now.
                    with open(self.filename, "rb") as bad_file:
                        cov4_sig = b"!coverage.py: This is a private format"
                        if bad_file.read(len(cov4_sig)) == cov4_sig:
                            msg = (
                                "Looks like a coverage 4.x data file. "
                                + "Are you mixing versions of coverage?"
                            )
                except Exception:
                    pass
            if self.debug.should("sql"):
                self.debug.write(f"EXCEPTION from execute: {exc_one_line(exc)}")
            raise DataError(f"Couldn't use data file {self.filename!r}: {msg}") from exc

    @contextlib.contextmanager
    def execute(
        self,
        sql: str,
        parameters: Iterable[Any] = (),
    ) -> Iterator[sqlite3.Cursor]:
        """Context managed :meth:`python:sqlite3.Connection.execute`.

        Use with a ``with`` statement to auto-close the returned cursor.
        """
        cur = self._execute(sql, parameters)
        try:
            yield cur
        finally:
            cur.close()

    def execute_void(self, sql: str, parameters: Iterable[Any] = (), fail_ok: bool = False) -> None:
        """Same as :meth:`python:sqlite3.Connection.execute` when you don't need the cursor.

        If `fail_ok` is True, then SQLite errors are ignored.
        """
        try:
            # PyPy needs the .close() calls here, or sqlite gets twisted up:
            # https://bitbucket.org/pypy/pypy/issues/2872/default-isolation-mode-is-different-on
            self._execute(sql, parameters).close()
        except DataError:
            if not fail_ok:
                raise

    def execute_for_rowid(self, sql: str, parameters: Iterable[Any] = ()) -> int:
        """Like execute, but returns the lastrowid."""
        with self.execute(sql, parameters) as cur:
            assert cur.lastrowid is not None
            rowid: int = cur.lastrowid
        if self.debug.should("sqldata"):
            self.debug.write(f"Row id result: {rowid!r}")
        return rowid

    def execute_one(self, sql: str, parameters: Iterable[Any] = ()) -> tuple[Any, ...] | None:
        """Execute a statement and return the one row that results.

        This is like execute(sql, parameters).fetchone(), except it is
        correct in reading the entire result set. This will raise an
        exception if more than one row results.

        Returns a row, or None if there were no rows.
        """
        with self.execute(sql, parameters) as cur:
            rows = list(cur)
        if len(rows) == 0:
            return None
        elif len(rows) == 1:
            return cast(tuple[Any, ...], rows[0])
        else:
            raise AssertionError(f"SQL {sql!r} shouldn't return {len(rows)} rows")

    def _executemany(self, sql: str, data: list[Any]) -> sqlite3.Cursor:
        """Same as :meth:`python:sqlite3.Connection.executemany`."""
        if self.debug.should("sql"):
            final = ":" if self.debug.should("sqldata") else ""
            self.debug.write(f"Executing many {sql!r} with {len(data)} rows{final}")
            if self.debug.should("sqldata"):
                for i, row in enumerate(data):
                    self.debug.write(f"{i:4d}: {row!r}")
        assert self.con is not None
        try:
            return self.con.executemany(sql, data)
        except Exception:
            # In some cases, an error might happen that isn't really an
            # error. Try again immediately.
            # https://github.com/nedbat/coveragepy/issues/1010
            return self.con.executemany(sql, data)

    def executemany_void(self, sql: str, data: list[Any]) -> None:
        """Same as :meth:`python:sqlite3.Connection.executemany` when you don't need the cursor."""
        self._executemany(sql, data).close()

    def executescript(self, script: str) -> None:
        """Same as :meth:`python:sqlite3.Connection.executescript`."""
        if self.debug.should("sql"):
            self.debug.write(
                "Executing script with {} chars: {}".format(
                    len(script),
                    clipped_repr(script, 100),
                )
            )
        assert self.con is not None
        self.con.executescript(script).close()

    def dump(self) -> str:
        """Return a multi-line string, the SQL dump of the database."""
        assert self.con is not None
        return "\n".join(self.con.iterdump())
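The cursor-closing idiom that `SqliteDb.execute` wraps, shown with the
standard sqlite3 module alone (a sketch, independent of coverage's classes)::

    import contextlib
    import sqlite3

    @contextlib.contextmanager
    def execute(con, sql, parameters=()):
        cur = con.execute(sql, parameters)
        try:
            yield cur
        finally:
            cur.close()   # always close, even if iteration raises

    con = sqlite3.connect(":memory:")
    con.execute("create table t (a, b)")
    con.execute("insert into t values (1, 2)")
    with execute(con, "select a, b from t") as cur:
        for a, b in cur:
            print(a, b)   # 1 2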
461
venv/lib/python3.12/site-packages/coverage/sysmon.py
Normal file
@@ -0,0 +1,461 @@
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt

"""Callback functions and support for sys.monitoring data collection."""

from __future__ import annotations

import functools
import inspect
import os
import os.path
import sys
import threading
import traceback
from dataclasses import dataclass
from types import CodeType
from typing import Any, Callable, NewType, Optional, cast

from coverage import env
from coverage.bytecode import TBranchTrails, always_jumps, branch_trails
from coverage.debug import short_filename, short_stack
from coverage.misc import isolate_module
from coverage.types import (
    AnyCallable,
    TFileDisposition,
    TLineNo,
    TOffset,
    Tracer,
    TShouldStartContextFn,
    TShouldTraceFn,
    TTraceData,
    TTraceFileData,
    TWarnFn,
)

os = isolate_module(os)

# pylint: disable=unused-argument

# $set_env.py: COVERAGE_SYSMON_LOG - Log sys.monitoring activity
LOG = bool(int(os.getenv("COVERAGE_SYSMON_LOG", 0)))

# $set_env.py: COVERAGE_SYSMON_STATS - Collect sys.monitoring stats
COLLECT_STATS = bool(int(os.getenv("COVERAGE_SYSMON_STATS", 0)))

# This module will be imported in all versions of Python, but only used in 3.12+
# It will be type-checked for 3.12, but not for earlier versions.
sys_monitoring = getattr(sys, "monitoring", None)

DISABLE_TYPE = NewType("DISABLE_TYPE", object)
MonitorReturn = Optional[DISABLE_TYPE]
DISABLE = cast(MonitorReturn, getattr(sys_monitoring, "DISABLE", None))


if LOG:  # pragma: debugging

    class LoggingWrapper:
        """Wrap a namespace to log all its functions."""

        def __init__(self, wrapped: Any, namespace: str) -> None:
            self.wrapped = wrapped
            self.namespace = namespace

        def __getattr__(self, name: str) -> Callable[..., Any]:
            def _wrapped(*args: Any, **kwargs: Any) -> Any:
                log(f"{self.namespace}.{name}{args}{kwargs}")
                return getattr(self.wrapped, name)(*args, **kwargs)

            return _wrapped

    sys_monitoring = LoggingWrapper(sys_monitoring, "sys.monitoring")
    assert sys_monitoring is not None

    short_stack = functools.partial(
        short_stack,
        full=True,
        short_filenames=True,
        frame_ids=True,
    )
    seen_threads: set[int] = set()

    def log(msg: str) -> None:
        """Write a message to our detailed debugging log(s)."""
        # Thread ids are reused across processes?
        # Make a shorter number more likely to be unique.
        pid = os.getpid()
        tid = cast(int, threading.current_thread().ident)
        tslug = f"{(pid * tid) % 9_999_991:07d}"
        if tid not in seen_threads:
            seen_threads.add(tid)
            log(f"New thread {tid} {tslug}:\n{short_stack()}")
        # log_seq = int(os.getenv("PANSEQ", "0"))
        # root = f"/tmp/pan.{log_seq:03d}"
        for filename in [
            "/tmp/foo.out",
            # f"{root}.out",
            # f"{root}-{pid}.out",
            # f"{root}-{pid}-{tslug}.out",
        ]:
            with open(filename, "a", encoding="utf-8") as f:
                try:
                    print(f"{pid}:{tslug}: {msg}", file=f, flush=True)
                except UnicodeError:
                    print(f"{pid}:{tslug}: {ascii(msg)}", file=f, flush=True)

    def arg_repr(arg: Any) -> str:
        """Make a customized repr for logged values."""
        if isinstance(arg, CodeType):
            return (
                f"<code @{id(arg):#x}"
                + f" name={arg.co_name},"
                + f" file={short_filename(arg.co_filename)!r}#{arg.co_firstlineno}>"
            )
        return repr(arg)

    def panopticon(*names: str | None) -> AnyCallable:
        """Decorate a function to log its calls."""

        def _decorator(method: AnyCallable) -> AnyCallable:
            @functools.wraps(method)
            def _wrapped(self: Any, *args: Any) -> Any:
                try:
                    # log(f"{method.__name__}() stack:\n{short_stack()}")
                    args_reprs = []
                    for name, arg in zip(names, args):
                        if name is None:
                            continue
                        args_reprs.append(f"{name}={arg_repr(arg)}")
                    log(f"{id(self):#x}:{method.__name__}({', '.join(args_reprs)})")
                    ret = method(self, *args)
                    # log(f" end {id(self):#x}:{method.__name__}({', '.join(args_reprs)})")
                    return ret
                except Exception as exc:
                    log(f"!!{exc.__class__.__name__}: {exc}")
                    if 1:
                        log("".join(traceback.format_exception(exc)))
                    try:
                        assert sys_monitoring is not None
                        sys_monitoring.set_events(sys.monitoring.COVERAGE_ID, 0)
                    except ValueError:
                        # We might have already shut off monitoring.
                        log("oops, shutting off events with disabled tool id")
                    raise

            return _wrapped

        return _decorator

else:

    def log(msg: str) -> None:
        """Write a message to our detailed debugging log(s), but not really."""

    def panopticon(*names: str | None) -> AnyCallable:
        """Decorate a function to log its calls, but not really."""

        def _decorator(meth: AnyCallable) -> AnyCallable:
            return meth

        return _decorator


@dataclass
class CodeInfo:
    """The information we want about each code object."""

    tracing: bool
    file_data: TTraceFileData | None
    byte_to_line: dict[TOffset, TLineNo] | None

    # Keys are start instruction offsets for branches.
    # Values are dicts:
    #   {
    #       (from_line, to_line): {offset, offset, ...},
    #       (from_line, to_line): {offset, offset, ...},
    #   }
    branch_trails: TBranchTrails

    # Always-jumps are bytecode offsets that do no work but move
    # to another offset.
    always_jumps: dict[TOffset, TOffset]


def bytes_to_lines(code: CodeType) -> dict[TOffset, TLineNo]:
    """Make a dict mapping byte code offsets to line numbers."""
    b2l = {}
    for bstart, bend, lineno in code.co_lines():
        if lineno is not None:
            for boffset in range(bstart, bend, 2):
                b2l[boffset] = lineno
    return b2l
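`co_lines()` in action (a quick illustrative sketch): every bytecode offset in
each reported range maps to the line that produced it, which is exactly the
table `bytes_to_lines` builds::

    def sample(x):
        y = x + 1
        return y

    for bstart, bend, lineno in sample.__code__.co_lines():
        print(bstart, bend, lineno)
    # Offsets covering "y = x + 1" print its line number, then "return y";
    # the exact offsets vary by Python version.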
class SysMonitor(Tracer):
    """Python implementation of the raw data tracer for PEP669 implementations."""

    # One of these will be used across threads. Be careful.

    def __init__(self, tool_id: int) -> None:
        # Attributes set from the collector:
        self.data: TTraceData
        self.trace_arcs = False
        self.should_trace: TShouldTraceFn
        self.should_trace_cache: dict[str, TFileDisposition | None]
        # TODO: should_start_context and switch_context are unused!
        # Change tests/testenv.py:DYN_CONTEXTS when this is updated.
        self.should_start_context: TShouldStartContextFn | None = None
        self.switch_context: Callable[[str | None], None] | None = None
        self.lock_data: Callable[[], None]
        self.unlock_data: Callable[[], None]
        # TODO: warn is unused.
        self.warn: TWarnFn

        self.myid = tool_id

        # Map id(code_object) -> CodeInfo
        self.code_infos: dict[int, CodeInfo] = {}
        # A list of code_objects, just to keep them alive so that id's are
        # useful as identity.
        self.code_objects: list[CodeType] = []
        self.sysmon_on = False
        self.lock = threading.Lock()

        self.stats: dict[str, int] | None = None
        if COLLECT_STATS:
            self.stats = dict.fromkeys(
                "starts start_tracing returns line_lines line_arcs branches branch_trails".split(),
                0,
            )

        self._activity = False

    def __repr__(self) -> str:
        points = sum(len(v) for v in self.data.values())
        files = len(self.data)
        return f"<SysMonitor at {id(self):#x}: {points} data points in {files} files>"

    @panopticon()
    def start(self) -> None:
        """Start this Tracer."""
        with self.lock:
            assert sys_monitoring is not None
            sys_monitoring.use_tool_id(self.myid, "coverage.py")
            register = functools.partial(sys_monitoring.register_callback, self.myid)
            events = sys.monitoring.events

            sys_monitoring.set_events(self.myid, events.PY_START)
            register(events.PY_START, self.sysmon_py_start)
            if self.trace_arcs:
                register(events.PY_RETURN, self.sysmon_py_return)
                register(events.LINE, self.sysmon_line_arcs)
                if env.PYBEHAVIOR.branch_right_left:
                    register(
                        events.BRANCH_RIGHT,  # type:ignore[attr-defined]
                        self.sysmon_branch_either,
                    )
                    register(
                        events.BRANCH_LEFT,  # type:ignore[attr-defined]
                        self.sysmon_branch_either,
                    )
            else:
                register(events.LINE, self.sysmon_line_lines)
            sys_monitoring.restart_events()
            self.sysmon_on = True

    @panopticon()
    def stop(self) -> None:
        """Stop this Tracer."""
        with self.lock:
            if not self.sysmon_on:
                # In forking situations, we might try to stop when we are not
                # started. Do nothing in that case.
                return
            assert sys_monitoring is not None
            sys_monitoring.set_events(self.myid, 0)
            self.sysmon_on = False
            sys_monitoring.free_tool_id(self.myid)

    @panopticon()
    def post_fork(self) -> None:
        """The process has forked, clean up as needed."""
        self.stop()

    def activity(self) -> bool:
        """Has there been any activity?"""
        return self._activity

    def reset_activity(self) -> None:
        """Reset the activity() flag."""
        self._activity = False

    def get_stats(self) -> dict[str, int] | None:
        """Return a dictionary of statistics, or None."""
        return self.stats

    @panopticon("code", "@")
    def sysmon_py_start(self, code: CodeType, instruction_offset: TOffset) -> MonitorReturn:
        """Handle sys.monitoring.events.PY_START events."""
        # Entering a new frame. Decide if we should trace in this file.
        self._activity = True
        if self.stats is not None:
            self.stats["starts"] += 1

        code_info = self.code_infos.get(id(code))
        tracing_code: bool | None = None
        file_data: TTraceFileData | None = None
        if code_info is not None:
            tracing_code = code_info.tracing
            file_data = code_info.file_data

        if tracing_code is None:
            filename = code.co_filename
            disp = self.should_trace_cache.get(filename)
            if disp is None:
                frame = inspect.currentframe()
                if frame is not None:
                    frame = inspect.currentframe().f_back  # type: ignore[union-attr]
                if LOG:
                    # @panopticon adds a frame.
                    frame = frame.f_back  # type: ignore[union-attr]
                disp = self.should_trace(filename, frame)  # type: ignore[arg-type]
                self.should_trace_cache[filename] = disp

            tracing_code = disp.trace
            if tracing_code:
                tracename = disp.source_filename
                assert tracename is not None
                self.lock_data()
                try:
                    if tracename not in self.data:
                        self.data[tracename] = set()
                finally:
                    self.unlock_data()
                file_data = self.data[tracename]
                b2l = bytes_to_lines(code)
            else:
                file_data = None
                b2l = None

            code_info = CodeInfo(
                tracing=tracing_code,
                file_data=file_data,
                byte_to_line=b2l,
                branch_trails={},
                always_jumps={},
            )
            self.code_infos[id(code)] = code_info
            self.code_objects.append(code)

        if tracing_code:
            if self.stats is not None:
                self.stats["start_tracing"] += 1
            events = sys.monitoring.events
            with self.lock:
                if self.sysmon_on:
                    assert sys_monitoring is not None
                    local_events = events.PY_RETURN | events.PY_RESUME | events.LINE
                    if self.trace_arcs:
                        assert env.PYBEHAVIOR.branch_right_left
                        local_events |= (
                            events.BRANCH_RIGHT  # type:ignore[attr-defined]
                            | events.BRANCH_LEFT  # type:ignore[attr-defined]
                        )
                    sys_monitoring.set_local_events(self.myid, code, local_events)

        return DISABLE

    @panopticon("code", "@", None)
    def sysmon_py_return(
        self,
        code: CodeType,
        instruction_offset: TOffset,
        retval: object,
    ) -> MonitorReturn:
        """Handle sys.monitoring.events.PY_RETURN events for branch coverage."""
        if self.stats is not None:
            self.stats["returns"] += 1
        code_info = self.code_infos.get(id(code))
        # code_info is not None and code_info.file_data is not None, since we
        # wouldn't have enabled this event if they were.
        last_line = code_info.byte_to_line[instruction_offset]  # type: ignore
        if last_line is not None:
            arc = (last_line, -code.co_firstlineno)
            code_info.file_data.add(arc)  # type: ignore
            # log(f"adding {arc=}")
        return DISABLE

    @panopticon("code", "line")
    def sysmon_line_lines(self, code: CodeType, line_number: TLineNo) -> MonitorReturn:
        """Handle sys.monitoring.events.LINE events for line coverage."""
        if self.stats is not None:
            self.stats["line_lines"] += 1
        code_info = self.code_infos.get(id(code))
        # It should be true that code_info is not None and code_info.file_data
        # is not None, since we wouldn't have enabled this event if they were.
        # But somehow code_info can be None here, so we have to check.
        if code_info is not None and code_info.file_data is not None:
            code_info.file_data.add(line_number)  # type: ignore
            # log(f"adding {line_number=}")
        return DISABLE

    @panopticon("code", "line")
    def sysmon_line_arcs(self, code: CodeType, line_number: TLineNo) -> MonitorReturn:
        """Handle sys.monitoring.events.LINE events for branch coverage."""
        if self.stats is not None:
            self.stats["line_arcs"] += 1
        code_info = self.code_infos[id(code)]
        # code_info is not None and code_info.file_data is not None, since we
        # wouldn't have enabled this event if they were.
        arc = (line_number, line_number)
        code_info.file_data.add(arc)  # type: ignore
        # log(f"adding {arc=}")
        return DISABLE

    @panopticon("code", "@", "@")
    def sysmon_branch_either(
        self, code: CodeType, instruction_offset: TOffset, destination_offset: TOffset
    ) -> MonitorReturn:
        """Handle BRANCH_RIGHT and BRANCH_LEFT events."""
        if self.stats is not None:
            self.stats["branches"] += 1
        code_info = self.code_infos[id(code)]
        # code_info is not None and code_info.file_data is not None, since we
        # wouldn't have enabled this event if they were.
        if not code_info.branch_trails:
            if self.stats is not None:
                self.stats["branch_trails"] += 1
            code_info.branch_trails = branch_trails(code)
            code_info.always_jumps = always_jumps(code)
            # log(f"branch_trails for {code}:\n    {code_info.branch_trails}")
        added_arc = False
        dest_info = code_info.branch_trails.get(instruction_offset)

        # Re-map the destination offset through always-jumps to deal with NOP etc.
        dests = {destination_offset}
        while (dest := code_info.always_jumps.get(destination_offset)) is not None:
            destination_offset = dest
            dests.add(destination_offset)

        # log(f"{dest_info = }")
        if dest_info is not None:
            for arc, offsets in dest_info.items():
                if arc is None:
                    continue
                if dests & offsets:
                    code_info.file_data.add(arc)  # type: ignore
                    # log(f"adding {arc=}")
                    added_arc = True
                    break

        if not added_arc:
            # This could be an exception jumping from line to line.
            assert code_info.byte_to_line is not None
            l1 = code_info.byte_to_line[instruction_offset]
            l2 = code_info.byte_to_line.get(destination_offset)
            if l2 is not None and l1 != l2:
                arc = (l1, l2)
                code_info.file_data.add(arc)  # type: ignore
                # log(f"adding unforeseen {arc=}")

        return DISABLE
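A minimal standalone sys.monitoring tool for Python 3.12+ (a sketch, far
simpler than SysMonitor): count LINE events, returning DISABLE after the
first hit at each location just as the callbacks above do. PROFILER_ID is
used here only because it is a conveniently free tool-id slot::

    import sys

    TOOL = sys.monitoring.PROFILER_ID
    hits = set()

    def on_line(code, line_number):
        hits.add((code.co_filename, line_number))
        return sys.monitoring.DISABLE  # don't fire again for this location

    sys.monitoring.use_tool_id(TOOL, "demo")
    sys.monitoring.register_callback(TOOL, sys.monitoring.events.LINE, on_line)
    sys.monitoring.set_events(TOOL, sys.monitoring.events.LINE)

    total = sum(i * i for i in range(10))   # some traced work

    sys.monitoring.set_events(TOOL, 0)
    sys.monitoring.free_tool_id(TOOL)
    print(len(hits), "distinct lines seen")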
306
venv/lib/python3.12/site-packages/coverage/templite.py
Normal file
@@ -0,0 +1,306 @@
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt

"""A simple Python template renderer, for a nano-subset of Django syntax.

For a detailed discussion of this code, see this chapter from 500 Lines:
http://aosabook.org/en/500L/a-template-engine.html

"""

# Coincidentally named the same as http://code.activestate.com/recipes/496702/

from __future__ import annotations

import re
from typing import Any, Callable, NoReturn, cast


class TempliteSyntaxError(ValueError):
    """Raised when a template has a syntax error."""

    pass


class TempliteValueError(ValueError):
    """Raised when an expression won't evaluate in a template."""

    pass


class CodeBuilder:
    """Build source code conveniently."""

    def __init__(self, indent: int = 0) -> None:
        self.code: list[str | CodeBuilder] = []
        self.indent_level = indent

    def __str__(self) -> str:
        return "".join(str(c) for c in self.code)

    def add_line(self, line: str) -> None:
        """Add a line of source to the code.

        Indentation and newline will be added for you, don't provide them.

        """
        self.code.extend([" " * self.indent_level, line, "\n"])

    def add_section(self) -> CodeBuilder:
        """Add a section, a sub-CodeBuilder."""
        section = CodeBuilder(self.indent_level)
        self.code.append(section)
        return section

    INDENT_STEP = 4  # PEP8 says so!

    def indent(self) -> None:
        """Increase the current indent for following lines."""
        self.indent_level += self.INDENT_STEP

    def dedent(self) -> None:
        """Decrease the current indent for following lines."""
        self.indent_level -= self.INDENT_STEP

    def get_globals(self) -> dict[str, Any]:
        """Execute the code, and return a dict of globals it defines."""
        # A check that the caller really finished all the blocks they started.
        assert self.indent_level == 0
        # Get the Python source as a single string.
        python_source = str(self)
        # Execute the source, defining globals, and return them.
        global_namespace: dict[str, Any] = {}
        exec(python_source, global_namespace)
        return global_namespace


class Templite:
    """A simple template renderer, for a nano-subset of Django syntax.

    Supported constructs are extended variable access::

        {{var.modifier.modifier|filter|filter}}

    loops::

        {% for var in list %}...{% endfor %}

    and ifs::

        {% if var %}...{% endif %}

    Comments are within curly-hash markers::

        {# This will be ignored #}

    Lines between `{% joined %}` and `{% endjoined %}` will have lines stripped
    and joined. Be careful, this could join words together!

    Any of these constructs can have a hyphen at the end (`-}}`, `-%}`, `-#}`),
    which will collapse the white space following the tag.

    Construct a Templite with the template text, then use `render` against a
    dictionary context to create a finished string::

        templite = Templite('''
            <h1>Hello {{name|upper}}!</h1>
            {% for topic in topics %}
                <p>You are interested in {{topic}}.</p>
            {% endfor %}
            ''',
            {"upper": str.upper},
        )
        text = templite.render({
            "name": "Ned",
            "topics": ["Python", "Geometry", "Juggling"],
        })

    """

    def __init__(self, text: str, *contexts: dict[str, Any]) -> None:
        """Construct a Templite with the given `text`.

        `contexts` are dictionaries of values to use for future renderings.
        These are good for filters and global values.

        """
        self.context = {}
        for context in contexts:
            self.context.update(context)

        self.all_vars: set[str] = set()
        self.loop_vars: set[str] = set()

        # We construct a function in source form, then compile it and hold onto
        # it, and execute it to render the template.
        code = CodeBuilder()

        code.add_line("def render_function(context, do_dots):")
        code.indent()
        vars_code = code.add_section()
        code.add_line("result = []")
        code.add_line("append_result = result.append")
        code.add_line("extend_result = result.extend")
        code.add_line("to_str = str")

        buffered: list[str] = []

        def flush_output() -> None:
            """Force `buffered` to the code builder."""
            if len(buffered) == 1:
                code.add_line("append_result(%s)" % buffered[0])
            elif len(buffered) > 1:
                code.add_line("extend_result([%s])" % ", ".join(buffered))
            del buffered[:]

        ops_stack = []

        # Split the text to form a list of tokens.
        tokens = re.split(r"(?s)({{.*?}}|{%.*?%}|{#.*?#})", text)

        squash = in_joined = False

        for token in tokens:
            if token.startswith("{"):
                start, end = 2, -2
                squash = (token[-3] == "-")  # fmt: skip
                if squash:
                    end = -3

                if token.startswith("{#"):
                    # Comment: ignore it and move on.
                    continue
                elif token.startswith("{{"):
                    # An expression to evaluate.
                    expr = self._expr_code(token[start:end].strip())
                    buffered.append("to_str(%s)" % expr)
                else:
                    # token.startswith("{%")
                    # Action tag: split into words and parse further.
                    flush_output()

                    words = token[start:end].strip().split()
                    if words[0] == "if":
                        # An if statement: evaluate the expression to determine if.
                        if len(words) != 2:
                            self._syntax_error("Don't understand if", token)
                        ops_stack.append("if")
                        code.add_line("if %s:" % self._expr_code(words[1]))
                        code.indent()
                    elif words[0] == "for":
                        # A loop: iterate over expression result.
                        if len(words) != 4 or words[2] != "in":
                            self._syntax_error("Don't understand for", token)
                        ops_stack.append("for")
                        self._variable(words[1], self.loop_vars)
                        code.add_line(
                            f"for c_{words[1]} in {self._expr_code(words[3])}:",
                        )
                        code.indent()
                    elif words[0] == "joined":
                        ops_stack.append("joined")
                        in_joined = True
                    elif words[0].startswith("end"):
                        # Endsomething. Pop the ops stack.
                        if len(words) != 1:
                            self._syntax_error("Don't understand end", token)
                        end_what = words[0][3:]
                        if not ops_stack:
                            self._syntax_error("Too many ends", token)
                        start_what = ops_stack.pop()
                        if start_what != end_what:
                            self._syntax_error("Mismatched end tag", end_what)
                        if end_what == "joined":
                            in_joined = False
                        else:
                            code.dedent()
                    else:
                        self._syntax_error("Don't understand tag", words[0])
            else:
                # Literal content. If it isn't empty, output it.
                if in_joined:
                    token = re.sub(r"\s*\n\s*", "", token.strip())
                elif squash:
                    token = token.lstrip()
                if token:
                    buffered.append(repr(token))

        if ops_stack:
            self._syntax_error("Unmatched action tag", ops_stack[-1])

        flush_output()

        for var_name in self.all_vars - self.loop_vars:
            vars_code.add_line(f"c_{var_name} = context[{var_name!r}]")

        code.add_line("return ''.join(result)")
        code.dedent()
        self._render_function = cast(
            Callable[
                [dict[str, Any], Callable[..., Any]],
                str,
            ],
            code.get_globals()["render_function"],
        )

    def _expr_code(self, expr: str) -> str:
        """Generate a Python expression for `expr`."""
        if "|" in expr:
            pipes = expr.split("|")
            code = self._expr_code(pipes[0])
            for func in pipes[1:]:
                self._variable(func, self.all_vars)
                code = f"c_{func}({code})"
        elif "." in expr:
            dots = expr.split(".")
            code = self._expr_code(dots[0])
            args = ", ".join(repr(d) for d in dots[1:])
            code = f"do_dots({code}, {args})"
        else:
            self._variable(expr, self.all_vars)
            code = "c_%s" % expr
        return code

    def _syntax_error(self, msg: str, thing: Any) -> NoReturn:
        """Raise a syntax error using `msg`, and showing `thing`."""
        raise TempliteSyntaxError(f"{msg}: {thing!r}")

    def _variable(self, name: str, vars_set: set[str]) -> None:
        """Track that `name` is used as a variable.

        Adds the name to `vars_set`, a set of variable names.

        Raises a syntax error if `name` is not a valid name.

        """
        if not re.match(r"[_a-zA-Z][_a-zA-Z0-9]*$", name):
            self._syntax_error("Not a valid name", name)
        vars_set.add(name)

    def render(self, context: dict[str, Any] | None = None) -> str:
        """Render this template by applying it to `context`.

        `context` is a dictionary of values to use in this rendering.

        """
        # Make the complete context we'll use.
        render_context = dict(self.context)
        if context:
            render_context.update(context)
        return self._render_function(render_context, self._do_dots)

    def _do_dots(self, value: Any, *dots: str) -> Any:
        """Evaluate dotted expressions at run-time."""
        for dot in dots:
            try:
                value = getattr(value, dot)
            except AttributeError:
                try:
                    value = value[dot]
                except (TypeError, KeyError) as exc:
                    raise TempliteValueError(
                        f"Couldn't evaluate {value!r}.{dot}",
                    ) from exc
            if callable(value):
                value = value()
        return value
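A small runnable variant of the class docstring's example::

    from coverage.templite import Templite

    templite = Templite(
        "<h1>Hello {{name|upper}}!</h1>"
        "{% for topic in topics %} You like {{topic}}.{% endfor %}",
        {"upper": str.upper},
    )
    print(templite.render({"name": "Ned", "topics": ["Python", "Juggling"]}))
    # <h1>Hello NED!</h1> You like Python. You like Juggling.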
210
venv/lib/python3.12/site-packages/coverage/tomlconfig.py
Normal file
@@ -0,0 +1,210 @@
|
||||
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt

"""TOML configuration support for coverage.py"""

from __future__ import annotations

import os
import re
from collections.abc import Iterable
from typing import Any, Callable, TypeVar

from coverage import config, env
from coverage.exceptions import ConfigError
from coverage.misc import import_third_party, isolate_module, substitute_variables
from coverage.types import TConfigSectionOut, TConfigValueOut

os = isolate_module(os)


if env.PYVERSION >= (3, 11, 0, "alpha", 7):
    import tomllib  # pylint: disable=import-error

    has_tomllib = True
else:
    # TOML support on Python 3.10 and below is an install-time extra option.
    tomllib, has_tomllib = import_third_party("tomli")


class TomlDecodeError(Exception):
    """An exception class that exists even when toml isn't installed."""

    pass


TWant = TypeVar("TWant")


class TomlConfigParser:
    """TOML file reading with the interface of HandyConfigParser."""

    # This class has the same interface as config.HandyConfigParser, no
    # need for docstrings.
    # pylint: disable=missing-function-docstring

    def __init__(self, our_file: bool) -> None:
        self.our_file = our_file
        self.data: dict[str, Any] = {}

    def read(self, filenames: Iterable[str]) -> list[str]:
        # RawConfigParser takes a filename or list of filenames, but we only
        # ever call this with a single filename.
        assert isinstance(filenames, (bytes, str, os.PathLike))
        filename = os.fspath(filenames)

        try:
            with open(filename, encoding="utf-8") as fp:
                toml_text = fp.read()
        except OSError:
            return []
        if has_tomllib:
            try:
                self.data = tomllib.loads(toml_text)
            except tomllib.TOMLDecodeError as err:
                raise TomlDecodeError(str(err)) from err
            return [filename]
        else:
            has_toml = re.search(r"^\[tool\.coverage(\.|])", toml_text, flags=re.MULTILINE)
            if self.our_file or has_toml:
                # Looks like they meant to read TOML, but we can't read it.
                msg = "Can't read {!r} without TOML support. Install with [toml] extra"
                raise ConfigError(msg.format(filename))
            return []

    def _get_section(self, section: str) -> tuple[str | None, TConfigSectionOut | None]:
        """Get a section from the data.

        Arguments:
            section (str): A section name, which can be dotted.

        Returns:
            name (str): the actual name of the section that was found, if any,
                or None.
            data (dict): the dict of data in the section, or None if not found.

        """
        prefixes = ["tool.coverage."]
        for prefix in prefixes:
            real_section = prefix + section
            parts = real_section.split(".")
            try:
                data = self.data[parts[0]]
                for part in parts[1:]:
                    data = data[part]
            except KeyError:
                continue
            break
        else:
            return None, None
        return real_section, data
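        # For example (illustrative, not part of the original file): after
        # reading a pyproject.toml that has a [tool.coverage.report] table,
        # _get_section("report") returns ("tool.coverage.report", {...});
        # an unknown section name returns (None, None).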

    def _get(self, section: str, option: str) -> tuple[str, TConfigValueOut]:
        """Like .get, but returns the real section name and the value."""
        name, data = self._get_section(section)
        if data is None:
            raise ConfigError(f"No section: {section!r}")
        assert name is not None
        try:
            value = data[option]
        except KeyError:
            raise ConfigError(f"No option {option!r} in section: {name!r}") from None
        return name, value

    def _get_single(self, section: str, option: str) -> Any:
        """Get a single-valued option.

        Performs environment substitution if the value is a string. Other types
        will be converted later as needed.
        """
        name, value = self._get(section, option)
        if isinstance(value, str):
            value = substitute_variables(value, os.environ)
        return name, value

    def has_option(self, section: str, option: str) -> bool:
        _, data = self._get_section(section)
        if data is None:
            return False
        return option in data

    def real_section(self, section: str) -> str | None:
        name, _ = self._get_section(section)
        return name

    def has_section(self, section: str) -> bool:
        name, _ = self._get_section(section)
        return bool(name)

    def options(self, section: str) -> list[str]:
        _, data = self._get_section(section)
        if data is None:
            raise ConfigError(f"No section: {section!r}")
        return list(data.keys())

    def get_section(self, section: str) -> TConfigSectionOut:
        _, data = self._get_section(section)
        return data or {}

    def get(self, section: str, option: str) -> Any:
        _, value = self._get_single(section, option)
        return value

    def _check_type(
        self,
        section: str,
        option: str,
        value: Any,
        type_: type[TWant],
        converter: Callable[[Any], TWant] | None,
        type_desc: str,
    ) -> TWant:
        """Check that `value` has the type we want, converting if needed.

        Returns the resulting value of the desired type.
        """
        if isinstance(value, type_):
            return value
        if isinstance(value, str) and converter is not None:
            try:
                return converter(value)
            except Exception as e:
                raise ValueError(
                    f"Option [{section}]{option} couldn't convert to {type_desc}: {value!r}",
                ) from e
        raise ValueError(
            f"Option [{section}]{option} is not {type_desc}: {value!r}",
        )
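        # For example (illustrative): getint below ends up calling
        # _check_type(name, option, "3", int, int, "an integer") when env
        # substitution produced the string "3"; the converter returns 3,
        # while a non-convertible value raises ValueError.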

    def getboolean(self, section: str, option: str) -> bool:
        name, value = self._get_single(section, option)
        bool_strings = {"true": True, "false": False}
        return self._check_type(name, option, value, bool, bool_strings.__getitem__, "a boolean")

    def getfile(self, section: str, option: str) -> str:
        _, value = self._get_single(section, option)
        return config.process_file_value(value)

    def _get_list(self, section: str, option: str) -> tuple[str, list[str]]:
        """Get a list of strings, substituting environment variables in the elements."""
        name, values = self._get(section, option)
        values = self._check_type(name, option, values, list, None, "a list")
        values = [substitute_variables(value, os.environ) for value in values]
        return name, values

    def getlist(self, section: str, option: str) -> list[str]:
        _, values = self._get_list(section, option)
        return values

    def getregexlist(self, section: str, option: str) -> list[str]:
        name, values = self._get_list(section, option)
        return config.process_regexlist(name, option, values)

    def getint(self, section: str, option: str) -> int:
        name, value = self._get_single(section, option)
        return self._check_type(name, option, value, int, int, "an integer")

    def getfloat(self, section: str, option: str) -> float:
        name, value = self._get_single(section, option)
        if isinstance(value, int):
            value = float(value)
        return self._check_type(name, option, value, float, float, "a float")
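
A quick sketch of how this parser is exercised (the file contents and option names are illustrative; the class is internal to coverage.py):

from coverage.tomlconfig import TomlConfigParser

# Given a pyproject.toml containing:
#   [tool.coverage.run]
#   branch = true
#   source = ["src"]
parser = TomlConfigParser(our_file=False)
parser.read("pyproject.toml")
assert parser.has_section("run")           # found as [tool.coverage.run]
assert parser.getboolean("run", "branch")  # already a bool, returned as-is
assert parser.getlist("run", "source") == ["src"]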
Binary file not shown.
43
venv/lib/python3.12/site-packages/coverage/tracer.pyi
Normal file
@@ -0,0 +1,43 @@
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt

"""Typing information for the constructs from our .c files."""

from typing import Any, Dict

from coverage.types import TFileDisposition, TTraceData, TTraceFn, Tracer


class CFileDisposition(TFileDisposition):
    """CFileDisposition is in ctracer/filedisp.c"""

    canonical_filename: Any
    file_tracer: Any
    has_dynamic_filename: Any
    original_filename: Any
    reason: Any
    source_filename: Any
    trace: Any
    def __init__(self) -> None: ...


class CTracer(Tracer):
    """CTracer is in ctracer/tracer.c"""

    check_include: Any
    concur_id_func: Any
    data: TTraceData
    disable_plugin: Any
    file_tracers: Any
    should_start_context: Any
    should_trace: Any
    should_trace_cache: Any
    switch_context: Any
    lock_data: Any
    unlock_data: Any
    trace_arcs: Any
    warn: Any
    def __init__(self) -> None: ...
    def activity(self) -> bool: ...
    def get_stats(self) -> Dict[str, int]: ...
    def reset_activity(self) -> Any: ...
    def start(self) -> TTraceFn: ...
    def stop(self) -> None: ...
210
venv/lib/python3.12/site-packages/coverage/types.py
Normal file
@@ -0,0 +1,210 @@
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt

"""
Types for use throughout coverage.py.
"""

from __future__ import annotations

import os
import pathlib
from collections.abc import Iterable, Mapping
from types import FrameType, ModuleType
from typing import TYPE_CHECKING, Any, Callable, Optional, Protocol, Union

if TYPE_CHECKING:
    from coverage.plugin import FileTracer


AnyCallable = Callable[..., Any]

## File paths

# For arguments that are file paths:
if TYPE_CHECKING:
    FilePath = Union[str, os.PathLike[str]]
else:
    # PathLike < python3.9 doesn't support subscription
    FilePath = Union[str, os.PathLike]
# For testing FilePath arguments
FilePathClasses = [str, pathlib.Path]
FilePathType = Union[type[str], type[pathlib.Path]]

## Python tracing


class TTraceFn(Protocol):
    """A Python trace function."""

    def __call__(
        self,
        frame: FrameType,
        event: str,
        arg: Any,
        lineno: TLineNo | None = None,  # Our own twist, see collector.py
    ) -> TTraceFn | None: ...


## Coverage.py tracing

# Line numbers are pervasive enough that they deserve their own type.
TLineNo = int

# Bytecode offsets are pervasive enough that they deserve their own type.
TOffset = int

TArc = tuple[TLineNo, TLineNo]


class TFileDisposition(Protocol):
    """A simple value type for recording what to do with a file."""

    original_filename: str
    canonical_filename: str
    source_filename: str | None
    trace: bool
    reason: str
    file_tracer: FileTracer | None
    has_dynamic_filename: bool


# When collecting data, we use a dictionary with a few possible shapes. The
# keys are always file names.
# - If measuring line coverage, the values are sets of line numbers.
# - If measuring arcs in the Python tracer, the values are sets of arcs (pairs
#   of line numbers).
# - If measuring arcs in the C tracer, the values are sets of packed arcs (two
#   line numbers combined into one integer).

TTraceFileData = Union[set[TLineNo], set[TArc], set[int]]

TTraceData = dict[str, TTraceFileData]
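# For example (illustrative values, not part of the original file):
#   line data:       {"mod.py": {1, 2, 5}}
#   arc data:        {"mod.py": {(1, 2), (2, 5)}}
#   packed arc data: {"mod.py": {4294967298}}  # exact packing is a C-tracer detail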

# Functions passed into collectors.
TShouldTraceFn = Callable[[str, FrameType], TFileDisposition]
TCheckIncludeFn = Callable[[str, FrameType], bool]
TShouldStartContextFn = Callable[[FrameType], Union[str, None]]


class Tracer(Protocol):
    """Anything that can report on Python execution."""

    data: TTraceData
    trace_arcs: bool
    should_trace: TShouldTraceFn
    should_trace_cache: Mapping[str, TFileDisposition | None]
    should_start_context: TShouldStartContextFn | None
    switch_context: Callable[[str | None], None] | None
    lock_data: Callable[[], None]
    unlock_data: Callable[[], None]
    warn: TWarnFn

    def __init__(self) -> None: ...

    def start(self) -> TTraceFn | None:
        """Start this tracer, return a trace function if based on sys.settrace."""

    def stop(self) -> None:
        """Stop this tracer."""

    def activity(self) -> bool:
        """Has there been any activity?"""

    def reset_activity(self) -> None:
        """Reset the activity() flag."""

    def get_stats(self) -> dict[str, int] | None:
        """Return a dictionary of statistics, or None."""


## Coverage

# Many places use kwargs as Coverage kwargs.
TCovKwargs = Any


## Configuration

# One value read from a config file.
TConfigValueIn = Optional[Union[bool, int, float, str, Iterable[str], Mapping[str, Iterable[str]]]]
TConfigValueOut = Optional[Union[bool, int, float, str, list[str], dict[str, list[str]]]]
# An entire config section, mapping option names to values.
TConfigSectionIn = Mapping[str, TConfigValueIn]
TConfigSectionOut = Mapping[str, TConfigValueOut]


class TConfigurable(Protocol):
    """Something that can proxy to the coverage configuration settings."""

    def get_option(self, option_name: str) -> TConfigValueOut | None:
        """Get an option from the configuration.

        `option_name` is a colon-separated string indicating the section and
        option name. For example, the ``branch`` option in the ``[run]``
        section of the config file would be indicated with `"run:branch"`.

        Returns the value of the option.

        """

    def set_option(self, option_name: str, value: TConfigValueIn | TConfigSectionIn) -> None:
        """Set an option in the configuration.

        `option_name` is a colon-separated string indicating the section and
        option name. For example, the ``branch`` option in the ``[run]``
        section of the config file would be indicated with `"run:branch"`.

        `value` is the new value for the option.

        """


class TPluginConfig(Protocol):
    """Something that can provide options to a plugin."""

    def get_plugin_options(self, plugin: str) -> TConfigSectionOut:
        """Get the options for a plugin."""


## Parsing

TMorf = Union[ModuleType, str]

TSourceTokenLines = Iterable[list[tuple[str, str]]]


## Plugins


class TPlugin(Protocol):
    """What all plugins have in common."""

    _coverage_plugin_name: str
    _coverage_enabled: bool


## Debugging


class TWarnFn(Protocol):
    """A callable warn() function."""

    def __call__(self, msg: str, slug: str | None = None, once: bool = False) -> None: ...


class TDebugCtl(Protocol):
    """A DebugControl object, or something like it."""

    def should(self, option: str) -> bool:
        """Decide whether to output debug information in category `option`."""

    def write(self, msg: str) -> None:
        """Write a line of debug output."""


class TWritable(Protocol):
    """Anything that can be written to."""

    def write(self, msg: str) -> None:
        """Write a message."""
35
venv/lib/python3.12/site-packages/coverage/version.py
Normal file
@@ -0,0 +1,35 @@
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt

"""The version and URL for coverage.py"""
# This file is exec'ed in setup.py, don't import anything!

from __future__ import annotations

# version_info: same semantics as sys.version_info.
# _dev: the .devN suffix if any.
version_info = (7, 10, 7, "final", 0)
_dev = 0


def _make_version(
    major: int,
    minor: int,
    micro: int,
    releaselevel: str = "final",
    serial: int = 0,
    dev: int = 0,
) -> str:
    """Create a readable version string from version_info tuple components."""
    assert releaselevel in ["alpha", "beta", "candidate", "final"]
    version = f"{major}.{minor}.{micro}"
    if releaselevel != "final":
        short = {"alpha": "a", "beta": "b", "candidate": "rc"}[releaselevel]
        version += f"{short}{serial}"
    if dev != 0:
        version += f".dev{dev}"
    return version


__version__ = _make_version(*version_info, _dev)
__url__ = f"https://coverage.readthedocs.io/en/{__version__}"
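
To make the tuple-to-string mapping concrete, here is what the helper above produces; the first case is this release, the second is a hypothetical pre-release:

assert _make_version(7, 10, 7, "final", 0, 0) == "7.10.7"
assert _make_version(7, 10, 7, "beta", 3, 2) == "7.10.7b3.dev2"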
264
venv/lib/python3.12/site-packages/coverage/xmlreport.py
Normal file
@@ -0,0 +1,264 @@
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt

"""XML reporting for coverage.py"""

from __future__ import annotations

import os
import os.path
import sys
import time
import xml.dom.minidom
from collections.abc import Iterable
from dataclasses import dataclass
from typing import IO, TYPE_CHECKING, Any

from coverage import __version__, files
from coverage.misc import human_sorted, human_sorted_items, isolate_module
from coverage.plugin import FileReporter
from coverage.report_core import get_analysis_to_report
from coverage.results import Analysis
from coverage.types import TMorf
from coverage.version import __url__

if TYPE_CHECKING:
    from coverage import Coverage

os = isolate_module(os)


DTD_URL = "https://raw.githubusercontent.com/cobertura/web/master/htdocs/xml/coverage-04.dtd"


def rate(hit: int, num: int) -> str:
    """Return the fraction of `hit`/`num`, as a string."""
    if num == 0:
        return "1"
    else:
        return f"{hit / num:.4g}"
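# For example: rate(3, 4) == "0.75", and rate(0, 0) == "1" -- nothing to
# measure counts as fully covered.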


@dataclass
class PackageData:
    """Data we keep about each "package" (in Java terms)."""

    elements: dict[str, xml.dom.minidom.Element]
    hits: int
    lines: int
    br_hits: int
    branches: int


def appendChild(parent: Any, child: Any) -> None:
    """Append a child to a parent, in a way mypy will shut up about."""
    parent.appendChild(child)


class XmlReporter:
    """A reporter for writing Cobertura-style XML coverage results."""

    report_type = "XML report"

    def __init__(self, coverage: Coverage) -> None:
        self.coverage = coverage
        self.config = self.coverage.config

        self.source_paths = set()
        if self.config.source:
            for src in self.config.source:
                if os.path.exists(src):
                    if self.config.relative_files:
                        src = src.rstrip(r"\/")
                    else:
                        src = files.canonical_filename(src)
                    self.source_paths.add(src)
        self.packages: dict[str, PackageData] = {}
        self.xml_out: xml.dom.minidom.Document

    def report(self, morfs: Iterable[TMorf] | None, outfile: IO[str] | None = None) -> float:
        """Generate a Cobertura-compatible XML report for `morfs`.

        `morfs` is a list of modules or file names.

        `outfile` is a file object to write the XML to.

        """
        # Initial setup.
        outfile = outfile or sys.stdout
        has_arcs = self.coverage.get_data().has_arcs()

        # Create the DOM that will store the data.
        impl = xml.dom.minidom.getDOMImplementation()
        assert impl is not None
        self.xml_out = impl.createDocument(None, "coverage", None)

        # Write header stuff.
        xcoverage = self.xml_out.documentElement
        assert xcoverage is not None
        xcoverage.setAttribute("version", __version__)
        xcoverage.setAttribute("timestamp", str(int(time.time() * 1000)))
        xcoverage.appendChild(
            self.xml_out.createComment(
                f" Generated by coverage.py: {__url__} ",
            )
        )
        xcoverage.appendChild(self.xml_out.createComment(f" Based on {DTD_URL} "))

        # Call xml_file for each file in the data.
        for fr, analysis in get_analysis_to_report(self.coverage, morfs):
            self.xml_file(fr, analysis, has_arcs)

        xsources = self.xml_out.createElement("sources")
        xcoverage.appendChild(xsources)

        # Populate the XML DOM with the source info.
        for path in human_sorted(self.source_paths):
            xsource = self.xml_out.createElement("source")
            appendChild(xsources, xsource)
            txt = self.xml_out.createTextNode(path)
            appendChild(xsource, txt)

        lnum_tot, lhits_tot = 0, 0
        bnum_tot, bhits_tot = 0, 0

        xpackages = self.xml_out.createElement("packages")
        xcoverage.appendChild(xpackages)

        # Populate the XML DOM with the package info.
        for pkg_name, pkg_data in human_sorted_items(self.packages.items()):
            xpackage = self.xml_out.createElement("package")
            appendChild(xpackages, xpackage)
            xclasses = self.xml_out.createElement("classes")
            appendChild(xpackage, xclasses)
            for _, class_elt in human_sorted_items(pkg_data.elements.items()):
                appendChild(xclasses, class_elt)
            xpackage.setAttribute("name", pkg_name.replace(os.sep, "."))
            xpackage.setAttribute("line-rate", rate(pkg_data.hits, pkg_data.lines))
            if has_arcs:
                branch_rate = rate(pkg_data.br_hits, pkg_data.branches)
            else:
                branch_rate = "0"
            xpackage.setAttribute("branch-rate", branch_rate)
            xpackage.setAttribute("complexity", "0")

            lhits_tot += pkg_data.hits
            lnum_tot += pkg_data.lines
            bhits_tot += pkg_data.br_hits
            bnum_tot += pkg_data.branches

        xcoverage.setAttribute("lines-valid", str(lnum_tot))
        xcoverage.setAttribute("lines-covered", str(lhits_tot))
        xcoverage.setAttribute("line-rate", rate(lhits_tot, lnum_tot))
        if has_arcs:
            xcoverage.setAttribute("branches-valid", str(bnum_tot))
            xcoverage.setAttribute("branches-covered", str(bhits_tot))
            xcoverage.setAttribute("branch-rate", rate(bhits_tot, bnum_tot))
        else:
            xcoverage.setAttribute("branches-covered", "0")
            xcoverage.setAttribute("branches-valid", "0")
            xcoverage.setAttribute("branch-rate", "0")
        xcoverage.setAttribute("complexity", "0")

        # Write the output file.
        outfile.write(serialize_xml(self.xml_out))

        # Return the total percentage.
        denom = lnum_tot + bnum_tot
        if denom == 0:
            pct = 0.0
        else:
            pct = 100.0 * (lhits_tot + bhits_tot) / denom
        return pct

    def xml_file(self, fr: FileReporter, analysis: Analysis, has_arcs: bool) -> None:
        """Add to the XML report for a single file."""

        if self.config.skip_empty:
            if analysis.numbers.n_statements == 0:
                return

        # Create the "lines" and "package" XML elements, which
        # are populated later. Note that a package == a directory.
        filename = fr.filename.replace("\\", "/")
        for source_path in self.source_paths:
            if not self.config.relative_files:
                source_path = files.canonical_filename(source_path)
            if filename.startswith(source_path.replace("\\", "/") + "/"):
                rel_name = filename[len(source_path) + 1 :]
                break
        else:
            rel_name = fr.relative_filename().replace("\\", "/")
            self.source_paths.add(fr.filename[: -len(rel_name)].rstrip(r"\/"))

        dirname = os.path.dirname(rel_name) or "."
        dirname = "/".join(dirname.split("/")[: self.config.xml_package_depth])
        package_name = dirname.replace("/", ".")

        package = self.packages.setdefault(package_name, PackageData({}, 0, 0, 0, 0))

        xclass: xml.dom.minidom.Element = self.xml_out.createElement("class")

        appendChild(xclass, self.xml_out.createElement("methods"))

        xlines = self.xml_out.createElement("lines")
        appendChild(xclass, xlines)

        xclass.setAttribute("name", os.path.relpath(rel_name, dirname))
        xclass.setAttribute("filename", rel_name.replace("\\", "/"))
        xclass.setAttribute("complexity", "0")

        branch_stats = analysis.branch_stats()
        missing_branch_arcs = analysis.missing_branch_arcs()

        # For each statement, create an XML "line" element.
        for line in sorted(analysis.statements):
            xline = self.xml_out.createElement("line")
            xline.setAttribute("number", str(line))

            # Q: can we get info about the number of times a statement is
            # executed? If so, that should be recorded here.
            xline.setAttribute("hits", str(int(line not in analysis.missing)))

            if has_arcs:
                if line in branch_stats:
                    total, taken = branch_stats[line]
                    xline.setAttribute("branch", "true")
                    xline.setAttribute(
                        "condition-coverage",
                        f"{100 * taken // total}% ({taken}/{total})",
                    )
                if line in missing_branch_arcs:
                    annlines = ["exit" if b < 0 else str(b) for b in missing_branch_arcs[line]]
                    xline.setAttribute("missing-branches", ",".join(annlines))
            appendChild(xlines, xline)

        class_lines = len(analysis.statements)
        class_hits = class_lines - len(analysis.missing)

        if has_arcs:
            class_branches = sum(t for t, k in branch_stats.values())
            missing_branches = sum(t - k for t, k in branch_stats.values())
            class_br_hits = class_branches - missing_branches
        else:
            class_branches = 0
            class_br_hits = 0

        # Finalize the statistics that are collected in the XML DOM.
        xclass.setAttribute("line-rate", rate(class_hits, class_lines))
        if has_arcs:
            branch_rate = rate(class_br_hits, class_branches)
        else:
            branch_rate = "0"
        xclass.setAttribute("branch-rate", branch_rate)

        package.elements[rel_name] = xclass
        package.hits += class_hits
        package.lines += class_lines
        package.br_hits += class_br_hits
        package.branches += class_branches


def serialize_xml(dom: xml.dom.minidom.Document) -> str:
    """Serialize a minidom node to XML."""
    return dom.toprettyxml()
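
Finally, a sketch of how this reporter is typically reached through the public API; the module name and output path are illustrative, and the same report is produced by the `coverage xml` command:

import coverage

cov = coverage.Coverage(branch=True)
cov.start()
import my_module  # hypothetical module to measure
cov.stop()
cov.save()
pct = cov.xml_report(outfile="coverage.xml")  # writes Cobertura-style XML via XmlReporter
print(f"total coverage: {pct:.1f}%")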