Major fixes and new features
All checks were successful
continuous-integration/drone/push Build is passing
All checks were successful
continuous-integration/drone/push Build is passing
This commit is contained in:
28
venv/lib/python3.12/site-packages/mypy/test/config.py
Normal file
28
venv/lib/python3.12/site-packages/mypy/test/config.py
Normal file
@@ -0,0 +1,28 @@
|
||||
"""Shared path and timeout constants for the mypy test suite."""

from __future__ import annotations

import os.path

# The test prefix normally points at the repository checkout, but it can be
# overridden through the MYPY_TEST_PREFIX environment variable.
provided_prefix = os.getenv("MYPY_TEST_PREFIX", None)
if provided_prefix:
    PREFIX = provided_prefix
else:
    # Two directory levels up from .../mypy/test/config.py.
    this_file_dir = os.path.dirname(os.path.realpath(__file__))
    PREFIX = os.path.dirname(os.path.dirname(this_file_dir))

# Location of test data files such as test case descriptions.
test_data_prefix = os.path.join(PREFIX, "test-data", "unit")
package_path = os.path.join(PREFIX, "test-data", "packages")

# Temp directory used for the temp files created when running test cases.
# This is *within* the tempfile.TemporaryDirectory that is chroot'ed per testcase.
# It is also hard-coded in numerous places, so don't change it.
test_temp_dir = "tmp"

# The PEP 561 tests do a bunch of pip installs which, even though they operate
# on distinct temporary virtual environments, run into race conditions on shared
# file-system state. To make this work reliably in parallel mode, we'll use a
# FileLock courtesy of the tox-dev/py-filelock package.
# Ref. https://github.com/python/mypy/issues/12615
# Ref. mypy/test/testpep561.py
pip_lock = os.path.join(package_path, ".pip_lock")
pip_timeout = 60  # seconds to wait for the lock before giving up
|
||||
823
venv/lib/python3.12/site-packages/mypy/test/data.py
Normal file
823
venv/lib/python3.12/site-packages/mypy/test/data.py
Normal file
@@ -0,0 +1,823 @@
|
||||
"""Utilities for processing .test files containing test case descriptions."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
import os.path
|
||||
import posixpath
|
||||
import re
|
||||
import shutil
|
||||
import sys
|
||||
import tempfile
|
||||
from abc import abstractmethod
|
||||
from dataclasses import dataclass
|
||||
from pathlib import Path
|
||||
from typing import Any, Final, Iterator, NamedTuple, NoReturn, Pattern, Union
|
||||
from typing_extensions import TypeAlias as _TypeAlias
|
||||
|
||||
import pytest
|
||||
|
||||
from mypy import defaults
|
||||
from mypy.test.config import PREFIX, test_data_prefix, test_temp_dir
|
||||
|
||||
# Normalized repository root; used to expand the <ROOT> placeholder in test data.
root_dir = os.path.normpath(PREFIX)

# Debuggers that we support for debugging mypyc run tests
# implementation of using each of these debuggers is in test_run.py
# TODO: support more debuggers
SUPPORTED_DEBUGGERS: Final = ["gdb", "lldb"]
|
||||
|
||||
|
||||
# File modify/create operation: copy module contents from source_path.
class UpdateFile(NamedTuple):
    """Incremental-step operation that (re)writes a module file."""

    module: str
    content: str
    target_path: str
|
||||
|
||||
|
||||
# File delete operation: delete module file.
class DeleteFile(NamedTuple):
    """Incremental-step operation that removes a module file."""

    module: str
    path: str
|
||||
|
||||
|
||||
# A single step of a multi-step (incremental) test case.
FileOperation: _TypeAlias = Union[UpdateFile, DeleteFile]
|
||||
|
||||
|
||||
def _file_arg_to_module(filename: str) -> str:
|
||||
filename, _ = os.path.splitext(filename)
|
||||
parts = filename.split("/") # not os.sep since it comes from test data
|
||||
if parts[-1] == "__init__":
|
||||
parts.pop()
|
||||
return ".".join(parts)
|
||||
|
||||
|
||||
def parse_test_case(case: DataDrivenTestCase) -> None:
    """Parse and prepare a single case from suite with test case descriptions.

    This method is part of the setup phase, just before the test case is run.
    It populates the `case` object in place (input, output, files, etc.).
    """
    test_items = parse_test_data(case.data, case.name)
    base_path = case.suite.base_path
    # Test data uses "/" separators; only join with os.sep when the suite asks for it.
    if case.suite.native_sep:
        join = os.path.join
    else:
        join = posixpath.join

    out_section_missing = case.suite.required_out_section

    files: list[tuple[str, str]] = []  # path and contents
    output_files: list[tuple[str, str | Pattern[str]]] = []  # output path and contents
    output: list[str] = []  # Regular output errors
    output2: dict[int, list[str]] = {}  # Output errors for incremental, runs 2+
    deleted_paths: dict[int, set[str]] = {}  # from run number of paths
    stale_modules: dict[int, set[str]] = {}  # from run number to module names
    rechecked_modules: dict[int, set[str]] = {}  # from run number module names
    triggered: list[str] = []  # Active triggers (one line per incremental step)
    targets: dict[int, list[str]] = {}  # Fine-grained targets (per fine-grained update)
    test_modules: list[str] = []  # Modules which are deemed "test" (vs "fixture")

    def _case_fail(msg: str) -> NoReturn:
        # Report a failure attributed to the test case header line.
        pytest.fail(f"{case.file}:{case.line}: {msg}", pytrace=False)

    # Process the parsed items. Each item has a header of form [id args],
    # optionally followed by lines of text.
    item = first_item = test_items[0]
    test_modules.append("__main__")
    for item in test_items[1:]:

        def _item_fail(msg: str) -> NoReturn:
            # Report a failure attributed to the current section's header line.
            item_abs_line = case.line + item.line - 2
            pytest.fail(f"{case.file}:{item_abs_line}: {msg}", pytrace=False)

        if item.id in {"file", "fixture", "outfile", "outfile-re"}:
            # Record an extra file needed for the test case.
            assert item.arg is not None
            contents = expand_variables("\n".join(item.data))
            path = join(base_path, item.arg)
            if item.id != "fixture":
                test_modules.append(_file_arg_to_module(item.arg))
            if item.id in {"file", "fixture"}:
                files.append((path, contents))
            elif item.id == "outfile-re":
                output_files.append((path, re.compile(contents.rstrip(), re.S)))
            elif item.id == "outfile":
                output_files.append((path, contents))
        elif item.id == "builtins":
            # Use an alternative stub file for the builtins module.
            assert item.arg is not None
            mpath = join(os.path.dirname(case.file), item.arg)
            with open(mpath, encoding="utf8") as f:
                files.append((join(base_path, "builtins.pyi"), f.read()))
        elif item.id == "typing":
            # Use an alternative stub file for the typing module.
            assert item.arg is not None
            src_path = join(os.path.dirname(case.file), item.arg)
            with open(src_path, encoding="utf8") as f:
                files.append((join(base_path, "typing.pyi"), f.read()))
        elif item.id == "_typeshed":
            # Use an alternative stub file for the _typeshed module.
            assert item.arg is not None
            src_path = join(os.path.dirname(case.file), item.arg)
            with open(src_path, encoding="utf8") as f:
                files.append((join(base_path, "_typeshed.pyi"), f.read()))
        elif re.match(r"stale[0-9]*$", item.id):
            # [stale] / [staleN]: modules expected to have a stale interface after pass N.
            passnum = 1 if item.id == "stale" else int(item.id[len("stale") :])
            assert passnum > 0
            modules = set() if item.arg is None else {t.strip() for t in item.arg.split(",")}
            stale_modules[passnum] = modules
        elif re.match(r"rechecked[0-9]*$", item.id):
            passnum = 1 if item.id == "rechecked" else int(item.id[len("rechecked") :])
            assert passnum > 0
            modules = set() if item.arg is None else {t.strip() for t in item.arg.split(",")}
            rechecked_modules[passnum] = modules
        elif re.match(r"targets[0-9]*$", item.id):
            passnum = 1 if item.id == "targets" else int(item.id[len("targets") :])
            assert passnum > 0
            reprocessed = [] if item.arg is None else [t.strip() for t in item.arg.split(",")]
            targets[passnum] = reprocessed
        elif item.id == "delete":
            # File/directory to delete during a multi-step test case
            assert item.arg is not None
            m = re.match(r"(.*)\.([0-9]+)$", item.arg)
            if m is None:
                _item_fail(f"Invalid delete section {item.arg!r}")
            num = int(m.group(2))
            if num < 2:
                _item_fail(f"Can't delete during step {num}")
            full = join(base_path, m.group(1))
            deleted_paths.setdefault(num, set()).add(full)
        elif re.match(r"out[0-9]*$", item.id):
            if item.arg is None:
                args = []
            else:
                args = item.arg.split(",")

            # An [out] section may be guarded by a Python version check such
            # as "version>=3.10"; only apply the section when the check holds.
            version_check = True
            for arg in args:
                if arg.startswith("version"):
                    compare_op = arg[7:9]
                    if compare_op not in {">=", "=="}:
                        _item_fail("Only >= and == version checks are currently supported")
                    version_str = arg[9:]
                    try:
                        version = tuple(int(x) for x in version_str.split("."))
                    except ValueError:
                        _item_fail(f"{version_str!r} is not a valid python version")
                    if compare_op == ">=":
                        if version <= defaults.PYTHON3_VERSION:
                            _item_fail(
                                f"{arg} always true since minimum runtime version is {defaults.PYTHON3_VERSION}"
                            )
                        version_check = sys.version_info >= version
                    elif compare_op == "==":
                        if version < defaults.PYTHON3_VERSION:
                            _item_fail(
                                f"{arg} always false since minimum runtime version is {defaults.PYTHON3_VERSION}"
                            )
                        if not 1 < len(version) < 4:
                            _item_fail(
                                f'Only minor or patch version checks are currently supported with "==": {version_str!r}'
                            )
                        version_check = sys.version_info[: len(version)] == version
            if version_check:
                tmp_output = [expand_variables(line) for line in item.data]
                if os.path.sep == "\\" and case.normalize_output:
                    tmp_output = [fix_win_path(line) for line in tmp_output]
                if item.id == "out" or item.id == "out1":
                    output = tmp_output
                else:
                    passnum = int(item.id[len("out") :])
                    assert passnum > 1
                    output2[passnum] = tmp_output
                out_section_missing = False
        elif item.id == "triggered" and item.arg is None:
            triggered = item.data
        else:
            section_str = item.id + (f" {item.arg}" if item.arg else "")
            _item_fail(f"Invalid section header [{section_str}] in case {case.name!r}")

    if out_section_missing:
        _case_fail(f"Required output section not found in case {case.name!r}")

    for passnum in stale_modules.keys():
        if passnum not in rechecked_modules:
            # If the set of rechecked modules isn't specified, make it the same as the set
            # of modules with a stale public interface.
            rechecked_modules[passnum] = stale_modules[passnum]
        if (
            passnum in stale_modules
            and passnum in rechecked_modules
            and not stale_modules[passnum].issubset(rechecked_modules[passnum])
        ):
            _case_fail(f"Stale modules after pass {passnum} must be a subset of rechecked modules")

    # Expected errors embedded as "# E:" comments come after any explicit [out] lines.
    output_inline_start = len(output)
    input = first_item.data
    expand_errors(input, output, "main")
    for file_path, contents in files:
        expand_errors(contents.split("\n"), output, file_path)

    seen_files = set()
    for file, _ in files:
        if file in seen_files:
            _case_fail(f"Duplicated filename {file}. Did you include it multiple times?")

        seen_files.add(file)

    case.input = input
    case.output = output
    case.output_inline_start = output_inline_start
    case.output2 = output2
    case.last_line = case.line + item.line + len(item.data) - 2
    case.files = files
    case.output_files = output_files
    case.expected_stale_modules = stale_modules
    case.expected_rechecked_modules = rechecked_modules
    case.deleted_paths = deleted_paths
    case.triggered = triggered or []
    case.expected_fine_grained_targets = targets
    case.test_modules = test_modules
|
||||
|
||||
|
||||
class DataDrivenTestCase(pytest.Item):
    """Holds parsed data-driven test cases, and handles directory setup and teardown."""

    # Override parent member type
    parent: DataSuiteCollector

    input: list[str]
    output: list[str]  # Output for the first pass
    output_inline_start: int
    output2: dict[int, list[str]]  # Output for runs 2+, indexed by run number

    # full path of test suite
    file = ""
    line = 0

    # (file path, file content) tuples
    files: list[tuple[str, str]]
    # Modules which is to be considered "test" rather than "fixture"
    test_modules: list[str]
    expected_stale_modules: dict[int, set[str]]
    expected_rechecked_modules: dict[int, set[str]]
    expected_fine_grained_targets: dict[int, list[str]]

    # Whether or not we should normalize the output to standardize things like
    # forward vs backward slashes in file paths for Windows vs Linux.
    normalize_output: bool

    # Extra attributes used by some tests.
    last_line: int
    output_files: list[tuple[str, str | Pattern[str]]]  # Path and contents for output files
    deleted_paths: dict[int, set[str]]  # Mapping run number -> paths
    triggered: list[str]  # Active triggers (one line per incremental step)

    def __init__(
        self,
        parent: DataSuiteCollector,
        suite: DataSuite,
        *,
        file: str,
        name: str,
        writescache: bool,
        only_when: str,
        normalize_output: bool,
        platform: str | None,
        skip: bool,
        xfail: bool,
        data: str,
        line: int,
    ) -> None:
        super().__init__(name, parent)
        self.suite = suite
        self.file = file
        self.writescache = writescache
        self.only_when = only_when
        self.normalize_output = normalize_output
        # A platform marker on the case name forces a skip on the other platform.
        if (platform == "windows" and sys.platform != "win32") or (
            platform == "posix" and sys.platform == "win32"
        ):
            skip = True
        self.skip = skip
        self.xfail = xfail
        self.data = data
        self.line = line
        self.old_cwd: str | None = None
        self.tmpdir: tempfile.TemporaryDirectory[str] | None = None

    def runtest(self) -> None:
        """Run the case via its suite's run_case(), honoring skip/xfail markers."""
        if self.skip:
            pytest.skip()
        # TODO: add a better error message for when someone uses skip and xfail at the same time
        elif self.xfail:
            self.add_marker(pytest.mark.xfail)
        parent = self.getparent(DataSuiteCollector)
        assert parent is not None, "Should not happen"
        suite = parent.obj()
        suite.setup()
        try:
            suite.run_case(self)
        except Exception:
            # As a debugging aid, support copying the contents of the tmp directory somewhere
            save_dir: str | None = self.config.getoption("--save-failures-to", None)
            if save_dir:
                assert self.tmpdir is not None
                target_dir = os.path.join(save_dir, os.path.basename(self.tmpdir.name))
                print(f"Copying data from test {self.name} to {target_dir}")
                if not os.path.isabs(target_dir):
                    assert self.old_cwd
                    target_dir = os.path.join(self.old_cwd, target_dir)
                shutil.copytree(self.tmpdir.name, target_dir)
            raise

    def setup(self) -> None:
        """Parse the case and materialize its first-step files in a fresh temp dir."""
        parse_test_case(case=self)
        self.old_cwd = os.getcwd()
        self.tmpdir = tempfile.TemporaryDirectory(prefix="mypy-test-")
        os.chdir(self.tmpdir.name)
        os.mkdir(test_temp_dir)

        # Precalculate steps for find_steps()
        steps: dict[int, list[FileOperation]] = {}

        for path, content in self.files:
            m = re.match(r".*\.([0-9]+)$", path)
            if m:
                # Skip writing subsequent incremental steps - rather
                # store them as operations.
                num = int(m.group(1))
                assert num >= 2
                target_path = re.sub(r"\.[0-9]+$", "", path)
                module = module_from_path(target_path)
                operation = UpdateFile(module, content, target_path)
                steps.setdefault(num, []).append(operation)
            else:
                # Write the first incremental steps
                dir = os.path.dirname(path)
                os.makedirs(dir, exist_ok=True)
                with open(path, "w", encoding="utf8") as f:
                    f.write(content)

        for num, paths in self.deleted_paths.items():
            assert num >= 2
            for path in paths:
                module = module_from_path(path)
                steps.setdefault(num, []).append(DeleteFile(module, path))
        max_step = max(steps) if steps else 2
        self.steps = [steps.get(num, []) for num in range(2, max_step + 1)]

    def teardown(self) -> None:
        """Restore the working directory and remove the per-case temp directory."""
        if self.old_cwd is not None:
            os.chdir(self.old_cwd)
        if self.tmpdir is not None:
            try:
                self.tmpdir.cleanup()
            except OSError:
                # Best-effort cleanup; leftover temp dirs are harmless.
                pass
        self.old_cwd = None
        self.tmpdir = None

    def reportinfo(self) -> tuple[str, int, str]:
        """Tell pytest where this case lives (data file, line, case name)."""
        return self.file, self.line, self.name

    def repr_failure(
        self, excinfo: pytest.ExceptionInfo[BaseException], style: Any | None = None
    ) -> str:
        """Format a failure, suppressing tracebacks that would add no information."""
        excrepr: object
        if isinstance(excinfo.value, SystemExit):
            # We assume that before doing exit() (which raises SystemExit) we've printed
            # enough context about what happened so that a stack trace is not useful.
            # In particular, uncaught exceptions during semantic analysis or type checking
            # call exit() and they already print out a stack trace.
            excrepr = excinfo.exconly()
        elif isinstance(excinfo.value, pytest.fail.Exception) and not excinfo.value.pytrace:
            excrepr = excinfo.exconly()
        else:
            excinfo.traceback = self.parent._traceback_filter(excinfo)
            excrepr = excinfo.getrepr(style="short")

        return f"data: {self.file}:{self.line}:\n{excrepr}"

    def find_steps(self) -> list[list[FileOperation]]:
        """Return a list of descriptions of file operations for each incremental step.

        The first list item corresponds to the first incremental step, the second for the
        second step, etc. Each operation can either be a file modification/creation (UpdateFile)
        or deletion (DeleteFile).

        Defaults to having two steps if there aren't any operations.
        """
        return self.steps
|
||||
|
||||
|
||||
def module_from_path(path: str) -> str:
    """Map a file path under the test base directory to a dotted module name."""
    stem = re.sub(r"\.pyi?$", "", path)
    # We can have a mix of Unix-style and Windows-style separators.
    segments = re.split(r"[/\\]", stem)[1:]  # drop the leading base directory
    dotted = ".".join(segments)
    # A package __init__ file denotes the package itself.
    return re.sub(r"\.__init__$", "", dotted)
|
||||
|
||||
|
||||
@dataclass
class TestItem:
    """Parsed test case item.

    An item is of the form
      [id arg]
      .. data ..
    """

    id: str
    arg: str | None
    # Processed, collapsed text data
    data: list[str]
    # Start line: 1-based, inclusive, relative to testcase
    line: int
    # End line: 1-based, exclusive, relative to testcase; not same as `line + len(test_item.data)` due to collapsing
    end_line: int

    @property
    def trimmed_newlines(self) -> int:  # compensates for strip_list
        return self.end_line - self.line - len(self.data)
|
||||
|
||||
|
||||
def parse_test_data(raw_data: str, name: str) -> list[TestItem]:
    """Parse a list of lines that represent a sequence of test items."""

    # Prepend a synthetic [case NAME] header so the case body is itself an item.
    lines = ["", "[case " + name + "]"] + raw_data.split("\n")
    ret: list[TestItem] = []
    data: list[str] = []

    id: str | None = None
    arg: str | None = None

    i = 0
    i0 = 0  # line index of the current item's header
    while i < len(lines):
        s = lines[i].strip()

        if lines[i].startswith("[") and s.endswith("]"):
            # A "[id arg]" header finishes the previous item and starts a new one.
            if id:
                data = collapse_line_continuation(data)
                data = strip_list(data)
                ret.append(TestItem(id, arg, data, i0 + 1, i))

            i0 = i
            id = s[1:-1]
            arg = None
            if " " in id:
                arg = id[id.index(" ") + 1 :]
                id = id[: id.index(" ")]
            data = []
        elif lines[i].startswith("\\["):
            # "\[" escapes a literal bracket at line start.
            data.append(lines[i][1:])
        elif not lines[i].startswith("--"):
            data.append(lines[i])
        elif lines[i].startswith("----"):
            # "----" is an escaped "--" comment line.
            data.append(lines[i][2:])
        i += 1

    # Process the last item.
    if id:
        data = collapse_line_continuation(data)
        data = strip_list(data)
        ret.append(TestItem(id, arg, data, i0 + 1, i - 1))

    return ret
|
||||
|
||||
|
||||
def strip_list(l: list[str]) -> list[str]:
    """Return a stripped copy of l.

    Strip whitespace at the end of all lines, and strip all empty
    lines from the end of the array.
    """
    stripped = [re.sub(r"\s+$", "", line) for line in l]
    # Drop trailing blank lines.
    while stripped and not stripped[-1]:
        del stripped[-1]
    return stripped
|
||||
|
||||
|
||||
def collapse_line_continuation(l: list[str]) -> list[str]:
    """Join lines ending in a backslash onto the preceding logical line."""
    result: list[str] = []
    continuing = False
    for raw in l:
        without_backslash = re.sub(r"\\$", "", raw)
        if continuing:
            # Continuation: append to the previous line, dropping leading spaces.
            result[-1] += re.sub("^ +", "", without_backslash)
        else:
            result.append(without_backslash)
        continuing = raw.endswith("\\")
    return result
|
||||
|
||||
|
||||
def expand_variables(s: str) -> str:
    """Substitute the repository root for the <ROOT> placeholder in test data."""
    return s.replace("<ROOT>", root_dir)
|
||||
|
||||
|
||||
def expand_errors(input: list[str], output: list[str], fnam: str) -> None:
    """Transform comments such as '# E: message' or
    '# E:3: message' in input.

    The result is lines like 'fnam:line: error: message', appended to output.
    """
    severity_names = {"E": "error", "N": "note", "W": "warning"}
    marker = re.compile(r"^([ENW]):((?P<col>\d+):)? (?P<message>.*)$")
    for lineno0, line in enumerate(input):
        # The first in the split things isn't a comment
        for candidate in line.split(" # ")[1:]:
            match = marker.search(candidate.strip())
            if not match:
                continue
            severity = severity_names[match.group(1)]
            column = match.group("col")
            text = match.group("message").replace("\\#", "#")  # adds back escaped # character
            location = f"{fnam}:{lineno0 + 1}"
            if column is not None:
                location = f"{location}:{column}"
            output.append(f"{location}: {severity}: {text}")
|
||||
|
||||
|
||||
def fix_win_path(line: str) -> str:
    r"""Changes Windows paths to Linux paths in error messages.

    E.g. foo\bar.py -> foo/bar.py.
    """
    # Normalize the repository-root prefix first, then any "file:line:" prefix.
    line = line.replace(root_dir, root_dir.replace("\\", "/"))
    m = re.match(r"^([\S/]+):(\d+:)?(\s+.*)", line)
    if not m:
        return line
    else:
        filename, lineno, message = m.groups()
        return "{}:{}{}".format(filename.replace("\\", "/"), lineno or "", message)
|
||||
|
||||
|
||||
def fix_cobertura_filename(line: str) -> str:
    r"""Changes filename paths to Linux paths in Cobertura output files.

    E.g. filename="pkg\subpkg\a.py" -> filename="pkg/subpkg/a.py".
    """
    match = re.search(r'<class .* filename="(?P<filename>.*?)"', line)
    if match is None:
        return line
    normalized = match.group("filename").replace("\\", "/")
    # Splice the normalized filename back into the original line.
    return line[: match.start(1)] + normalized + line[match.end(1) :]
|
||||
|
||||
|
||||
##
|
||||
#
|
||||
# pytest setup
|
||||
#
|
||||
##
|
||||
|
||||
|
||||
# This function name is special to pytest. See
# https://docs.pytest.org/en/latest/reference.html#initialization-hooks
def pytest_addoption(parser: Any) -> None:
    """Register mypy-specific command line options with pytest."""
    group = parser.getgroup("mypy")
    group.addoption(
        "--update-data",
        action="store_true",
        default=False,
        help="Update test data to reflect actual output (supported only for certain tests)",
    )
    group.addoption(
        "--save-failures-to",
        default=None,
        help="Copy the temp directories from failing tests to a target directory",
    )
    group.addoption(
        "--mypy-verbose", action="count", help="Set the verbose flag when creating mypy Options"
    )
    group.addoption(
        "--mypyc-showc",
        action="store_true",
        default=False,
        help="Display C code on mypyc test failures",
    )
    group.addoption(
        "--mypyc-debug",
        default=None,
        dest="debugger",
        choices=SUPPORTED_DEBUGGERS,
        help="Run the first mypyc run test with the specified debugger",
    )
|
||||
|
||||
|
||||
def pytest_configure(config: pytest.Config) -> None:
    """Reject option combinations that cannot work (--update-data with xdist workers)."""
    if config.getoption("--update-data") and config.getoption("--numprocesses", default=1) > 1:
        raise pytest.UsageError(
            "--update-data incompatible with parallelized tests; re-run with -n 1"
        )
|
||||
|
||||
|
||||
# This function name is special to pytest. See
# https://doc.pytest.org/en/latest/how-to/writing_plugins.html#collection-hooks
def pytest_pycollect_makeitem(collector: Any, name: str, obj: object) -> Any | None:
    """Called by pytest on each object in modules configured in conftest.py files.

    collector is pytest.Collector, returns Optional[pytest.Class]
    """
    if isinstance(obj, type):
        # Only classes derived from DataSuite contain test cases, not the DataSuite class itself
        if issubclass(obj, DataSuite) and obj is not DataSuite:
            # Non-None result means this obj is a test case.
            # The collect method of the returned DataSuiteCollector instance will be called later,
            # with self.obj being obj.
            return DataSuiteCollector.from_parent(  # type: ignore[no-untyped-call]
                parent=collector, name=name
            )
    return None
|
||||
|
||||
|
||||
# Parses a raw case id from a .test file header: a base name optionally followed
# by magic suffixes such as "-writescache", "-posix"/"-windows", "-skip", "-xfail".
_case_name_pattern = re.compile(
    r"(?P<name>[a-zA-Z_0-9]+)"
    r"(?P<writescache>-writescache)?"
    r"(?P<only_when>-only_when_cache|-only_when_nocache)?"
    r"(?P<skip_path_normalization>-skip_path_normalization)?"
    r"(-(?P<platform>posix|windows))?"
    r"(?P<skip>-skip)?"
    r"(?P<xfail>-xfail)?"
)
|
||||
|
||||
|
||||
def split_test_cases(
    parent: DataFileCollector, suite: DataSuite, file: str
) -> Iterator[DataDrivenTestCase]:
    """Iterate over raw test cases in file, at collection time, ignoring sub items.

    The collection phase is slow, so any heavy processing should be deferred to after
    uninteresting tests are filtered (when using -k PATTERN switch).
    """
    with open(file, encoding="utf-8") as f:
        data = f.read()
    # Split on "[case NAME]" headers; re.split with a capturing group yields an
    # alternating sequence: [preamble, case_id, body, case_id, body, ...].
    # NOTE: the case id must admit "-" so magic suffixes like "-skip" survive
    # the split and can be parsed by _case_name_pattern below.
    cases = re.split(r"^\[case ([a-zA-Z_0-9-]+)\][ \t]*$\n", data, flags=re.DOTALL | re.MULTILINE)
    cases_iter = iter(cases)
    line_no = next(cases_iter).count("\n") + 1
    test_names = set()
    for case_id in cases_iter:
        data = next(cases_iter)

        m = _case_name_pattern.fullmatch(case_id)
        if not m:
            raise RuntimeError(f"Invalid testcase id {case_id!r}")
        name = m.group("name")
        if name in test_names:
            raise RuntimeError(
                'Found a duplicate test name "{}" in {} on line {}'.format(
                    name, parent.name, line_no
                )
            )
        yield DataDrivenTestCase.from_parent(
            parent=parent,
            suite=suite,
            file=file,
            name=add_test_name_suffix(name, suite.test_name_suffix),
            writescache=bool(m.group("writescache")),
            only_when=m.group("only_when"),
            platform=m.group("platform"),
            skip=bool(m.group("skip")),
            xfail=bool(m.group("xfail")),
            normalize_output=not m.group("skip_path_normalization"),
            data=data,
            line=line_no,
        )
        line_no += data.count("\n") + 1

        # Record existing tests to prevent duplicates:
        test_names.add(name)
|
||||
|
||||
|
||||
class DataSuiteCollector(pytest.Class):
    """Collects the `.test` data files declared by a single DataSuite subclass."""

    def collect(self) -> Iterator[DataFileCollector]:
        """Called by pytest on each of the object returned from pytest_pycollect_makeitem"""

        # obj is the object for which pytest_pycollect_makeitem returned self.
        suite: DataSuite = self.obj

        assert os.path.isdir(
            suite.data_prefix
        ), f"Test data prefix ({suite.data_prefix}) not set correctly"

        for data_file in suite.files:
            yield DataFileCollector.from_parent(parent=self, name=data_file)
|
||||
|
||||
|
||||
class DataFileFix(NamedTuple):
    """A pending line-range replacement to apply to a .test data file."""

    lineno: int  # 1-offset, inclusive
    end_lineno: int  # 1-offset, exclusive
    lines: list[str]
|
||||
|
||||
|
||||
class DataFileCollector(pytest.Collector):
    """Represents a single `.test` data driven test file.

    More context: https://github.com/python/mypy/issues/11662
    """

    parent: DataSuiteCollector

    # Fixes queued during the run, written back to the data file in teardown.
    _fixes: list[DataFileFix]

    @classmethod  # We have to fight with pytest here:
    def from_parent(
        cls, parent: DataSuiteCollector, *, name: str  # type: ignore[override]
    ) -> DataFileCollector:
        collector = super().from_parent(parent, name=name)
        assert isinstance(collector, DataFileCollector)
        return collector

    def collect(self) -> Iterator[DataDrivenTestCase]:
        """Yield one DataDrivenTestCase per [case ...] section in this data file."""
        yield from split_test_cases(
            parent=self,
            suite=self.parent.obj,
            file=os.path.join(self.parent.obj.data_prefix, self.name),
        )

    def setup(self) -> None:
        super().setup()
        self._fixes = []

    def teardown(self) -> None:
        super().teardown()
        self._apply_fixes()

    def enqueue_fix(self, fix: DataFileFix) -> None:
        """Queue an edit to the data file; applied once the whole file has run."""
        self._fixes.append(fix)

    def _apply_fixes(self) -> None:
        """Write all queued fixes back into the .test data file."""
        if not self._fixes:
            return
        data_path = Path(self.parent.obj.data_prefix) / self.name
        lines = data_path.read_text().split("\n")
        # start from end to prevent line offsets from shifting as we update
        for fix in sorted(self._fixes, reverse=True):
            lines[fix.lineno - 1 : fix.end_lineno - 1] = fix.lines
        data_path.write_text("\n".join(lines))
|
||||
|
||||
|
||||
def add_test_name_suffix(name: str, suffix: str) -> str:
    """Insert suffix into name, keeping any magic "-foo" marker suffix last."""
    # Find magic suffix of form "-foobar" (used for things like "-skip").
    marker = re.search(r"-[-A-Za-z0-9]+$", name)
    if marker is None:
        return name + suffix
    # The magic suffix must remain the last thing in the test case name since
    # we are using endswith() checks, so splice the new suffix in before it.
    tail = marker.group(0)
    return name[: -len(tail)] + suffix + tail
|
||||
|
||||
|
||||
def is_incremental(testcase: DataDrivenTestCase) -> bool:
    """Report whether a test case exercises incremental mode (by name or file)."""
    lowered_name = testcase.name.lower()
    return "incremental" in lowered_name or "incremental" in testcase.file
|
||||
|
||||
|
||||
def has_stable_flags(testcase: DataDrivenTestCase) -> bool:
    """Return True if the case uses the same mypy flags across all steps.

    Flags can vary between incremental steps either through "# flags2:"-style
    comments in the main section or through per-step mypy.ini.N files.
    """
    if any(re.match(r"# flags[2-9]:", line) for line in testcase.input):
        return False
    # File contents are irrelevant here; only the filenames matter.
    for filename, _ in testcase.files:
        if os.path.basename(filename).startswith("mypy.ini."):
            return False
    return True
|
||||
|
||||
|
||||
class DataSuite:
    """Base class for data-driven test suites; subclasses set `files` and run_case()."""

    # option fields - class variables
    files: list[str]

    base_path = test_temp_dir

    # Allow external users of the test code to override the data prefix
    data_prefix = test_data_prefix

    # If True, every case must provide an [out] section.
    required_out_section = False

    # If True, join test-data paths with os.sep instead of "/".
    native_sep = False

    # Name suffix automatically added to each test case in the suite (can be
    # used to distinguish test cases in suites that share data files)
    test_name_suffix = ""

    def setup(self) -> None:
        """Setup fixtures (ad-hoc)"""

    @abstractmethod
    def run_case(self, testcase: DataDrivenTestCase) -> None:
        raise NotImplementedError
|
||||
463
venv/lib/python3.12/site-packages/mypy/test/helpers.py
Normal file
463
venv/lib/python3.12/site-packages/mypy/test/helpers.py
Normal file
@@ -0,0 +1,463 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import contextlib
|
||||
import os
|
||||
import pathlib
|
||||
import re
|
||||
import shutil
|
||||
import sys
|
||||
import time
|
||||
from typing import Any, Callable, Iterable, Iterator, Pattern
|
||||
|
||||
# Exporting Suite as alias to TestCase for backwards compatibility
|
||||
# TODO: avoid aliasing - import and subclass TestCase directly
|
||||
from unittest import TestCase
|
||||
|
||||
Suite = TestCase # re-exporting
|
||||
|
||||
import pytest
|
||||
|
||||
import mypy.api as api
|
||||
import mypy.version
|
||||
from mypy import defaults
|
||||
from mypy.main import process_options
|
||||
from mypy.options import Options
|
||||
from mypy.test.config import test_data_prefix, test_temp_dir
|
||||
from mypy.test.data import DataDrivenTestCase, DeleteFile, UpdateFile, fix_cobertura_filename
|
||||
|
||||
skip = pytest.mark.skip
|
||||
|
||||
# AssertStringArraysEqual displays special line alignment helper messages if
|
||||
# the first different line has at least this many characters,
|
||||
MIN_LINE_LENGTH_FOR_ALIGNMENT = 5
|
||||
|
||||
|
||||
def run_mypy(args: list[str]) -> None:
    """Run mypy via its public API and fail the current test on a nonzero exit.

    Captured stdout/stderr are echoed before failing so problems are
    debuggable from the pytest output.
    """
    __tracebackhide__ = True
    # We must enable site packages even though they could cause problems,
    # since stubs for typing_extensions live there.
    outval, errval, status = api.run(args + ["--show-traceback", "--no-silence-site-packages"])
    if status != 0:
        sys.stdout.write(outval)
        sys.stderr.write(errval)
        pytest.fail(msg="Sample check failed", pytrace=False)
|
||||
|
||||
|
||||
def assert_string_arrays_equal(expected: list[str], actual: list[str], msg: str) -> None:
    """Assert that two string arrays are equal.

    Display any differences in a human-readable form.

    On mismatch, writes an "Expected:"/"Actual:" side-by-side dump to
    stderr (eliding long common prefixes/suffixes), optionally shows a
    character-level alignment of the first differing line, then fails
    the test with *msg*.
    """
    # Normalize the actual output (strip prefixes/trailing whitespace/CRs).
    actual = clean_up(actual)
    if actual != expected:
        # Number of identical leading/trailing lines to elide from the dump.
        num_skip_start = num_skipped_prefix_lines(expected, actual)
        num_skip_end = num_skipped_suffix_lines(expected, actual)

        sys.stderr.write("Expected:\n")

        # If omit some lines at the beginning, indicate it by displaying a line
        # with '...'.
        if num_skip_start > 0:
            sys.stderr.write(" ...\n")

        # Keep track of the first different line.
        first_diff = -1

        # Display only this many first characters of identical lines.
        width = 75

        for i in range(num_skip_start, len(expected) - num_skip_end):
            if i >= len(actual) or expected[i] != actual[i]:
                if first_diff < 0:
                    first_diff = i
                sys.stderr.write(f"  {expected[i]:<45} (diff)")
            else:
                e = expected[i]
                sys.stderr.write(" " + e[:width])
                if len(e) > width:
                    sys.stderr.write("...")
            sys.stderr.write("\n")
        if num_skip_end > 0:
            sys.stderr.write(" ...\n")

        sys.stderr.write("Actual:\n")

        if num_skip_start > 0:
            sys.stderr.write(" ...\n")

        for j in range(num_skip_start, len(actual) - num_skip_end):
            if j >= len(expected) or expected[j] != actual[j]:
                sys.stderr.write(f"  {actual[j]:<45} (diff)")
            else:
                a = actual[j]
                sys.stderr.write(" " + a[:width])
                if len(a) > width:
                    sys.stderr.write("...")
            sys.stderr.write("\n")
        if not actual:
            sys.stderr.write(" (empty)\n")
        if num_skip_end > 0:
            sys.stderr.write(" ...\n")

        sys.stderr.write("\n")

        # Only show the alignment helper when the first differing line is
        # long enough for it to be useful.
        if 0 <= first_diff < len(actual) and (
            len(expected[first_diff]) >= MIN_LINE_LENGTH_FOR_ALIGNMENT
            or len(actual[first_diff]) >= MIN_LINE_LENGTH_FOR_ALIGNMENT
        ):
            # Display message that helps visualize the differences between two
            # long lines.
            show_align_message(expected[first_diff], actual[first_diff])

        pytest.fail(msg, pytrace=False)
|
||||
|
||||
|
||||
def assert_module_equivalence(name: str, expected: Iterable[str], actual: Iterable[str]) -> None:
    """Check that two collections of module names match, order-insensitively.

    "__main__" is ignored on the actual side, and both sides are sorted
    before comparison.
    """
    want = sorted(expected)
    got = sorted(set(actual) - {"__main__"})
    message = ("Actual modules ({}) do not match expected modules ({}) " 'for "[{} ...]"').format(
        ", ".join(got), ", ".join(want), name
    )
    assert_string_arrays_equal(want, got, message)
|
||||
|
||||
|
||||
def assert_target_equivalence(name: str, expected: list[str], actual: list[str]) -> None:
    """Compare actual and expected targets (order sensitive)."""
    message = ("Actual targets ({}) do not match expected targets ({}) " 'for "[{} ...]"').format(
        ", ".join(actual), ", ".join(expected), name
    )
    assert_string_arrays_equal(expected, actual, message)
|
||||
|
||||
|
||||
def show_align_message(s1: str, s2: str) -> None:
    """Align s1 and s2 so that the their first difference is highlighted.

    For example, if s1 is 'foobar' and s2 is 'fobar', display the
    following lines:

      E: foobar
      A: fobar
           ^

    If s1 and s2 are long, only display a fragment of the strings around the
    first difference. If s1 is very short, do nothing.
    """

    # Seeing what went wrong is trivial even without alignment if the expected
    # string is very short. In this case do nothing to simplify output.
    if len(s1) < 4:
        return

    maxw = 72  # Maximum number of characters shown

    sys.stderr.write("Alignment of first line difference:\n")

    trunc = False
    # Strip a long common prefix 10 characters at a time, keeping a
    # 30-character overlap so the first difference stays in view.
    while s1[:30] == s2[:30]:
        s1 = s1[10:]
        s2 = s2[10:]
        trunc = True

    if trunc:
        # Mark that the displayed strings were shortened on the left.
        s1 = "..." + s1
        s2 = "..." + s2

    max_len = max(len(s1), len(s2))
    extra = ""
    if max_len > maxw:
        # Mark that the displayed strings are shortened on the right.
        extra = "..."

    # Write a chunk of both lines, aligned.
    sys.stderr.write(f"  E: {s1[:maxw]}{extra}\n")
    sys.stderr.write(f"  A: {s2[:maxw]}{extra}\n")
    # Write an indicator character under the different columns.
    sys.stderr.write("     ")
    for j in range(min(maxw, max(len(s1), len(s2)))):
        # Slicing (not indexing) avoids IndexError past a string's end.
        if s1[j : j + 1] != s2[j : j + 1]:
            sys.stderr.write("^")  # Difference
            break
        else:
            sys.stderr.write(" ")  # Equal
    sys.stderr.write("\n")
|
||||
|
||||
|
||||
def clean_up(a: list[str]) -> list[str]:
    """Remove common directory prefix from all strings in a.

    This uses a naive string replace; it seems to work well enough.  Also
    removes trailing spaces and trailing carriage returns, and shortens
    the current directory's driver.py path to just "driver.py".
    """
    pwd = os.getcwd()
    driver = pwd + "/driver.py"
    # Candidate prefixes in both native and forward-slash form.
    prefix = os.sep
    candidates = (prefix, prefix.replace(os.sep, "/"))
    result = []
    for line in a:
        cleaned = line
        for p in candidates:
            # A bare separator would eat every path separator in the line,
            # so skip those degenerate prefixes.
            if p != "/" and p != "//" and p != "\\" and p != "\\\\":
                cleaned = cleaned.replace(p, "")
        # Ignore spaces at end of line.
        cleaned = re.sub(" +$", "", cleaned)
        # Remove pwd from driver.py's path
        cleaned = cleaned.replace(driver, "driver.py")
        result.append(re.sub("\\r$", "", cleaned))
    return result
|
||||
|
||||
|
||||
@contextlib.contextmanager
def local_sys_path_set() -> Iterator[None]:
    """Temporarily make the current directory importable.

    This can be used by test cases that do runtime imports, for example
    by the stubgen tests.  The previous sys.path is always restored on
    exit.
    """
    saved = sys.path.copy()
    already_local = "" in sys.path or "." in sys.path
    if not already_local:
        sys.path.insert(0, "")
    try:
        yield
    finally:
        sys.path = saved
|
||||
|
||||
|
||||
def num_skipped_prefix_lines(a1: list[str], a2: list[str]) -> int:
    """Return how many identical leading lines can be elided from a diff.

    Up to four common lines are kept for context, hence the "- 4".
    """
    common = 0
    for left, right in zip(a1, a2):
        if left != right:
            break
        common += 1
    return max(0, common - 4)
|
||||
|
||||
|
||||
def num_skipped_suffix_lines(a1: list[str], a2: list[str]) -> int:
    """Return how many identical trailing lines can be elided from a diff.

    Mirror image of num_skipped_prefix_lines: compares from the end and
    keeps up to four common lines for context.
    """
    common = 0
    limit = min(len(a1), len(a2))
    while common < limit and a1[-common - 1] == a2[-common - 1]:
        common += 1
    return max(0, common - 4)
|
||||
|
||||
|
||||
def testfile_pyversion(path: str) -> tuple[int, int]:
|
||||
if path.endswith("python312.test"):
|
||||
return 3, 12
|
||||
elif path.endswith("python311.test"):
|
||||
return 3, 11
|
||||
elif path.endswith("python310.test"):
|
||||
return 3, 10
|
||||
elif path.endswith("python39.test"):
|
||||
return 3, 9
|
||||
elif path.endswith("python38.test"):
|
||||
return 3, 8
|
||||
else:
|
||||
return defaults.PYTHON3_VERSION
|
||||
|
||||
|
||||
def normalize_error_messages(messages: list[str]) -> list[str]:
    """Translate an array of error messages to use / as path separator."""
    return [message.replace(os.sep, "/") for message in messages]
|
||||
|
||||
|
||||
def retry_on_error(func: Callable[[], Any], max_wait: float = 1.0) -> None:
    """Retry callback with exponential backoff when it raises OSError.

    If the function still generates an error after max_wait seconds,
    propagate the exception.

    This can be effective against random file system operation failures
    on Windows.
    """
    deadline = time.time() + max_wait
    wait_time = 0.01
    while True:
        try:
            func()
        except OSError:
            # Double the backoff, but never sleep past the deadline.
            wait_time = min(wait_time * 2, deadline - time.time())
            if wait_time <= 0.01:
                # Done enough waiting, the error seems persistent.
                raise
            time.sleep(wait_time)
        else:
            return
|
||||
|
||||
|
||||
def good_repr(obj: object) -> str:
    """Return a repr, rendering multi-line strings as a ''' block."""
    if not isinstance(obj, str) or obj.count("\n") <= 1:
        return repr(obj)
    pieces = ["'''\\"]
    for line in obj.split("\n"):
        # force repr to use ' not ", then cut it off
        pieces.append(repr('"' + line)[2:-1])
    pieces[-1] += "'''"
    return "\n".join(pieces)
|
||||
|
||||
|
||||
def assert_equal(a: object, b: object, fmt: str = "{} != {}") -> None:
    """Assert a == b, raising AssertionError with a readable message otherwise."""
    __tracebackhide__ = True
    if a == b:
        return
    raise AssertionError(fmt.format(good_repr(a), good_repr(b)))
|
||||
|
||||
|
||||
def typename(t: type) -> str:
    """Return a short human-readable name for type *t*."""
    text = str(t)
    if "." in text:
        # Qualified name: keep the last component, dropping the "'>" tail.
        return text.split(".")[-1].rstrip("'>")
    # Builtin form "<class 'int'>": slice out the bare name.
    return text[8:-2]
|
||||
|
||||
|
||||
def assert_type(typ: type, value: object) -> None:
    """Assert that *value* is exactly of type *typ* (subclasses do not count)."""
    __tracebackhide__ = True
    actual_type = type(value)
    if actual_type != typ:
        raise AssertionError(f"Invalid type {typename(actual_type)}, expected {typename(typ)}")
|
||||
|
||||
|
||||
def parse_options(
    program_text: str, testcase: DataDrivenTestCase, incremental_step: int
) -> Options:
    """Parse comments like '# flags: --foo' in a test case.

    For incremental steps beyond the first, a step-specific
    "# flagsN:" comment (if present) overrides the base flags.
    Returns a fully-populated Options object.
    """
    options = Options()
    flags = re.search("# flags: (.*)$", program_text, flags=re.MULTILINE)
    if incremental_step > 1:
        # A later step can replace the flags wholesale via "# flagsN:".
        flags2 = re.search(f"# flags{incremental_step}: (.*)$", program_text, flags=re.MULTILINE)
        if flags2:
            flags = flags2

    if flags:
        flag_list = flags.group(1).split()
        flag_list.append("--no-site-packages")  # the tests shouldn't need an installed Python
        targets, options = process_options(flag_list, require_targets=False)
        if targets:
            # TODO: support specifying targets via the flags pragma
            raise RuntimeError("Specifying targets via the flags pragma is not supported.")
        if "--show-error-codes" not in flag_list:
            options.hide_error_codes = True
    else:
        flag_list = []
        options = Options()
        # Defaults tuned for terse, stable test output.
        options.error_summary = False
        options.hide_error_codes = True
        options.force_uppercase_builtins = True
        options.force_union_syntax = True

    # Allow custom python version to override testfile_pyversion.
    if all(flag.split("=")[0] != "--python-version" for flag in flag_list):
        options.python_version = testfile_pyversion(testcase.file)

    # Propagate pytest's --mypy-verbose option into mypy's verbosity.
    if testcase.config.getoption("--mypy-verbose"):
        options.verbosity = testcase.config.getoption("--mypy-verbose")

    return options
|
||||
|
||||
|
||||
def split_lines(*streams: bytes) -> list[str]:
    """Returns a single list of string lines from the byte streams in args."""
    result: list[str] = []
    for stream in streams:
        result.extend(stream.decode("utf8").splitlines())
    return result
|
||||
|
||||
|
||||
def write_and_fudge_mtime(content: str, target_path: str) -> None:
    """Write *content* to *target_path*, bumping the mtime when overwriting.

    In some systems, mtime has a resolution of 1 second which can cause
    annoying-to-debug issues when a file has the same size after a
    change.  We manually set the mtime to circumvent this.  Note that we
    increment the old file's mtime, which guarantees a different value,
    rather than incrementing the mtime after the copy, which could leave
    the mtime unchanged if the old file had a similarly fudged mtime.
    """
    bumped_time = None
    if os.path.isfile(target_path):
        bumped_time = os.stat(target_path).st_mtime + 1

    os.makedirs(os.path.dirname(target_path), exist_ok=True)
    with open(target_path, "w", encoding="utf-8") as target:
        target.write(content)

    if bumped_time:
        os.utime(target_path, times=(bumped_time, bumped_time))
|
||||
|
||||
|
||||
def perform_file_operations(operations: list[UpdateFile | DeleteFile]) -> None:
    """Apply a sequence of file creations/updates and deletions in order."""
    for op in operations:
        if isinstance(op, UpdateFile):
            # Modify/create file
            write_and_fudge_mtime(op.content, op.target_path)
        elif os.path.isdir(op.path):
            # Sanity check to avoid unexpected deletions
            assert op.path.startswith("tmp")
            shutil.rmtree(op.path)
        else:
            # Use retries to work around potential flakiness on Windows (AppVeyor).
            path = op.path
            retry_on_error(lambda: os.remove(path))
|
||||
|
||||
|
||||
def check_test_output_files(
    testcase: DataDrivenTestCase, step: int, strip_prefix: str = ""
) -> None:
    """Compare files written by a test case against its expected output files.

    *step* is only used to make failure messages more precise for
    incremental (multi-step) test cases.  Expected content given as a
    compiled Pattern must fullmatch the whole file; plain strings are
    compared line-by-line after normalization.
    """
    for path, expected_content in testcase.output_files:
        if path.startswith(strip_prefix):
            path = path[len(strip_prefix) :]
        if not os.path.exists(path):
            raise AssertionError(
                "Expected file {} was not produced by test case{}".format(
                    path, " on step %d" % step if testcase.output2 else ""
                )
            )
        with open(path, encoding="utf8") as output_file:
            actual_output_content = output_file.read()

        if isinstance(expected_content, Pattern):
            # Regex expectation: the entire file content must match.
            if expected_content.fullmatch(actual_output_content) is not None:
                continue
            raise AssertionError(
                "Output file {} did not match its expected output pattern\n---\n{}\n---".format(
                    path, actual_output_content
                )
            )

        normalized_output = normalize_file_output(
            actual_output_content.splitlines(), os.path.abspath(test_temp_dir)
        )
        # We always normalize things like timestamp, but only handle operating-system
        # specific things if requested.
        if testcase.normalize_output:
            if testcase.suite.native_sep and os.path.sep == "\\":
                normalized_output = [fix_cobertura_filename(line) for line in normalized_output]
            normalized_output = normalize_error_messages(normalized_output)
        assert_string_arrays_equal(
            expected_content.splitlines(),
            normalized_output,
            "Output file {} did not match its expected output{}".format(
                path, " on step %d" % step if testcase.output2 else ""
            ),
        )
|
||||
|
||||
|
||||
def normalize_file_output(content: list[str], current_abs_path: str) -> list[str]:
    """Normalize file output for comparison.

    Replaces the current absolute path with $PWD, any mypy version
    string with $VERSION, and 10-digit timestamps with $TIMESTAMP.
    """
    result = [line.replace(current_abs_path, "$PWD") for line in content]
    version = mypy.version.__version__
    # We generate a new mypy.version when building mypy wheels that
    # lacks base_version, so handle that case.
    base_version = getattr(mypy.version, "base_version", version)
    for needle in (version, base_version):
        pattern = re.compile(r"\b" + re.escape(needle) + r"\b")
        result = [pattern.sub("$VERSION", line) for line in result]
    timestamp_regex = re.compile(r"\d{10}")
    return [timestamp_regex.sub("$TIMESTAMP", line) for line in result]
|
||||
|
||||
|
||||
def find_test_files(pattern: str, exclude: list[str] | None = None) -> list[str]:
    """Collect the names of test data files matching *pattern*.

    Searches recursively under the test data directory; *exclude* lists
    file names to skip.
    """
    excluded = exclude or []
    matches = pathlib.Path(test_data_prefix).rglob(pattern)
    return [match.name for match in matches if match.name not in excluded]
|
||||
@@ -0,0 +1,103 @@
|
||||
"""
|
||||
A "meta test" which tests the parsing of .test files. This is not meant to become exhaustive
|
||||
but to ensure we maintain a basic level of ergonomics for mypy contributors.
|
||||
"""
|
||||
import subprocess
|
||||
import sys
|
||||
import textwrap
|
||||
import uuid
|
||||
from pathlib import Path
|
||||
|
||||
from mypy.test.config import test_data_prefix
|
||||
from mypy.test.helpers import Suite
|
||||
|
||||
|
||||
class ParseTestDataSuite(Suite):
    """Meta-tests for .test file parsing.

    Each test writes a throwaway data-driven suite into the test-data
    directory, runs pytest on it in a subprocess, and asserts on the
    captured output.  Not meant to be exhaustive — just to keep the
    contributor-facing parse errors working.
    """

    def _dedent(self, s: str) -> str:
        # Normalize a triple-quoted literal: strip common indentation
        # and the leading blank line.
        return textwrap.dedent(s).lstrip()

    def _run_pytest(self, data_suite: str) -> str:
        """Run pytest on *data_suite* written to a throwaway .test file.

        Returns the subprocess' decoded stdout; the temporary file is
        removed even when pytest fails.
        """
        p_test_data = Path(test_data_prefix)
        p_root = p_test_data.parent.parent
        # Unique file name so concurrent runs cannot collide.
        p = p_test_data / f"check-meta-{uuid.uuid4()}.test"
        assert not p.exists()
        try:
            p.write_text(data_suite)
            test_nodeid = f"mypy/test/testcheck.py::TypeCheckSuite::{p.name}"
            args = [sys.executable, "-m", "pytest", "-n", "0", "-s", test_nodeid]
            proc = subprocess.run(args, cwd=p_root, capture_output=True, check=False)
            return proc.stdout.decode()
        finally:
            p.unlink()

    def test_parse_invalid_case(self) -> None:
        """A malformed test case id is reported by name."""
        # Arrange
        data = self._dedent(
            """
            [case abc]
            s: str
            [case foo-XFAIL]
            s: str
            """
        )

        # Act
        actual = self._run_pytest(data)

        # Assert
        assert "Invalid testcase id 'foo-XFAIL'" in actual

    def test_parse_invalid_section(self) -> None:
        """An unknown section header is reported with its line number."""
        # Arrange
        data = self._dedent(
            """
            [case abc]
            s: str
            [unknownsection]
            abc
            """
        )

        # Act
        actual = self._run_pytest(data)

        # Assert
        expected_lineno = data.splitlines().index("[unknownsection]") + 1
        expected = (
            f".test:{expected_lineno}: Invalid section header [unknownsection] in case 'abc'"
        )
        assert expected in actual

    def test_bad_ge_version_check(self) -> None:
        """A >= guard at or below the minimum runtime version is flagged."""
        # Arrange
        data = self._dedent(
            """
            [case abc]
            s: str
            [out version>=3.8]
            abc
            """
        )

        # Act
        actual = self._run_pytest(data)

        # Assert
        assert "version>=3.8 always true since minimum runtime version is (3, 8)" in actual

    def test_bad_eq_version_check(self) -> None:
        """An == guard below the minimum runtime version is flagged."""
        # Arrange
        data = self._dedent(
            """
            [case abc]
            s: str
            [out version==3.7]
            abc
            """
        )

        # Act
        actual = self._run_pytest(data)

        # Assert
        assert "version==3.7 always false since minimum runtime version is (3, 8)" in actual
|
||||
@@ -0,0 +1,157 @@
|
||||
"""
|
||||
A "meta test" which tests the `--update-data` feature for updating .test files.
|
||||
Updating the expected output, especially when it's in the form of inline (comment) assertions,
|
||||
can be brittle, which is why we're "meta-testing" here.
|
||||
"""
|
||||
import shlex
|
||||
import subprocess
|
||||
import sys
|
||||
import textwrap
|
||||
import uuid
|
||||
from pathlib import Path
|
||||
|
||||
from mypy.test.config import test_data_prefix
|
||||
from mypy.test.helpers import Suite
|
||||
|
||||
|
||||
class UpdateDataSuite(Suite):
    """Meta-tests for the pytest --update-data feature.

    Each test writes a throwaway .test suite, runs pytest with
    --update-data on it (possibly several times — incremental cases need
    multiple passes), and compares the rewritten file against the
    expected result.
    """

    def _run_pytest_update_data(self, data_suite: str, *, max_attempts: int) -> str:
        """
        Runs a suite of data test cases through 'pytest --update-data' until either tests pass
        or until a maximum number of attempts (needed for incremental tests).
        """
        p_test_data = Path(test_data_prefix)
        p_root = p_test_data.parent.parent
        # Unique file name so concurrent runs cannot collide.
        p = p_test_data / f"check-meta-{uuid.uuid4()}.test"
        assert not p.exists()
        try:
            p.write_text(textwrap.dedent(data_suite).lstrip())

            test_nodeid = f"mypy/test/testcheck.py::TypeCheckSuite::{p.name}"
            args = [sys.executable, "-m", "pytest", "-n", "0", "-s", "--update-data", test_nodeid]
            cmd = shlex.join(args)
            for i in range(max_attempts - 1, -1, -1):
                res = subprocess.run(args, cwd=p_root)
                if res.returncode == 0:
                    break
                print(f"`{cmd}` returned {res.returncode}: {i} attempts remaining")

            # Return the (possibly rewritten) test-file contents.
            return p.read_text()
        finally:
            p.unlink()

    def test_update_data(self) -> None:
        """End-to-end check of --update-data rewriting across many case shapes."""
        # Note: We test multiple testcases rather than 'test case per test case'
        # so we could also exercise rewriting multiple testcases at once.
        actual = self._run_pytest_update_data(
            """
            [case testCorrect]
            s: str = 42 # E: Incompatible types in assignment (expression has type "int", variable has type "str")

            [case testWrong]
            s: str = 42 # E: wrong error

            [case testXfail-xfail]
            s: str = 42 # E: wrong error

            [case testWrongMultiline]
            s: str = 42 # E: foo \
                # N: bar

            [case testMissingMultiline]
            s: str = 42; i: int = 'foo'

            [case testExtraneous]
            s: str = 'foo' # E: wrong error

            [case testExtraneousMultiline]
            s: str = 'foo' # E: foo \
                # E: bar

            [case testExtraneousMultilineNonError]
            s: str = 'foo' # W: foo \
                # N: bar

            [case testOutCorrect]
            s: str = 42
            [out]
            main:1: error: Incompatible types in assignment (expression has type "int", variable has type "str")

            [case testOutWrong]
            s: str = 42
            [out]
            main:1: error: foobar

            [case testOutWrongIncremental]
            s: str = 42
            [out]
            main:1: error: foobar
            [out2]
            main:1: error: foobar

            [case testWrongMultipleFiles]
            import a, b
            s: str = 42 # E: foo
            [file a.py]
            s1: str = 42 # E: bar
            [file b.py]
            s2: str = 43 # E: baz
            [builtins fixtures/list.pyi]
            """,
            max_attempts=3,
        )

        # Assert
        expected = """
        [case testCorrect]
        s: str = 42 # E: Incompatible types in assignment (expression has type "int", variable has type "str")

        [case testWrong]
        s: str = 42 # E: Incompatible types in assignment (expression has type "int", variable has type "str")

        [case testXfail-xfail]
        s: str = 42 # E: wrong error

        [case testWrongMultiline]
        s: str = 42 # E: Incompatible types in assignment (expression has type "int", variable has type "str")

        [case testMissingMultiline]
        s: str = 42; i: int = 'foo' # E: Incompatible types in assignment (expression has type "int", variable has type "str") \\
            # E: Incompatible types in assignment (expression has type "str", variable has type "int")

        [case testExtraneous]
        s: str = 'foo'

        [case testExtraneousMultiline]
        s: str = 'foo'

        [case testExtraneousMultilineNonError]
        s: str = 'foo'

        [case testOutCorrect]
        s: str = 42
        [out]
        main:1: error: Incompatible types in assignment (expression has type "int", variable has type "str")

        [case testOutWrong]
        s: str = 42
        [out]
        main:1: error: Incompatible types in assignment (expression has type "int", variable has type "str")

        [case testOutWrongIncremental]
        s: str = 42
        [out]
        main:1: error: Incompatible types in assignment (expression has type "int", variable has type "str")
        [out2]
        main:1: error: Incompatible types in assignment (expression has type "int", variable has type "str")

        [case testWrongMultipleFiles]
        import a, b
        s: str = 42 # E: Incompatible types in assignment (expression has type "int", variable has type "str")
        [file a.py]
        s1: str = 42 # E: Incompatible types in assignment (expression has type "int", variable has type "str")
        [file b.py]
        s2: str = 43 # E: Incompatible types in assignment (expression has type "int", variable has type "str")
        [builtins fixtures/list.pyi]
        """
        assert actual == textwrap.dedent(expected).lstrip()
|
||||
376
venv/lib/python3.12/site-packages/mypy/test/test_find_sources.py
Normal file
376
venv/lib/python3.12/site-packages/mypy/test/test_find_sources.py
Normal file
@@ -0,0 +1,376 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
import shutil
|
||||
import tempfile
|
||||
import unittest
|
||||
|
||||
import pytest
|
||||
|
||||
from mypy.find_sources import InvalidSourceList, SourceFinder, create_source_list
|
||||
from mypy.fscache import FileSystemCache
|
||||
from mypy.modulefinder import BuildSource
|
||||
from mypy.options import Options
|
||||
|
||||
|
||||
class FakeFSCache(FileSystemCache):
    """In-memory FileSystemCache backed by a fixed set of file paths."""

    def __init__(self, files: set[str]) -> None:
        # Store absolute paths so lookups match what SourceFinder passes in.
        self.files = {os.path.abspath(f) for f in files}

    def isfile(self, file: str) -> bool:
        return file in self.files

    def isdir(self, dir: str) -> bool:
        prefix = dir if dir.endswith(os.sep) else dir + os.sep
        # A directory "exists" if any known file lives under it.
        return any(f.startswith(prefix) for f in self.files)

    def listdir(self, dir: str) -> list[str]:
        prefix = dir if dir.endswith(os.sep) else dir + os.sep
        # First path component of every file below ``dir``, deduplicated.
        entries = {f[len(prefix) :].split(os.sep)[0] for f in self.files if f.startswith(prefix)}
        return list(entries)

    def init_under_package_root(self, file: str) -> bool:
        # The fake cache never treats files as living under a package root.
        return False
|
||||
|
||||
|
||||
def normalise_path(path: str) -> str:
    """Drop any drive letter and use "/" separators, for cross-platform asserts."""
    _, tail = os.path.splitdrive(path)
    return tail.replace(os.sep, "/")
|
||||
|
||||
|
||||
def normalise_build_source_list(sources: list[BuildSource]) -> list[tuple[str, str | None]]:
    """Convert BuildSources to sorted, path-normalised (module, base_dir) pairs."""
    pairs = []
    for source in sources:
        base = normalise_path(source.base_dir) if source.base_dir is not None else None
        pairs.append((source.module, base))
    return sorted(pairs)
|
||||
|
||||
|
||||
def crawl(finder: SourceFinder, f: str) -> tuple[str, str]:
    """Crawl up from file *f* and return (module, normalised base dir)."""
    crawled_module, crawled_base = finder.crawl_up(f)
    return crawled_module, normalise_path(crawled_base)
|
||||
|
||||
|
||||
def find_sources_in_dir(finder: SourceFinder, f: str) -> list[tuple[str, str | None]]:
    """Find sources under directory *f*, returned as normalised pairs."""
    abs_dir = os.path.abspath(f)
    found = finder.find_sources_in_dir(abs_dir)
    return normalise_build_source_list(found)
|
||||
|
||||
|
||||
def find_sources(
    paths: list[str], options: Options, fscache: FileSystemCache
) -> list[tuple[str, str | None]]:
    """Resolve *paths* via create_source_list, returned as normalised pairs."""
    abs_paths = [os.path.abspath(p) for p in paths]
    sources = create_source_list(abs_paths, options, fscache)
    return normalise_build_source_list(sources)
|
||||
|
||||
|
||||
class SourceFinderSuite(unittest.TestCase):
|
||||
    def setUp(self) -> None:
        """Run each test from inside a fresh temporary working directory."""
        self.tempdir = tempfile.mkdtemp()
        self.oldcwd = os.getcwd()
        os.chdir(self.tempdir)
|
||||
|
||||
    def tearDown(self) -> None:
        """Restore the previous working directory and remove the temp dir."""
        os.chdir(self.oldcwd)
        shutil.rmtree(self.tempdir)
|
||||
|
||||
    def test_crawl_no_namespace(self) -> None:
        """crawl_up() with namespace packages disabled: only chains of
        __init__.py files extend the module name upward."""
        options = Options()
        options.namespace_packages = False

        finder = SourceFinder(FakeFSCache({"/setup.py"}), options)
        assert crawl(finder, "/setup.py") == ("setup", "/")

        finder = SourceFinder(FakeFSCache({"/a/setup.py"}), options)
        assert crawl(finder, "/a/setup.py") == ("setup", "/a")

        finder = SourceFinder(FakeFSCache({"/a/b/setup.py"}), options)
        assert crawl(finder, "/a/b/setup.py") == ("setup", "/a/b")

        # A sibling __init__.py makes the parent a package.
        finder = SourceFinder(FakeFSCache({"/a/setup.py", "/a/__init__.py"}), options)
        assert crawl(finder, "/a/setup.py") == ("a.setup", "/")

        # A directory that is not a valid identifier stops the crawl.
        finder = SourceFinder(FakeFSCache({"/a/invalid-name/setup.py", "/a/__init__.py"}), options)
        assert crawl(finder, "/a/invalid-name/setup.py") == ("setup", "/a/invalid-name")

        # Without namespace packages, a gap (no __init__.py in /a/b) stops the crawl.
        finder = SourceFinder(FakeFSCache({"/a/b/setup.py", "/a/__init__.py"}), options)
        assert crawl(finder, "/a/b/setup.py") == ("setup", "/a/b")

        finder = SourceFinder(
            FakeFSCache({"/a/b/c/setup.py", "/a/__init__.py", "/a/b/c/__init__.py"}), options
        )
        assert crawl(finder, "/a/b/c/setup.py") == ("c.setup", "/a/b")
|
||||
|
||||
    def test_crawl_namespace(self) -> None:
        """crawl_up() with namespace packages enabled: gaps without
        __init__.py no longer stop the upward crawl."""
        options = Options()
        options.namespace_packages = True

        finder = SourceFinder(FakeFSCache({"/setup.py"}), options)
        assert crawl(finder, "/setup.py") == ("setup", "/")

        finder = SourceFinder(FakeFSCache({"/a/setup.py"}), options)
        assert crawl(finder, "/a/setup.py") == ("setup", "/a")

        finder = SourceFinder(FakeFSCache({"/a/b/setup.py"}), options)
        assert crawl(finder, "/a/b/setup.py") == ("setup", "/a/b")

        finder = SourceFinder(FakeFSCache({"/a/setup.py", "/a/__init__.py"}), options)
        assert crawl(finder, "/a/setup.py") == ("a.setup", "/")

        # A non-identifier directory still stops the crawl.
        finder = SourceFinder(FakeFSCache({"/a/invalid-name/setup.py", "/a/__init__.py"}), options)
        assert crawl(finder, "/a/invalid-name/setup.py") == ("setup", "/a/invalid-name")

        # Unlike the non-namespace case, /a/b without __init__.py is crawled through.
        finder = SourceFinder(FakeFSCache({"/a/b/setup.py", "/a/__init__.py"}), options)
        assert crawl(finder, "/a/b/setup.py") == ("a.b.setup", "/")

        finder = SourceFinder(
            FakeFSCache({"/a/b/c/setup.py", "/a/__init__.py", "/a/b/c/__init__.py"}), options
        )
        assert crawl(finder, "/a/b/c/setup.py") == ("a.b.c.setup", "/")
|
||||
|
||||
    def test_crawl_namespace_explicit_base(self) -> None:
        """crawl_up() with explicit package bases: without mypy_path the
        behaviour matches plain namespace packages; with mypy_path set,
        the configured base dirs anchor the module names."""
        options = Options()
        options.namespace_packages = True
        options.explicit_package_bases = True

        finder = SourceFinder(FakeFSCache({"/setup.py"}), options)
        assert crawl(finder, "/setup.py") == ("setup", "/")

        finder = SourceFinder(FakeFSCache({"/a/setup.py"}), options)
        assert crawl(finder, "/a/setup.py") == ("setup", "/a")

        finder = SourceFinder(FakeFSCache({"/a/b/setup.py"}), options)
        assert crawl(finder, "/a/b/setup.py") == ("setup", "/a/b")

        finder = SourceFinder(FakeFSCache({"/a/setup.py", "/a/__init__.py"}), options)
        assert crawl(finder, "/a/setup.py") == ("a.setup", "/")

        finder = SourceFinder(FakeFSCache({"/a/invalid-name/setup.py", "/a/__init__.py"}), options)
        assert crawl(finder, "/a/invalid-name/setup.py") == ("setup", "/a/invalid-name")

        finder = SourceFinder(FakeFSCache({"/a/b/setup.py", "/a/__init__.py"}), options)
        assert crawl(finder, "/a/b/setup.py") == ("a.b.setup", "/")

        finder = SourceFinder(
            FakeFSCache({"/a/b/c/setup.py", "/a/__init__.py", "/a/b/c/__init__.py"}), options
        )
        assert crawl(finder, "/a/b/c/setup.py") == ("a.b.c.setup", "/")

        # set mypy path, so we actually have some explicit base dirs
        options.mypy_path = ["/a/b"]

        finder = SourceFinder(FakeFSCache({"/a/b/c/setup.py"}), options)
        assert crawl(finder, "/a/b/c/setup.py") == ("c.setup", "/a/b")

        finder = SourceFinder(
            FakeFSCache({"/a/b/c/setup.py", "/a/__init__.py", "/a/b/c/__init__.py"}), options
        )
        assert crawl(finder, "/a/b/c/setup.py") == ("c.setup", "/a/b")

        # With multiple bases, the deepest matching base wins.
        options.mypy_path = ["/a/b", "/a/b/c"]
        finder = SourceFinder(FakeFSCache({"/a/b/c/setup.py"}), options)
        assert crawl(finder, "/a/b/c/setup.py") == ("setup", "/a/b/c")
|
||||
|
||||
def test_crawl_namespace_multi_dir(self) -> None:
|
||||
options = Options()
|
||||
options.namespace_packages = True
|
||||
options.explicit_package_bases = True
|
||||
options.mypy_path = ["/a", "/b"]
|
||||
|
||||
finder = SourceFinder(FakeFSCache({"/a/pkg/a.py", "/b/pkg/b.py"}), options)
|
||||
assert crawl(finder, "/a/pkg/a.py") == ("pkg.a", "/a")
|
||||
assert crawl(finder, "/b/pkg/b.py") == ("pkg.b", "/b")
|
||||
|
||||
def test_find_sources_in_dir_no_namespace(self) -> None:
|
||||
options = Options()
|
||||
options.namespace_packages = False
|
||||
|
||||
files = {
|
||||
"/pkg/a1/b/c/d/e.py",
|
||||
"/pkg/a1/b/f.py",
|
||||
"/pkg/a2/__init__.py",
|
||||
"/pkg/a2/b/c/d/e.py",
|
||||
"/pkg/a2/b/f.py",
|
||||
}
|
||||
finder = SourceFinder(FakeFSCache(files), options)
|
||||
assert find_sources_in_dir(finder, "/") == [
|
||||
("a2", "/pkg"),
|
||||
("e", "/pkg/a1/b/c/d"),
|
||||
("e", "/pkg/a2/b/c/d"),
|
||||
("f", "/pkg/a1/b"),
|
||||
("f", "/pkg/a2/b"),
|
||||
]
|
||||
|
||||
def test_find_sources_in_dir_namespace(self) -> None:
|
||||
options = Options()
|
||||
options.namespace_packages = True
|
||||
|
||||
files = {
|
||||
"/pkg/a1/b/c/d/e.py",
|
||||
"/pkg/a1/b/f.py",
|
||||
"/pkg/a2/__init__.py",
|
||||
"/pkg/a2/b/c/d/e.py",
|
||||
"/pkg/a2/b/f.py",
|
||||
}
|
||||
finder = SourceFinder(FakeFSCache(files), options)
|
||||
assert find_sources_in_dir(finder, "/") == [
|
||||
("a2", "/pkg"),
|
||||
("a2.b.c.d.e", "/pkg"),
|
||||
("a2.b.f", "/pkg"),
|
||||
("e", "/pkg/a1/b/c/d"),
|
||||
("f", "/pkg/a1/b"),
|
||||
]
|
||||
|
||||
def test_find_sources_in_dir_namespace_explicit_base(self) -> None:
|
||||
options = Options()
|
||||
options.namespace_packages = True
|
||||
options.explicit_package_bases = True
|
||||
options.mypy_path = ["/"]
|
||||
|
||||
files = {
|
||||
"/pkg/a1/b/c/d/e.py",
|
||||
"/pkg/a1/b/f.py",
|
||||
"/pkg/a2/__init__.py",
|
||||
"/pkg/a2/b/c/d/e.py",
|
||||
"/pkg/a2/b/f.py",
|
||||
}
|
||||
finder = SourceFinder(FakeFSCache(files), options)
|
||||
assert find_sources_in_dir(finder, "/") == [
|
||||
("pkg.a1.b.c.d.e", "/"),
|
||||
("pkg.a1.b.f", "/"),
|
||||
("pkg.a2", "/"),
|
||||
("pkg.a2.b.c.d.e", "/"),
|
||||
("pkg.a2.b.f", "/"),
|
||||
]
|
||||
|
||||
options.mypy_path = ["/pkg"]
|
||||
finder = SourceFinder(FakeFSCache(files), options)
|
||||
assert find_sources_in_dir(finder, "/") == [
|
||||
("a1.b.c.d.e", "/pkg"),
|
||||
("a1.b.f", "/pkg"),
|
||||
("a2", "/pkg"),
|
||||
("a2.b.c.d.e", "/pkg"),
|
||||
("a2.b.f", "/pkg"),
|
||||
]
|
||||
|
||||
def test_find_sources_in_dir_namespace_multi_dir(self) -> None:
|
||||
options = Options()
|
||||
options.namespace_packages = True
|
||||
options.explicit_package_bases = True
|
||||
options.mypy_path = ["/a", "/b"]
|
||||
|
||||
finder = SourceFinder(FakeFSCache({"/a/pkg/a.py", "/b/pkg/b.py"}), options)
|
||||
assert find_sources_in_dir(finder, "/") == [("pkg.a", "/a"), ("pkg.b", "/b")]
|
||||
|
||||
def test_find_sources_exclude(self) -> None:
|
||||
options = Options()
|
||||
options.namespace_packages = True
|
||||
|
||||
# default
|
||||
for excluded_dir in ["site-packages", ".whatever", "node_modules", ".x/.z"]:
|
||||
fscache = FakeFSCache({"/dir/a.py", f"/dir/venv/{excluded_dir}/b.py"})
|
||||
assert find_sources(["/"], options, fscache) == [("a", "/dir")]
|
||||
with pytest.raises(InvalidSourceList):
|
||||
find_sources(["/dir/venv/"], options, fscache)
|
||||
assert find_sources([f"/dir/venv/{excluded_dir}"], options, fscache) == [
|
||||
("b", f"/dir/venv/{excluded_dir}")
|
||||
]
|
||||
assert find_sources([f"/dir/venv/{excluded_dir}/b.py"], options, fscache) == [
|
||||
("b", f"/dir/venv/{excluded_dir}")
|
||||
]
|
||||
|
||||
files = {
|
||||
"/pkg/a1/b/c/d/e.py",
|
||||
"/pkg/a1/b/f.py",
|
||||
"/pkg/a2/__init__.py",
|
||||
"/pkg/a2/b/c/d/e.py",
|
||||
"/pkg/a2/b/f.py",
|
||||
}
|
||||
|
||||
# file name
|
||||
options.exclude = [r"/f\.py$"]
|
||||
fscache = FakeFSCache(files)
|
||||
assert find_sources(["/"], options, fscache) == [
|
||||
("a2", "/pkg"),
|
||||
("a2.b.c.d.e", "/pkg"),
|
||||
("e", "/pkg/a1/b/c/d"),
|
||||
]
|
||||
assert find_sources(["/pkg/a1/b/f.py"], options, fscache) == [("f", "/pkg/a1/b")]
|
||||
assert find_sources(["/pkg/a2/b/f.py"], options, fscache) == [("a2.b.f", "/pkg")]
|
||||
|
||||
# directory name
|
||||
options.exclude = ["/a1/"]
|
||||
fscache = FakeFSCache(files)
|
||||
assert find_sources(["/"], options, fscache) == [
|
||||
("a2", "/pkg"),
|
||||
("a2.b.c.d.e", "/pkg"),
|
||||
("a2.b.f", "/pkg"),
|
||||
]
|
||||
with pytest.raises(InvalidSourceList):
|
||||
find_sources(["/pkg/a1"], options, fscache)
|
||||
with pytest.raises(InvalidSourceList):
|
||||
find_sources(["/pkg/a1/"], options, fscache)
|
||||
with pytest.raises(InvalidSourceList):
|
||||
find_sources(["/pkg/a1/b"], options, fscache)
|
||||
|
||||
options.exclude = ["/a1/$"]
|
||||
assert find_sources(["/pkg/a1"], options, fscache) == [
|
||||
("e", "/pkg/a1/b/c/d"),
|
||||
("f", "/pkg/a1/b"),
|
||||
]
|
||||
|
||||
# paths
|
||||
options.exclude = ["/pkg/a1/"]
|
||||
fscache = FakeFSCache(files)
|
||||
assert find_sources(["/"], options, fscache) == [
|
||||
("a2", "/pkg"),
|
||||
("a2.b.c.d.e", "/pkg"),
|
||||
("a2.b.f", "/pkg"),
|
||||
]
|
||||
with pytest.raises(InvalidSourceList):
|
||||
find_sources(["/pkg/a1"], options, fscache)
|
||||
|
||||
# OR two patterns together
|
||||
for orred in [["/(a1|a3)/"], ["a1", "a3"], ["a3", "a1"]]:
|
||||
options.exclude = orred
|
||||
fscache = FakeFSCache(files)
|
||||
assert find_sources(["/"], options, fscache) == [
|
||||
("a2", "/pkg"),
|
||||
("a2.b.c.d.e", "/pkg"),
|
||||
("a2.b.f", "/pkg"),
|
||||
]
|
||||
|
||||
options.exclude = ["b/c/"]
|
||||
fscache = FakeFSCache(files)
|
||||
assert find_sources(["/"], options, fscache) == [
|
||||
("a2", "/pkg"),
|
||||
("a2.b.f", "/pkg"),
|
||||
("f", "/pkg/a1/b"),
|
||||
]
|
||||
|
||||
# nothing should be ignored as a result of this
|
||||
big_exclude1 = [
|
||||
"/pkg/a/",
|
||||
"/2",
|
||||
"/1",
|
||||
"/pk/",
|
||||
"/kg",
|
||||
"/g.py",
|
||||
"/bc",
|
||||
"/xxx/pkg/a2/b/f.py",
|
||||
"xxx/pkg/a2/b/f.py",
|
||||
]
|
||||
big_exclude2 = ["|".join(big_exclude1)]
|
||||
for big_exclude in [big_exclude1, big_exclude2]:
|
||||
options.exclude = big_exclude
|
||||
fscache = FakeFSCache(files)
|
||||
assert len(find_sources(["/"], options, fscache)) == len(files)
|
||||
|
||||
files = {
|
||||
"pkg/a1/b/c/d/e.py",
|
||||
"pkg/a1/b/f.py",
|
||||
"pkg/a2/__init__.py",
|
||||
"pkg/a2/b/c/d/e.py",
|
||||
"pkg/a2/b/f.py",
|
||||
}
|
||||
fscache = FakeFSCache(files)
|
||||
assert len(find_sources(["."], options, fscache)) == len(files)
|
||||
45
venv/lib/python3.12/site-packages/mypy/test/test_ref_info.py
Normal file
45
venv/lib/python3.12/site-packages/mypy/test/test_ref_info.py
Normal file
@@ -0,0 +1,45 @@
|
||||
"""Test exporting line-level reference information (undocumented feature)"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
|
||||
from mypy import build
|
||||
from mypy.modulefinder import BuildSource
|
||||
from mypy.options import Options
|
||||
from mypy.test.config import test_temp_dir
|
||||
from mypy.test.data import DataDrivenTestCase, DataSuite
|
||||
from mypy.test.helpers import assert_string_arrays_equal
|
||||
|
||||
|
||||
class RefInfoSuite(DataSuite):
|
||||
required_out_section = True
|
||||
files = ["ref-info.test"]
|
||||
|
||||
def run_case(self, testcase: DataDrivenTestCase) -> None:
|
||||
options = Options()
|
||||
options.use_builtins_fixtures = True
|
||||
options.show_traceback = True
|
||||
options.export_ref_info = True # This is the flag we are testing
|
||||
|
||||
src = "\n".join(testcase.input)
|
||||
result = build.build(
|
||||
sources=[BuildSource("main", None, src)], options=options, alt_lib_path=test_temp_dir
|
||||
)
|
||||
assert not result.errors
|
||||
|
||||
major, minor = sys.version_info[:2]
|
||||
ref_path = os.path.join(options.cache_dir, f"{major}.{minor}", "__main__.refs.json")
|
||||
|
||||
with open(ref_path) as refs_file:
|
||||
data = json.load(refs_file)
|
||||
|
||||
a = []
|
||||
for item in data:
|
||||
a.append(f"{item['line']}:{item['column']}:{item['target']}")
|
||||
|
||||
assert_string_arrays_equal(
|
||||
testcase.output, a, f"Invalid output ({testcase.file}, line {testcase.line})"
|
||||
)
|
||||
45
venv/lib/python3.12/site-packages/mypy/test/testapi.py
Normal file
45
venv/lib/python3.12/site-packages/mypy/test/testapi.py
Normal file
@@ -0,0 +1,45 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import sys
|
||||
from io import StringIO
|
||||
|
||||
import mypy.api
|
||||
from mypy.test.helpers import Suite
|
||||
|
||||
|
||||
class APISuite(Suite):
|
||||
def setUp(self) -> None:
|
||||
self.sys_stdout = sys.stdout
|
||||
self.sys_stderr = sys.stderr
|
||||
sys.stdout = self.stdout = StringIO()
|
||||
sys.stderr = self.stderr = StringIO()
|
||||
|
||||
def tearDown(self) -> None:
|
||||
sys.stdout = self.sys_stdout
|
||||
sys.stderr = self.sys_stderr
|
||||
assert self.stdout.getvalue() == ""
|
||||
assert self.stderr.getvalue() == ""
|
||||
|
||||
def test_capture_bad_opt(self) -> None:
|
||||
"""stderr should be captured when a bad option is passed."""
|
||||
_, stderr, _ = mypy.api.run(["--some-bad-option"])
|
||||
assert isinstance(stderr, str)
|
||||
assert stderr != ""
|
||||
|
||||
def test_capture_empty(self) -> None:
|
||||
"""stderr should be captured when a bad option is passed."""
|
||||
_, stderr, _ = mypy.api.run([])
|
||||
assert isinstance(stderr, str)
|
||||
assert stderr != ""
|
||||
|
||||
def test_capture_help(self) -> None:
|
||||
"""stdout should be captured when --help is passed."""
|
||||
stdout, _, _ = mypy.api.run(["--help"])
|
||||
assert isinstance(stdout, str)
|
||||
assert stdout != ""
|
||||
|
||||
def test_capture_version(self) -> None:
|
||||
"""stdout should be captured when --version is passed."""
|
||||
stdout, _, _ = mypy.api.run(["--version"])
|
||||
assert isinstance(stdout, str)
|
||||
assert stdout != ""
|
||||
76
venv/lib/python3.12/site-packages/mypy/test/testargs.py
Normal file
76
venv/lib/python3.12/site-packages/mypy/test/testargs.py
Normal file
@@ -0,0 +1,76 @@
|
||||
"""Ensure the argparse parser and Options class are in sync.
|
||||
|
||||
In particular, verify that the argparse defaults are the same as the Options
|
||||
defaults, and that argparse doesn't assign any new members to the Options
|
||||
object it creates.
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
import argparse
|
||||
import sys
|
||||
|
||||
from mypy.main import infer_python_executable, process_options
|
||||
from mypy.options import Options
|
||||
from mypy.test.helpers import Suite, assert_equal
|
||||
|
||||
|
||||
class ArgSuite(Suite):
|
||||
def test_coherence(self) -> None:
|
||||
options = Options()
|
||||
_, parsed_options = process_options([], require_targets=False)
|
||||
# FIX: test this too. Requires changing working dir to avoid finding 'setup.cfg'
|
||||
options.config_file = parsed_options.config_file
|
||||
assert_equal(options.snapshot(), parsed_options.snapshot())
|
||||
|
||||
def test_executable_inference(self) -> None:
|
||||
"""Test the --python-executable flag with --python-version"""
|
||||
sys_ver_str = "{ver.major}.{ver.minor}".format(ver=sys.version_info)
|
||||
|
||||
base = ["file.py"] # dummy file
|
||||
|
||||
# test inference given one (infer the other)
|
||||
matching_version = base + [f"--python-version={sys_ver_str}"]
|
||||
_, options = process_options(matching_version)
|
||||
assert options.python_version == sys.version_info[:2]
|
||||
assert options.python_executable == sys.executable
|
||||
|
||||
matching_version = base + [f"--python-executable={sys.executable}"]
|
||||
_, options = process_options(matching_version)
|
||||
assert options.python_version == sys.version_info[:2]
|
||||
assert options.python_executable == sys.executable
|
||||
|
||||
# test inference given both
|
||||
matching_version = base + [
|
||||
f"--python-version={sys_ver_str}",
|
||||
f"--python-executable={sys.executable}",
|
||||
]
|
||||
_, options = process_options(matching_version)
|
||||
assert options.python_version == sys.version_info[:2]
|
||||
assert options.python_executable == sys.executable
|
||||
|
||||
# test that --no-site-packages will disable executable inference
|
||||
matching_version = base + [f"--python-version={sys_ver_str}", "--no-site-packages"]
|
||||
_, options = process_options(matching_version)
|
||||
assert options.python_version == sys.version_info[:2]
|
||||
assert options.python_executable is None
|
||||
|
||||
# Test setting python_version/executable from config file
|
||||
special_opts = argparse.Namespace()
|
||||
special_opts.python_executable = None
|
||||
special_opts.python_version = None
|
||||
special_opts.no_executable = None
|
||||
|
||||
# first test inferring executable from version
|
||||
options = Options()
|
||||
options.python_executable = None
|
||||
options.python_version = sys.version_info[:2]
|
||||
infer_python_executable(options, special_opts)
|
||||
assert options.python_version == sys.version_info[:2]
|
||||
assert options.python_executable == sys.executable
|
||||
|
||||
# then test inferring version from executable
|
||||
options = Options()
|
||||
options.python_executable = sys.executable
|
||||
infer_python_executable(options, special_opts)
|
||||
assert options.python_version == sys.version_info[:2]
|
||||
assert options.python_executable == sys.executable
|
||||
322
venv/lib/python3.12/site-packages/mypy/test/testcheck.py
Normal file
322
venv/lib/python3.12/site-packages/mypy/test/testcheck.py
Normal file
@@ -0,0 +1,322 @@
|
||||
"""Type checker test cases"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
|
||||
from mypy import build
|
||||
from mypy.build import Graph
|
||||
from mypy.errors import CompileError
|
||||
from mypy.modulefinder import BuildSource, FindModuleCache, SearchPaths
|
||||
from mypy.options import TYPE_VAR_TUPLE, UNPACK
|
||||
from mypy.test.config import test_data_prefix, test_temp_dir
|
||||
from mypy.test.data import DataDrivenTestCase, DataSuite, FileOperation, module_from_path
|
||||
from mypy.test.helpers import (
|
||||
assert_module_equivalence,
|
||||
assert_string_arrays_equal,
|
||||
assert_target_equivalence,
|
||||
check_test_output_files,
|
||||
find_test_files,
|
||||
normalize_error_messages,
|
||||
parse_options,
|
||||
perform_file_operations,
|
||||
)
|
||||
from mypy.test.update_data import update_testcase_output
|
||||
|
||||
try:
|
||||
import lxml # type: ignore[import-untyped]
|
||||
except ImportError:
|
||||
lxml = None
|
||||
|
||||
import pytest
|
||||
|
||||
# List of files that contain test case descriptions.
|
||||
# Includes all check-* files with the .test extension in the test-data/unit directory
|
||||
typecheck_files = find_test_files(pattern="check-*.test")
|
||||
|
||||
# Tests that use Python version specific features:
|
||||
if sys.version_info < (3, 9):
|
||||
typecheck_files.remove("check-python39.test")
|
||||
if sys.version_info < (3, 10):
|
||||
typecheck_files.remove("check-python310.test")
|
||||
if sys.version_info < (3, 11):
|
||||
typecheck_files.remove("check-python311.test")
|
||||
if sys.version_info < (3, 12):
|
||||
typecheck_files.remove("check-python312.test")
|
||||
|
||||
# Special tests for platforms with case-insensitive filesystems.
|
||||
if sys.platform not in ("darwin", "win32"):
|
||||
typecheck_files.remove("check-modules-case.test")
|
||||
|
||||
|
||||
class TypeCheckSuite(DataSuite):
|
||||
files = typecheck_files
|
||||
|
||||
def run_case(self, testcase: DataDrivenTestCase) -> None:
|
||||
if lxml is None and os.path.basename(testcase.file) == "check-reports.test":
|
||||
pytest.skip("Cannot import lxml. Is it installed?")
|
||||
incremental = (
|
||||
"incremental" in testcase.name.lower()
|
||||
or "incremental" in testcase.file
|
||||
or "serialize" in testcase.file
|
||||
)
|
||||
if incremental:
|
||||
# Incremental tests are run once with a cold cache, once with a warm cache.
|
||||
# Expect success on first run, errors from testcase.output (if any) on second run.
|
||||
num_steps = max([2] + list(testcase.output2.keys()))
|
||||
# Check that there are no file changes beyond the last run (they would be ignored).
|
||||
for dn, dirs, files in os.walk(os.curdir):
|
||||
for file in files:
|
||||
m = re.search(r"\.([2-9])$", file)
|
||||
if m and int(m.group(1)) > num_steps:
|
||||
raise ValueError(
|
||||
"Output file {} exists though test case only has {} runs".format(
|
||||
file, num_steps
|
||||
)
|
||||
)
|
||||
steps = testcase.find_steps()
|
||||
for step in range(1, num_steps + 1):
|
||||
idx = step - 2
|
||||
ops = steps[idx] if idx < len(steps) and idx >= 0 else []
|
||||
self.run_case_once(testcase, ops, step)
|
||||
else:
|
||||
self.run_case_once(testcase)
|
||||
|
||||
def _sort_output_if_needed(self, testcase: DataDrivenTestCase, a: list[str]) -> None:
|
||||
idx = testcase.output_inline_start
|
||||
if not testcase.files or idx == len(testcase.output):
|
||||
return
|
||||
|
||||
def _filename(_msg: str) -> str:
|
||||
return _msg.partition(":")[0]
|
||||
|
||||
file_weights = {file: idx for idx, file in enumerate(_filename(msg) for msg in a)}
|
||||
testcase.output[idx:] = sorted(
|
||||
testcase.output[idx:], key=lambda msg: file_weights.get(_filename(msg), -1)
|
||||
)
|
||||
|
||||
def run_case_once(
|
||||
self,
|
||||
testcase: DataDrivenTestCase,
|
||||
operations: list[FileOperation] = [],
|
||||
incremental_step: int = 0,
|
||||
) -> None:
|
||||
original_program_text = "\n".join(testcase.input)
|
||||
module_data = self.parse_module(original_program_text, incremental_step)
|
||||
|
||||
# Unload already loaded plugins, they may be updated.
|
||||
for file, _ in testcase.files:
|
||||
module = module_from_path(file)
|
||||
if module.endswith("_plugin") and module in sys.modules:
|
||||
del sys.modules[module]
|
||||
if incremental_step == 0 or incremental_step == 1:
|
||||
# In run 1, copy program text to program file.
|
||||
for module_name, program_path, program_text in module_data:
|
||||
if module_name == "__main__":
|
||||
with open(program_path, "w", encoding="utf8") as f:
|
||||
f.write(program_text)
|
||||
break
|
||||
elif incremental_step > 1:
|
||||
# In runs 2+, copy *.[num] files to * files.
|
||||
perform_file_operations(operations)
|
||||
|
||||
# Parse options after moving files (in case mypy.ini is being moved).
|
||||
options = parse_options(original_program_text, testcase, incremental_step)
|
||||
options.use_builtins_fixtures = True
|
||||
if not testcase.name.endswith("_no_incomplete"):
|
||||
options.enable_incomplete_feature = [TYPE_VAR_TUPLE, UNPACK]
|
||||
options.show_traceback = True
|
||||
|
||||
# Enable some options automatically based on test file name.
|
||||
if "columns" in testcase.file:
|
||||
options.show_column_numbers = True
|
||||
if "errorcodes" in testcase.file:
|
||||
options.hide_error_codes = False
|
||||
if "abstract" not in testcase.file:
|
||||
options.allow_empty_bodies = not testcase.name.endswith("_no_empty")
|
||||
if "lowercase" not in testcase.file:
|
||||
options.force_uppercase_builtins = True
|
||||
if "union-error" not in testcase.file:
|
||||
options.force_union_syntax = True
|
||||
|
||||
if incremental_step and options.incremental:
|
||||
# Don't overwrite # flags: --no-incremental in incremental test cases
|
||||
options.incremental = True
|
||||
else:
|
||||
options.incremental = False
|
||||
# Don't waste time writing cache unless we are specifically looking for it
|
||||
if not testcase.writescache:
|
||||
options.cache_dir = os.devnull
|
||||
|
||||
sources = []
|
||||
for module_name, program_path, program_text in module_data:
|
||||
# Always set to none so we're forced to reread the module in incremental mode
|
||||
sources.append(
|
||||
BuildSource(program_path, module_name, None if incremental_step else program_text)
|
||||
)
|
||||
|
||||
plugin_dir = os.path.join(test_data_prefix, "plugins")
|
||||
sys.path.insert(0, plugin_dir)
|
||||
|
||||
res = None
|
||||
try:
|
||||
res = build.build(sources=sources, options=options, alt_lib_path=test_temp_dir)
|
||||
a = res.errors
|
||||
except CompileError as e:
|
||||
a = e.messages
|
||||
finally:
|
||||
assert sys.path[0] == plugin_dir
|
||||
del sys.path[0]
|
||||
|
||||
if testcase.normalize_output:
|
||||
a = normalize_error_messages(a)
|
||||
|
||||
# Make sure error messages match
|
||||
if incremental_step < 2:
|
||||
if incremental_step == 1:
|
||||
msg = "Unexpected type checker output in incremental, run 1 ({}, line {})"
|
||||
else:
|
||||
assert incremental_step == 0
|
||||
msg = "Unexpected type checker output ({}, line {})"
|
||||
self._sort_output_if_needed(testcase, a)
|
||||
output = testcase.output
|
||||
else:
|
||||
msg = (
|
||||
f"Unexpected type checker output in incremental, run {incremental_step}"
|
||||
+ " ({}, line {})"
|
||||
)
|
||||
output = testcase.output2.get(incremental_step, [])
|
||||
|
||||
if output != a and testcase.config.getoption("--update-data", False):
|
||||
update_testcase_output(testcase, a, incremental_step=incremental_step)
|
||||
|
||||
assert_string_arrays_equal(output, a, msg.format(testcase.file, testcase.line))
|
||||
|
||||
if res:
|
||||
if options.cache_dir != os.devnull:
|
||||
self.verify_cache(module_data, res.errors, res.manager, res.graph)
|
||||
|
||||
name = "targets"
|
||||
if incremental_step:
|
||||
name += str(incremental_step + 1)
|
||||
expected = testcase.expected_fine_grained_targets.get(incremental_step + 1)
|
||||
actual = [
|
||||
target
|
||||
for module, target in res.manager.processed_targets
|
||||
if module in testcase.test_modules
|
||||
]
|
||||
if expected is not None:
|
||||
assert_target_equivalence(name, expected, actual)
|
||||
if incremental_step > 1:
|
||||
suffix = "" if incremental_step == 2 else str(incremental_step - 1)
|
||||
expected_rechecked = testcase.expected_rechecked_modules.get(incremental_step - 1)
|
||||
if expected_rechecked is not None:
|
||||
assert_module_equivalence(
|
||||
"rechecked" + suffix, expected_rechecked, res.manager.rechecked_modules
|
||||
)
|
||||
expected_stale = testcase.expected_stale_modules.get(incremental_step - 1)
|
||||
if expected_stale is not None:
|
||||
assert_module_equivalence(
|
||||
"stale" + suffix, expected_stale, res.manager.stale_modules
|
||||
)
|
||||
|
||||
if testcase.output_files:
|
||||
check_test_output_files(testcase, incremental_step, strip_prefix="tmp/")
|
||||
|
||||
def verify_cache(
|
||||
self,
|
||||
module_data: list[tuple[str, str, str]],
|
||||
a: list[str],
|
||||
manager: build.BuildManager,
|
||||
graph: Graph,
|
||||
) -> None:
|
||||
# There should be valid cache metadata for each module except
|
||||
# for those that had an error in themselves or one of their
|
||||
# dependencies.
|
||||
error_paths = self.find_error_message_paths(a)
|
||||
busted_paths = {m.path for id, m in manager.modules.items() if graph[id].transitive_error}
|
||||
modules = self.find_module_files(manager)
|
||||
modules.update({module_name: path for module_name, path, text in module_data})
|
||||
missing_paths = self.find_missing_cache_files(modules, manager)
|
||||
# We would like to assert error_paths.issubset(busted_paths)
|
||||
# but this runs into trouble because while some 'notes' are
|
||||
# really errors that cause an error to be marked, many are
|
||||
# just notes attached to other errors.
|
||||
assert error_paths or not busted_paths, "Some modules reported error despite no errors"
|
||||
if not missing_paths == busted_paths:
|
||||
raise AssertionError(f"cache data discrepancy {missing_paths} != {busted_paths}")
|
||||
assert os.path.isfile(os.path.join(manager.options.cache_dir, ".gitignore"))
|
||||
cachedir_tag = os.path.join(manager.options.cache_dir, "CACHEDIR.TAG")
|
||||
assert os.path.isfile(cachedir_tag)
|
||||
with open(cachedir_tag) as f:
|
||||
assert f.read().startswith("Signature: 8a477f597d28d172789f06886806bc55")
|
||||
|
||||
def find_error_message_paths(self, a: list[str]) -> set[str]:
|
||||
hits = set()
|
||||
for line in a:
|
||||
m = re.match(r"([^\s:]+):(\d+:)?(\d+:)? (error|warning|note):", line)
|
||||
if m:
|
||||
p = m.group(1)
|
||||
hits.add(p)
|
||||
return hits
|
||||
|
||||
def find_module_files(self, manager: build.BuildManager) -> dict[str, str]:
|
||||
return {id: module.path for id, module in manager.modules.items()}
|
||||
|
||||
def find_missing_cache_files(
|
||||
self, modules: dict[str, str], manager: build.BuildManager
|
||||
) -> set[str]:
|
||||
ignore_errors = True
|
||||
missing = {}
|
||||
for id, path in modules.items():
|
||||
meta = build.find_cache_meta(id, path, manager)
|
||||
if not build.validate_meta(meta, id, path, ignore_errors, manager):
|
||||
missing[id] = path
|
||||
return set(missing.values())
|
||||
|
||||
def parse_module(
|
||||
self, program_text: str, incremental_step: int = 0
|
||||
) -> list[tuple[str, str, str]]:
|
||||
"""Return the module and program names for a test case.
|
||||
|
||||
Normally, the unit tests will parse the default ('__main__')
|
||||
module and follow all the imports listed there. You can override
|
||||
this behavior and instruct the tests to check multiple modules
|
||||
by using a comment like this in the test case input:
|
||||
|
||||
# cmd: mypy -m foo.bar foo.baz
|
||||
|
||||
You can also use `# cmdN:` to have a different cmd for incremental
|
||||
step N (2, 3, ...).
|
||||
|
||||
Return a list of tuples (module name, file name, program text).
|
||||
"""
|
||||
m = re.search("# cmd: mypy -m ([a-zA-Z0-9_. ]+)$", program_text, flags=re.MULTILINE)
|
||||
if incremental_step > 1:
|
||||
alt_regex = f"# cmd{incremental_step}: mypy -m ([a-zA-Z0-9_. ]+)$"
|
||||
alt_m = re.search(alt_regex, program_text, flags=re.MULTILINE)
|
||||
if alt_m is not None:
|
||||
# Optionally return a different command if in a later step
|
||||
# of incremental mode, otherwise default to reusing the
|
||||
# original cmd.
|
||||
m = alt_m
|
||||
|
||||
if m:
|
||||
# The test case wants to use a non-default main
|
||||
# module. Look up the module and give it as the thing to
|
||||
# analyze.
|
||||
module_names = m.group(1)
|
||||
out = []
|
||||
search_paths = SearchPaths((test_temp_dir,), (), (), ())
|
||||
cache = FindModuleCache(search_paths, fscache=None, options=None)
|
||||
for module_name in module_names.split(" "):
|
||||
path = cache.find_module(module_name)
|
||||
assert isinstance(path, str), f"Can't find ad hoc case file: {module_name}"
|
||||
with open(path, encoding="utf8") as f:
|
||||
program_text = f.read()
|
||||
out.append((module_name, path, program_text))
|
||||
return out
|
||||
else:
|
||||
return [("__main__", "main", program_text)]
|
||||
152
venv/lib/python3.12/site-packages/mypy/test/testcmdline.py
Normal file
152
venv/lib/python3.12/site-packages/mypy/test/testcmdline.py
Normal file
@@ -0,0 +1,152 @@
|
||||
"""Test cases for the command line.
|
||||
|
||||
To begin we test that "mypy <directory>[/]" always recurses down the
|
||||
whole tree.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
import re
|
||||
import subprocess
|
||||
import sys
|
||||
|
||||
from mypy.test.config import PREFIX, test_temp_dir
|
||||
from mypy.test.data import DataDrivenTestCase, DataSuite
|
||||
from mypy.test.helpers import (
|
||||
assert_string_arrays_equal,
|
||||
check_test_output_files,
|
||||
normalize_error_messages,
|
||||
)
|
||||
|
||||
try:
|
||||
import lxml # type: ignore[import-untyped]
|
||||
except ImportError:
|
||||
lxml = None
|
||||
|
||||
import pytest
|
||||
|
||||
# Path to Python 3 interpreter
|
||||
python3_path = sys.executable
|
||||
|
||||
# Files containing test case descriptions.
|
||||
cmdline_files = ["cmdline.test", "cmdline.pyproject.test", "reports.test", "envvars.test"]
|
||||
|
||||
|
||||
class PythonCmdlineSuite(DataSuite):
|
||||
files = cmdline_files
|
||||
native_sep = True
|
||||
|
||||
def run_case(self, testcase: DataDrivenTestCase) -> None:
|
||||
if lxml is None and os.path.basename(testcase.file) == "reports.test":
|
||||
pytest.skip("Cannot import lxml. Is it installed?")
|
||||
for step in [1] + sorted(testcase.output2):
|
||||
test_python_cmdline(testcase, step)
|
||||
|
||||
|
||||
def test_python_cmdline(testcase: DataDrivenTestCase, step: int) -> None:
|
||||
assert testcase.old_cwd is not None, "test was not properly set up"
|
||||
# Write the program to a file.
|
||||
program = "_program.py"
|
||||
program_path = os.path.join(test_temp_dir, program)
|
||||
with open(program_path, "w", encoding="utf8") as file:
|
||||
for s in testcase.input:
|
||||
file.write(f"{s}\n")
|
||||
args = parse_args(testcase.input[0])
|
||||
custom_cwd = parse_cwd(testcase.input[1]) if len(testcase.input) > 1 else None
|
||||
args.append("--show-traceback")
|
||||
if "--error-summary" not in args:
|
||||
args.append("--no-error-summary")
|
||||
if "--show-error-codes" not in args:
|
||||
args.append("--hide-error-codes")
|
||||
if "--disallow-empty-bodies" not in args:
|
||||
args.append("--allow-empty-bodies")
|
||||
if "--no-force-uppercase-builtins" not in args:
|
||||
args.append("--force-uppercase-builtins")
|
||||
if "--no-force-union-syntax" not in args:
|
||||
args.append("--force-union-syntax")
|
||||
# Type check the program.
|
||||
fixed = [python3_path, "-m", "mypy"]
|
||||
env = os.environ.copy()
|
||||
env.pop("COLUMNS", None)
|
||||
extra_path = os.path.join(os.path.abspath(test_temp_dir), "pypath")
|
||||
env["PYTHONPATH"] = PREFIX
|
||||
if os.path.isdir(extra_path):
|
||||
env["PYTHONPATH"] += os.pathsep + extra_path
|
||||
cwd = os.path.join(test_temp_dir, custom_cwd or "")
|
||||
args = [arg.replace("$CWD", os.path.abspath(cwd)) for arg in args]
|
||||
process = subprocess.Popen(
|
||||
fixed + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd, env=env
|
||||
)
|
||||
outb, errb = process.communicate()
|
||||
result = process.returncode
|
||||
# Split output into lines.
|
||||
out = [s.rstrip("\n\r") for s in str(outb, "utf8").splitlines()]
|
||||
err = [s.rstrip("\n\r") for s in str(errb, "utf8").splitlines()]
|
||||
|
||||
if "PYCHARM_HOSTED" in os.environ:
|
||||
for pos, line in enumerate(err):
|
||||
if line.startswith("pydev debugger: "):
|
||||
# Delete the attaching debugger message itself, plus the extra newline added.
|
||||
del err[pos : pos + 2]
|
||||
break
|
||||
|
||||
# Remove temp file.
|
||||
os.remove(program_path)
|
||||
# Compare actual output to expected.
|
||||
if testcase.output_files:
|
||||
# Ignore stdout, but we insist on empty stderr and zero status.
|
||||
if err or result:
|
||||
raise AssertionError(
|
||||
"Expected zero status and empty stderr%s, got %d and\n%s"
|
||||
% (" on step %d" % step if testcase.output2 else "", result, "\n".join(err + out))
|
||||
)
|
||||
check_test_output_files(testcase, step)
|
||||
else:
|
||||
if testcase.normalize_output:
|
||||
out = normalize_error_messages(err + out)
|
||||
obvious_result = 1 if out else 0
|
||||
if obvious_result != result:
|
||||
out.append(f"== Return code: {result}")
|
||||
expected_out = testcase.output if step == 1 else testcase.output2[step]
|
||||
# Strip "tmp/" out of the test so that # E: works...
|
||||
expected_out = [s.replace("tmp" + os.sep, "") for s in expected_out]
|
||||
assert_string_arrays_equal(
|
||||
expected_out,
|
||||
out,
|
||||
"Invalid output ({}, line {}){}".format(
|
||||
testcase.file, testcase.line, " on step %d" % step if testcase.output2 else ""
|
||||
),
|
||||
)
|
||||
|
||||
|
||||
def parse_args(line: str) -> list[str]:
|
||||
"""Parse the first line of the program for the command line.
|
||||
|
||||
This should have the form
|
||||
|
||||
# cmd: mypy <options>
|
||||
|
||||
For example:
|
||||
|
||||
# cmd: mypy pkg/
|
||||
"""
|
||||
m = re.match("# cmd: mypy (.*)$", line)
|
||||
if not m:
|
||||
return [] # No args; mypy will spit out an error.
|
||||
return m.group(1).split()
|
||||
|
||||
|
||||
def parse_cwd(line: str) -> str | None:
|
||||
"""Parse the second line of the program for the command line.
|
||||
|
||||
This should have the form
|
||||
|
||||
# cwd: <directory>
|
||||
|
||||
For example:
|
||||
|
||||
# cwd: main/subdir
|
||||
"""
|
||||
m = re.match("# cwd: (.*)$", line)
|
||||
return m.group(1) if m else None
|
||||
158
venv/lib/python3.12/site-packages/mypy/test/testconstraints.py
Normal file
158
venv/lib/python3.12/site-packages/mypy/test/testconstraints.py
Normal file
@@ -0,0 +1,158 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from mypy.constraints import SUBTYPE_OF, SUPERTYPE_OF, Constraint, infer_constraints
|
||||
from mypy.test.helpers import Suite
|
||||
from mypy.test.typefixture import TypeFixture
|
||||
from mypy.types import Instance, TupleType, UnpackType
|
||||
|
||||
|
||||
class ConstraintsSuite(Suite):
|
||||
def setUp(self) -> None:
|
||||
self.fx = TypeFixture()
|
||||
|
||||
def test_no_type_variables(self) -> None:
|
||||
assert not infer_constraints(self.fx.o, self.fx.o, SUBTYPE_OF)
|
||||
|
||||
def test_basic_type_variable(self) -> None:
|
||||
fx = self.fx
|
||||
for direction in [SUBTYPE_OF, SUPERTYPE_OF]:
|
||||
assert infer_constraints(fx.gt, fx.ga, direction) == [
|
||||
Constraint(type_var=fx.t, op=direction, target=fx.a)
|
||||
]
|
||||
|
||||
def test_basic_type_var_tuple_subtype(self) -> None:
|
||||
fx = self.fx
|
||||
assert infer_constraints(
|
||||
Instance(fx.gvi, [UnpackType(fx.ts)]), Instance(fx.gvi, [fx.a, fx.b]), SUBTYPE_OF
|
||||
) == [
|
||||
Constraint(type_var=fx.ts, op=SUBTYPE_OF, target=TupleType([fx.a, fx.b], fx.std_tuple))
|
||||
]
|
||||
|
||||
def test_basic_type_var_tuple(self) -> None:
|
||||
fx = self.fx
|
||||
assert infer_constraints(
|
||||
Instance(fx.gvi, [UnpackType(fx.ts)]), Instance(fx.gvi, [fx.a, fx.b]), SUPERTYPE_OF
|
||||
) == [
|
||||
Constraint(
|
||||
type_var=fx.ts, op=SUPERTYPE_OF, target=TupleType([fx.a, fx.b], fx.std_tuple)
|
||||
)
|
||||
]
|
||||
|
||||
def test_type_var_tuple_with_prefix_and_suffix(self) -> None:
|
||||
fx = self.fx
|
||||
assert set(
|
||||
infer_constraints(
|
||||
Instance(fx.gv2i, [fx.t, UnpackType(fx.ts), fx.s]),
|
||||
Instance(fx.gv2i, [fx.a, fx.b, fx.c, fx.d]),
|
||||
SUPERTYPE_OF,
|
||||
)
|
||||
) == {
|
||||
Constraint(type_var=fx.t, op=SUPERTYPE_OF, target=fx.a),
|
||||
Constraint(
|
||||
type_var=fx.ts, op=SUPERTYPE_OF, target=TupleType([fx.b, fx.c], fx.std_tuple)
|
||||
),
|
||||
Constraint(type_var=fx.s, op=SUPERTYPE_OF, target=fx.d),
|
||||
}
|
||||
|
||||
def test_unpack_homogenous_tuple(self) -> None:
|
||||
fx = self.fx
|
||||
assert set(
|
||||
infer_constraints(
|
||||
Instance(fx.gvi, [UnpackType(Instance(fx.std_tuplei, [fx.t]))]),
|
||||
Instance(fx.gvi, [fx.a, fx.b]),
|
||||
SUPERTYPE_OF,
|
||||
)
|
||||
) == {
|
||||
Constraint(type_var=fx.t, op=SUPERTYPE_OF, target=fx.a),
|
||||
Constraint(type_var=fx.t, op=SUPERTYPE_OF, target=fx.b),
|
||||
}
|
||||
|
||||
def test_unpack_homogenous_tuple_with_prefix_and_suffix(self) -> None:
|
||||
fx = self.fx
|
||||
assert set(
|
||||
infer_constraints(
|
||||
Instance(fx.gv2i, [fx.t, UnpackType(Instance(fx.std_tuplei, [fx.s])), fx.u]),
|
||||
Instance(fx.gv2i, [fx.a, fx.b, fx.c, fx.d]),
|
||||
SUPERTYPE_OF,
|
||||
)
|
||||
) == {
|
||||
Constraint(type_var=fx.t, op=SUPERTYPE_OF, target=fx.a),
|
||||
Constraint(type_var=fx.s, op=SUPERTYPE_OF, target=fx.b),
|
||||
Constraint(type_var=fx.s, op=SUPERTYPE_OF, target=fx.c),
|
||||
Constraint(type_var=fx.u, op=SUPERTYPE_OF, target=fx.d),
|
||||
}
|
||||
|
||||
def test_unpack_tuple(self) -> None:
|
||||
fx = self.fx
|
||||
assert set(
|
||||
infer_constraints(
|
||||
Instance(
|
||||
fx.gvi,
|
||||
[
|
||||
UnpackType(
|
||||
TupleType([fx.t, fx.s], fallback=Instance(fx.std_tuplei, [fx.o]))
|
||||
)
|
||||
],
|
||||
),
|
||||
Instance(fx.gvi, [fx.a, fx.b]),
|
||||
SUPERTYPE_OF,
|
||||
)
|
||||
) == {
|
||||
Constraint(type_var=fx.t, op=SUPERTYPE_OF, target=fx.a),
|
||||
Constraint(type_var=fx.s, op=SUPERTYPE_OF, target=fx.b),
|
||||
}
|
||||
|
||||
def test_unpack_with_prefix_and_suffix(self) -> None:
|
||||
fx = self.fx
|
||||
assert set(
|
||||
infer_constraints(
|
||||
Instance(
|
||||
fx.gv2i,
|
||||
[
|
||||
fx.u,
|
||||
UnpackType(
|
||||
TupleType([fx.t, fx.s], fallback=Instance(fx.std_tuplei, [fx.o]))
|
||||
),
|
||||
fx.u,
|
||||
],
|
||||
),
|
||||
Instance(fx.gv2i, [fx.a, fx.b, fx.c, fx.d]),
|
||||
SUPERTYPE_OF,
|
||||
)
|
||||
) == {
|
||||
Constraint(type_var=fx.u, op=SUPERTYPE_OF, target=fx.a),
|
||||
Constraint(type_var=fx.t, op=SUPERTYPE_OF, target=fx.b),
|
||||
Constraint(type_var=fx.s, op=SUPERTYPE_OF, target=fx.c),
|
||||
Constraint(type_var=fx.u, op=SUPERTYPE_OF, target=fx.d),
|
||||
}
|
||||
|
||||
def test_unpack_tuple_length_non_match(self) -> None:
|
||||
fx = self.fx
|
||||
assert set(
|
||||
infer_constraints(
|
||||
Instance(
|
||||
fx.gv2i,
|
||||
[
|
||||
fx.u,
|
||||
UnpackType(
|
||||
TupleType([fx.t, fx.s], fallback=Instance(fx.std_tuplei, [fx.o]))
|
||||
),
|
||||
fx.u,
|
||||
],
|
||||
),
|
||||
Instance(fx.gv2i, [fx.a, fx.b, fx.d]),
|
||||
SUPERTYPE_OF,
|
||||
)
|
||||
# We still get constraints on the prefix/suffix in this case.
|
||||
) == {
|
||||
Constraint(type_var=fx.u, op=SUPERTYPE_OF, target=fx.a),
|
||||
Constraint(type_var=fx.u, op=SUPERTYPE_OF, target=fx.d),
|
||||
}
|
||||
|
||||
def test_var_length_tuple_with_fixed_length_tuple(self) -> None:
|
||||
fx = self.fx
|
||||
assert not infer_constraints(
|
||||
TupleType([fx.t, fx.s], fallback=Instance(fx.std_tuplei, [fx.o])),
|
||||
Instance(fx.std_tuplei, [fx.a]),
|
||||
SUPERTYPE_OF,
|
||||
)
|
||||
132
venv/lib/python3.12/site-packages/mypy/test/testdaemon.py
Normal file
132
venv/lib/python3.12/site-packages/mypy/test/testdaemon.py
Normal file
@@ -0,0 +1,132 @@
|
||||
"""End-to-end test cases for the daemon (dmypy).
|
||||
|
||||
These are special because they run multiple shell commands.
|
||||
|
||||
This also includes some unit tests.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
import subprocess
|
||||
import sys
|
||||
import tempfile
|
||||
import unittest
|
||||
|
||||
from mypy.dmypy_server import filter_out_missing_top_level_packages
|
||||
from mypy.fscache import FileSystemCache
|
||||
from mypy.modulefinder import SearchPaths
|
||||
from mypy.test.config import PREFIX, test_temp_dir
|
||||
from mypy.test.data import DataDrivenTestCase, DataSuite
|
||||
from mypy.test.helpers import assert_string_arrays_equal, normalize_error_messages
|
||||
|
||||
# Files containing test cases descriptions.
|
||||
daemon_files = ["daemon.test"]
|
||||
|
||||
|
||||
class DaemonSuite(DataSuite):
|
||||
files = daemon_files
|
||||
|
||||
def run_case(self, testcase: DataDrivenTestCase) -> None:
|
||||
try:
|
||||
test_daemon(testcase)
|
||||
finally:
|
||||
# Kill the daemon if it's still running.
|
||||
run_cmd("dmypy kill")
|
||||
|
||||
|
||||
def test_daemon(testcase: DataDrivenTestCase) -> None:
|
||||
assert testcase.old_cwd is not None, "test was not properly set up"
|
||||
for i, step in enumerate(parse_script(testcase.input)):
|
||||
cmd = step[0]
|
||||
expected_lines = step[1:]
|
||||
assert cmd.startswith("$")
|
||||
cmd = cmd[1:].strip()
|
||||
cmd = cmd.replace("{python}", sys.executable)
|
||||
sts, output = run_cmd(cmd)
|
||||
output_lines = output.splitlines()
|
||||
output_lines = normalize_error_messages(output_lines)
|
||||
if sts:
|
||||
output_lines.append("== Return code: %d" % sts)
|
||||
assert_string_arrays_equal(
|
||||
expected_lines,
|
||||
output_lines,
|
||||
"Command %d (%s) did not give expected output" % (i + 1, cmd),
|
||||
)
|
||||
|
||||
|
||||
def parse_script(input: list[str]) -> list[list[str]]:
|
||||
"""Parse testcase.input into steps.
|
||||
|
||||
Each command starts with a line starting with '$'.
|
||||
The first line (less '$') is sent to the shell.
|
||||
The remaining lines are expected output.
|
||||
"""
|
||||
steps = []
|
||||
step: list[str] = []
|
||||
for line in input:
|
||||
if line.startswith("$"):
|
||||
if step:
|
||||
assert step[0].startswith("$")
|
||||
steps.append(step)
|
||||
step = []
|
||||
step.append(line)
|
||||
if step:
|
||||
steps.append(step)
|
||||
return steps
|
||||
|
||||
|
||||
def run_cmd(input: str) -> tuple[int, str]:
|
||||
if input[1:].startswith("mypy run --") and "--show-error-codes" not in input:
|
||||
input += " --hide-error-codes"
|
||||
if input.startswith("dmypy "):
|
||||
input = sys.executable + " -m mypy." + input
|
||||
if input.startswith("mypy "):
|
||||
input = sys.executable + " -m" + input
|
||||
env = os.environ.copy()
|
||||
env["PYTHONPATH"] = PREFIX
|
||||
try:
|
||||
output = subprocess.check_output(
|
||||
input, shell=True, stderr=subprocess.STDOUT, text=True, cwd=test_temp_dir, env=env
|
||||
)
|
||||
return 0, output
|
||||
except subprocess.CalledProcessError as err:
|
||||
return err.returncode, err.output
|
||||
|
||||
|
||||
class DaemonUtilitySuite(unittest.TestCase):
|
||||
"""Unit tests for helpers"""
|
||||
|
||||
def test_filter_out_missing_top_level_packages(self) -> None:
|
||||
with tempfile.TemporaryDirectory() as td:
|
||||
self.make_file(td, "base/a/")
|
||||
self.make_file(td, "base/b.py")
|
||||
self.make_file(td, "base/c.pyi")
|
||||
self.make_file(td, "base/missing.txt")
|
||||
self.make_file(td, "typeshed/d.pyi")
|
||||
self.make_file(td, "typeshed/@python2/e") # outdated
|
||||
self.make_file(td, "pkg1/f-stubs")
|
||||
self.make_file(td, "pkg2/g-python2-stubs") # outdated
|
||||
self.make_file(td, "mpath/sub/long_name/")
|
||||
|
||||
def makepath(p: str) -> str:
|
||||
return os.path.join(td, p)
|
||||
|
||||
search = SearchPaths(
|
||||
python_path=(makepath("base"),),
|
||||
mypy_path=(makepath("mpath/sub"),),
|
||||
package_path=(makepath("pkg1"), makepath("pkg2")),
|
||||
typeshed_path=(makepath("typeshed"),),
|
||||
)
|
||||
fscache = FileSystemCache()
|
||||
res = filter_out_missing_top_level_packages(
|
||||
{"a", "b", "c", "d", "e", "f", "g", "long_name", "ff", "missing"}, search, fscache
|
||||
)
|
||||
assert res == {"a", "b", "c", "d", "f", "long_name"}
|
||||
|
||||
def make_file(self, base: str, path: str) -> None:
|
||||
fullpath = os.path.join(base, path)
|
||||
os.makedirs(os.path.dirname(fullpath), exist_ok=True)
|
||||
if not path.endswith("/"):
|
||||
with open(fullpath, "w") as f:
|
||||
f.write("# test file")
|
||||
77
venv/lib/python3.12/site-packages/mypy/test/testdeps.py
Normal file
77
venv/lib/python3.12/site-packages/mypy/test/testdeps.py
Normal file
@@ -0,0 +1,77 @@
|
||||
"""Test cases for generating node-level dependencies (for fine-grained incremental checking)"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
from collections import defaultdict
|
||||
|
||||
from mypy import build
|
||||
from mypy.errors import CompileError
|
||||
from mypy.modulefinder import BuildSource
|
||||
from mypy.nodes import Expression, MypyFile
|
||||
from mypy.options import Options
|
||||
from mypy.server.deps import get_dependencies
|
||||
from mypy.test.config import test_temp_dir
|
||||
from mypy.test.data import DataDrivenTestCase, DataSuite
|
||||
from mypy.test.helpers import assert_string_arrays_equal, find_test_files, parse_options
|
||||
from mypy.types import Type
|
||||
from mypy.typestate import type_state
|
||||
|
||||
# Only dependencies in these modules are dumped
|
||||
dumped_modules = ["__main__", "pkg", "pkg.mod"]
|
||||
|
||||
|
||||
class GetDependenciesSuite(DataSuite):
|
||||
files = find_test_files(pattern="deps*.test")
|
||||
|
||||
def run_case(self, testcase: DataDrivenTestCase) -> None:
|
||||
src = "\n".join(testcase.input)
|
||||
dump_all = "# __dump_all__" in src
|
||||
options = parse_options(src, testcase, incremental_step=1)
|
||||
options.use_builtins_fixtures = True
|
||||
options.show_traceback = True
|
||||
options.cache_dir = os.devnull
|
||||
options.export_types = True
|
||||
options.preserve_asts = True
|
||||
options.allow_empty_bodies = True
|
||||
messages, files, type_map = self.build(src, options)
|
||||
a = messages
|
||||
if files is None or type_map is None:
|
||||
if not a:
|
||||
a = ["Unknown compile error (likely syntax error in test case or fixture)"]
|
||||
else:
|
||||
deps: defaultdict[str, set[str]] = defaultdict(set)
|
||||
for module, file in files.items():
|
||||
if (module in dumped_modules or dump_all) and (module in testcase.test_modules):
|
||||
new_deps = get_dependencies(file, type_map, options.python_version, options)
|
||||
for source in new_deps:
|
||||
deps[source].update(new_deps[source])
|
||||
|
||||
type_state.add_all_protocol_deps(deps)
|
||||
|
||||
for source, targets in sorted(deps.items()):
|
||||
if source.startswith(("<enum", "<typing", "<mypy", "<_typeshed.")):
|
||||
# Remove noise.
|
||||
continue
|
||||
line = f"{source} -> {', '.join(sorted(targets))}"
|
||||
# Clean up output a bit
|
||||
line = line.replace("__main__", "m")
|
||||
a.append(line)
|
||||
|
||||
assert_string_arrays_equal(
|
||||
testcase.output, a, f"Invalid output ({testcase.file}, line {testcase.line})"
|
||||
)
|
||||
|
||||
def build(
|
||||
self, source: str, options: Options
|
||||
) -> tuple[list[str], dict[str, MypyFile] | None, dict[Expression, Type] | None]:
|
||||
try:
|
||||
result = build.build(
|
||||
sources=[BuildSource("main", None, source)],
|
||||
options=options,
|
||||
alt_lib_path=test_temp_dir,
|
||||
)
|
||||
except CompileError as e:
|
||||
# TODO: Should perhaps not return None here.
|
||||
return e.messages, None, None
|
||||
return result.errors, result.files, result.types
|
||||
67
venv/lib/python3.12/site-packages/mypy/test/testdiff.py
Normal file
67
venv/lib/python3.12/site-packages/mypy/test/testdiff.py
Normal file
@@ -0,0 +1,67 @@
|
||||
"""Test cases for AST diff (used for fine-grained incremental checking)"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
|
||||
from mypy import build
|
||||
from mypy.defaults import PYTHON3_VERSION
|
||||
from mypy.errors import CompileError
|
||||
from mypy.modulefinder import BuildSource
|
||||
from mypy.nodes import MypyFile
|
||||
from mypy.options import Options
|
||||
from mypy.server.astdiff import compare_symbol_table_snapshots, snapshot_symbol_table
|
||||
from mypy.test.config import test_temp_dir
|
||||
from mypy.test.data import DataDrivenTestCase, DataSuite
|
||||
from mypy.test.helpers import assert_string_arrays_equal, parse_options
|
||||
|
||||
|
||||
class ASTDiffSuite(DataSuite):
|
||||
files = ["diff.test"]
|
||||
|
||||
def run_case(self, testcase: DataDrivenTestCase) -> None:
|
||||
first_src = "\n".join(testcase.input)
|
||||
files_dict = dict(testcase.files)
|
||||
second_src = files_dict["tmp/next.py"]
|
||||
options = parse_options(first_src, testcase, 1)
|
||||
|
||||
messages1, files1 = self.build(first_src, options)
|
||||
messages2, files2 = self.build(second_src, options)
|
||||
|
||||
a = []
|
||||
if messages1:
|
||||
a.extend(messages1)
|
||||
if messages2:
|
||||
a.append("== next ==")
|
||||
a.extend(messages2)
|
||||
|
||||
assert (
|
||||
files1 is not None and files2 is not None
|
||||
), "cases where CompileError occurred should not be run"
|
||||
prefix = "__main__"
|
||||
snapshot1 = snapshot_symbol_table(prefix, files1["__main__"].names)
|
||||
snapshot2 = snapshot_symbol_table(prefix, files2["__main__"].names)
|
||||
diff = compare_symbol_table_snapshots(prefix, snapshot1, snapshot2)
|
||||
for trigger in sorted(diff):
|
||||
a.append(trigger)
|
||||
|
||||
assert_string_arrays_equal(
|
||||
testcase.output, a, f"Invalid output ({testcase.file}, line {testcase.line})"
|
||||
)
|
||||
|
||||
def build(self, source: str, options: Options) -> tuple[list[str], dict[str, MypyFile] | None]:
|
||||
options.use_builtins_fixtures = True
|
||||
options.show_traceback = True
|
||||
options.cache_dir = os.devnull
|
||||
options.python_version = PYTHON3_VERSION
|
||||
options.allow_empty_bodies = True
|
||||
try:
|
||||
result = build.build(
|
||||
sources=[BuildSource("main", None, source)],
|
||||
options=options,
|
||||
alt_lib_path=test_temp_dir,
|
||||
)
|
||||
except CompileError as e:
|
||||
# TODO: Is it okay to return None?
|
||||
return e.messages, None
|
||||
return result.errors, result.files
|
||||
@@ -0,0 +1,45 @@
|
||||
"""Tests for mypy incremental error output."""
|
||||
from __future__ import annotations
|
||||
|
||||
from mypy import build
|
||||
from mypy.errors import CompileError
|
||||
from mypy.modulefinder import BuildSource
|
||||
from mypy.options import Options
|
||||
from mypy.test.data import DataDrivenTestCase, DataSuite
|
||||
from mypy.test.helpers import assert_string_arrays_equal
|
||||
|
||||
|
||||
class ErrorStreamSuite(DataSuite):
|
||||
required_out_section = True
|
||||
base_path = "."
|
||||
files = ["errorstream.test"]
|
||||
|
||||
def run_case(self, testcase: DataDrivenTestCase) -> None:
|
||||
test_error_stream(testcase)
|
||||
|
||||
|
||||
def test_error_stream(testcase: DataDrivenTestCase) -> None:
|
||||
"""Perform a single error streaming test case.
|
||||
|
||||
The argument contains the description of the test case.
|
||||
"""
|
||||
options = Options()
|
||||
options.show_traceback = True
|
||||
options.hide_error_codes = True
|
||||
|
||||
logged_messages: list[str] = []
|
||||
|
||||
def flush_errors(msgs: list[str], serious: bool) -> None:
|
||||
if msgs:
|
||||
logged_messages.append("==== Errors flushed ====")
|
||||
logged_messages.extend(msgs)
|
||||
|
||||
sources = [BuildSource("main", "__main__", "\n".join(testcase.input))]
|
||||
try:
|
||||
build.build(sources=sources, options=options, flush_errors=flush_errors)
|
||||
except CompileError as e:
|
||||
assert e.messages == []
|
||||
|
||||
assert_string_arrays_equal(
|
||||
testcase.output, logged_messages, f"Invalid output ({testcase.file}, line {testcase.line})"
|
||||
)
|
||||
438
venv/lib/python3.12/site-packages/mypy/test/testfinegrained.py
Normal file
438
venv/lib/python3.12/site-packages/mypy/test/testfinegrained.py
Normal file
@@ -0,0 +1,438 @@
|
||||
"""Test cases for fine-grained incremental checking.
|
||||
|
||||
Each test cases runs a batch build followed by one or more fine-grained
|
||||
incremental steps. We verify that each step produces the expected output.
|
||||
|
||||
See the comment at the top of test-data/unit/fine-grained.test for more
|
||||
information.
|
||||
|
||||
N.B.: Unlike most of the other test suites, testfinegrained does not
|
||||
rely on an alt_lib_path for finding source files. This means that they
|
||||
can test interactions with the lib_path that is built implicitly based
|
||||
on specified sources.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
import re
|
||||
import unittest
|
||||
from typing import Any
|
||||
|
||||
import pytest
|
||||
|
||||
from mypy import build
|
||||
from mypy.config_parser import parse_config_file
|
||||
from mypy.dmypy_server import Server
|
||||
from mypy.dmypy_util import DEFAULT_STATUS_FILE
|
||||
from mypy.errors import CompileError
|
||||
from mypy.find_sources import create_source_list
|
||||
from mypy.modulefinder import BuildSource
|
||||
from mypy.options import TYPE_VAR_TUPLE, UNPACK, Options
|
||||
from mypy.server.mergecheck import check_consistency
|
||||
from mypy.server.update import sort_messages_preserving_file_order
|
||||
from mypy.test.config import test_temp_dir
|
||||
from mypy.test.data import DataDrivenTestCase, DataSuite, DeleteFile, UpdateFile
|
||||
from mypy.test.helpers import (
|
||||
assert_module_equivalence,
|
||||
assert_string_arrays_equal,
|
||||
assert_target_equivalence,
|
||||
find_test_files,
|
||||
parse_options,
|
||||
perform_file_operations,
|
||||
)
|
||||
|
||||
# Set to True to perform (somewhat expensive) checks for duplicate AST nodes after merge
|
||||
CHECK_CONSISTENCY = False
|
||||
|
||||
|
||||
class FineGrainedSuite(DataSuite):
|
||||
files = find_test_files(
|
||||
pattern="fine-grained*.test", exclude=["fine-grained-cache-incremental.test"]
|
||||
)
|
||||
|
||||
# Whether to use the fine-grained cache in the testing. This is overridden
|
||||
# by a trivial subclass to produce a suite that uses the cache.
|
||||
use_cache = False
|
||||
|
||||
def should_skip(self, testcase: DataDrivenTestCase) -> bool:
|
||||
# Decide whether to skip the test. This could have been structured
|
||||
# as a filter() classmethod also, but we want the tests reported
|
||||
# as skipped, not just elided.
|
||||
if self.use_cache:
|
||||
if testcase.only_when == "-only_when_nocache":
|
||||
return True
|
||||
# TODO: In caching mode we currently don't well support
|
||||
# starting from cached states with errors in them.
|
||||
if testcase.output and testcase.output[0] != "==":
|
||||
return True
|
||||
else:
|
||||
if testcase.only_when == "-only_when_cache":
|
||||
return True
|
||||
return False
|
||||
|
||||
def run_case(self, testcase: DataDrivenTestCase) -> None:
|
||||
if self.should_skip(testcase):
|
||||
pytest.skip()
|
||||
return
|
||||
|
||||
main_src = "\n".join(testcase.input)
|
||||
main_path = os.path.join(test_temp_dir, "main")
|
||||
with open(main_path, "w", encoding="utf8") as f:
|
||||
f.write(main_src)
|
||||
|
||||
options = self.get_options(main_src, testcase, build_cache=False)
|
||||
build_options = self.get_options(main_src, testcase, build_cache=True)
|
||||
server = Server(options, DEFAULT_STATUS_FILE)
|
||||
|
||||
num_regular_incremental_steps = self.get_build_steps(main_src)
|
||||
step = 1
|
||||
sources = self.parse_sources(main_src, step, options)
|
||||
if step <= num_regular_incremental_steps:
|
||||
messages = self.build(build_options, sources)
|
||||
else:
|
||||
messages = self.run_check(server, sources)
|
||||
|
||||
a = []
|
||||
if messages:
|
||||
a.extend(normalize_messages(messages))
|
||||
|
||||
assert testcase.tmpdir
|
||||
a.extend(self.maybe_suggest(step, server, main_src, testcase.tmpdir.name))
|
||||
a.extend(self.maybe_inspect(step, server, main_src))
|
||||
|
||||
if server.fine_grained_manager:
|
||||
if CHECK_CONSISTENCY:
|
||||
check_consistency(server.fine_grained_manager)
|
||||
|
||||
steps = testcase.find_steps()
|
||||
all_triggered = []
|
||||
|
||||
for operations in steps:
|
||||
step += 1
|
||||
output, triggered = self.perform_step(
|
||||
operations,
|
||||
server,
|
||||
options,
|
||||
build_options,
|
||||
testcase,
|
||||
main_src,
|
||||
step,
|
||||
num_regular_incremental_steps,
|
||||
)
|
||||
a.append("==")
|
||||
a.extend(output)
|
||||
all_triggered.extend(triggered)
|
||||
|
||||
# Normalize paths in test output (for Windows).
|
||||
a = [line.replace("\\", "/") for line in a]
|
||||
|
||||
assert_string_arrays_equal(
|
||||
testcase.output, a, f"Invalid output ({testcase.file}, line {testcase.line})"
|
||||
)
|
||||
|
||||
if testcase.triggered:
|
||||
assert_string_arrays_equal(
|
||||
testcase.triggered,
|
||||
self.format_triggered(all_triggered),
|
||||
f"Invalid active triggers ({testcase.file}, line {testcase.line})",
|
||||
)
|
||||
|
||||
def get_options(self, source: str, testcase: DataDrivenTestCase, build_cache: bool) -> Options:
|
||||
# This handles things like '# flags: --foo'.
|
||||
options = parse_options(source, testcase, incremental_step=1)
|
||||
options.incremental = True
|
||||
options.use_builtins_fixtures = True
|
||||
options.show_traceback = True
|
||||
options.error_summary = False
|
||||
options.fine_grained_incremental = not build_cache
|
||||
options.use_fine_grained_cache = self.use_cache and not build_cache
|
||||
options.cache_fine_grained = self.use_cache
|
||||
options.local_partial_types = True
|
||||
options.enable_incomplete_feature = [TYPE_VAR_TUPLE, UNPACK]
|
||||
# Treat empty bodies safely for these test cases.
|
||||
options.allow_empty_bodies = not testcase.name.endswith("_no_empty")
|
||||
if re.search("flags:.*--follow-imports", source) is None:
|
||||
# Override the default for follow_imports
|
||||
options.follow_imports = "error"
|
||||
|
||||
for name, _ in testcase.files:
|
||||
if "mypy.ini" in name or "pyproject.toml" in name:
|
||||
parse_config_file(options, lambda: None, name)
|
||||
break
|
||||
|
||||
return options
|
||||
|
||||
def run_check(self, server: Server, sources: list[BuildSource]) -> list[str]:
|
||||
response = server.check(sources, export_types=True, is_tty=False, terminal_width=-1)
|
||||
out = response["out"] or response["err"]
|
||||
assert isinstance(out, str)
|
||||
return out.splitlines()
|
||||
|
||||
def build(self, options: Options, sources: list[BuildSource]) -> list[str]:
|
||||
try:
|
||||
result = build.build(sources=sources, options=options)
|
||||
except CompileError as e:
|
||||
return e.messages
|
||||
return result.errors
|
||||
|
||||
def format_triggered(self, triggered: list[list[str]]) -> list[str]:
|
||||
result = []
|
||||
for n, triggers in enumerate(triggered):
|
||||
filtered = [trigger for trigger in triggers if not trigger.endswith("__>")]
|
||||
filtered = sorted(filtered)
|
||||
result.append(("%d: %s" % (n + 2, ", ".join(filtered))).strip())
|
||||
return result
|
||||
|
||||
def get_build_steps(self, program_text: str) -> int:
|
||||
"""Get the number of regular incremental steps to run, from the test source"""
|
||||
if not self.use_cache:
|
||||
return 0
|
||||
m = re.search("# num_build_steps: ([0-9]+)$", program_text, flags=re.MULTILINE)
|
||||
if m is not None:
|
||||
return int(m.group(1))
|
||||
return 1
|
||||
|
||||
def perform_step(
|
||||
self,
|
||||
operations: list[UpdateFile | DeleteFile],
|
||||
server: Server,
|
||||
options: Options,
|
||||
build_options: Options,
|
||||
testcase: DataDrivenTestCase,
|
||||
main_src: str,
|
||||
step: int,
|
||||
num_regular_incremental_steps: int,
|
||||
) -> tuple[list[str], list[list[str]]]:
|
||||
"""Perform one fine-grained incremental build step (after some file updates/deletions).
|
||||
|
||||
Return (mypy output, triggered targets).
|
||||
"""
|
||||
perform_file_operations(operations)
|
||||
sources = self.parse_sources(main_src, step, options)
|
||||
|
||||
if step <= num_regular_incremental_steps:
|
||||
new_messages = self.build(build_options, sources)
|
||||
else:
|
||||
new_messages = self.run_check(server, sources)
|
||||
|
||||
updated: list[str] = []
|
||||
changed: list[str] = []
|
||||
targets: list[str] = []
|
||||
triggered = []
|
||||
if server.fine_grained_manager:
|
||||
if CHECK_CONSISTENCY:
|
||||
check_consistency(server.fine_grained_manager)
|
||||
triggered.append(server.fine_grained_manager.triggered)
|
||||
|
||||
updated = server.fine_grained_manager.updated_modules
|
||||
changed = [mod for mod, file in server.fine_grained_manager.changed_modules]
|
||||
targets = server.fine_grained_manager.processed_targets
|
||||
|
||||
expected_stale = testcase.expected_stale_modules.get(step - 1)
|
||||
if expected_stale is not None:
|
||||
assert_module_equivalence("stale" + str(step - 1), expected_stale, changed)
|
||||
|
||||
expected_rechecked = testcase.expected_rechecked_modules.get(step - 1)
|
||||
if expected_rechecked is not None:
|
||||
assert_module_equivalence("rechecked" + str(step - 1), expected_rechecked, updated)
|
||||
|
||||
expected = testcase.expected_fine_grained_targets.get(step)
|
||||
if expected:
|
||||
assert_target_equivalence("targets" + str(step), expected, targets)
|
||||
|
||||
new_messages = normalize_messages(new_messages)
|
||||
|
||||
a = new_messages
|
||||
assert testcase.tmpdir
|
||||
a.extend(self.maybe_suggest(step, server, main_src, testcase.tmpdir.name))
|
||||
a.extend(self.maybe_inspect(step, server, main_src))
|
||||
|
||||
return a, triggered
|
||||
|
||||
def parse_sources(
|
||||
self, program_text: str, incremental_step: int, options: Options
|
||||
) -> list[BuildSource]:
|
||||
"""Return target BuildSources for a test case.
|
||||
|
||||
Normally, the unit tests will check all files included in the test
|
||||
case. This differs from how testcheck works by default, as dmypy
|
||||
doesn't currently support following imports.
|
||||
|
||||
You can override this behavior and instruct the tests to check
|
||||
multiple modules by using a comment like this in the test case
|
||||
input:
|
||||
|
||||
# cmd: main a.py
|
||||
|
||||
You can also use `# cmdN:` to have a different cmd for incremental
|
||||
step N (2, 3, ...).
|
||||
|
||||
"""
|
||||
m = re.search("# cmd: mypy ([a-zA-Z0-9_./ ]+)$", program_text, flags=re.MULTILINE)
|
||||
regex = f"# cmd{incremental_step}: mypy ([a-zA-Z0-9_./ ]+)$"
|
||||
alt_m = re.search(regex, program_text, flags=re.MULTILINE)
|
||||
if alt_m is not None:
|
||||
# Optionally return a different command if in a later step
|
||||
# of incremental mode, otherwise default to reusing the
|
||||
# original cmd.
|
||||
m = alt_m
|
||||
|
||||
if m:
|
||||
# The test case wants to use a non-default set of files.
|
||||
paths = [os.path.join(test_temp_dir, path) for path in m.group(1).strip().split()]
|
||||
return create_source_list(paths, options)
|
||||
else:
|
||||
base = BuildSource(os.path.join(test_temp_dir, "main"), "__main__", None)
|
||||
# Use expand_dir instead of create_source_list to avoid complaints
|
||||
# when there aren't any .py files in an increment
|
||||
return [base] + create_source_list([test_temp_dir], options, allow_empty_dir=True)
|
||||
|
||||
def maybe_suggest(self, step: int, server: Server, src: str, tmp_dir: str) -> list[str]:
|
||||
output: list[str] = []
|
||||
targets = self.get_suggest(src, step)
|
||||
for flags, target in targets:
|
||||
json = "--json" in flags
|
||||
callsites = "--callsites" in flags
|
||||
no_any = "--no-any" in flags
|
||||
no_errors = "--no-errors" in flags
|
||||
m = re.match("--flex-any=([0-9.]+)", flags)
|
||||
flex_any = float(m.group(1)) if m else None
|
||||
m = re.match(r"--use-fixme=(\w+)", flags)
|
||||
use_fixme = m.group(1) if m else None
|
||||
m = re.match("--max-guesses=([0-9]+)", flags)
|
||||
max_guesses = int(m.group(1)) if m else None
|
||||
res: dict[str, Any] = server.cmd_suggest(
|
||||
target.strip(),
|
||||
json=json,
|
||||
no_any=no_any,
|
||||
no_errors=no_errors,
|
||||
flex_any=flex_any,
|
||||
use_fixme=use_fixme,
|
||||
callsites=callsites,
|
||||
max_guesses=max_guesses,
|
||||
)
|
||||
val = res["error"] if "error" in res else res["out"] + res["err"]
|
||||
if json:
|
||||
# JSON contains already escaped \ on Windows, so requires a bit of care.
|
||||
val = val.replace("\\\\", "\\")
|
||||
val = val.replace(os.path.realpath(tmp_dir) + os.path.sep, "")
|
||||
val = val.replace(os.path.abspath(tmp_dir) + os.path.sep, "")
|
||||
output.extend(val.strip().split("\n"))
|
||||
return normalize_messages(output)
|
||||
|
||||
def maybe_inspect(self, step: int, server: Server, src: str) -> list[str]:
|
||||
output: list[str] = []
|
||||
targets = self.get_inspect(src, step)
|
||||
for flags, location in targets:
|
||||
m = re.match(r"--show=(\w+)", flags)
|
||||
show = m.group(1) if m else "type"
|
||||
verbosity = 0
|
||||
if "-v" in flags:
|
||||
verbosity = 1
|
||||
if "-vv" in flags:
|
||||
verbosity = 2
|
||||
m = re.match(r"--limit=([0-9]+)", flags)
|
||||
limit = int(m.group(1)) if m else 0
|
||||
include_span = "--include-span" in flags
|
||||
include_kind = "--include-kind" in flags
|
||||
include_object_attrs = "--include-object-attrs" in flags
|
||||
union_attrs = "--union-attrs" in flags
|
||||
force_reload = "--force-reload" in flags
|
||||
res: dict[str, Any] = server.cmd_inspect(
|
||||
show,
|
||||
location,
|
||||
verbosity=verbosity,
|
||||
limit=limit,
|
||||
include_span=include_span,
|
||||
include_kind=include_kind,
|
||||
include_object_attrs=include_object_attrs,
|
||||
union_attrs=union_attrs,
|
||||
force_reload=force_reload,
|
||||
)
|
||||
val = res["error"] if "error" in res else res["out"] + res["err"]
|
||||
output.extend(val.strip().split("\n"))
|
||||
return normalize_messages(output)
|
||||
|
||||
def get_suggest(self, program_text: str, incremental_step: int) -> list[tuple[str, str]]:
|
||||
step_bit = "1?" if incremental_step == 1 else str(incremental_step)
|
||||
regex = f"# suggest{step_bit}: (--[a-zA-Z0-9_\\-./=?^ ]+ )*([a-zA-Z0-9_.:/?^ ]+)$"
|
||||
m = re.findall(regex, program_text, flags=re.MULTILINE)
|
||||
return m
|
||||
|
||||
def get_inspect(self, program_text: str, incremental_step: int) -> list[tuple[str, str]]:
|
||||
step_bit = "1?" if incremental_step == 1 else str(incremental_step)
|
||||
regex = f"# inspect{step_bit}: (--[a-zA-Z0-9_\\-=?^ ]+ )*([a-zA-Z0-9_.:/?^ ]+)$"
|
||||
m = re.findall(regex, program_text, flags=re.MULTILINE)
|
||||
return m
|
||||
|
||||
|
||||
def normalize_messages(messages: list[str]) -> list[str]:
    """Strip a leading "tmp<sep>" working-directory prefix from each message."""
    prefix_re = re.compile("^tmp" + re.escape(os.sep))
    return [prefix_re.sub("", line) for line in messages]
|
||||
|
||||
|
||||
class TestMessageSorting(unittest.TestCase):
    """Unit tests for sort_messages_preserving_file_order.

    The sorter reorders new messages so that files appear in the same order
    as in a previous run, while keeping multi-line messages intact.
    """

    def test_simple_sorting(self) -> None:
        # old_msgs lists foo/y.py before x.py, so new messages must follow
        # that file order regardless of their input order.
        msgs = ['x.py:1: error: "int" not callable', 'foo/y.py:123: note: "X" not defined']
        old_msgs = ['foo/y.py:12: note: "Y" not defined', 'x.py:8: error: "str" not callable']
        assert sort_messages_preserving_file_order(msgs, old_msgs) == list(reversed(msgs))
        assert sort_messages_preserving_file_order(list(reversed(msgs)), old_msgs) == list(
            reversed(msgs)
        )

    def test_long_form_sorting(self) -> None:
        # Multi-line errors should be sorted together and not split.
        msg1 = [
            'x.py:1: error: "int" not callable',
            "and message continues (x: y)",
            " 1()",
            " ^~~",
        ]
        msg2 = [
            'foo/y.py: In function "f":',
            'foo/y.py:123: note: "X" not defined',
            "and again message continues",
        ]
        old_msgs = ['foo/y.py:12: note: "Y" not defined', 'x.py:8: error: "str" not callable']
        assert sort_messages_preserving_file_order(msg1 + msg2, old_msgs) == msg2 + msg1
        assert sort_messages_preserving_file_order(msg2 + msg1, old_msgs) == msg2 + msg1

    def test_mypy_error_prefix(self) -> None:
        # Some errors don't have a file and start with "mypy: ". These
        # shouldn't be sorted together with file-specific errors.
        msg1 = 'x.py:1: error: "int" not callable'
        msg2 = 'foo/y:123: note: "X" not defined'
        msg3 = "mypy: Error not associated with a file"
        old_msgs = [
            "mypy: Something wrong",
            'foo/y:12: note: "Y" not defined',
            'x.py:8: error: "str" not callable',
        ]
        # The file-less message stays last in both orderings.
        assert sort_messages_preserving_file_order([msg1, msg2, msg3], old_msgs) == [
            msg2,
            msg1,
            msg3,
        ]
        assert sort_messages_preserving_file_order([msg3, msg2, msg1], old_msgs) == [
            msg2,
            msg1,
            msg3,
        ]

    def test_new_file_at_the_end(self) -> None:
        # Messages for files not seen in the previous run go to the end,
        # keeping their relative input order.
        msg1 = 'x.py:1: error: "int" not callable'
        msg2 = 'foo/y.py:123: note: "X" not defined'
        new1 = "ab.py:3: error: Problem: error"
        new2 = "aaa:3: error: Bad"
        old_msgs = ['foo/y.py:12: note: "Y" not defined', 'x.py:8: error: "str" not callable']
        assert sort_messages_preserving_file_order([msg1, msg2, new1], old_msgs) == [
            msg2,
            msg1,
            new1,
        ]
        assert sort_messages_preserving_file_order([new1, msg1, msg2, new2], old_msgs) == [
            msg2,
            msg1,
            new1,
            new2,
        ]
|
||||
@@ -0,0 +1,18 @@
|
||||
"""Tests for fine-grained incremental checking using the cache.
|
||||
|
||||
All of the real code for this lives in testfinegrained.py.
|
||||
"""
|
||||
|
||||
# We can't "import FineGrainedSuite from ..." because that will cause pytest
|
||||
# to collect the non-caching tests when running this file.
|
||||
from __future__ import annotations
|
||||
|
||||
import mypy.test.testfinegrained
|
||||
|
||||
|
||||
class FineGrainedCacheSuite(mypy.test.testfinegrained.FineGrainedSuite):
    """Re-run the fine-grained suite with the incremental cache enabled.

    All real test logic lives in the base class; this subclass only flips
    the cache switch and adds the cache-specific test-data file.
    """

    # Enable cache usage for every inherited test case.
    use_cache = True
    # Distinguish these runs from the non-cached ones in reported test names.
    test_name_suffix = "_cached"
    files = mypy.test.testfinegrained.FineGrainedSuite.files + [
        "fine-grained-cache-incremental.test"
    ]
|
||||
85
venv/lib/python3.12/site-packages/mypy/test/testformatter.py
Normal file
85
venv/lib/python3.12/site-packages/mypy/test/testformatter.py
Normal file
@@ -0,0 +1,85 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from unittest import TestCase, main
|
||||
|
||||
from mypy.util import split_words, trim_source_line
|
||||
|
||||
|
||||
class FancyErrorFormattingTestCases(TestCase):
    """Unit tests for the helpers behind mypy's pretty error output:
    source-line trimming and message word splitting (mypy.util)."""

    def test_trim_source(self) -> None:
        # Each result is (trimmed_line, offset).  NOTE(review): the offset is
        # presumably added to the error column to re-point the caret after
        # trimming -- confirm against mypy.util.trim_source_line.
        assert trim_source_line("0123456789abcdef", max_len=16, col=5, min_width=2) == (
            "0123456789abcdef",
            0,
        )

        # Locations near start.
        assert trim_source_line("0123456789abcdef", max_len=7, col=0, min_width=2) == (
            "0123456...",
            0,
        )
        assert trim_source_line("0123456789abcdef", max_len=7, col=4, min_width=2) == (
            "0123456...",
            0,
        )

        # Middle locations.
        assert trim_source_line("0123456789abcdef", max_len=7, col=5, min_width=2) == (
            "...1234567...",
            -2,
        )
        assert trim_source_line("0123456789abcdef", max_len=7, col=6, min_width=2) == (
            "...2345678...",
            -1,
        )
        assert trim_source_line("0123456789abcdef", max_len=7, col=8, min_width=2) == (
            "...456789a...",
            1,
        )

        # Locations near the end.
        assert trim_source_line("0123456789abcdef", max_len=7, col=11, min_width=2) == (
            "...789abcd...",
            4,
        )
        assert trim_source_line("0123456789abcdef", max_len=7, col=13, min_width=2) == (
            "...9abcdef",
            6,
        )
        assert trim_source_line("0123456789abcdef", max_len=7, col=15, min_width=2) == (
            "...9abcdef",
            6,
        )

    def test_split_words(self) -> None:
        # Quoted type names (with embedded spaces) must stay a single "word"
        # so line wrapping never splits them.
        assert split_words("Simple message") == ["Simple", "message"]
        assert split_words('Message with "Some[Long, Types]"' " in it") == [
            "Message",
            "with",
            '"Some[Long, Types]"',
            "in",
            "it",
        ]
        assert split_words('Message with "Some[Long, Types]"' " and [error-code]") == [
            "Message",
            "with",
            '"Some[Long, Types]"',
            "and",
            "[error-code]",
        ]
        assert split_words('"Type[Stands, First]" then words') == [
            '"Type[Stands, First]"',
            "then",
            "words",
        ]
        assert split_words('First words "Then[Stands, Type]"') == [
            "First",
            "words",
            '"Then[Stands, Type]"',
        ]
        assert split_words('"Type[Only, Here]"') == ['"Type[Only, Here]"']
        assert split_words("OneWord") == ["OneWord"]
        # Two separators in a row yield empty "words" -- documents current behavior.
        assert split_words(" ") == ["", ""]
|
||||
|
||||
|
||||
# Allow running this test module directly via unittest's CLI runner.
if __name__ == "__main__":
    main()
|
||||
101
venv/lib/python3.12/site-packages/mypy/test/testfscache.py
Normal file
101
venv/lib/python3.12/site-packages/mypy/test/testfscache.py
Normal file
@@ -0,0 +1,101 @@
|
||||
"""Unit tests for file system cache."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
import shutil
|
||||
import sys
|
||||
import tempfile
|
||||
import unittest
|
||||
|
||||
from mypy.fscache import FileSystemCache
|
||||
|
||||
|
||||
class TestFileSystemCache(unittest.TestCase):
    """Tests for FileSystemCache.isfile_case.

    isfile_case should report True only for existing files whose on-disk
    name casing matches the query (for components under the given prefix).
    Each scenario is run twice to cover both the cold and the cached path.
    """

    def setUp(self) -> None:
        # Work inside a fresh temp dir so relative paths are isolated.
        self.tempdir = tempfile.mkdtemp()
        self.oldcwd = os.getcwd()
        os.chdir(self.tempdir)
        self.fscache = FileSystemCache()

    def tearDown(self) -> None:
        os.chdir(self.oldcwd)
        shutil.rmtree(self.tempdir)

    def test_isfile_case_1(self) -> None:
        # Exact-case queries first, then wrong-case queries.
        self.make_file("bar.py")
        self.make_file("pkg/sub_package/__init__.py")
        self.make_file("pkg/sub_package/foo.py")
        # Run twice to test both cached and non-cached code paths.
        for i in range(2):
            assert self.isfile_case("bar.py")
            assert self.isfile_case("pkg/sub_package/__init__.py")
            assert self.isfile_case("pkg/sub_package/foo.py")
            assert not self.isfile_case("non_existent.py")
            assert not self.isfile_case("pkg/non_existent.py")
            assert not self.isfile_case("pkg/")
            assert not self.isfile_case("bar.py/")
        for i in range(2):
            assert not self.isfile_case("Bar.py")
            assert not self.isfile_case("pkg/sub_package/__init__.PY")
            assert not self.isfile_case("pkg/Sub_Package/foo.py")
            assert not self.isfile_case("Pkg/sub_package/foo.py")

    def test_isfile_case_2(self) -> None:
        self.make_file("bar.py")
        self.make_file("pkg/sub_package/__init__.py")
        self.make_file("pkg/sub_package/foo.py")
        # Run twice to test both cached and non-cached code paths.
        # This reverses the order of checks from test_isfile_case_1.
        for i in range(2):
            assert not self.isfile_case("Bar.py")
            assert not self.isfile_case("pkg/sub_package/__init__.PY")
            assert not self.isfile_case("pkg/Sub_Package/foo.py")
            assert not self.isfile_case("Pkg/sub_package/foo.py")
        for i in range(2):
            assert self.isfile_case("bar.py")
            assert self.isfile_case("pkg/sub_package/__init__.py")
            assert self.isfile_case("pkg/sub_package/foo.py")
            assert not self.isfile_case("non_existent.py")
            assert not self.isfile_case("pkg/non_existent.py")

    def test_isfile_case_3(self) -> None:
        # Interleave positive and negative queries within one pass.
        self.make_file("bar.py")
        self.make_file("pkg/sub_package/__init__.py")
        self.make_file("pkg/sub_package/foo.py")
        # Run twice to test both cached and non-cached code paths.
        for i in range(2):
            assert self.isfile_case("bar.py")
            assert not self.isfile_case("non_existent.py")
            assert not self.isfile_case("pkg/non_existent.py")
            assert not self.isfile_case("Bar.py")
            assert not self.isfile_case("pkg/sub_package/__init__.PY")
            assert not self.isfile_case("pkg/Sub_Package/foo.py")
            assert not self.isfile_case("Pkg/sub_package/foo.py")
            assert self.isfile_case("pkg/sub_package/__init__.py")
            assert self.isfile_case("pkg/sub_package/foo.py")

    def test_isfile_case_other_directory(self) -> None:
        # Paths outside the configured prefix get looser case checking.
        self.make_file("bar.py")
        with tempfile.TemporaryDirectory() as other:
            self.make_file("other_dir.py", base=other)
            self.make_file("pkg/other_dir.py", base=other)
            assert self.isfile_case(os.path.join(other, "other_dir.py"))
            assert not self.isfile_case(os.path.join(other, "Other_Dir.py"))
            assert not self.isfile_case(os.path.join(other, "bar.py"))
            if sys.platform in ("win32", "darwin"):
                # We only check case for directories under our prefix, and since
                # this path is not under the prefix, case difference is fine.
                assert self.isfile_case(os.path.join(other, "PKG/other_dir.py"))

    def make_file(self, path: str, base: str | None = None) -> None:
        """Create *path* (and parent dirs) under *base*, defaulting to the temp dir.

        A trailing "/" creates only the directory, no file.
        """
        if base is None:
            base = self.tempdir
        fullpath = os.path.join(base, path)
        os.makedirs(os.path.dirname(fullpath), exist_ok=True)
        if not path.endswith("/"):
            with open(fullpath, "w") as f:
                f.write("# test file")

    def isfile_case(self, path: str) -> bool:
        """Query the cache with the temp dir as the case-checking prefix."""
        return self.fscache.isfile_case(os.path.join(self.tempdir, path), self.tempdir)
|
||||
83
venv/lib/python3.12/site-packages/mypy/test/testgraph.py
Normal file
83
venv/lib/python3.12/site-packages/mypy/test/testgraph.py
Normal file
@@ -0,0 +1,83 @@
|
||||
"""Test cases for graph processing code in build.py."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import sys
|
||||
from typing import AbstractSet
|
||||
|
||||
from mypy.build import BuildManager, BuildSourceSet, State, order_ascc, sorted_components
|
||||
from mypy.errors import Errors
|
||||
from mypy.fscache import FileSystemCache
|
||||
from mypy.graph_utils import strongly_connected_components, topsort
|
||||
from mypy.modulefinder import SearchPaths
|
||||
from mypy.options import Options
|
||||
from mypy.plugin import Plugin
|
||||
from mypy.report import Reports
|
||||
from mypy.test.helpers import Suite, assert_equal
|
||||
from mypy.version import __version__
|
||||
|
||||
|
||||
class GraphSuite(Suite):
    """Unit tests for build-graph ordering helpers (topsort, SCCs, build order)."""

    def test_topsort(self) -> None:
        a = frozenset({"A"})
        b = frozenset({"B"})
        c = frozenset({"C"})
        d = frozenset({"D"})
        data: dict[AbstractSet[str], set[AbstractSet[str]]] = {a: {b, c}, b: {d}, c: {d}}
        res = list(topsort(data))
        # Each batch contains nodes whose dependencies all appear in earlier batches.
        assert_equal(res, [{d}, {b, c}, {a}])

    def test_scc(self) -> None:
        vertices = {"A", "B", "C", "D"}
        edges: dict[str, list[str]] = {"A": ["B", "C"], "B": ["C"], "C": ["B", "D"], "D": []}
        sccs = {frozenset(x) for x in strongly_connected_components(vertices, edges)}
        # B and C form a cycle; A and D are singleton components.
        assert_equal(sccs, {frozenset({"A"}), frozenset({"B", "C"}), frozenset({"D"})})

    def _make_manager(self) -> BuildManager:
        """Construct a minimal BuildManager: empty search paths, no sources,
        default plugin -- just enough for the graph-ordering functions."""
        options = Options()
        options.use_builtins_fixtures = True
        errors = Errors(options)
        fscache = FileSystemCache()
        search_paths = SearchPaths((), (), (), ())
        manager = BuildManager(
            data_dir="",
            search_paths=search_paths,
            ignore_prefix="",
            source_set=BuildSourceSet([]),
            reports=Reports("", {}),
            options=options,
            version_id=__version__,
            plugin=Plugin(options),
            plugins_snapshot={},
            errors=errors,
            flush_errors=lambda msgs, serious: None,
            fscache=fscache,
            stdout=sys.stdout,
            stderr=sys.stderr,
        )
        return manager

    def test_sorted_components(self) -> None:
        manager = self._make_manager()
        # Module sources are given inline; b and c import each other (SCC).
        graph = {
            "a": State("a", None, "import b, c", manager),
            "d": State("d", None, "pass", manager),
            "b": State("b", None, "import c", manager),
            "c": State("c", None, "import b, d", manager),
        }
        res = sorted_components(graph)
        assert_equal(res, [frozenset({"d"}), frozenset({"c", "b"}), frozenset({"a"})])

    def test_order_ascc(self) -> None:
        manager = self._make_manager()
        # The function-level "import a" in d closes a cycle through the
        # whole graph, so everything ends up in one SCC...
        graph = {
            "a": State("a", None, "import b, c", manager),
            "d": State("d", None, "def f(): import a", manager),
            "b": State("b", None, "import c", manager),
            "c": State("c", None, "import b, d", manager),
        }
        res = sorted_components(graph)
        assert_equal(res, [frozenset({"a", "d", "c", "b"})])
        ascc = res[0]
        # ...which order_ascc then linearizes into a processing order.
        scc = order_ascc(graph, ascc)
        assert_equal(scc, ["d", "c", "b", "a"])
|
||||
373
venv/lib/python3.12/site-packages/mypy/test/testinfer.py
Normal file
373
venv/lib/python3.12/site-packages/mypy/test/testinfer.py
Normal file
@@ -0,0 +1,373 @@
|
||||
"""Test cases for type inference helper functions."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from mypy.argmap import map_actuals_to_formals
|
||||
from mypy.checker import DisjointDict, group_comparison_operands
|
||||
from mypy.literals import Key
|
||||
from mypy.nodes import ARG_NAMED, ARG_OPT, ARG_POS, ARG_STAR, ARG_STAR2, ArgKind, NameExpr
|
||||
from mypy.test.helpers import Suite, assert_equal
|
||||
from mypy.test.typefixture import TypeFixture
|
||||
from mypy.types import AnyType, TupleType, Type, TypeOfAny
|
||||
|
||||
|
||||
class MapActualsToFormalsSuite(Suite):
    """Test cases for argmap.map_actuals_to_formals."""

    # In every expected value, expected[i] lists the indexes of the actual
    # (caller) arguments that map onto formal (callee) parameter i.

    def test_basic(self) -> None:
        self.assert_map([], [], [])

    def test_positional_only(self) -> None:
        self.assert_map([ARG_POS], [ARG_POS], [[0]])
        self.assert_map([ARG_POS, ARG_POS], [ARG_POS, ARG_POS], [[0], [1]])

    def test_optional(self) -> None:
        self.assert_map([], [ARG_OPT], [[]])
        self.assert_map([ARG_POS], [ARG_OPT], [[0]])
        self.assert_map([ARG_POS], [ARG_OPT, ARG_OPT], [[0], []])

    def test_callee_star(self) -> None:
        self.assert_map([], [ARG_STAR], [[]])
        self.assert_map([ARG_POS], [ARG_STAR], [[0]])
        self.assert_map([ARG_POS, ARG_POS], [ARG_STAR], [[0, 1]])

    def test_caller_star(self) -> None:
        self.assert_map([ARG_STAR], [ARG_STAR], [[0]])
        self.assert_map([ARG_POS, ARG_STAR], [ARG_STAR], [[0, 1]])
        self.assert_map([ARG_STAR], [ARG_POS, ARG_STAR], [[0], [0]])
        self.assert_map([ARG_STAR], [ARG_OPT, ARG_STAR], [[0], [0]])

    def test_too_many_caller_args(self) -> None:
        self.assert_map([ARG_POS], [], [])
        self.assert_map([ARG_STAR], [], [])
        self.assert_map([ARG_STAR], [ARG_POS], [[0]])

    def test_tuple_star(self) -> None:
        # A *args whose type is a fixed-length tuple is distributed over the
        # positional formals.
        any_type = AnyType(TypeOfAny.special_form)
        self.assert_vararg_map([ARG_STAR], [ARG_POS], [[0]], self.make_tuple(any_type))
        self.assert_vararg_map(
            [ARG_STAR], [ARG_POS, ARG_POS], [[0], [0]], self.make_tuple(any_type, any_type)
        )
        self.assert_vararg_map(
            [ARG_STAR],
            [ARG_POS, ARG_OPT, ARG_OPT],
            [[0], [0], []],
            self.make_tuple(any_type, any_type),
        )

    def make_tuple(self, *args: Type) -> TupleType:
        return TupleType(list(args), TypeFixture().std_tuple)

    def test_named_args(self) -> None:
        self.assert_map(["x"], [(ARG_POS, "x")], [[0]])
        self.assert_map(["y", "x"], [(ARG_POS, "x"), (ARG_POS, "y")], [[1], [0]])

    def test_some_named_args(self) -> None:
        self.assert_map(["y"], [(ARG_OPT, "x"), (ARG_OPT, "y"), (ARG_OPT, "z")], [[], [0], []])

    def test_missing_named_arg(self) -> None:
        self.assert_map(["y"], [(ARG_OPT, "x")], [[]])

    def test_duplicate_named_arg(self) -> None:
        self.assert_map(["x", "x"], [(ARG_OPT, "x")], [[0, 1]])

    def test_varargs_and_bare_asterisk(self) -> None:
        self.assert_map([ARG_STAR], [ARG_STAR, (ARG_NAMED, "x")], [[0], []])
        self.assert_map([ARG_STAR, "x"], [ARG_STAR, (ARG_NAMED, "x")], [[0], [1]])

    def test_keyword_varargs(self) -> None:
        self.assert_map(["x"], [ARG_STAR2], [[0]])
        self.assert_map(["x", ARG_STAR2], [ARG_STAR2], [[0, 1]])
        self.assert_map(["x", ARG_STAR2], [(ARG_POS, "x"), ARG_STAR2], [[0], [1]])
        self.assert_map([ARG_POS, ARG_STAR2], [(ARG_POS, "x"), ARG_STAR2], [[0], [1]])

    def test_both_kinds_of_varargs(self) -> None:
        self.assert_map([ARG_STAR, ARG_STAR2], [(ARG_POS, "x"), (ARG_POS, "y")], [[0, 1], [0, 1]])

    def test_special_cases(self) -> None:
        self.assert_map([ARG_STAR], [ARG_STAR, ARG_STAR2], [[0], []])
        self.assert_map([ARG_STAR, ARG_STAR2], [ARG_STAR, ARG_STAR2], [[0], [1]])
        self.assert_map([ARG_STAR2], [(ARG_POS, "x"), ARG_STAR2], [[0], [0]])
        self.assert_map([ARG_STAR2], [ARG_STAR2], [[0]])

    def assert_map(
        self,
        caller_kinds_: list[ArgKind | str],
        callee_kinds_: list[ArgKind | tuple[ArgKind, str]],
        expected: list[list[int]],
    ) -> None:
        """Expand the compact kind specs and check the computed mapping."""
        caller_kinds, caller_names = expand_caller_kinds(caller_kinds_)
        callee_kinds, callee_names = expand_callee_kinds(callee_kinds_)
        result = map_actuals_to_formals(
            caller_kinds,
            caller_names,
            callee_kinds,
            callee_names,
            lambda i: AnyType(TypeOfAny.special_form),
        )
        assert_equal(result, expected)

    def assert_vararg_map(
        self,
        caller_kinds: list[ArgKind],
        callee_kinds: list[ArgKind],
        expected: list[list[int]],
        vararg_type: Type,
    ) -> None:
        """Like assert_map, but with an explicit type for the *args actual."""
        result = map_actuals_to_formals(caller_kinds, [], callee_kinds, [], lambda i: vararg_type)
        assert_equal(result, expected)
|
||||
|
||||
|
||||
def expand_caller_kinds(
    kinds_or_names: list[ArgKind | str],
) -> tuple[list[ArgKind], list[str | None]]:
    """Split a mixed list of caller arg kinds / keyword names into parallel lists.

    A bare string stands for a named argument (kind ARG_NAMED); anything
    else is taken as a kind with no name.
    """
    pairs = [
        (ARG_NAMED, item) if isinstance(item, str) else (item, None)
        for item in kinds_or_names
    ]
    kinds = [kind for kind, _ in pairs]
    names: list[str | None] = [name for _, name in pairs]
    return kinds, names
|
||||
|
||||
|
||||
def expand_callee_kinds(
    kinds_and_names: list[ArgKind | tuple[ArgKind, str]]
) -> tuple[list[ArgKind], list[str | None]]:
    """Split a list of kinds / (kind, name) pairs into parallel kind and name lists.

    Plain kinds get a name of None.
    """
    pairs = [item if isinstance(item, tuple) else (item, None) for item in kinds_and_names]
    kinds = [kind for kind, _ in pairs]
    names: list[str | None] = [name for _, name in pairs]
    return kinds, names
|
||||
|
||||
|
||||
class OperandDisjointDictSuite(Suite):
    """Test cases for checker.DisjointDict, which is used for type inference with operands."""

    def new(self) -> DisjointDict[int, str]:
        # Fresh instance per scenario.
        return DisjointDict()

    def test_independent_maps(self) -> None:
        # Disjoint key sets stay in separate entries.
        d = self.new()
        d.add_mapping({0, 1}, {"group1"})
        d.add_mapping({2, 3, 4}, {"group2"})
        d.add_mapping({5, 6, 7}, {"group3"})

        self.assertEqual(
            d.items(), [({0, 1}, {"group1"}), ({2, 3, 4}, {"group2"}), ({5, 6, 7}, {"group3"})]
        )

    def test_partial_merging(self) -> None:
        # Overlapping key sets are merged transitively, values unioned.
        d = self.new()
        d.add_mapping({0, 1}, {"group1"})
        d.add_mapping({1, 2}, {"group2"})
        d.add_mapping({3, 4}, {"group3"})
        d.add_mapping({5, 0}, {"group4"})
        d.add_mapping({5, 6}, {"group5"})
        d.add_mapping({4, 7}, {"group6"})

        self.assertEqual(
            d.items(),
            [
                ({0, 1, 2, 5, 6}, {"group1", "group2", "group4", "group5"}),
                ({3, 4, 7}, {"group3", "group6"}),
            ],
        )

    def test_full_merging(self) -> None:
        # A single late overlap ({0, 10}) collapses everything into one entry.
        d = self.new()
        d.add_mapping({0, 1, 2}, {"a"})
        d.add_mapping({3, 4, 2}, {"b"})
        d.add_mapping({10, 11, 12}, {"c"})
        d.add_mapping({13, 14, 15}, {"d"})
        d.add_mapping({14, 10, 16}, {"e"})
        d.add_mapping({0, 10}, {"f"})

        self.assertEqual(
            d.items(),
            [({0, 1, 2, 3, 4, 10, 11, 12, 13, 14, 15, 16}, {"a", "b", "c", "d", "e", "f"})],
        )

    def test_merge_with_multiple_overlaps(self) -> None:
        # One mapping may overlap several existing entries at once.
        d = self.new()
        d.add_mapping({0, 1, 2}, {"a"})
        d.add_mapping({3, 4, 5}, {"b"})
        d.add_mapping({1, 2, 4, 5}, {"c"})
        d.add_mapping({6, 1, 2, 4, 5}, {"d"})
        d.add_mapping({6, 1, 2, 4, 5}, {"e"})

        self.assertEqual(d.items(), [({0, 1, 2, 3, 4, 5, 6}, {"a", "b", "c", "d", "e"})])
|
||||
|
||||
|
||||
class OperandComparisonGroupingSuite(Suite):
    """Test cases for checker.group_comparison_operands."""

    def literal_keymap(self, assignable_operands: dict[int, NameExpr]) -> dict[int, Key]:
        """Build a literal-key map marking which operand indexes are assignable."""
        output: dict[int, Key] = {}
        for index, expr in assignable_operands.items():
            output[index] = ("FakeExpr", expr.name)
        return output

    def test_basic_cases(self) -> None:
        # Note: the grouping function doesn't actually inspect the input exprs, so we
        # just default to using NameExprs for simplicity.
        x0 = NameExpr("x0")
        x1 = NameExpr("x1")
        x2 = NameExpr("x2")
        x3 = NameExpr("x3")
        x4 = NameExpr("x4")

        basic_input = [("==", x0, x1), ("==", x1, x2), ("<", x2, x3), ("==", x3, x4)]

        none_assignable = self.literal_keymap({})
        all_assignable = self.literal_keymap({0: x0, 1: x1, 2: x2, 3: x3, 4: x4})

        # Only operators listed in the third argument may be chained together.
        for assignable in [none_assignable, all_assignable]:
            self.assertEqual(
                group_comparison_operands(basic_input, assignable, set()),
                [("==", [0, 1]), ("==", [1, 2]), ("<", [2, 3]), ("==", [3, 4])],
            )
            self.assertEqual(
                group_comparison_operands(basic_input, assignable, {"=="}),
                [("==", [0, 1, 2]), ("<", [2, 3]), ("==", [3, 4])],
            )
            self.assertEqual(
                group_comparison_operands(basic_input, assignable, {"<"}),
                [("==", [0, 1]), ("==", [1, 2]), ("<", [2, 3]), ("==", [3, 4])],
            )
            self.assertEqual(
                group_comparison_operands(basic_input, assignable, {"==", "<"}),
                [("==", [0, 1, 2]), ("<", [2, 3]), ("==", [3, 4])],
            )

    def test_multiple_groups(self) -> None:
        x0 = NameExpr("x0")
        x1 = NameExpr("x1")
        x2 = NameExpr("x2")
        x3 = NameExpr("x3")
        x4 = NameExpr("x4")
        x5 = NameExpr("x5")

        self.assertEqual(
            group_comparison_operands(
                [("==", x0, x1), ("==", x1, x2), ("is", x2, x3), ("is", x3, x4)],
                self.literal_keymap({}),
                {"==", "is"},
            ),
            [("==", [0, 1, 2]), ("is", [2, 3, 4])],
        )
        self.assertEqual(
            group_comparison_operands(
                [("==", x0, x1), ("==", x1, x2), ("==", x2, x3), ("==", x3, x4)],
                self.literal_keymap({}),
                {"==", "is"},
            ),
            [("==", [0, 1, 2, 3, 4])],
        )
        self.assertEqual(
            group_comparison_operands(
                [("is", x0, x1), ("==", x1, x2), ("==", x2, x3), ("==", x3, x4)],
                self.literal_keymap({}),
                {"==", "is"},
            ),
            [("is", [0, 1]), ("==", [1, 2, 3, 4])],
        )
        self.assertEqual(
            group_comparison_operands(
                [("is", x0, x1), ("is", x1, x2), ("<", x2, x3), ("==", x3, x4), ("==", x4, x5)],
                self.literal_keymap({}),
                {"==", "is"},
            ),
            [("is", [0, 1, 2]), ("<", [2, 3]), ("==", [3, 4, 5])],
        )

    def test_multiple_groups_coalescing(self) -> None:
        # Non-adjacent groups of the same operator may coalesce when they
        # share an assignable operand (here x0 at both ends).
        x0 = NameExpr("x0")
        x1 = NameExpr("x1")
        x2 = NameExpr("x2")
        x3 = NameExpr("x3")
        x4 = NameExpr("x4")

        nothing_combined = [("==", [0, 1, 2]), ("<", [2, 3]), ("==", [3, 4, 5])]
        everything_combined = [("==", [0, 1, 2, 3, 4, 5]), ("<", [2, 3])]

        # Note: We do 'x4 == x0' at the very end!
        two_groups = [
            ("==", x0, x1),
            ("==", x1, x2),
            ("<", x2, x3),
            ("==", x3, x4),
            ("==", x4, x0),
        ]
        self.assertEqual(
            group_comparison_operands(
                two_groups, self.literal_keymap({0: x0, 1: x1, 2: x2, 3: x3, 4: x4, 5: x0}), {"=="}
            ),
            everything_combined,
            "All vars are assignable, everything is combined",
        )
        self.assertEqual(
            group_comparison_operands(
                two_groups, self.literal_keymap({1: x1, 2: x2, 3: x3, 4: x4}), {"=="}
            ),
            nothing_combined,
            "x0 is unassignable, so no combining",
        )
        self.assertEqual(
            group_comparison_operands(
                two_groups, self.literal_keymap({0: x0, 1: x1, 3: x3, 5: x0}), {"=="}
            ),
            everything_combined,
            "Some vars are unassignable but x0 is, so we combine",
        )
        self.assertEqual(
            group_comparison_operands(two_groups, self.literal_keymap({0: x0, 5: x0}), {"=="}),
            everything_combined,
            "All vars are unassignable but x0 is, so we combine",
        )

    def test_multiple_groups_different_operators(self) -> None:
        x0 = NameExpr("x0")
        x1 = NameExpr("x1")
        x2 = NameExpr("x2")
        x3 = NameExpr("x3")

        groups = [("==", x0, x1), ("==", x1, x2), ("is", x2, x3), ("is", x3, x0)]
        keymap = self.literal_keymap({0: x0, 1: x1, 2: x2, 3: x3, 4: x0})
        self.assertEqual(
            group_comparison_operands(groups, keymap, {"==", "is"}),
            [("==", [0, 1, 2]), ("is", [2, 3, 4])],
            "Different operators can never be combined",
        )

    def test_single_pair(self) -> None:
        # A lone comparison is returned unchanged for every configuration.
        x0 = NameExpr("x0")
        x1 = NameExpr("x1")

        single_comparison = [("==", x0, x1)]
        expected_output = [("==", [0, 1])]

        assignable_combinations: list[dict[int, NameExpr]] = [{}, {0: x0}, {1: x1}, {0: x0, 1: x1}]
        to_group_by: list[set[str]] = [set(), {"=="}, {"is"}]

        for combo in assignable_combinations:
            for operators in to_group_by:
                keymap = self.literal_keymap(combo)
                self.assertEqual(
                    group_comparison_operands(single_comparison, keymap, operators),
                    expected_output,
                )

    def test_empty_pair_list(self) -> None:
        # This case should never occur in practice -- ComparisionExprs
        # always contain at least one comparison. But in case it does...

        self.assertEqual(group_comparison_operands([], {}, set()), [])
        self.assertEqual(group_comparison_operands([], {}, {"=="}), [])
|
||||
76
venv/lib/python3.12/site-packages/mypy/test/testipc.py
Normal file
76
venv/lib/python3.12/site-packages/mypy/test/testipc.py
Normal file
@@ -0,0 +1,76 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import sys
|
||||
import time
|
||||
from multiprocessing import Process, Queue
|
||||
from unittest import TestCase, main
|
||||
|
||||
import pytest
|
||||
|
||||
from mypy.ipc import IPCClient, IPCServer
|
||||
|
||||
CONNECTION_NAME = "dmypy-test-ipc"
|
||||
|
||||
|
||||
def server(msg: str, q: Queue[str]) -> None:
    """Child-process entry point for the IPC tests.

    Publishes the server's actual connection name through *q* (the parent
    reads it to connect), then repeatedly accepts a connection, writes *msg*
    and reads the reply -- looping until a non-empty reply arrives, which is
    what lets the parent connect more than once by replying b"".
    """
    server = IPCServer(CONNECTION_NAME)
    q.put(server.connection_name)
    data = b""
    while not data:
        with server:
            server.write(msg.encode())
            data = server.read()
    server.cleanup()
|
||||
|
||||
|
||||
class IPCTests(TestCase):
    """End-to-end tests of IPCClient/IPCServer against a real child process."""

    def test_transaction_large(self) -> None:
        # A payload larger than one read must arrive intact.
        queue: Queue[str] = Queue()
        msg = "t" * 200000  # longer than the max read size of 100_000
        p = Process(target=server, args=(msg, queue), daemon=True)
        p.start()
        connection_name = queue.get()
        with IPCClient(connection_name, timeout=1) as client:
            assert client.read() == msg.encode()
            client.write(b"test")
        queue.close()
        queue.join_thread()
        p.join()

    def test_connect_twice(self) -> None:
        # The server keeps listening until it gets a non-empty reply,
        # so an empty reply lets us connect a second time.
        queue: Queue[str] = Queue()
        msg = "this is a test message"
        p = Process(target=server, args=(msg, queue), daemon=True)
        p.start()
        connection_name = queue.get()
        with IPCClient(connection_name, timeout=1) as client:
            assert client.read() == msg.encode()
            client.write(b"")  # don't let the server hang up yet, we want to connect again.

        with IPCClient(connection_name, timeout=1) as client:
            assert client.read() == msg.encode()
            client.write(b"test")
        queue.close()
        queue.join_thread()
        p.join()
        assert p.exitcode == 0

    # Run test_connect_twice a lot, in the hopes of finding issues.
    # This is really slow, so it is skipped, but can be enabled if
    # needed to debug IPC issues.
    @pytest.mark.skip
    def test_connect_alot(self) -> None:
        t0 = time.time()
        for i in range(1000):
            try:
                print(i, "start")
                self.test_connect_twice()
            finally:
                # Report per-iteration timing even when an iteration fails.
                t1 = time.time()
                print(i, t1 - t0)
                sys.stdout.flush()
                t0 = t1
|
||||
|
||||
|
||||
# Allow running this test module directly via unittest's CLI runner.
if __name__ == "__main__":
    main()
|
||||
238
venv/lib/python3.12/site-packages/mypy/test/testmerge.py
Normal file
238
venv/lib/python3.12/site-packages/mypy/test/testmerge.py
Normal file
@@ -0,0 +1,238 @@
|
||||
"""Test cases for AST merge (used for fine-grained incremental checking)"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
import shutil
|
||||
|
||||
from mypy import build
|
||||
from mypy.build import BuildResult
|
||||
from mypy.errors import CompileError
|
||||
from mypy.modulefinder import BuildSource
|
||||
from mypy.nodes import (
|
||||
UNBOUND_IMPORTED,
|
||||
Expression,
|
||||
MypyFile,
|
||||
Node,
|
||||
SymbolTable,
|
||||
SymbolTableNode,
|
||||
TypeInfo,
|
||||
TypeVarExpr,
|
||||
Var,
|
||||
)
|
||||
from mypy.options import Options
|
||||
from mypy.server.subexpr import get_subexpressions
|
||||
from mypy.server.update import FineGrainedBuildManager
|
||||
from mypy.strconv import StrConv
|
||||
from mypy.test.config import test_temp_dir
|
||||
from mypy.test.data import DataDrivenTestCase, DataSuite
|
||||
from mypy.test.helpers import assert_string_arrays_equal, normalize_error_messages, parse_options
|
||||
from mypy.types import Type, TypeStrVisitor
|
||||
from mypy.util import IdMapper, short_type
|
||||
|
||||
# Which data structures to dump in a test case?
|
||||
SYMTABLE = "SYMTABLE"
|
||||
TYPEINFO = " TYPEINFO"
|
||||
TYPES = "TYPES"
|
||||
AST = "AST"
|
||||
|
||||
|
||||
class ASTMergeSuite(DataSuite):
|
||||
files = ["merge.test"]
|
||||
|
||||
def setup(self) -> None:
|
||||
super().setup()
|
||||
self.str_conv = StrConv(show_ids=True, options=Options())
|
||||
assert self.str_conv.id_mapper is not None
|
||||
self.id_mapper: IdMapper = self.str_conv.id_mapper
|
||||
self.type_str_conv = TypeStrVisitor(self.id_mapper, options=Options())
|
||||
|
||||
def run_case(self, testcase: DataDrivenTestCase) -> None:
|
||||
name = testcase.name
|
||||
# We use the test case name to decide which data structures to dump.
|
||||
# Dumping everything would result in very verbose test cases.
|
||||
if name.endswith("_symtable"):
|
||||
kind = SYMTABLE
|
||||
elif name.endswith("_typeinfo"):
|
||||
kind = TYPEINFO
|
||||
elif name.endswith("_types"):
|
||||
kind = TYPES
|
||||
else:
|
||||
kind = AST
|
||||
|
||||
main_src = "\n".join(testcase.input)
|
||||
result = self.build(main_src, testcase)
|
||||
assert result is not None, "cases where CompileError occurred should not be run"
|
||||
result.manager.fscache.flush()
|
||||
fine_grained_manager = FineGrainedBuildManager(result)
|
||||
|
||||
a = []
|
||||
if result.errors:
|
||||
a.extend(result.errors)
|
||||
|
||||
target_path = os.path.join(test_temp_dir, "target.py")
|
||||
shutil.copy(os.path.join(test_temp_dir, "target.py.next"), target_path)
|
||||
|
||||
a.extend(self.dump(fine_grained_manager, kind, testcase.test_modules))
|
||||
old_subexpr = get_subexpressions(result.manager.modules["target"])
|
||||
|
||||
a.append("==>")
|
||||
|
||||
new_file, new_types = self.build_increment(fine_grained_manager, "target", target_path)
|
||||
a.extend(self.dump(fine_grained_manager, kind, testcase.test_modules))
|
||||
|
||||
for expr in old_subexpr:
|
||||
if isinstance(expr, TypeVarExpr):
|
||||
# These are merged so we can't perform the check.
|
||||
continue
|
||||
# Verify that old AST nodes are removed from the expression type map.
|
||||
assert expr not in new_types
|
||||
|
||||
if testcase.normalize_output:
|
||||
a = normalize_error_messages(a)
|
||||
|
||||
assert_string_arrays_equal(
|
||||
testcase.output, a, f"Invalid output ({testcase.file}, line {testcase.line})"
|
||||
)
|
||||
|
||||
def build(self, source: str, testcase: DataDrivenTestCase) -> BuildResult | None:
|
||||
options = parse_options(source, testcase, incremental_step=1)
|
||||
options.incremental = True
|
||||
options.fine_grained_incremental = True
|
||||
options.use_builtins_fixtures = True
|
||||
options.export_types = True
|
||||
options.show_traceback = True
|
||||
options.allow_empty_bodies = True
|
||||
options.force_uppercase_builtins = True
|
||||
main_path = os.path.join(test_temp_dir, "main")
|
||||
|
||||
self.str_conv.options = options
|
||||
self.type_str_conv.options = options
|
||||
with open(main_path, "w", encoding="utf8") as f:
|
||||
f.write(source)
|
||||
try:
|
||||
result = build.build(
|
||||
sources=[BuildSource(main_path, None, None)],
|
||||
options=options,
|
||||
alt_lib_path=test_temp_dir,
|
||||
)
|
||||
except CompileError:
|
||||
# TODO: Is it okay to return None?
|
||||
return None
|
||||
return result
|
||||
|
||||
def build_increment(
|
||||
self, manager: FineGrainedBuildManager, module_id: str, path: str
|
||||
) -> tuple[MypyFile, dict[Expression, Type]]:
|
||||
manager.flush_cache()
|
||||
manager.update([(module_id, path)], [])
|
||||
module = manager.manager.modules[module_id]
|
||||
type_map = manager.graph[module_id].type_map()
|
||||
return module, type_map
|
||||
|
||||
def dump(
|
||||
self, manager: FineGrainedBuildManager, kind: str, test_modules: list[str]
|
||||
) -> list[str]:
|
||||
modules = {
|
||||
name: file for name, file in manager.manager.modules.items() if name in test_modules
|
||||
}
|
||||
if kind == AST:
|
||||
return self.dump_asts(modules)
|
||||
elif kind == TYPEINFO:
|
||||
return self.dump_typeinfos(modules)
|
||||
elif kind == SYMTABLE:
|
||||
return self.dump_symbol_tables(modules)
|
||||
elif kind == TYPES:
|
||||
return self.dump_types(modules, manager)
|
||||
assert False, f"Invalid kind {kind}"
|
||||
|
||||
def dump_asts(self, modules: dict[str, MypyFile]) -> list[str]:
|
||||
a = []
|
||||
for m in sorted(modules):
|
||||
s = modules[m].accept(self.str_conv)
|
||||
a.extend(s.splitlines())
|
||||
return a
|
||||
|
||||
def dump_symbol_tables(self, modules: dict[str, MypyFile]) -> list[str]:
|
||||
a = []
|
||||
for id in sorted(modules):
|
||||
a.extend(self.dump_symbol_table(id, modules[id].names))
|
||||
return a
|
||||
|
||||
def dump_symbol_table(self, module_id: str, symtable: SymbolTable) -> list[str]:
|
||||
a = [f"{module_id}:"]
|
||||
for name in sorted(symtable):
|
||||
if name.startswith("__"):
|
||||
continue
|
||||
a.append(f" {name}: {self.format_symbol_table_node(symtable[name])}")
|
||||
return a
|
||||
|
||||
def format_symbol_table_node(self, node: SymbolTableNode) -> str:
|
||||
if node.node is None:
|
||||
if node.kind == UNBOUND_IMPORTED:
|
||||
return "UNBOUND_IMPORTED"
|
||||
return "None"
|
||||
if isinstance(node.node, Node):
|
||||
s = f"{str(type(node.node).__name__)}<{self.id_mapper.id(node.node)}>"
|
||||
else:
|
||||
s = f"? ({type(node.node)})"
|
||||
if (
|
||||
isinstance(node.node, Var)
|
||||
and node.node.type
|
||||
and not node.node.fullname.startswith("typing.")
|
||||
):
|
||||
typestr = self.format_type(node.node.type)
|
||||
s += f"({typestr})"
|
||||
return s
|
||||
|
||||
def dump_typeinfos(self, modules: dict[str, MypyFile]) -> list[str]:
|
||||
a = []
|
||||
for id in sorted(modules):
|
||||
a.extend(self.dump_typeinfos_recursive(modules[id].names))
|
||||
return a
|
||||
|
||||
def dump_typeinfos_recursive(self, names: SymbolTable) -> list[str]:
|
||||
a = []
|
||||
for name, node in sorted(names.items(), key=lambda x: x[0]):
|
||||
if isinstance(node.node, TypeInfo):
|
||||
a.extend(self.dump_typeinfo(node.node))
|
||||
a.extend(self.dump_typeinfos_recursive(node.node.names))
|
||||
return a
|
||||
|
||||
def dump_typeinfo(self, info: TypeInfo) -> list[str]:
|
||||
if info.fullname == "enum.Enum":
|
||||
# Avoid noise
|
||||
return []
|
||||
s = info.dump(str_conv=self.str_conv, type_str_conv=self.type_str_conv)
|
||||
return s.splitlines()
|
||||
|
||||
def dump_types(
|
||||
self, modules: dict[str, MypyFile], manager: FineGrainedBuildManager
|
||||
) -> list[str]:
|
||||
a = []
|
||||
# To make the results repeatable, we try to generate unique and
|
||||
# deterministic sort keys.
|
||||
for module_id in sorted(modules):
|
||||
all_types = manager.manager.all_types
|
||||
# Compute a module type map from the global type map
|
||||
tree = manager.graph[module_id].tree
|
||||
assert tree is not None
|
||||
type_map = {
|
||||
node: all_types[node] for node in get_subexpressions(tree) if node in all_types
|
||||
}
|
||||
if type_map:
|
||||
a.append(f"## {module_id}")
|
||||
for expr in sorted(
|
||||
type_map,
|
||||
key=lambda n: (
|
||||
n.line,
|
||||
short_type(n),
|
||||
n.str_with_options(self.str_conv.options) + str(type_map[n]),
|
||||
),
|
||||
):
|
||||
typ = type_map[expr]
|
||||
a.append(f"{short_type(expr)}:{expr.line}: {self.format_type(typ)}")
|
||||
return a
|
||||
|
||||
def format_type(self, typ: Type) -> str:
|
||||
return typ.accept(self.type_str_conv)
|
||||
278
venv/lib/python3.12/site-packages/mypy/test/testmodulefinder.py
Normal file
278
venv/lib/python3.12/site-packages/mypy/test/testmodulefinder.py
Normal file
@@ -0,0 +1,278 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
|
||||
from mypy.modulefinder import FindModuleCache, ModuleNotFoundReason, SearchPaths
|
||||
from mypy.options import Options
|
||||
from mypy.test.config import package_path
|
||||
from mypy.test.helpers import Suite, assert_equal
|
||||
|
||||
data_path = os.path.relpath(os.path.join(package_path, "modulefinder"))
|
||||
|
||||
|
||||
class ModuleFinderSuite(Suite):
|
||||
def setUp(self) -> None:
|
||||
self.search_paths = SearchPaths(
|
||||
python_path=(),
|
||||
mypy_path=(
|
||||
os.path.join(data_path, "nsx-pkg1"),
|
||||
os.path.join(data_path, "nsx-pkg2"),
|
||||
os.path.join(data_path, "nsx-pkg3"),
|
||||
os.path.join(data_path, "nsy-pkg1"),
|
||||
os.path.join(data_path, "nsy-pkg2"),
|
||||
os.path.join(data_path, "pkg1"),
|
||||
os.path.join(data_path, "pkg2"),
|
||||
),
|
||||
package_path=(),
|
||||
typeshed_path=(),
|
||||
)
|
||||
options = Options()
|
||||
options.namespace_packages = True
|
||||
self.fmc_ns = FindModuleCache(self.search_paths, fscache=None, options=options)
|
||||
|
||||
options = Options()
|
||||
options.namespace_packages = False
|
||||
self.fmc_nons = FindModuleCache(self.search_paths, fscache=None, options=options)
|
||||
|
||||
def test__no_namespace_packages__nsx(self) -> None:
|
||||
"""
|
||||
If namespace_packages is False, we shouldn't find nsx
|
||||
"""
|
||||
found_module = self.fmc_nons.find_module("nsx")
|
||||
assert_equal(ModuleNotFoundReason.NOT_FOUND, found_module)
|
||||
|
||||
def test__no_namespace_packages__nsx_a(self) -> None:
|
||||
"""
|
||||
If namespace_packages is False, we shouldn't find nsx.a.
|
||||
"""
|
||||
found_module = self.fmc_nons.find_module("nsx.a")
|
||||
assert_equal(ModuleNotFoundReason.NOT_FOUND, found_module)
|
||||
|
||||
def test__no_namespace_packages__find_a_in_pkg1(self) -> None:
|
||||
"""
|
||||
Find find pkg1/a.py for "a" with namespace_packages False.
|
||||
"""
|
||||
found_module = self.fmc_nons.find_module("a")
|
||||
expected = os.path.join(data_path, "pkg1", "a.py")
|
||||
assert_equal(expected, found_module)
|
||||
|
||||
def test__no_namespace_packages__find_b_in_pkg2(self) -> None:
|
||||
found_module = self.fmc_ns.find_module("b")
|
||||
expected = os.path.join(data_path, "pkg2", "b", "__init__.py")
|
||||
assert_equal(expected, found_module)
|
||||
|
||||
def test__find_nsx_as_namespace_pkg_in_pkg1(self) -> None:
|
||||
"""
|
||||
There's no __init__.py in any of the nsx dirs, return
|
||||
the path to the first one found in mypypath.
|
||||
"""
|
||||
found_module = self.fmc_ns.find_module("nsx")
|
||||
expected = os.path.join(data_path, "nsx-pkg1", "nsx")
|
||||
assert_equal(expected, found_module)
|
||||
|
||||
def test__find_nsx_a_init_in_pkg1(self) -> None:
|
||||
"""
|
||||
Find nsx-pkg1/nsx/a/__init__.py for "nsx.a" in namespace mode.
|
||||
"""
|
||||
found_module = self.fmc_ns.find_module("nsx.a")
|
||||
expected = os.path.join(data_path, "nsx-pkg1", "nsx", "a", "__init__.py")
|
||||
assert_equal(expected, found_module)
|
||||
|
||||
def test__find_nsx_b_init_in_pkg2(self) -> None:
|
||||
"""
|
||||
Find nsx-pkg2/nsx/b/__init__.py for "nsx.b" in namespace mode.
|
||||
"""
|
||||
found_module = self.fmc_ns.find_module("nsx.b")
|
||||
expected = os.path.join(data_path, "nsx-pkg2", "nsx", "b", "__init__.py")
|
||||
assert_equal(expected, found_module)
|
||||
|
||||
def test__find_nsx_c_c_in_pkg3(self) -> None:
|
||||
"""
|
||||
Find nsx-pkg3/nsx/c/c.py for "nsx.c.c" in namespace mode.
|
||||
"""
|
||||
found_module = self.fmc_ns.find_module("nsx.c.c")
|
||||
expected = os.path.join(data_path, "nsx-pkg3", "nsx", "c", "c.py")
|
||||
assert_equal(expected, found_module)
|
||||
|
||||
def test__find_nsy_a__init_pyi(self) -> None:
|
||||
"""
|
||||
Prefer nsy-pkg1/a/__init__.pyi file over __init__.py.
|
||||
"""
|
||||
found_module = self.fmc_ns.find_module("nsy.a")
|
||||
expected = os.path.join(data_path, "nsy-pkg1", "nsy", "a", "__init__.pyi")
|
||||
assert_equal(expected, found_module)
|
||||
|
||||
def test__find_nsy_b__init_py(self) -> None:
|
||||
"""
|
||||
There is a nsy-pkg2/nsy/b.pyi, but also a nsy-pkg2/nsy/b/__init__.py.
|
||||
We expect to find the latter when looking up "nsy.b" as
|
||||
a package is preferred over a module.
|
||||
"""
|
||||
found_module = self.fmc_ns.find_module("nsy.b")
|
||||
expected = os.path.join(data_path, "nsy-pkg2", "nsy", "b", "__init__.py")
|
||||
assert_equal(expected, found_module)
|
||||
|
||||
def test__find_nsy_c_pyi(self) -> None:
|
||||
"""
|
||||
There is a nsy-pkg2/nsy/c.pyi and nsy-pkg2/nsy/c.py
|
||||
We expect to find the former when looking up "nsy.b" as
|
||||
.pyi is preferred over .py.
|
||||
"""
|
||||
found_module = self.fmc_ns.find_module("nsy.c")
|
||||
expected = os.path.join(data_path, "nsy-pkg2", "nsy", "c.pyi")
|
||||
assert_equal(expected, found_module)
|
||||
|
||||
def test__find_a_in_pkg1(self) -> None:
|
||||
found_module = self.fmc_ns.find_module("a")
|
||||
expected = os.path.join(data_path, "pkg1", "a.py")
|
||||
assert_equal(expected, found_module)
|
||||
|
||||
def test__find_b_init_in_pkg2(self) -> None:
|
||||
found_module = self.fmc_ns.find_module("b")
|
||||
expected = os.path.join(data_path, "pkg2", "b", "__init__.py")
|
||||
assert_equal(expected, found_module)
|
||||
|
||||
def test__find_d_nowhere(self) -> None:
|
||||
found_module = self.fmc_ns.find_module("d")
|
||||
assert_equal(ModuleNotFoundReason.NOT_FOUND, found_module)
|
||||
|
||||
|
||||
class ModuleFinderSitePackagesSuite(Suite):
|
||||
def setUp(self) -> None:
|
||||
self.package_dir = os.path.relpath(
|
||||
os.path.join(package_path, "modulefinder-site-packages")
|
||||
)
|
||||
|
||||
package_paths = (
|
||||
os.path.join(self.package_dir, "baz"),
|
||||
os.path.join(self.package_dir, "..", "not-a-directory"),
|
||||
os.path.join(self.package_dir, "..", "modulefinder-src"),
|
||||
self.package_dir,
|
||||
)
|
||||
|
||||
self.search_paths = SearchPaths(
|
||||
python_path=(),
|
||||
mypy_path=(os.path.join(data_path, "pkg1"),),
|
||||
package_path=tuple(package_paths),
|
||||
typeshed_path=(),
|
||||
)
|
||||
options = Options()
|
||||
options.namespace_packages = True
|
||||
self.fmc_ns = FindModuleCache(self.search_paths, fscache=None, options=options)
|
||||
|
||||
options = Options()
|
||||
options.namespace_packages = False
|
||||
self.fmc_nons = FindModuleCache(self.search_paths, fscache=None, options=options)
|
||||
|
||||
def path(self, *parts: str) -> str:
|
||||
return os.path.join(self.package_dir, *parts)
|
||||
|
||||
def test__packages_with_ns(self) -> None:
|
||||
cases = [
|
||||
# Namespace package with py.typed
|
||||
("ns_pkg_typed", self.path("ns_pkg_typed")),
|
||||
("ns_pkg_typed.a", self.path("ns_pkg_typed", "a.py")),
|
||||
("ns_pkg_typed.b", self.path("ns_pkg_typed", "b")),
|
||||
("ns_pkg_typed.b.c", self.path("ns_pkg_typed", "b", "c.py")),
|
||||
("ns_pkg_typed.a.a_var", ModuleNotFoundReason.NOT_FOUND),
|
||||
# Namespace package without py.typed
|
||||
("ns_pkg_untyped", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
|
||||
("ns_pkg_untyped.a", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
|
||||
("ns_pkg_untyped.b", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
|
||||
("ns_pkg_untyped.b.c", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
|
||||
("ns_pkg_untyped.a.a_var", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
|
||||
# Namespace package without stub package
|
||||
("ns_pkg_w_stubs", self.path("ns_pkg_w_stubs")),
|
||||
("ns_pkg_w_stubs.typed", self.path("ns_pkg_w_stubs-stubs", "typed", "__init__.pyi")),
|
||||
(
|
||||
"ns_pkg_w_stubs.typed_inline",
|
||||
self.path("ns_pkg_w_stubs", "typed_inline", "__init__.py"),
|
||||
),
|
||||
("ns_pkg_w_stubs.untyped", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
|
||||
# Regular package with py.typed
|
||||
("pkg_typed", self.path("pkg_typed", "__init__.py")),
|
||||
("pkg_typed.a", self.path("pkg_typed", "a.py")),
|
||||
("pkg_typed.b", self.path("pkg_typed", "b", "__init__.py")),
|
||||
("pkg_typed.b.c", self.path("pkg_typed", "b", "c.py")),
|
||||
("pkg_typed.a.a_var", ModuleNotFoundReason.NOT_FOUND),
|
||||
# Regular package without py.typed
|
||||
("pkg_untyped", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
|
||||
("pkg_untyped.a", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
|
||||
("pkg_untyped.b", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
|
||||
("pkg_untyped.b.c", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
|
||||
("pkg_untyped.a.a_var", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
|
||||
# Top-level Python file in site-packages
|
||||
("standalone", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
|
||||
("standalone.standalone_var", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
|
||||
# Packages found by following .pth files
|
||||
("baz_pkg", self.path("baz", "baz_pkg", "__init__.py")),
|
||||
("ns_baz_pkg.a", self.path("baz", "ns_baz_pkg", "a.py")),
|
||||
("neighbor_pkg", self.path("..", "modulefinder-src", "neighbor_pkg", "__init__.py")),
|
||||
("ns_neighbor_pkg.a", self.path("..", "modulefinder-src", "ns_neighbor_pkg", "a.py")),
|
||||
# Something that doesn't exist
|
||||
("does_not_exist", ModuleNotFoundReason.NOT_FOUND),
|
||||
# A regular package with an installed set of stubs
|
||||
("foo.bar", self.path("foo-stubs", "bar.pyi")),
|
||||
# A regular, non-site-packages module
|
||||
("a", os.path.join(data_path, "pkg1", "a.py")),
|
||||
]
|
||||
for module, expected in cases:
|
||||
template = "Find(" + module + ") got {}; expected {}"
|
||||
|
||||
actual = self.fmc_ns.find_module(module)
|
||||
assert_equal(actual, expected, template)
|
||||
|
||||
def test__packages_without_ns(self) -> None:
|
||||
cases = [
|
||||
# Namespace package with py.typed
|
||||
("ns_pkg_typed", ModuleNotFoundReason.NOT_FOUND),
|
||||
("ns_pkg_typed.a", ModuleNotFoundReason.NOT_FOUND),
|
||||
("ns_pkg_typed.b", ModuleNotFoundReason.NOT_FOUND),
|
||||
("ns_pkg_typed.b.c", ModuleNotFoundReason.NOT_FOUND),
|
||||
("ns_pkg_typed.a.a_var", ModuleNotFoundReason.NOT_FOUND),
|
||||
# Namespace package without py.typed
|
||||
("ns_pkg_untyped", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
|
||||
("ns_pkg_untyped.a", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
|
||||
("ns_pkg_untyped.b", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
|
||||
("ns_pkg_untyped.b.c", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
|
||||
("ns_pkg_untyped.a.a_var", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
|
||||
# Namespace package without stub package
|
||||
("ns_pkg_w_stubs", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
|
||||
("ns_pkg_w_stubs.typed", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
|
||||
(
|
||||
"ns_pkg_w_stubs.typed_inline",
|
||||
self.path("ns_pkg_w_stubs", "typed_inline", "__init__.py"),
|
||||
),
|
||||
("ns_pkg_w_stubs.untyped", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
|
||||
# Regular package with py.typed
|
||||
("pkg_typed", self.path("pkg_typed", "__init__.py")),
|
||||
("pkg_typed.a", self.path("pkg_typed", "a.py")),
|
||||
("pkg_typed.b", self.path("pkg_typed", "b", "__init__.py")),
|
||||
("pkg_typed.b.c", self.path("pkg_typed", "b", "c.py")),
|
||||
("pkg_typed.a.a_var", ModuleNotFoundReason.NOT_FOUND),
|
||||
# Regular package without py.typed
|
||||
("pkg_untyped", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
|
||||
("pkg_untyped.a", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
|
||||
("pkg_untyped.b", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
|
||||
("pkg_untyped.b.c", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
|
||||
("pkg_untyped.a.a_var", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
|
||||
# Top-level Python file in site-packages
|
||||
("standalone", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
|
||||
("standalone.standalone_var", ModuleNotFoundReason.FOUND_WITHOUT_TYPE_HINTS),
|
||||
# Packages found by following .pth files
|
||||
("baz_pkg", self.path("baz", "baz_pkg", "__init__.py")),
|
||||
("ns_baz_pkg.a", ModuleNotFoundReason.NOT_FOUND),
|
||||
("neighbor_pkg", self.path("..", "modulefinder-src", "neighbor_pkg", "__init__.py")),
|
||||
("ns_neighbor_pkg.a", ModuleNotFoundReason.NOT_FOUND),
|
||||
# Something that doesn't exist
|
||||
("does_not_exist", ModuleNotFoundReason.NOT_FOUND),
|
||||
# A regular package with an installed set of stubs
|
||||
("foo.bar", self.path("foo-stubs", "bar.pyi")),
|
||||
# A regular, non-site-packages module
|
||||
("a", os.path.join(data_path, "pkg1", "a.py")),
|
||||
]
|
||||
for module, expected in cases:
|
||||
template = "Find(" + module + ") got {}; expected {}"
|
||||
|
||||
actual = self.fmc_nons.find_module(module)
|
||||
assert_equal(actual, expected, template)
|
||||
14
venv/lib/python3.12/site-packages/mypy/test/testmypyc.py
Normal file
14
venv/lib/python3.12/site-packages/mypy/test/testmypyc.py
Normal file
@@ -0,0 +1,14 @@
|
||||
"""A basic check to make sure that we are using a mypyc-compiled version when expected."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
from unittest import TestCase
|
||||
|
||||
import mypy
|
||||
|
||||
|
||||
class MypycTest(TestCase):
|
||||
def test_using_mypyc(self) -> None:
|
||||
if os.getenv("TEST_MYPYC", None) == "1":
|
||||
assert not mypy.__file__.endswith(".py"), "Expected to find a mypyc-compiled version"
|
||||
97
venv/lib/python3.12/site-packages/mypy/test/testparse.py
Normal file
97
venv/lib/python3.12/site-packages/mypy/test/testparse.py
Normal file
@@ -0,0 +1,97 @@
|
||||
"""Tests for the mypy parser."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import sys
|
||||
|
||||
from pytest import skip
|
||||
|
||||
from mypy import defaults
|
||||
from mypy.config_parser import parse_mypy_comments
|
||||
from mypy.errors import CompileError
|
||||
from mypy.options import Options
|
||||
from mypy.parse import parse
|
||||
from mypy.test.data import DataDrivenTestCase, DataSuite
|
||||
from mypy.test.helpers import assert_string_arrays_equal, find_test_files, parse_options
|
||||
from mypy.util import get_mypy_comments
|
||||
|
||||
|
||||
class ParserSuite(DataSuite):
|
||||
required_out_section = True
|
||||
base_path = "."
|
||||
files = find_test_files(pattern="parse*.test", exclude=["parse-errors.test"])
|
||||
|
||||
if sys.version_info < (3, 10):
|
||||
files.remove("parse-python310.test")
|
||||
|
||||
def run_case(self, testcase: DataDrivenTestCase) -> None:
|
||||
test_parser(testcase)
|
||||
|
||||
|
||||
def test_parser(testcase: DataDrivenTestCase) -> None:
|
||||
"""Perform a single parser test case.
|
||||
|
||||
The argument contains the description of the test case.
|
||||
"""
|
||||
options = Options()
|
||||
options.force_uppercase_builtins = True
|
||||
options.hide_error_codes = True
|
||||
|
||||
if testcase.file.endswith("python310.test"):
|
||||
options.python_version = (3, 10)
|
||||
else:
|
||||
options.python_version = defaults.PYTHON3_VERSION
|
||||
|
||||
source = "\n".join(testcase.input)
|
||||
|
||||
# Apply mypy: comments to options.
|
||||
comments = get_mypy_comments(source)
|
||||
changes, _ = parse_mypy_comments(comments, options)
|
||||
options = options.apply_changes(changes)
|
||||
|
||||
try:
|
||||
n = parse(
|
||||
bytes(source, "ascii"), fnam="main", module="__main__", errors=None, options=options
|
||||
)
|
||||
a = n.str_with_options(options).split("\n")
|
||||
except CompileError as e:
|
||||
a = e.messages
|
||||
assert_string_arrays_equal(
|
||||
testcase.output, a, f"Invalid parser output ({testcase.file}, line {testcase.line})"
|
||||
)
|
||||
|
||||
|
||||
# The file name shown in test case output. This is displayed in error
|
||||
# messages, and must match the file name in the test case descriptions.
|
||||
INPUT_FILE_NAME = "file"
|
||||
|
||||
|
||||
class ParseErrorSuite(DataSuite):
|
||||
required_out_section = True
|
||||
base_path = "."
|
||||
files = ["parse-errors.test"]
|
||||
|
||||
def run_case(self, testcase: DataDrivenTestCase) -> None:
|
||||
test_parse_error(testcase)
|
||||
|
||||
|
||||
def test_parse_error(testcase: DataDrivenTestCase) -> None:
|
||||
try:
|
||||
options = parse_options("\n".join(testcase.input), testcase, 0)
|
||||
if options.python_version != sys.version_info[:2]:
|
||||
skip()
|
||||
# Compile temporary file. The test file contains non-ASCII characters.
|
||||
parse(
|
||||
bytes("\n".join(testcase.input), "utf-8"), INPUT_FILE_NAME, "__main__", None, options
|
||||
)
|
||||
raise AssertionError("No errors reported")
|
||||
except CompileError as e:
|
||||
if e.module_with_blocker is not None:
|
||||
assert e.module_with_blocker == "__main__"
|
||||
# Verify that there was a compile error and that the error messages
|
||||
# are equivalent.
|
||||
assert_string_arrays_equal(
|
||||
testcase.output,
|
||||
e.messages,
|
||||
f"Invalid compiler output ({testcase.file}, line {testcase.line})",
|
||||
)
|
||||
211
venv/lib/python3.12/site-packages/mypy/test/testpep561.py
Normal file
211
venv/lib/python3.12/site-packages/mypy/test/testpep561.py
Normal file
@@ -0,0 +1,211 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
import re
|
||||
import subprocess
|
||||
import sys
|
||||
import tempfile
|
||||
from contextlib import contextmanager
|
||||
from typing import Iterator
|
||||
|
||||
import filelock
|
||||
|
||||
import mypy.api
|
||||
from mypy.test.config import package_path, pip_lock, pip_timeout, test_temp_dir
|
||||
from mypy.test.data import DataDrivenTestCase, DataSuite
|
||||
from mypy.test.helpers import assert_string_arrays_equal, perform_file_operations
|
||||
|
||||
# NOTE: options.use_builtins_fixtures should not be set in these
|
||||
# tests, otherwise mypy will ignore installed third-party packages.
|
||||
|
||||
|
||||
class PEP561Suite(DataSuite):
|
||||
files = ["pep561.test"]
|
||||
base_path = "."
|
||||
|
||||
def run_case(self, test_case: DataDrivenTestCase) -> None:
|
||||
test_pep561(test_case)
|
||||
|
||||
|
||||
@contextmanager
|
||||
def virtualenv(python_executable: str = sys.executable) -> Iterator[tuple[str, str]]:
|
||||
"""Context manager that creates a virtualenv in a temporary directory
|
||||
|
||||
Returns the path to the created Python executable
|
||||
"""
|
||||
with tempfile.TemporaryDirectory() as venv_dir:
|
||||
proc = subprocess.run(
|
||||
[python_executable, "-m", "venv", venv_dir], cwd=os.getcwd(), capture_output=True
|
||||
)
|
||||
if proc.returncode != 0:
|
||||
err = proc.stdout.decode("utf-8") + proc.stderr.decode("utf-8")
|
||||
raise Exception("Failed to create venv.\n" + err)
|
||||
if sys.platform == "win32":
|
||||
yield venv_dir, os.path.abspath(os.path.join(venv_dir, "Scripts", "python"))
|
||||
else:
|
||||
yield venv_dir, os.path.abspath(os.path.join(venv_dir, "bin", "python"))
|
||||
|
||||
|
||||
def upgrade_pip(python_executable: str) -> None:
|
||||
"""Install pip>=21.3.1. Required for editable installs with PEP 660."""
|
||||
if (
|
||||
sys.version_info >= (3, 11)
|
||||
or (3, 10, 3) <= sys.version_info < (3, 11)
|
||||
or (3, 9, 11) <= sys.version_info < (3, 10)
|
||||
or (3, 8, 13) <= sys.version_info < (3, 9)
|
||||
):
|
||||
# Skip for more recent Python releases which come with pip>=21.3.1
|
||||
# out of the box - for performance reasons.
|
||||
return
|
||||
|
||||
install_cmd = [python_executable, "-m", "pip", "install", "pip>=21.3.1"]
|
||||
try:
|
||||
with filelock.FileLock(pip_lock, timeout=pip_timeout):
|
||||
proc = subprocess.run(install_cmd, capture_output=True, env=os.environ)
|
||||
except filelock.Timeout as err:
|
||||
raise Exception(f"Failed to acquire {pip_lock}") from err
|
||||
if proc.returncode != 0:
|
||||
raise Exception(proc.stdout.decode("utf-8") + proc.stderr.decode("utf-8"))
|
||||
|
||||
|
||||
def install_package(
|
||||
pkg: str, python_executable: str = sys.executable, editable: bool = False
|
||||
) -> None:
|
||||
"""Install a package from test-data/packages/pkg/"""
|
||||
working_dir = os.path.join(package_path, pkg)
|
||||
with tempfile.TemporaryDirectory() as dir:
|
||||
install_cmd = [python_executable, "-m", "pip", "install"]
|
||||
if editable:
|
||||
install_cmd.append("-e")
|
||||
install_cmd.append(".")
|
||||
|
||||
# Note that newer versions of pip (21.3+) don't
|
||||
# follow this env variable, but this is for compatibility
|
||||
env = {"PIP_BUILD": dir}
|
||||
# Inherit environment for Windows
|
||||
env.update(os.environ)
|
||||
try:
|
||||
with filelock.FileLock(pip_lock, timeout=pip_timeout):
|
||||
proc = subprocess.run(install_cmd, cwd=working_dir, capture_output=True, env=env)
|
||||
except filelock.Timeout as err:
|
||||
raise Exception(f"Failed to acquire {pip_lock}") from err
|
||||
if proc.returncode != 0:
|
||||
raise Exception(proc.stdout.decode("utf-8") + proc.stderr.decode("utf-8"))
|
||||
|
||||
|
||||
def test_pep561(testcase: DataDrivenTestCase) -> None:
|
||||
"""Test running mypy on files that depend on PEP 561 packages."""
|
||||
assert testcase.old_cwd is not None, "test was not properly set up"
|
||||
python = sys.executable
|
||||
|
||||
assert python is not None, "Should be impossible"
|
||||
pkgs, pip_args = parse_pkgs(testcase.input[0])
|
||||
mypy_args = parse_mypy_args(testcase.input[1])
|
||||
editable = False
|
||||
for arg in pip_args:
|
||||
if arg == "editable":
|
||||
editable = True
|
||||
else:
|
||||
raise ValueError(f"Unknown pip argument: {arg}")
|
||||
assert pkgs, "No packages to install for PEP 561 test?"
|
||||
with virtualenv(python) as venv:
|
||||
venv_dir, python_executable = venv
|
||||
if editable:
|
||||
# Editable installs with PEP 660 require pip>=21.3
|
||||
upgrade_pip(python_executable)
|
||||
for pkg in pkgs:
|
||||
install_package(pkg, python_executable, editable)
|
||||
|
||||
cmd_line = list(mypy_args)
|
||||
has_program = not ("-p" in cmd_line or "--package" in cmd_line)
|
||||
if has_program:
|
||||
program = testcase.name + ".py"
|
||||
with open(program, "w", encoding="utf-8") as f:
|
||||
for s in testcase.input:
|
||||
f.write(f"{s}\n")
|
||||
cmd_line.append(program)
|
||||
|
||||
cmd_line.extend(["--no-error-summary", "--hide-error-codes"])
|
||||
if python_executable != sys.executable:
|
||||
cmd_line.append(f"--python-executable={python_executable}")
|
||||
|
||||
steps = testcase.find_steps()
|
||||
if steps != [[]]:
|
||||
steps = [[]] + steps # type: ignore[assignment]
|
||||
|
||||
for i, operations in enumerate(steps):
|
||||
perform_file_operations(operations)
|
||||
|
||||
output = []
|
||||
# Type check the module
|
||||
out, err, returncode = mypy.api.run(cmd_line)
|
||||
|
||||
# split lines, remove newlines, and remove directory of test case
|
||||
for line in (out + err).splitlines():
|
||||
if line.startswith(test_temp_dir + os.sep):
|
||||
output.append(line[len(test_temp_dir + os.sep) :].rstrip("\r\n"))
|
||||
else:
|
||||
# Normalize paths so that the output is the same on Windows and Linux/macOS.
|
||||
line = line.replace(test_temp_dir + os.sep, test_temp_dir + "/")
|
||||
output.append(line.rstrip("\r\n"))
|
||||
iter_count = "" if i == 0 else f" on iteration {i + 1}"
|
||||
expected = testcase.output if i == 0 else testcase.output2.get(i + 1, [])
|
||||
|
||||
assert_string_arrays_equal(
|
||||
expected,
|
||||
output,
|
||||
f"Invalid output ({testcase.file}, line {testcase.line}){iter_count}",
|
||||
)
|
||||
|
||||
if has_program:
|
||||
os.remove(program)
|
||||
|
||||
|
||||
def parse_pkgs(comment: str) -> tuple[list[str], list[str]]:
|
||||
if not comment.startswith("# pkgs:"):
|
||||
return ([], [])
|
||||
else:
|
||||
pkgs_str, *args = comment[7:].split(";")
|
||||
return ([pkg.strip() for pkg in pkgs_str.split(",")], [arg.strip() for arg in args])
|
||||
|
||||
|
||||
def parse_mypy_args(line: str) -> list[str]:
|
||||
m = re.match("# flags: (.*)$", line)
|
||||
if not m:
|
||||
return [] # No args; mypy will spit out an error.
|
||||
return m.group(1).split()
|
||||
|
||||
|
||||
def test_mypy_path_is_respected() -> None:
    """Check that a ``mypy_path`` entry in mypy.ini is honored for package lookup.

    NOTE(review): the leading ``assert False`` makes everything below
    unreachable, i.e. this test is deliberately disabled; confirm intent
    before re-enabling (consider ``pytest.skip`` with a reason instead).
    """
    assert False
    packages = "packages"
    pkg_name = "a"
    with tempfile.TemporaryDirectory() as temp_dir:
        old_dir = os.getcwd()
        os.chdir(temp_dir)
        try:
            # Create the pkg for files to go into
            full_pkg_name = os.path.join(temp_dir, packages, pkg_name)
            os.makedirs(full_pkg_name)

            # Create the empty __init__ file to declare a package
            pkg_init_name = os.path.join(temp_dir, packages, pkg_name, "__init__.py")
            open(pkg_init_name, "w", encoding="utf8").close()

            # Point mypy_path at the directory that contains the package.
            mypy_config_path = os.path.join(temp_dir, "mypy.ini")
            with open(mypy_config_path, "w") as mypy_file:
                mypy_file.write("[mypy]\n")
                mypy_file.write(f"mypy_path = ./{packages}\n")

            with virtualenv() as venv:
                venv_dir, python_executable = venv

                cmd_line_args = []
                if python_executable != sys.executable:
                    cmd_line_args.append(f"--python-executable={python_executable}")
                cmd_line_args.extend(["--config-file", mypy_config_path, "--package", pkg_name])

                # The run should succeed only if mypy_path resolved the package.
                out, err, returncode = mypy.api.run(cmd_line_args)
                assert returncode == 0
        finally:
            # Always restore the working directory for subsequent tests.
            os.chdir(old_dir)
|
||||
116
venv/lib/python3.12/site-packages/mypy/test/testpythoneval.py
Normal file
116
venv/lib/python3.12/site-packages/mypy/test/testpythoneval.py
Normal file
@@ -0,0 +1,116 @@
|
||||
"""Test cases for running mypy programs using a Python interpreter.
|
||||
|
||||
Each test case type checks a program then runs it using Python. The
|
||||
output (stdout) of the program is compared to expected output. Type checking
|
||||
uses full builtins and other stubs.
|
||||
|
||||
Note: Currently Python interpreter paths are hard coded.
|
||||
|
||||
Note: These test cases are *not* included in the main test suite, as including
|
||||
this suite would slow down the main suite too much.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
import os.path
|
||||
import re
|
||||
import subprocess
|
||||
import sys
|
||||
from tempfile import TemporaryDirectory
|
||||
|
||||
from mypy import api
|
||||
from mypy.defaults import PYTHON3_VERSION
|
||||
from mypy.test.config import test_temp_dir
|
||||
from mypy.test.data import DataDrivenTestCase, DataSuite
|
||||
from mypy.test.helpers import assert_string_arrays_equal, split_lines
|
||||
|
||||
# Path to Python 3 interpreter
|
||||
python3_path = sys.executable
|
||||
program_re = re.compile(r"\b_program.py\b")
|
||||
|
||||
|
||||
class PythonEvaluationSuite(DataSuite):
    """Data-driven suite that type checks and then executes small programs."""

    files = ["pythoneval.test", "pythoneval-asyncio.test"]
    # One cache directory shared by every case in the suite, so incremental
    # cache state persists across cases (cleaned up when the object is
    # finalized at interpreter exit).
    cache_dir = TemporaryDirectory()

    def run_case(self, testcase: DataDrivenTestCase) -> None:
        test_python_evaluation(testcase, os.path.join(self.cache_dir.name, ".mypy_cache"))
|
||||
|
||||
|
||||
def test_python_evaluation(testcase: DataDrivenTestCase, cache_dir: str) -> None:
    """Runs Mypy in a subprocess.

    If this passes without errors, executes the script again with a given Python
    version.
    """
    assert testcase.old_cwd is not None, "test was not properly set up"
    # We must enable site packages to get access to installed stubs.
    mypy_cmdline = [
        "--show-traceback",
        "--no-silence-site-packages",
        "--no-error-summary",
        "--hide-error-codes",
        "--allow-empty-bodies",
        "--force-uppercase-builtins",
    ]
    interpreter = python3_path
    mypy_cmdline.append(f"--python-version={'.'.join(map(str, PYTHON3_VERSION))}")

    # Honor a "# flags:" comment embedded in the test case input.
    m = re.search("# flags: (.*)$", "\n".join(testcase.input), re.MULTILINE)
    if m:
        additional_flags = m.group(1).split()
        for flag in additional_flags:
            if flag.startswith("--python-version="):
                targetted_python_version = flag.split("=")[1]
                targetted_major, targetted_minor = targetted_python_version.split(".")
                # Skip cases targeting a Python newer than the running
                # interpreter, since the second phase executes the program.
                if (int(targetted_major), int(targetted_minor)) > (
                    sys.version_info.major,
                    sys.version_info.minor,
                ):
                    return
        mypy_cmdline.extend(additional_flags)

    # Write the program to a file.
    program = "_" + testcase.name + ".py"
    program_path = os.path.join(test_temp_dir, program)
    mypy_cmdline.append(program_path)
    with open(program_path, "w", encoding="utf8") as file:
        for s in testcase.input:
            file.write(f"{s}\n")
    mypy_cmdline.append(f"--cache-dir={cache_dir}")
    output = []
    # Type check the program.
    out, err, returncode = api.run(mypy_cmdline)
    # split lines, remove newlines, and remove directory of test case
    for line in (out + err).splitlines():
        if line.startswith(test_temp_dir + os.sep):
            output.append(line[len(test_temp_dir + os.sep) :].rstrip("\r\n"))
        else:
            # Normalize paths so that the output is the same on Windows and Linux/macOS.
            line = line.replace(test_temp_dir + os.sep, test_temp_dir + "/")
            output.append(line.rstrip("\r\n"))
    if returncode > 1 and not testcase.output:
        # Either api.run() doesn't work well in case of a crash, or pytest interferes with it.
        # Tweak output to prevent tests with empty expected output to pass in case of a crash.
        output.append("!!! Mypy crashed !!!")
    if returncode == 0 and not output:
        # Execute the program.
        proc = subprocess.run(
            [interpreter, "-Wignore", program], cwd=test_temp_dir, capture_output=True
        )
        output.extend(split_lines(proc.stdout, proc.stderr))
    # Remove temp file.
    os.remove(program_path)
    # Strip machine-specific typeshed paths down to the bare file name.
    for i, line in enumerate(output):
        if os.path.sep + "typeshed" + os.path.sep in line:
            output[i] = line.split(os.path.sep)[-1]
    assert_string_arrays_equal(
        adapt_output(testcase), output, f"Invalid output ({testcase.file}, line {testcase.line})"
    )
|
||||
|
||||
|
||||
def adapt_output(testcase: DataDrivenTestCase) -> list[str]:
    """Translates the generic _program.py into the actual filename."""
    actual_name = "_" + testcase.name + ".py"
    renamed = []
    for expected_line in testcase.output:
        renamed.append(program_re.sub(actual_name, expected_line))
    return renamed
|
||||
54
venv/lib/python3.12/site-packages/mypy/test/testreports.py
Normal file
54
venv/lib/python3.12/site-packages/mypy/test/testreports.py
Normal file
@@ -0,0 +1,54 @@
|
||||
"""Test cases for reports generated by mypy."""
|
||||
from __future__ import annotations
|
||||
|
||||
import textwrap
|
||||
|
||||
from mypy.report import CoberturaPackage, get_line_rate
|
||||
from mypy.test.helpers import Suite, assert_equal
|
||||
|
||||
try:
|
||||
import lxml # type: ignore[import-untyped]
|
||||
except ImportError:
|
||||
lxml = None
|
||||
|
||||
import pytest
|
||||
|
||||
|
||||
class CoberturaReportSuite(Suite):
    """Unit tests for the Cobertura XML coverage report generator."""

    @pytest.mark.skipif(lxml is None, reason="Cannot import lxml. Is it installed?")
    def test_get_line_rate(self) -> None:
        # 0/0 is defined as full coverage; otherwise a 4-decimal ratio.
        assert_equal("1.0", get_line_rate(0, 0))
        assert_equal("0.3333", get_line_rate(1, 3))

    @pytest.mark.skipif(lxml is None, reason="Cannot import lxml. Is it installed?")
    def test_as_xml(self) -> None:
        import lxml.etree as etree  # type: ignore[import-untyped]

        cobertura_package = CoberturaPackage("foobar")
        cobertura_package.covered_lines = 21
        cobertura_package.total_lines = 42

        child_package = CoberturaPackage("raz")
        child_package.covered_lines = 10
        child_package.total_lines = 10
        child_package.classes["class"] = etree.Element("class")

        cobertura_package.packages["raz"] = child_package

        # NOTE(review): inner indentation reconstructed to match lxml's
        # two-space pretty_print output — verify against an actual run.
        expected_output = textwrap.dedent(
            """\
            <package complexity="1.0" name="foobar" branch-rate="0" line-rate="0.5000">
              <classes/>
              <packages>
                <package complexity="1.0" name="raz" branch-rate="0" line-rate="1.0000">
                  <classes>
                    <class/>
                  </classes>
                </package>
              </packages>
            </package>
            """
        ).encode("ascii")
        assert_equal(
            expected_output, etree.tostring(cobertura_package.as_xml(), pretty_print=True)
        )
|
||||
210
venv/lib/python3.12/site-packages/mypy/test/testsemanal.py
Normal file
210
venv/lib/python3.12/site-packages/mypy/test/testsemanal.py
Normal file
@@ -0,0 +1,210 @@
|
||||
"""Semantic analyzer test cases"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import sys
|
||||
from typing import Dict
|
||||
|
||||
from mypy import build
|
||||
from mypy.defaults import PYTHON3_VERSION
|
||||
from mypy.errors import CompileError
|
||||
from mypy.modulefinder import BuildSource
|
||||
from mypy.nodes import TypeInfo
|
||||
from mypy.options import TYPE_VAR_TUPLE, UNPACK, Options
|
||||
from mypy.test.config import test_temp_dir
|
||||
from mypy.test.data import DataDrivenTestCase, DataSuite
|
||||
from mypy.test.helpers import (
|
||||
assert_string_arrays_equal,
|
||||
find_test_files,
|
||||
normalize_error_messages,
|
||||
parse_options,
|
||||
testfile_pyversion,
|
||||
)
|
||||
|
||||
# Semantic analyzer test cases: dump parse tree
|
||||
|
||||
# Semantic analysis test case description files.
|
||||
semanal_files = find_test_files(
    pattern="semanal-*.test",
    exclude=[
        # These files are driven by their own dedicated suites defined
        # below, so keep them out of the dump-tree suite's file list.
        "semanal-errors-python310.test",
        "semanal-errors.test",
        "semanal-typeinfo.test",
        "semanal-symtable.test",
    ],
)


if sys.version_info < (3, 10):
    # This data file uses syntax the running interpreter must support
    # (presumably 3.10-only constructs); drop it on older Pythons.
    semanal_files.remove("semanal-python310.test")
|
||||
|
||||
|
||||
def get_semanal_options(program_text: str, testcase: DataDrivenTestCase) -> Options:
    """Build the Options object shared by all semantic-analysis suites.

    Parses per-case flags from the program text, then forces the settings
    every semanal suite relies on (builtins fixtures, analysis-only runs,
    tracebacks, a fixed Python version, and incomplete-feature opt-ins).
    """
    options = parse_options(program_text, testcase, 1)
    options.enable_incomplete_feature = [TYPE_VAR_TUPLE, UNPACK]
    options.force_uppercase_builtins = True
    options.python_version = PYTHON3_VERSION
    options.semantic_analysis_only = True
    options.show_traceback = True
    options.use_builtins_fixtures = True
    return options
|
||||
|
||||
|
||||
class SemAnalSuite(DataSuite):
    """Suite that dumps the analyzed parse tree and compares it to expected output."""

    files = semanal_files
    # Keep native path separators in the output.
    native_sep = True

    def run_case(self, testcase: DataDrivenTestCase) -> None:
        test_semanal(testcase)
|
||||
|
||||
|
||||
def test_semanal(testcase: DataDrivenTestCase) -> None:
    """Perform a semantic analysis test case.

    The testcase argument contains a description of the test case
    (inputs and output).
    """

    try:
        src = "\n".join(testcase.input)
        options = get_semanal_options(src, testcase)
        # Per-file version override (e.g. python310 test data files).
        options.python_version = testfile_pyversion(testcase.file)
        result = build.build(
            sources=[BuildSource("main", None, src)], options=options, alt_lib_path=test_temp_dir
        )
        a = result.errors
        if a:
            raise CompileError(a)
        # Include string representations of the source files in the actual
        # output.
        for module in sorted(result.files.keys()):
            if module in testcase.test_modules:
                a += result.files[module].str_with_options(options).split("\n")
    except CompileError as e:
        # Compile errors become the actual output to compare.
        a = e.messages
    if testcase.normalize_output:
        a = normalize_error_messages(a)
    assert_string_arrays_equal(
        testcase.output,
        a,
        f"Invalid semantic analyzer output ({testcase.file}, line {testcase.line})",
    )
|
||||
|
||||
|
||||
# Semantic analyzer error test cases
|
||||
|
||||
|
||||
class SemAnalErrorSuite(DataSuite):
    """Suite checking that invalid programs produce the expected semanal errors."""

    files = ["semanal-errors.test"]
    if sys.version_info >= (3, 10):
        # Bug fix: append to this suite's own ``files``, not the module-level
        # ``semanal_files`` (which feeds the dump-tree SemAnalSuite). The
        # 3.10-only error cases were explicitly excluded from
        # ``semanal_files`` because they are expected-error tests and belong
        # to this suite.
        files.append("semanal-errors-python310.test")

    def run_case(self, testcase: DataDrivenTestCase) -> None:
        test_semanal_error(testcase)
|
||||
|
||||
|
||||
def test_semanal_error(testcase: DataDrivenTestCase) -> None:
    """Perform a test case."""

    try:
        src = "\n".join(testcase.input)
        res = build.build(
            sources=[BuildSource("main", None, src)],
            options=get_semanal_options(src, testcase),
            alt_lib_path=test_temp_dir,
        )
        # No exception: the reported errors are the output to compare.
        a = res.errors
    except CompileError as e:
        # Verify that there was a compile error and that the error messages
        # are equivalent.
        a = e.messages
    if testcase.normalize_output:
        a = normalize_error_messages(a)
    assert_string_arrays_equal(
        testcase.output, a, f"Invalid compiler output ({testcase.file}, line {testcase.line})"
    )
|
||||
|
||||
|
||||
# SymbolNode table export test cases
|
||||
|
||||
|
||||
class SemAnalSymtableSuite(DataSuite):
    """Suite that dumps the post-analysis symbol table of each test module."""

    required_out_section = True
    files = ["semanal-symtable.test"]

    def run_case(self, testcase: DataDrivenTestCase) -> None:
        """Perform a test case."""
        try:
            # Build test case input.
            src = "\n".join(testcase.input)
            result = build.build(
                sources=[BuildSource("main", None, src)],
                options=get_semanal_options(src, testcase),
                alt_lib_path=test_temp_dir,
            )
            # The output is the symbol table converted into a string.
            a = result.errors
            if a:
                raise CompileError(a)
            for module in sorted(result.files.keys()):
                if module in testcase.test_modules:
                    a.append(f"{module}:")
                    # Indent the symbol table under the module header.
                    for s in str(result.files[module].names).split("\n"):
                        a.append(" " + s)
        except CompileError as e:
            a = e.messages
        assert_string_arrays_equal(
            testcase.output,
            a,
            f"Invalid semantic analyzer output ({testcase.file}, line {testcase.line})",
        )
|
||||
|
||||
|
||||
# Type info export test cases
|
||||
# Type info export test cases
class SemAnalTypeInfoSuite(DataSuite):
    """Suite that dumps the TypeInfos produced by semantic analysis."""

    required_out_section = True
    files = ["semanal-typeinfo.test"]

    def run_case(self, testcase: DataDrivenTestCase) -> None:
        """Perform a test case."""
        try:
            # Build test case input.
            src = "\n".join(testcase.input)
            result = build.build(
                sources=[BuildSource("main", None, src)],
                options=get_semanal_options(src, testcase),
                alt_lib_path=test_temp_dir,
            )
            a = result.errors
            if a:
                raise CompileError(a)

            # Collect all TypeInfos in top-level modules.
            typeinfos = TypeInfoMap()
            for module, file in result.files.items():
                if module in testcase.test_modules:
                    for n in file.names.values():
                        if isinstance(n.node, TypeInfo):
                            assert n.fullname
                            # Only keep classes defined inside the test modules.
                            if any(n.fullname.startswith(m + ".") for m in testcase.test_modules):
                                typeinfos[n.fullname] = n.node

            # The output is the symbol table converted into a string.
            a = str(typeinfos).split("\n")
        except CompileError as e:
            a = e.messages
        assert_string_arrays_equal(
            testcase.output,
            a,
            f"Invalid semantic analyzer output ({testcase.file}, line {testcase.line})",
        )
|
||||
|
||||
|
||||
class TypeInfoMap(Dict[str, TypeInfo]):
    """Mapping from fullname to TypeInfo with a readable multi-line repr."""

    def __str__(self) -> str:
        lines: list[str] = ["TypeInfoMap("]
        # Emit entries in sorted key order, indenting continuation lines of
        # each TypeInfo's string form.
        for name in sorted(self):
            indented = ("\n" + " ").join(str(self[name]).split("\n"))
            lines.append(f" {name} : {indented}")
        lines[-1] += ")"
        return "\n".join(lines)
|
||||
285
venv/lib/python3.12/site-packages/mypy/test/testsolve.py
Normal file
285
venv/lib/python3.12/site-packages/mypy/test/testsolve.py
Normal file
@@ -0,0 +1,285 @@
|
||||
"""Test cases for the constraint solver used in type inference."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from mypy.constraints import SUBTYPE_OF, SUPERTYPE_OF, Constraint
|
||||
from mypy.solve import Bounds, Graph, solve_constraints, transitive_closure
|
||||
from mypy.test.helpers import Suite, assert_equal
|
||||
from mypy.test.typefixture import TypeFixture
|
||||
from mypy.types import Type, TypeVarId, TypeVarLikeType, TypeVarType
|
||||
|
||||
|
||||
class SolveSuite(Suite):
    """Unit tests for the type-inference constraint solver.

    Fixture vocabulary (from TypeFixture): t/s/u are type variables,
    a/b/c are instance types with b a subtype of a, anyt is Any,
    uninhabited is the bottom type, ub/uc are bounded type variables.
    """

    def setUp(self) -> None:
        self.fx = TypeFixture()

    def test_empty_input(self) -> None:
        self.assert_solve([], [], [])

    def test_simple_supertype_constraints(self) -> None:
        self.assert_solve([self.fx.t], [self.supc(self.fx.t, self.fx.a)], [self.fx.a])
        self.assert_solve(
            [self.fx.t],
            [self.supc(self.fx.t, self.fx.a), self.supc(self.fx.t, self.fx.b)],
            [self.fx.a],
        )

    def test_simple_subtype_constraints(self) -> None:
        self.assert_solve([self.fx.t], [self.subc(self.fx.t, self.fx.a)], [self.fx.a])
        self.assert_solve(
            [self.fx.t],
            [self.subc(self.fx.t, self.fx.a), self.subc(self.fx.t, self.fx.b)],
            [self.fx.b],
        )

    def test_both_kinds_of_constraints(self) -> None:
        self.assert_solve(
            [self.fx.t],
            [self.supc(self.fx.t, self.fx.b), self.subc(self.fx.t, self.fx.a)],
            [self.fx.b],
        )

    def test_unsatisfiable_constraints(self) -> None:
        # The constraints are impossible to satisfy.
        self.assert_solve(
            [self.fx.t], [self.supc(self.fx.t, self.fx.a), self.subc(self.fx.t, self.fx.b)], [None]
        )

    def test_exactly_specified_result(self) -> None:
        self.assert_solve(
            [self.fx.t],
            [self.supc(self.fx.t, self.fx.b), self.subc(self.fx.t, self.fx.b)],
            [self.fx.b],
        )

    def test_multiple_variables(self) -> None:
        self.assert_solve(
            [self.fx.t, self.fx.s],
            [
                self.supc(self.fx.t, self.fx.b),
                self.supc(self.fx.s, self.fx.c),
                self.subc(self.fx.t, self.fx.a),
            ],
            [self.fx.b, self.fx.c],
        )

    def test_no_constraints_for_var(self) -> None:
        # An unconstrained variable solves to the bottom type.
        self.assert_solve([self.fx.t], [], [self.fx.uninhabited])
        self.assert_solve([self.fx.t, self.fx.s], [], [self.fx.uninhabited, self.fx.uninhabited])
        self.assert_solve(
            [self.fx.t, self.fx.s],
            [self.supc(self.fx.s, self.fx.a)],
            [self.fx.uninhabited, self.fx.a],
        )

    def test_simple_constraints_with_dynamic_type(self) -> None:
        self.assert_solve([self.fx.t], [self.supc(self.fx.t, self.fx.anyt)], [self.fx.anyt])
        self.assert_solve(
            [self.fx.t],
            [self.supc(self.fx.t, self.fx.anyt), self.supc(self.fx.t, self.fx.anyt)],
            [self.fx.anyt],
        )
        self.assert_solve(
            [self.fx.t],
            [self.supc(self.fx.t, self.fx.anyt), self.supc(self.fx.t, self.fx.a)],
            [self.fx.anyt],
        )

        self.assert_solve([self.fx.t], [self.subc(self.fx.t, self.fx.anyt)], [self.fx.anyt])
        self.assert_solve(
            [self.fx.t],
            [self.subc(self.fx.t, self.fx.anyt), self.subc(self.fx.t, self.fx.anyt)],
            [self.fx.anyt],
        )
        # self.assert_solve([self.fx.t],
        #                   [self.subc(self.fx.t, self.fx.anyt),
        #                    self.subc(self.fx.t, self.fx.a)],
        #                   [self.fx.anyt])
        # TODO: figure out what this should be after changes to meet(any, X)

    def test_both_normal_and_any_types_in_results(self) -> None:
        # If one of the bounds is any, we promote the other bound to
        # any as well, since otherwise the type range does not make sense.
        self.assert_solve(
            [self.fx.t],
            [self.supc(self.fx.t, self.fx.a), self.subc(self.fx.t, self.fx.anyt)],
            [self.fx.anyt],
        )

        self.assert_solve(
            [self.fx.t],
            [self.supc(self.fx.t, self.fx.anyt), self.subc(self.fx.t, self.fx.a)],
            [self.fx.anyt],
        )

    def test_poly_no_constraints(self) -> None:
        self.assert_solve(
            [self.fx.t, self.fx.u],
            [],
            [self.fx.uninhabited, self.fx.uninhabited],
            allow_polymorphic=True,
        )

    def test_poly_trivial_free(self) -> None:
        self.assert_solve(
            [self.fx.t, self.fx.u],
            [self.subc(self.fx.t, self.fx.a)],
            [self.fx.a, self.fx.u],
            [self.fx.u],
            allow_polymorphic=True,
        )

    def test_poly_free_pair(self) -> None:
        self.assert_solve(
            [self.fx.t, self.fx.u],
            [self.subc(self.fx.t, self.fx.u)],
            [self.fx.t, self.fx.t],
            [self.fx.t],
            allow_polymorphic=True,
        )

    def test_poly_free_pair_with_bounds(self) -> None:
        t_prime = self.fx.t.copy_modified(upper_bound=self.fx.b)
        self.assert_solve(
            [self.fx.t, self.fx.ub],
            [self.subc(self.fx.t, self.fx.ub)],
            [t_prime, t_prime],
            [t_prime],
            allow_polymorphic=True,
        )

    def test_poly_free_pair_with_bounds_uninhabited(self) -> None:
        self.assert_solve(
            [self.fx.ub, self.fx.uc],
            [self.subc(self.fx.ub, self.fx.uc)],
            [self.fx.uninhabited, self.fx.uninhabited],
            [],
            allow_polymorphic=True,
        )

    def test_poly_bounded_chain(self) -> None:
        # B <: T <: U <: S <: A
        self.assert_solve(
            [self.fx.t, self.fx.u, self.fx.s],
            [
                self.supc(self.fx.t, self.fx.b),
                self.subc(self.fx.t, self.fx.u),
                self.subc(self.fx.u, self.fx.s),
                self.subc(self.fx.s, self.fx.a),
            ],
            [self.fx.b, self.fx.b, self.fx.b],
            allow_polymorphic=True,
        )

    def test_poly_reverse_overlapping_chain(self) -> None:
        # A :> T <: S :> B
        self.assert_solve(
            [self.fx.t, self.fx.s],
            [
                self.subc(self.fx.t, self.fx.s),
                self.subc(self.fx.t, self.fx.a),
                self.supc(self.fx.s, self.fx.b),
            ],
            [self.fx.a, self.fx.a],
            allow_polymorphic=True,
        )

    def test_poly_reverse_split_chain(self) -> None:
        # B :> T <: S :> A
        self.assert_solve(
            [self.fx.t, self.fx.s],
            [
                self.subc(self.fx.t, self.fx.s),
                self.subc(self.fx.t, self.fx.b),
                self.supc(self.fx.s, self.fx.a),
            ],
            [self.fx.b, self.fx.a],
            allow_polymorphic=True,
        )

    def test_poly_unsolvable_chain(self) -> None:
        # A <: T <: U <: S <: B
        self.assert_solve(
            [self.fx.t, self.fx.u, self.fx.s],
            [
                self.supc(self.fx.t, self.fx.a),
                self.subc(self.fx.t, self.fx.u),
                self.subc(self.fx.u, self.fx.s),
                self.subc(self.fx.s, self.fx.b),
            ],
            [None, None, None],
            allow_polymorphic=True,
        )

    def test_simple_chain_closure(self) -> None:
        self.assert_transitive_closure(
            [self.fx.t.id, self.fx.s.id],
            [
                self.supc(self.fx.t, self.fx.b),
                self.subc(self.fx.t, self.fx.s),
                self.subc(self.fx.s, self.fx.a),
            ],
            {(self.fx.t.id, self.fx.s.id)},
            {self.fx.t.id: {self.fx.b}, self.fx.s.id: {self.fx.b}},
            {self.fx.t.id: {self.fx.a}, self.fx.s.id: {self.fx.a}},
        )

    def test_reverse_chain_closure(self) -> None:
        self.assert_transitive_closure(
            [self.fx.t.id, self.fx.s.id],
            [
                self.subc(self.fx.t, self.fx.s),
                self.subc(self.fx.t, self.fx.a),
                self.supc(self.fx.s, self.fx.b),
            ],
            {(self.fx.t.id, self.fx.s.id)},
            {self.fx.t.id: set(), self.fx.s.id: {self.fx.b}},
            {self.fx.t.id: {self.fx.a}, self.fx.s.id: set()},
        )

    def test_secondary_constraint_closure(self) -> None:
        self.assert_transitive_closure(
            [self.fx.t.id, self.fx.s.id],
            [self.supc(self.fx.s, self.fx.gt), self.subc(self.fx.s, self.fx.ga)],
            set(),
            {self.fx.t.id: set(), self.fx.s.id: {self.fx.gt}},
            {self.fx.t.id: {self.fx.a}, self.fx.s.id: {self.fx.ga}},
        )

    def assert_solve(
        self,
        vars: list[TypeVarLikeType],
        constraints: list[Constraint],
        results: list[None | Type],
        free_vars: list[TypeVarLikeType] | None = None,
        allow_polymorphic: bool = False,
    ) -> None:
        """Solve the constraints and compare solutions and free variables."""
        if free_vars is None:
            free_vars = []
        actual, actual_free = solve_constraints(
            vars, constraints, allow_polymorphic=allow_polymorphic
        )
        assert_equal(actual, results)
        assert_equal(actual_free, free_vars)

    def assert_transitive_closure(
        self,
        vars: list[TypeVarId],
        constraints: list[Constraint],
        graph: Graph,
        lowers: Bounds,
        uppers: Bounds,
    ) -> None:
        """Compare transitive_closure() output against expected graph/bounds."""
        actual_graph, actual_lowers, actual_uppers = transitive_closure(vars, constraints)
        # Add trivial elements.
        for v in vars:
            graph.add((v, v))
        assert_equal(actual_graph, graph)
        assert_equal(dict(actual_lowers), lowers)
        assert_equal(dict(actual_uppers), uppers)

    def supc(self, type_var: TypeVarType, bound: Type) -> Constraint:
        """Shorthand for a SUPERTYPE_OF constraint (type_var :> bound)."""
        return Constraint(type_var, SUPERTYPE_OF, bound)

    def subc(self, type_var: TypeVarType, bound: Type) -> Constraint:
        """Shorthand for a SUBTYPE_OF constraint (type_var <: bound)."""
        return Constraint(type_var, SUBTYPE_OF, bound)
|
||||
1565
venv/lib/python3.12/site-packages/mypy/test/teststubgen.py
Normal file
1565
venv/lib/python3.12/site-packages/mypy/test/teststubgen.py
Normal file
File diff suppressed because it is too large
Load Diff
12
venv/lib/python3.12/site-packages/mypy/test/teststubinfo.py
Normal file
12
venv/lib/python3.12/site-packages/mypy/test/teststubinfo.py
Normal file
@@ -0,0 +1,12 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import unittest
|
||||
|
||||
from mypy.stubinfo import is_legacy_bundled_package
|
||||
|
||||
|
||||
class TestStubInfo(unittest.TestCase):
    def test_is_legacy_bundled_packages(self) -> None:
        """Spot-check is_legacy_bundled_package against known package names."""
        assert not is_legacy_bundled_package("foobar_asdf")
        # NOTE(review): these expectations depend on the contents of
        # mypy.stubinfo's legacy-bundled table for this mypy version.
        assert is_legacy_bundled_package("pycurl")
        assert is_legacy_bundled_package("dataclasses")
|
||||
2223
venv/lib/python3.12/site-packages/mypy/test/teststubtest.py
Normal file
2223
venv/lib/python3.12/site-packages/mypy/test/teststubtest.py
Normal file
File diff suppressed because it is too large
Load Diff
382
venv/lib/python3.12/site-packages/mypy/test/testsubtypes.py
Normal file
382
venv/lib/python3.12/site-packages/mypy/test/testsubtypes.py
Normal file
@@ -0,0 +1,382 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from mypy.nodes import CONTRAVARIANT, COVARIANT, INVARIANT
|
||||
from mypy.subtypes import is_subtype
|
||||
from mypy.test.helpers import Suite
|
||||
from mypy.test.typefixture import InterfaceTypeFixture, TypeFixture
|
||||
from mypy.types import Instance, TupleType, Type, UnpackType
|
||||
|
||||
|
||||
class SubtypingSuite(Suite):
|
||||
def setUp(self) -> None:
|
||||
self.fx = TypeFixture(INVARIANT)
|
||||
self.fx_contra = TypeFixture(CONTRAVARIANT)
|
||||
self.fx_co = TypeFixture(COVARIANT)
|
||||
|
||||
def test_trivial_cases(self) -> None:
|
||||
for simple in self.fx_co.a, self.fx_co.o, self.fx_co.b:
|
||||
self.assert_subtype(simple, simple)
|
||||
|
||||
def test_instance_subtyping(self) -> None:
|
||||
self.assert_strict_subtype(self.fx.a, self.fx.o)
|
||||
self.assert_strict_subtype(self.fx.b, self.fx.o)
|
||||
self.assert_strict_subtype(self.fx.b, self.fx.a)
|
||||
|
||||
self.assert_not_subtype(self.fx.a, self.fx.d)
|
||||
self.assert_not_subtype(self.fx.b, self.fx.c)
|
||||
|
||||
def test_simple_generic_instance_subtyping_invariant(self) -> None:
|
||||
self.assert_subtype(self.fx.ga, self.fx.ga)
|
||||
self.assert_subtype(self.fx.hab, self.fx.hab)
|
||||
|
||||
self.assert_not_subtype(self.fx.ga, self.fx.g2a)
|
||||
self.assert_not_subtype(self.fx.ga, self.fx.gb)
|
||||
self.assert_not_subtype(self.fx.gb, self.fx.ga)
|
||||
|
||||
def test_simple_generic_instance_subtyping_covariant(self) -> None:
|
||||
self.assert_subtype(self.fx_co.ga, self.fx_co.ga)
|
||||
self.assert_subtype(self.fx_co.hab, self.fx_co.hab)
|
||||
|
||||
self.assert_not_subtype(self.fx_co.ga, self.fx_co.g2a)
|
||||
self.assert_not_subtype(self.fx_co.ga, self.fx_co.gb)
|
||||
self.assert_subtype(self.fx_co.gb, self.fx_co.ga)
|
||||
|
||||
def test_simple_generic_instance_subtyping_contravariant(self) -> None:
|
||||
self.assert_subtype(self.fx_contra.ga, self.fx_contra.ga)
|
||||
self.assert_subtype(self.fx_contra.hab, self.fx_contra.hab)
|
||||
|
||||
self.assert_not_subtype(self.fx_contra.ga, self.fx_contra.g2a)
|
||||
self.assert_subtype(self.fx_contra.ga, self.fx_contra.gb)
|
||||
self.assert_not_subtype(self.fx_contra.gb, self.fx_contra.ga)
|
||||
|
||||
def test_generic_subtyping_with_inheritance_invariant(self) -> None:
|
||||
self.assert_subtype(self.fx.gsab, self.fx.gb)
|
||||
self.assert_not_subtype(self.fx.gsab, self.fx.ga)
|
||||
self.assert_not_subtype(self.fx.gsaa, self.fx.gb)
|
||||
|
||||
def test_generic_subtyping_with_inheritance_covariant(self) -> None:
|
||||
self.assert_subtype(self.fx_co.gsab, self.fx_co.gb)
|
||||
self.assert_subtype(self.fx_co.gsab, self.fx_co.ga)
|
||||
self.assert_not_subtype(self.fx_co.gsaa, self.fx_co.gb)
|
||||
|
||||
def test_generic_subtyping_with_inheritance_contravariant(self) -> None:
|
||||
self.assert_subtype(self.fx_contra.gsab, self.fx_contra.gb)
|
||||
self.assert_not_subtype(self.fx_contra.gsab, self.fx_contra.ga)
|
||||
self.assert_subtype(self.fx_contra.gsaa, self.fx_contra.gb)
|
||||
|
||||
def test_interface_subtyping(self) -> None:
|
||||
self.assert_subtype(self.fx.e, self.fx.f)
|
||||
self.assert_equivalent(self.fx.f, self.fx.f)
|
||||
self.assert_not_subtype(self.fx.a, self.fx.f)
|
||||
|
||||
def test_generic_interface_subtyping(self) -> None:
|
||||
# TODO make this work
|
||||
fx2 = InterfaceTypeFixture()
|
||||
|
||||
self.assert_subtype(fx2.m1, fx2.gfa)
|
||||
self.assert_not_subtype(fx2.m1, fx2.gfb)
|
||||
|
||||
self.assert_equivalent(fx2.gfa, fx2.gfa)
|
||||
|
||||
def test_basic_callable_subtyping(self) -> None:
|
||||
self.assert_strict_subtype(
|
||||
self.fx.callable(self.fx.o, self.fx.d), self.fx.callable(self.fx.a, self.fx.d)
|
||||
)
|
||||
self.assert_strict_subtype(
|
||||
self.fx.callable(self.fx.d, self.fx.b), self.fx.callable(self.fx.d, self.fx.a)
|
||||
)
|
||||
|
||||
self.assert_strict_subtype(
|
||||
self.fx.callable(self.fx.a, self.fx.nonet), self.fx.callable(self.fx.a, self.fx.a)
|
||||
)
|
||||
|
||||
self.assert_unrelated(
|
||||
self.fx.callable(self.fx.a, self.fx.a, self.fx.a),
|
||||
self.fx.callable(self.fx.a, self.fx.a),
|
||||
)
|
||||
|
||||
def test_default_arg_callable_subtyping(self) -> None:
|
||||
self.assert_strict_subtype(
|
||||
self.fx.callable_default(1, self.fx.a, self.fx.d, self.fx.a),
|
||||
self.fx.callable(self.fx.a, self.fx.d, self.fx.a),
|
||||
)
|
||||
|
||||
self.assert_strict_subtype(
|
||||
self.fx.callable_default(1, self.fx.a, self.fx.d, self.fx.a),
|
||||
self.fx.callable(self.fx.a, self.fx.a),
|
||||
)
|
||||
|
||||
self.assert_strict_subtype(
|
||||
self.fx.callable_default(0, self.fx.a, self.fx.d, self.fx.a),
|
||||
self.fx.callable_default(1, self.fx.a, self.fx.d, self.fx.a),
|
||||
)
|
||||
|
||||
self.assert_unrelated(
|
||||
self.fx.callable_default(1, self.fx.a, self.fx.d, self.fx.a),
|
||||
self.fx.callable(self.fx.d, self.fx.d, self.fx.a),
|
||||
)
|
||||
|
||||
self.assert_unrelated(
|
||||
self.fx.callable_default(0, self.fx.a, self.fx.d, self.fx.a),
|
||||
self.fx.callable_default(1, self.fx.a, self.fx.a, self.fx.a),
|
||||
)
|
||||
|
||||
self.assert_unrelated(
|
||||
self.fx.callable_default(1, self.fx.a, self.fx.a),
|
||||
self.fx.callable(self.fx.a, self.fx.a, self.fx.a),
|
||||
)
|
||||
|
||||
def test_var_arg_callable_subtyping_1(self) -> None:
|
||||
self.assert_strict_subtype(
|
||||
self.fx.callable_var_arg(0, self.fx.a, self.fx.a),
|
||||
self.fx.callable_var_arg(0, self.fx.b, self.fx.a),
|
||||
)
|
||||
|
||||
def test_var_arg_callable_subtyping_2(self) -> None:
|
||||
self.assert_strict_subtype(
|
||||
self.fx.callable_var_arg(0, self.fx.a, self.fx.a),
|
||||
self.fx.callable(self.fx.b, self.fx.a),
|
||||
)
|
||||
|
||||
def test_var_arg_callable_subtyping_3(self) -> None:
|
||||
self.assert_strict_subtype(
|
||||
self.fx.callable_var_arg(0, self.fx.a, self.fx.a), self.fx.callable(self.fx.a)
|
||||
)
|
||||
|
||||
def test_var_arg_callable_subtyping_4(self) -> None:
|
||||
self.assert_strict_subtype(
|
||||
self.fx.callable_var_arg(1, self.fx.a, self.fx.d, self.fx.a),
|
||||
self.fx.callable(self.fx.b, self.fx.a),
|
||||
)
|
||||
|
||||
def test_var_arg_callable_subtyping_5(self) -> None:
|
||||
self.assert_strict_subtype(
|
||||
self.fx.callable_var_arg(0, self.fx.a, self.fx.d, self.fx.a),
|
||||
self.fx.callable(self.fx.b, self.fx.a),
|
||||
)
|
||||
|
||||
def test_var_arg_callable_subtyping_6(self) -> None:
|
||||
self.assert_strict_subtype(
|
||||
self.fx.callable_var_arg(0, self.fx.a, self.fx.f, self.fx.d),
|
||||
self.fx.callable_var_arg(0, self.fx.b, self.fx.e, self.fx.d),
|
||||
)
|
||||
|
||||
def test_var_arg_callable_subtyping_7(self) -> None:
|
||||
self.assert_not_subtype(
|
||||
self.fx.callable_var_arg(0, self.fx.b, self.fx.d),
|
||||
self.fx.callable(self.fx.a, self.fx.d),
|
||||
)
|
||||
|
||||
def test_var_arg_callable_subtyping_8(self) -> None:
|
||||
self.assert_not_subtype(
|
||||
self.fx.callable_var_arg(0, self.fx.b, self.fx.d),
|
||||
self.fx.callable_var_arg(0, self.fx.a, self.fx.a, self.fx.d),
|
||||
)
|
||||
self.assert_subtype(
|
||||
self.fx.callable_var_arg(0, self.fx.a, self.fx.d),
|
||||
self.fx.callable_var_arg(0, self.fx.b, self.fx.b, self.fx.d),
|
||||
)
|
||||
|
||||
def test_var_arg_callable_subtyping_9(self) -> None:
|
||||
self.assert_not_subtype(
|
||||
self.fx.callable_var_arg(0, self.fx.b, self.fx.b, self.fx.d),
|
||||
self.fx.callable_var_arg(0, self.fx.a, self.fx.d),
|
||||
)
|
||||
self.assert_subtype(
|
||||
self.fx.callable_var_arg(0, self.fx.a, self.fx.a, self.fx.d),
|
||||
self.fx.callable_var_arg(0, self.fx.b, self.fx.d),
|
||||
)
|
||||
|
||||
def test_type_callable_subtyping(self) -> None:
|
||||
self.assert_subtype(self.fx.callable_type(self.fx.d, self.fx.a), self.fx.type_type)
|
||||
|
||||
self.assert_strict_subtype(
|
||||
self.fx.callable_type(self.fx.d, self.fx.b), self.fx.callable(self.fx.d, self.fx.a)
|
||||
)
|
||||
|
||||
self.assert_strict_subtype(
|
||||
self.fx.callable_type(self.fx.a, self.fx.b), self.fx.callable(self.fx.a, self.fx.b)
|
||||
)
|
||||
|
||||
def test_type_var_tuple(self) -> None:
|
||||
self.assert_subtype(Instance(self.fx.gvi, []), Instance(self.fx.gvi, []))
|
||||
self.assert_subtype(
|
||||
Instance(self.fx.gvi, [self.fx.a, self.fx.b]),
|
||||
Instance(self.fx.gvi, [self.fx.a, self.fx.b]),
|
||||
)
|
||||
self.assert_not_subtype(
|
||||
Instance(self.fx.gvi, [self.fx.a, self.fx.b]),
|
||||
Instance(self.fx.gvi, [self.fx.b, self.fx.a]),
|
||||
)
|
||||
self.assert_not_subtype(
|
||||
Instance(self.fx.gvi, [self.fx.a, self.fx.b]), Instance(self.fx.gvi, [self.fx.a])
|
||||
)
|
||||
|
||||
self.assert_subtype(
|
||||
Instance(self.fx.gvi, [UnpackType(self.fx.ss)]),
|
||||
Instance(self.fx.gvi, [UnpackType(self.fx.ss)]),
|
||||
)
|
||||
self.assert_not_subtype(
|
||||
Instance(self.fx.gvi, [UnpackType(self.fx.ss)]),
|
||||
Instance(self.fx.gvi, [UnpackType(self.fx.us)]),
|
||||
)
|
||||
|
||||
self.assert_subtype(
|
||||
Instance(self.fx.gvi, [UnpackType(self.fx.anyt)]),
|
||||
Instance(self.fx.gvi, [self.fx.anyt]),
|
||||
)
|
||||
self.assert_not_subtype(
|
||||
Instance(self.fx.gvi, [UnpackType(self.fx.ss)]), Instance(self.fx.gvi, [])
|
||||
)
|
||||
self.assert_not_subtype(
|
||||
Instance(self.fx.gvi, [UnpackType(self.fx.ss)]), Instance(self.fx.gvi, [self.fx.anyt])
|
||||
)
|
||||
|
||||
def test_type_var_tuple_with_prefix_suffix(self) -> None:
|
||||
self.assert_subtype(
|
||||
Instance(self.fx.gvi, [self.fx.a, UnpackType(self.fx.ss)]),
|
||||
Instance(self.fx.gvi, [self.fx.a, UnpackType(self.fx.ss)]),
|
||||
)
|
||||
self.assert_subtype(
|
||||
Instance(self.fx.gvi, [self.fx.a, self.fx.b, UnpackType(self.fx.ss)]),
|
||||
Instance(self.fx.gvi, [self.fx.a, self.fx.b, UnpackType(self.fx.ss)]),
|
||||
)
|
||||
self.assert_not_subtype(
|
||||
Instance(self.fx.gvi, [self.fx.a, UnpackType(self.fx.ss)]),
|
||||
Instance(self.fx.gvi, [self.fx.b, UnpackType(self.fx.ss)]),
|
||||
)
|
||||
self.assert_not_subtype(
|
||||
Instance(self.fx.gvi, [self.fx.a, UnpackType(self.fx.ss)]),
|
||||
Instance(self.fx.gvi, [self.fx.a, self.fx.b, UnpackType(self.fx.ss)]),
|
||||
)
|
||||
|
||||
self.assert_subtype(
|
||||
Instance(self.fx.gvi, [UnpackType(self.fx.ss), self.fx.a]),
|
||||
Instance(self.fx.gvi, [UnpackType(self.fx.ss), self.fx.a]),
|
||||
)
|
||||
self.assert_not_subtype(
|
||||
Instance(self.fx.gvi, [UnpackType(self.fx.ss), self.fx.a]),
|
||||
Instance(self.fx.gvi, [UnpackType(self.fx.ss), self.fx.b]),
|
||||
)
|
||||
self.assert_not_subtype(
|
||||
Instance(self.fx.gvi, [UnpackType(self.fx.ss), self.fx.a]),
|
||||
Instance(self.fx.gvi, [UnpackType(self.fx.ss), self.fx.a, self.fx.b]),
|
||||
)
|
||||
|
||||
self.assert_subtype(
|
||||
Instance(self.fx.gvi, [self.fx.a, self.fx.b, UnpackType(self.fx.ss), self.fx.c]),
|
||||
Instance(self.fx.gvi, [self.fx.a, self.fx.b, UnpackType(self.fx.ss), self.fx.c]),
|
||||
)
|
||||
self.assert_not_subtype(
|
||||
Instance(self.fx.gvi, [self.fx.a, self.fx.b, UnpackType(self.fx.ss), self.fx.c]),
|
||||
Instance(self.fx.gvi, [self.fx.a, UnpackType(self.fx.ss), self.fx.b, self.fx.c]),
|
||||
)
|
||||
|
||||
def test_type_var_tuple_unpacked_varlength_tuple(self) -> None:
|
||||
self.assert_subtype(
|
||||
Instance(
|
||||
self.fx.gvi,
|
||||
[
|
||||
UnpackType(
|
||||
TupleType(
|
||||
[self.fx.a, self.fx.b],
|
||||
fallback=Instance(self.fx.std_tuplei, [self.fx.o]),
|
||||
)
|
||||
)
|
||||
],
|
||||
),
|
||||
Instance(self.fx.gvi, [self.fx.a, self.fx.b]),
|
||||
)
|
||||
|
||||
def test_type_var_tuple_unpacked_tuple(self) -> None:
|
||||
self.assert_subtype(
|
||||
Instance(
|
||||
self.fx.gvi,
|
||||
[
|
||||
UnpackType(
|
||||
TupleType(
|
||||
[self.fx.a, self.fx.b],
|
||||
fallback=Instance(self.fx.std_tuplei, [self.fx.o]),
|
||||
)
|
||||
)
|
||||
],
|
||||
),
|
||||
Instance(self.fx.gvi, [self.fx.a, self.fx.b]),
|
||||
)
|
||||
self.assert_subtype(
|
||||
Instance(
|
||||
self.fx.gvi,
|
||||
[
|
||||
UnpackType(
|
||||
TupleType(
|
||||
[self.fx.a, self.fx.b],
|
||||
fallback=Instance(self.fx.std_tuplei, [self.fx.o]),
|
||||
)
|
||||
)
|
||||
],
|
||||
),
|
||||
Instance(self.fx.gvi, [self.fx.anyt, self.fx.anyt]),
|
||||
)
|
||||
self.assert_not_subtype(
|
||||
Instance(
|
||||
self.fx.gvi,
|
||||
[
|
||||
UnpackType(
|
||||
TupleType(
|
||||
[self.fx.a, self.fx.b],
|
||||
fallback=Instance(self.fx.std_tuplei, [self.fx.o]),
|
||||
)
|
||||
)
|
||||
],
|
||||
),
|
||||
Instance(self.fx.gvi, [self.fx.a]),
|
||||
)
|
||||
self.assert_not_subtype(
|
||||
Instance(
|
||||
self.fx.gvi,
|
||||
[
|
||||
UnpackType(
|
||||
TupleType(
|
||||
[self.fx.a, self.fx.b],
|
||||
fallback=Instance(self.fx.std_tuplei, [self.fx.o]),
|
||||
)
|
||||
)
|
||||
],
|
||||
),
|
||||
# Order flipped here.
|
||||
Instance(self.fx.gvi, [self.fx.b, self.fx.a]),
|
||||
)
|
||||
|
||||
def test_type_var_tuple_unpacked_variable_length_tuple(self) -> None:
|
||||
self.assert_equivalent(
|
||||
Instance(self.fx.gvi, [self.fx.a, self.fx.a]),
|
||||
Instance(self.fx.gvi, [UnpackType(Instance(self.fx.std_tuplei, [self.fx.a]))]),
|
||||
)
|
||||
|
||||
# IDEA: Maybe add these test cases (they are tested pretty well in type
|
||||
# checker tests already):
|
||||
# * more interface subtyping test cases
|
||||
# * more generic interface subtyping test cases
|
||||
# * type variables
|
||||
# * tuple types
|
||||
# * None type
|
||||
# * any type
|
||||
# * generic function types
|
||||
|
||||
def assert_subtype(self, s: Type, t: Type) -> None:
|
||||
assert is_subtype(s, t), f"{s} not subtype of {t}"
|
||||
|
||||
def assert_not_subtype(self, s: Type, t: Type) -> None:
|
||||
assert not is_subtype(s, t), f"{s} subtype of {t}"
|
||||
|
||||
def assert_strict_subtype(self, s: Type, t: Type) -> None:
|
||||
self.assert_subtype(s, t)
|
||||
self.assert_not_subtype(t, s)
|
||||
|
||||
def assert_equivalent(self, s: Type, t: Type) -> None:
|
||||
self.assert_subtype(s, t)
|
||||
self.assert_subtype(t, s)
|
||||
|
||||
def assert_unrelated(self, s: Type, t: Type) -> None:
|
||||
self.assert_not_subtype(s, t)
|
||||
self.assert_not_subtype(t, s)
|
||||
66
venv/lib/python3.12/site-packages/mypy/test/testtransform.py
Normal file
66
venv/lib/python3.12/site-packages/mypy/test/testtransform.py
Normal file
@@ -0,0 +1,66 @@
|
||||
"""Identity AST transform test cases"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from mypy import build
|
||||
from mypy.errors import CompileError
|
||||
from mypy.modulefinder import BuildSource
|
||||
from mypy.options import TYPE_VAR_TUPLE, UNPACK
|
||||
from mypy.test.config import test_temp_dir
|
||||
from mypy.test.data import DataDrivenTestCase, DataSuite
|
||||
from mypy.test.helpers import assert_string_arrays_equal, normalize_error_messages, parse_options
|
||||
from mypy.test.visitors import TypeAssertTransformVisitor
|
||||
|
||||
|
||||
class TransformSuite(DataSuite):
|
||||
required_out_section = True
|
||||
# Reuse semantic analysis test cases.
|
||||
files = [
|
||||
"semanal-basic.test",
|
||||
"semanal-expressions.test",
|
||||
"semanal-classes.test",
|
||||
"semanal-types.test",
|
||||
"semanal-modules.test",
|
||||
"semanal-statements.test",
|
||||
"semanal-abstractclasses.test",
|
||||
]
|
||||
native_sep = True
|
||||
|
||||
def run_case(self, testcase: DataDrivenTestCase) -> None:
|
||||
test_transform(testcase)
|
||||
|
||||
|
||||
def test_transform(testcase: DataDrivenTestCase) -> None:
|
||||
"""Perform an identity transform test case."""
|
||||
|
||||
try:
|
||||
src = "\n".join(testcase.input)
|
||||
options = parse_options(src, testcase, 1)
|
||||
options.use_builtins_fixtures = True
|
||||
options.semantic_analysis_only = True
|
||||
options.enable_incomplete_feature = [TYPE_VAR_TUPLE, UNPACK]
|
||||
options.show_traceback = True
|
||||
options.force_uppercase_builtins = True
|
||||
result = build.build(
|
||||
sources=[BuildSource("main", None, src)], options=options, alt_lib_path=test_temp_dir
|
||||
)
|
||||
a = result.errors
|
||||
if a:
|
||||
raise CompileError(a)
|
||||
# Include string representations of the source files in the actual
|
||||
# output.
|
||||
for module in sorted(result.files.keys()):
|
||||
if module in testcase.test_modules:
|
||||
t = TypeAssertTransformVisitor()
|
||||
t.test_only = True
|
||||
file = t.mypyfile(result.files[module])
|
||||
a += file.str_with_options(options).split("\n")
|
||||
except CompileError as e:
|
||||
a = e.messages
|
||||
if testcase.normalize_output:
|
||||
a = normalize_error_messages(a)
|
||||
assert_string_arrays_equal(
|
||||
testcase.output,
|
||||
a,
|
||||
f"Invalid semantic analyzer output ({testcase.file}, line {testcase.line})",
|
||||
)
|
||||
83
venv/lib/python3.12/site-packages/mypy/test/testtypegen.py
Normal file
83
venv/lib/python3.12/site-packages/mypy/test/testtypegen.py
Normal file
@@ -0,0 +1,83 @@
|
||||
"""Test cases for the type checker: exporting inferred types"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import re
|
||||
|
||||
from mypy import build
|
||||
from mypy.errors import CompileError
|
||||
from mypy.modulefinder import BuildSource
|
||||
from mypy.nodes import NameExpr, TempNode
|
||||
from mypy.options import Options
|
||||
from mypy.test.config import test_temp_dir
|
||||
from mypy.test.data import DataDrivenTestCase, DataSuite
|
||||
from mypy.test.helpers import assert_string_arrays_equal
|
||||
from mypy.test.visitors import SkippedNodeSearcher, ignore_node
|
||||
from mypy.util import short_type
|
||||
|
||||
|
||||
class TypeExportSuite(DataSuite):
|
||||
required_out_section = True
|
||||
files = ["typexport-basic.test"]
|
||||
|
||||
def run_case(self, testcase: DataDrivenTestCase) -> None:
|
||||
try:
|
||||
line = testcase.input[0]
|
||||
mask = ""
|
||||
if line.startswith("##"):
|
||||
mask = "(" + line[2:].strip() + ")$"
|
||||
|
||||
src = "\n".join(testcase.input)
|
||||
options = Options()
|
||||
options.strict_optional = False # TODO: Enable strict optional checking
|
||||
options.use_builtins_fixtures = True
|
||||
options.show_traceback = True
|
||||
options.export_types = True
|
||||
options.preserve_asts = True
|
||||
options.allow_empty_bodies = True
|
||||
options.force_uppercase_builtins = True
|
||||
result = build.build(
|
||||
sources=[BuildSource("main", None, src)],
|
||||
options=options,
|
||||
alt_lib_path=test_temp_dir,
|
||||
)
|
||||
a = result.errors
|
||||
map = result.types
|
||||
nodes = map.keys()
|
||||
|
||||
# Ignore NameExpr nodes of variables with explicit (trivial) types
|
||||
# to simplify output.
|
||||
searcher = SkippedNodeSearcher()
|
||||
for file in result.files.values():
|
||||
searcher.ignore_file = file.fullname not in testcase.test_modules
|
||||
file.accept(searcher)
|
||||
ignored = searcher.nodes
|
||||
|
||||
# Filter nodes that should be included in the output.
|
||||
keys = []
|
||||
for node in nodes:
|
||||
if isinstance(node, TempNode):
|
||||
continue
|
||||
if node.line != -1 and map[node]:
|
||||
if ignore_node(node) or node in ignored:
|
||||
continue
|
||||
if re.match(mask, short_type(node)) or (
|
||||
isinstance(node, NameExpr) and re.match(mask, node.name)
|
||||
):
|
||||
# Include node in output.
|
||||
keys.append(node)
|
||||
|
||||
for key in sorted(
|
||||
keys,
|
||||
key=lambda n: (n.line, short_type(n), str(n) + map[n].str_with_options(options)),
|
||||
):
|
||||
ts = map[key].str_with_options(options).replace("*", "") # Remove erased tags
|
||||
ts = ts.replace("__main__.", "")
|
||||
a.append(f"{short_type(key)}({key.line}) : {ts}")
|
||||
except CompileError as e:
|
||||
a = e.messages
|
||||
assert_string_arrays_equal(
|
||||
testcase.output,
|
||||
a,
|
||||
f"Invalid type checker output ({testcase.file}, line {testcase.line})",
|
||||
)
|
||||
1474
venv/lib/python3.12/site-packages/mypy/test/testtypes.py
Normal file
1474
venv/lib/python3.12/site-packages/mypy/test/testtypes.py
Normal file
File diff suppressed because it is too large
Load Diff
17
venv/lib/python3.12/site-packages/mypy/test/testutil.py
Normal file
17
venv/lib/python3.12/site-packages/mypy/test/testutil.py
Normal file
@@ -0,0 +1,17 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
from unittest import TestCase, mock
|
||||
|
||||
from mypy.util import get_terminal_width
|
||||
|
||||
|
||||
class TestGetTerminalSize(TestCase):
|
||||
def test_get_terminal_size_in_pty_defaults_to_80(self) -> None:
|
||||
# when run using a pty, `os.get_terminal_size()` returns `0, 0`
|
||||
ret = os.terminal_size((0, 0))
|
||||
mock_environ = os.environ.copy()
|
||||
mock_environ.pop("COLUMNS", None)
|
||||
with mock.patch.object(os, "get_terminal_size", return_value=ret):
|
||||
with mock.patch.dict(os.environ, values=mock_environ, clear=True):
|
||||
assert get_terminal_width() == 80
|
||||
414
venv/lib/python3.12/site-packages/mypy/test/typefixture.py
Normal file
414
venv/lib/python3.12/site-packages/mypy/test/typefixture.py
Normal file
@@ -0,0 +1,414 @@
|
||||
"""Fixture used in type-related test cases.
|
||||
|
||||
It contains class TypeInfos and Type objects.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from mypy.nodes import (
|
||||
ARG_OPT,
|
||||
ARG_POS,
|
||||
ARG_STAR,
|
||||
COVARIANT,
|
||||
MDEF,
|
||||
Block,
|
||||
ClassDef,
|
||||
FuncDef,
|
||||
SymbolTable,
|
||||
SymbolTableNode,
|
||||
TypeAlias,
|
||||
TypeInfo,
|
||||
)
|
||||
from mypy.semanal_shared import set_callable_name
|
||||
from mypy.types import (
|
||||
AnyType,
|
||||
CallableType,
|
||||
Instance,
|
||||
LiteralType,
|
||||
NoneType,
|
||||
Type,
|
||||
TypeAliasType,
|
||||
TypeOfAny,
|
||||
TypeType,
|
||||
TypeVarLikeType,
|
||||
TypeVarTupleType,
|
||||
TypeVarType,
|
||||
UninhabitedType,
|
||||
UnionType,
|
||||
)
|
||||
|
||||
|
||||
class TypeFixture:
|
||||
"""Helper class that is used as a fixture in type-related unit tests.
|
||||
|
||||
The members are initialized to contain various type-related values.
|
||||
"""
|
||||
|
||||
def __init__(self, variance: int = COVARIANT) -> None:
|
||||
# The 'object' class
|
||||
self.oi = self.make_type_info("builtins.object") # class object
|
||||
self.o = Instance(self.oi, []) # object
|
||||
|
||||
# Type variables (these are effectively global)
|
||||
|
||||
def make_type_var(
|
||||
name: str, id: int, values: list[Type], upper_bound: Type, variance: int
|
||||
) -> TypeVarType:
|
||||
return TypeVarType(
|
||||
name,
|
||||
name,
|
||||
id,
|
||||
values,
|
||||
upper_bound,
|
||||
AnyType(TypeOfAny.from_omitted_generics),
|
||||
variance,
|
||||
)
|
||||
|
||||
self.t = make_type_var("T", 1, [], self.o, variance) # T`1 (type variable)
|
||||
self.tf = make_type_var("T", -1, [], self.o, variance) # T`-1 (type variable)
|
||||
self.tf2 = make_type_var("T", -2, [], self.o, variance) # T`-2 (type variable)
|
||||
self.s = make_type_var("S", 2, [], self.o, variance) # S`2 (type variable)
|
||||
self.s1 = make_type_var("S", 1, [], self.o, variance) # S`1 (type variable)
|
||||
self.sf = make_type_var("S", -2, [], self.o, variance) # S`-2 (type variable)
|
||||
self.sf1 = make_type_var("S", -1, [], self.o, variance) # S`-1 (type variable)
|
||||
self.u = make_type_var("U", 3, [], self.o, variance) # U`3 (type variable)
|
||||
|
||||
# Simple types
|
||||
self.anyt = AnyType(TypeOfAny.special_form)
|
||||
self.nonet = NoneType()
|
||||
self.uninhabited = UninhabitedType()
|
||||
|
||||
# Abstract class TypeInfos
|
||||
|
||||
# class F
|
||||
self.fi = self.make_type_info("F", is_abstract=True)
|
||||
|
||||
# class F2
|
||||
self.f2i = self.make_type_info("F2", is_abstract=True)
|
||||
|
||||
# class F3(F)
|
||||
self.f3i = self.make_type_info("F3", is_abstract=True, mro=[self.fi])
|
||||
|
||||
# Class TypeInfos
|
||||
self.std_tuplei = self.make_type_info(
|
||||
"builtins.tuple", mro=[self.oi], typevars=["T"], variances=[COVARIANT]
|
||||
) # class tuple
|
||||
self.type_typei = self.make_type_info("builtins.type") # class type
|
||||
self.bool_type_info = self.make_type_info("builtins.bool")
|
||||
self.str_type_info = self.make_type_info("builtins.str")
|
||||
self.functioni = self.make_type_info("builtins.function") # function TODO
|
||||
self.ai = self.make_type_info("A", mro=[self.oi]) # class A
|
||||
self.bi = self.make_type_info("B", mro=[self.ai, self.oi]) # class B(A)
|
||||
self.ci = self.make_type_info("C", mro=[self.ai, self.oi]) # class C(A)
|
||||
self.di = self.make_type_info("D", mro=[self.oi]) # class D
|
||||
# class E(F)
|
||||
self.ei = self.make_type_info("E", mro=[self.fi, self.oi])
|
||||
# class E2(F2, F)
|
||||
self.e2i = self.make_type_info("E2", mro=[self.f2i, self.fi, self.oi])
|
||||
# class E3(F, F2)
|
||||
self.e3i = self.make_type_info("E3", mro=[self.fi, self.f2i, self.oi])
|
||||
|
||||
# Generic class TypeInfos
|
||||
# G[T]
|
||||
self.gi = self.make_type_info("G", mro=[self.oi], typevars=["T"], variances=[variance])
|
||||
# G2[T]
|
||||
self.g2i = self.make_type_info("G2", mro=[self.oi], typevars=["T"], variances=[variance])
|
||||
# H[S, T]
|
||||
self.hi = self.make_type_info(
|
||||
"H", mro=[self.oi], typevars=["S", "T"], variances=[variance, variance]
|
||||
)
|
||||
# GS[T, S] <: G[S]
|
||||
self.gsi = self.make_type_info(
|
||||
"GS",
|
||||
mro=[self.gi, self.oi],
|
||||
typevars=["T", "S"],
|
||||
variances=[variance, variance],
|
||||
bases=[Instance(self.gi, [self.s])],
|
||||
)
|
||||
# GS2[S] <: G[S]
|
||||
self.gs2i = self.make_type_info(
|
||||
"GS2",
|
||||
mro=[self.gi, self.oi],
|
||||
typevars=["S"],
|
||||
variances=[variance],
|
||||
bases=[Instance(self.gi, [self.s1])],
|
||||
)
|
||||
|
||||
# list[T]
|
||||
self.std_listi = self.make_type_info(
|
||||
"builtins.list", mro=[self.oi], typevars=["T"], variances=[variance]
|
||||
)
|
||||
|
||||
# Instance types
|
||||
self.std_tuple = Instance(self.std_tuplei, [self.anyt]) # tuple
|
||||
self.type_type = Instance(self.type_typei, []) # type
|
||||
self.function = Instance(self.functioni, []) # function TODO
|
||||
self.str_type = Instance(self.str_type_info, [])
|
||||
self.bool_type = Instance(self.bool_type_info, [])
|
||||
self.a = Instance(self.ai, []) # A
|
||||
self.b = Instance(self.bi, []) # B
|
||||
self.c = Instance(self.ci, []) # C
|
||||
self.d = Instance(self.di, []) # D
|
||||
|
||||
self.e = Instance(self.ei, []) # E
|
||||
self.e2 = Instance(self.e2i, []) # E2
|
||||
self.e3 = Instance(self.e3i, []) # E3
|
||||
|
||||
self.f = Instance(self.fi, []) # F
|
||||
self.f2 = Instance(self.f2i, []) # F2
|
||||
self.f3 = Instance(self.f3i, []) # F3
|
||||
|
||||
# Generic instance types
|
||||
self.ga = Instance(self.gi, [self.a]) # G[A]
|
||||
self.gb = Instance(self.gi, [self.b]) # G[B]
|
||||
self.gd = Instance(self.gi, [self.d]) # G[D]
|
||||
self.go = Instance(self.gi, [self.o]) # G[object]
|
||||
self.gt = Instance(self.gi, [self.t]) # G[T`1]
|
||||
self.gtf = Instance(self.gi, [self.tf]) # G[T`-1]
|
||||
self.gtf2 = Instance(self.gi, [self.tf2]) # G[T`-2]
|
||||
self.gs = Instance(self.gi, [self.s]) # G[S]
|
||||
self.gdyn = Instance(self.gi, [self.anyt]) # G[Any]
|
||||
self.gn = Instance(self.gi, [NoneType()]) # G[None]
|
||||
|
||||
self.g2a = Instance(self.g2i, [self.a]) # G2[A]
|
||||
|
||||
self.gsaa = Instance(self.gsi, [self.a, self.a]) # GS[A, A]
|
||||
self.gsab = Instance(self.gsi, [self.a, self.b]) # GS[A, B]
|
||||
self.gsba = Instance(self.gsi, [self.b, self.a]) # GS[B, A]
|
||||
|
||||
self.gs2a = Instance(self.gs2i, [self.a]) # GS2[A]
|
||||
self.gs2b = Instance(self.gs2i, [self.b]) # GS2[B]
|
||||
self.gs2d = Instance(self.gs2i, [self.d]) # GS2[D]
|
||||
|
||||
self.hab = Instance(self.hi, [self.a, self.b]) # H[A, B]
|
||||
self.haa = Instance(self.hi, [self.a, self.a]) # H[A, A]
|
||||
self.hbb = Instance(self.hi, [self.b, self.b]) # H[B, B]
|
||||
self.hts = Instance(self.hi, [self.t, self.s]) # H[T, S]
|
||||
self.had = Instance(self.hi, [self.a, self.d]) # H[A, D]
|
||||
self.hao = Instance(self.hi, [self.a, self.o]) # H[A, object]
|
||||
|
||||
self.lsta = Instance(self.std_listi, [self.a]) # List[A]
|
||||
self.lstb = Instance(self.std_listi, [self.b]) # List[B]
|
||||
|
||||
self.lit1 = LiteralType(1, self.a)
|
||||
self.lit2 = LiteralType(2, self.a)
|
||||
self.lit3 = LiteralType("foo", self.d)
|
||||
self.lit4 = LiteralType(4, self.a)
|
||||
self.lit1_inst = Instance(self.ai, [], last_known_value=self.lit1)
|
||||
self.lit2_inst = Instance(self.ai, [], last_known_value=self.lit2)
|
||||
self.lit3_inst = Instance(self.di, [], last_known_value=self.lit3)
|
||||
self.lit4_inst = Instance(self.ai, [], last_known_value=self.lit4)
|
||||
|
||||
self.lit_str1 = LiteralType("x", self.str_type)
|
||||
self.lit_str2 = LiteralType("y", self.str_type)
|
||||
self.lit_str3 = LiteralType("z", self.str_type)
|
||||
self.lit_str1_inst = Instance(self.str_type_info, [], last_known_value=self.lit_str1)
|
||||
self.lit_str2_inst = Instance(self.str_type_info, [], last_known_value=self.lit_str2)
|
||||
self.lit_str3_inst = Instance(self.str_type_info, [], last_known_value=self.lit_str3)
|
||||
|
||||
self.lit_false = LiteralType(False, self.bool_type)
|
||||
self.lit_true = LiteralType(True, self.bool_type)
|
||||
|
||||
self.type_a = TypeType.make_normalized(self.a)
|
||||
self.type_b = TypeType.make_normalized(self.b)
|
||||
self.type_c = TypeType.make_normalized(self.c)
|
||||
self.type_d = TypeType.make_normalized(self.d)
|
||||
self.type_t = TypeType.make_normalized(self.t)
|
||||
self.type_any = TypeType.make_normalized(self.anyt)
|
||||
|
||||
self._add_bool_dunder(self.bool_type_info)
|
||||
self._add_bool_dunder(self.ai)
|
||||
|
||||
# TypeVars with non-trivial bounds
|
||||
self.ub = make_type_var("UB", 5, [], self.b, variance) # UB`5 (type variable)
|
||||
self.uc = make_type_var("UC", 6, [], self.c, variance) # UC`6 (type variable)
|
||||
|
||||
def make_type_var_tuple(name: str, id: int, upper_bound: Type) -> TypeVarTupleType:
|
||||
return TypeVarTupleType(
|
||||
name,
|
||||
name,
|
||||
id,
|
||||
upper_bound,
|
||||
self.std_tuple,
|
||||
AnyType(TypeOfAny.from_omitted_generics),
|
||||
)
|
||||
|
||||
self.ts = make_type_var_tuple("Ts", 1, self.o) # Ts`1 (type var tuple)
|
||||
self.ss = make_type_var_tuple("Ss", 2, self.o) # Ss`2 (type var tuple)
|
||||
self.us = make_type_var_tuple("Us", 3, self.o) # Us`3 (type var tuple)
|
||||
|
||||
self.gvi = self.make_type_info("GV", mro=[self.oi], typevars=["Ts"], typevar_tuple_index=0)
|
||||
self.gv2i = self.make_type_info(
|
||||
"GV2", mro=[self.oi], typevars=["T", "Ts", "S"], typevar_tuple_index=1
|
||||
)
|
||||
|
||||
def _add_bool_dunder(self, type_info: TypeInfo) -> None:
|
||||
signature = CallableType([], [], [], Instance(self.bool_type_info, []), self.function)
|
||||
bool_func = FuncDef("__bool__", [], Block([]))
|
||||
bool_func.type = set_callable_name(signature, bool_func)
|
||||
type_info.names[bool_func.name] = SymbolTableNode(MDEF, bool_func)
|
||||
|
||||
# Helper methods
|
||||
|
||||
def callable(self, *a: Type) -> CallableType:
|
||||
"""callable(a1, ..., an, r) constructs a callable with argument types
|
||||
a1, ... an and return type r.
|
||||
"""
|
||||
return CallableType(
|
||||
list(a[:-1]), [ARG_POS] * (len(a) - 1), [None] * (len(a) - 1), a[-1], self.function
|
||||
)
|
||||
|
||||
def callable_type(self, *a: Type) -> CallableType:
|
||||
"""callable_type(a1, ..., an, r) constructs a callable with
|
||||
argument types a1, ... an and return type r, and which
|
||||
represents a type.
|
||||
"""
|
||||
return CallableType(
|
||||
list(a[:-1]), [ARG_POS] * (len(a) - 1), [None] * (len(a) - 1), a[-1], self.type_type
|
||||
)
|
||||
|
||||
def callable_default(self, min_args: int, *a: Type) -> CallableType:
|
||||
"""callable_default(min_args, a1, ..., an, r) constructs a
|
||||
callable with argument types a1, ... an and return type r,
|
||||
with min_args mandatory fixed arguments.
|
||||
"""
|
||||
n = len(a) - 1
|
||||
return CallableType(
|
||||
list(a[:-1]),
|
||||
[ARG_POS] * min_args + [ARG_OPT] * (n - min_args),
|
||||
[None] * n,
|
||||
a[-1],
|
||||
self.function,
|
||||
)
|
||||
|
||||
def callable_var_arg(self, min_args: int, *a: Type) -> CallableType:
|
||||
"""callable_var_arg(min_args, a1, ..., an, r) constructs a callable
|
||||
with argument types a1, ... *an and return type r.
|
||||
"""
|
||||
n = len(a) - 1
|
||||
return CallableType(
|
||||
list(a[:-1]),
|
||||
[ARG_POS] * min_args + [ARG_OPT] * (n - 1 - min_args) + [ARG_STAR],
|
||||
[None] * n,
|
||||
a[-1],
|
||||
self.function,
|
||||
)
|
||||
|
||||
def make_type_info(
|
||||
self,
|
||||
name: str,
|
||||
module_name: str | None = None,
|
||||
is_abstract: bool = False,
|
||||
mro: list[TypeInfo] | None = None,
|
||||
bases: list[Instance] | None = None,
|
||||
typevars: list[str] | None = None,
|
||||
typevar_tuple_index: int | None = None,
|
||||
variances: list[int] | None = None,
|
||||
) -> TypeInfo:
|
||||
"""Make a TypeInfo suitable for use in unit tests."""
|
||||
|
||||
class_def = ClassDef(name, Block([]), None, [])
|
||||
class_def.fullname = name
|
||||
|
||||
if module_name is None:
|
||||
if "." in name:
|
||||
module_name = name.rsplit(".", 1)[0]
|
||||
else:
|
||||
module_name = "__main__"
|
||||
|
||||
if typevars:
|
||||
v: list[TypeVarLikeType] = []
|
||||
for id, n in enumerate(typevars, 1):
|
||||
if typevar_tuple_index is not None and id - 1 == typevar_tuple_index:
|
||||
v.append(
|
||||
TypeVarTupleType(
|
||||
n,
|
||||
n,
|
||||
id,
|
||||
self.o,
|
||||
self.std_tuple,
|
||||
AnyType(TypeOfAny.from_omitted_generics),
|
||||
)
|
||||
)
|
||||
else:
|
||||
if variances:
|
||||
variance = variances[id - 1]
|
||||
else:
|
||||
variance = COVARIANT
|
||||
v.append(
|
||||
TypeVarType(
|
||||
n,
|
||||
n,
|
||||
id,
|
||||
[],
|
||||
self.o,
|
||||
AnyType(TypeOfAny.from_omitted_generics),
|
||||
variance=variance,
|
||||
)
|
||||
)
|
||||
class_def.type_vars = v
|
||||
|
||||
info = TypeInfo(SymbolTable(), class_def, module_name)
|
||||
if mro is None:
|
||||
mro = []
|
||||
if name != "builtins.object":
|
||||
mro.append(self.oi)
|
||||
info.mro = [info] + mro
|
||||
if bases is None:
|
||||
if mro:
|
||||
# By default, assume that there is a single non-generic base.
|
||||
bases = [Instance(mro[0], [])]
|
||||
else:
|
||||
bases = []
|
||||
info.bases = bases
|
||||
|
||||
return info
|
||||
|
||||
def def_alias_1(self, base: Instance) -> tuple[TypeAliasType, Type]:
|
||||
A = TypeAliasType(None, [])
|
||||
target = Instance(
|
||||
self.std_tuplei, [UnionType([base, A])]
|
||||
) # A = Tuple[Union[base, A], ...]
|
||||
AN = TypeAlias(target, "__main__.A", -1, -1)
|
||||
A.alias = AN
|
||||
return A, target
|
||||
|
||||
def def_alias_2(self, base: Instance) -> tuple[TypeAliasType, Type]:
|
||||
A = TypeAliasType(None, [])
|
||||
target = UnionType(
|
||||
[base, Instance(self.std_tuplei, [A])]
|
||||
) # A = Union[base, Tuple[A, ...]]
|
||||
AN = TypeAlias(target, "__main__.A", -1, -1)
|
||||
A.alias = AN
|
||||
return A, target
|
||||
|
||||
def non_rec_alias(
|
||||
self,
|
||||
target: Type,
|
||||
alias_tvars: list[TypeVarLikeType] | None = None,
|
||||
args: list[Type] | None = None,
|
||||
) -> TypeAliasType:
|
||||
AN = TypeAlias(target, "__main__.A", -1, -1, alias_tvars=alias_tvars)
|
||||
if args is None:
|
||||
args = []
|
||||
return TypeAliasType(AN, args)
|
||||
|
||||
|
||||
class InterfaceTypeFixture(TypeFixture):
|
||||
"""Extension of TypeFixture that contains additional generic
|
||||
interface types."""
|
||||
|
||||
def __init__(self) -> None:
|
||||
super().__init__()
|
||||
# GF[T]
|
||||
self.gfi = self.make_type_info("GF", typevars=["T"], is_abstract=True)
|
||||
|
||||
# M1 <: GF[A]
|
||||
self.m1i = self.make_type_info(
|
||||
"M1", is_abstract=True, mro=[self.gfi, self.oi], bases=[Instance(self.gfi, [self.a])]
|
||||
)
|
||||
|
||||
self.gfa = Instance(self.gfi, [self.a]) # GF[A]
|
||||
self.gfb = Instance(self.gfi, [self.b]) # GF[B]
|
||||
|
||||
self.m1 = Instance(self.m1i, []) # M1
|
||||
87
venv/lib/python3.12/site-packages/mypy/test/update_data.py
Normal file
87
venv/lib/python3.12/site-packages/mypy/test/update_data.py
Normal file
@@ -0,0 +1,87 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import re
|
||||
from collections import defaultdict
|
||||
from typing import Iterator
|
||||
|
||||
from mypy.test.data import DataDrivenTestCase, DataFileCollector, DataFileFix, parse_test_data
|
||||
|
||||
|
||||
def update_testcase_output(
|
||||
testcase: DataDrivenTestCase, actual: list[str], *, incremental_step: int
|
||||
) -> None:
|
||||
if testcase.xfail:
|
||||
return
|
||||
collector = testcase.parent
|
||||
assert isinstance(collector, DataFileCollector)
|
||||
for fix in _iter_fixes(testcase, actual, incremental_step=incremental_step):
|
||||
collector.enqueue_fix(fix)
|
||||
|
||||
|
||||
def _iter_fixes(
    testcase: DataDrivenTestCase, actual: list[str], *, incremental_step: int
) -> Iterator[DataFileFix]:
    """Yield DataFileFix edits that make *testcase*'s expectations match *actual*.

    Two update strategies, chosen by how the test case states expectations:

    1. If the case has [out] / [outN] sections, the matching section's body is
       replaced wholesale with the *actual* output lines.
    2. Otherwise, inline "# E:" / "# W:" / "# N:" assertion comments inside the
       [case] and [file ...] sections are rewritten per source line.

    All fix line numbers are offsets within the .test data file: testcase.line
    plus the item's line within the case, minus one for 1-based indexing.
    """
    # Bucket mypy-style "file:line: severity: message" report lines by
    # (file, line) so they can be attached to source lines below.
    reports_by_line: dict[tuple[str, int], list[tuple[str, str]]] = defaultdict(list)
    for error_line in actual:
        comment_match = re.match(
            r"^(?P<filename>[^:]+):(?P<lineno>\d+): (?P<severity>error|note|warning): (?P<msg>.+)$",
            error_line,
        )
        if comment_match:
            filename = comment_match.group("filename")
            lineno = int(comment_match.group("lineno"))
            severity = comment_match.group("severity")
            msg = comment_match.group("msg")
            reports_by_line[filename, lineno].append((severity, msg))

    test_items = parse_test_data(testcase.data, testcase.name)

    # If we have [out] and/or [outN], we update just those sections.
    if any(re.match(r"^out\d*$", test_item.id) for test_item in test_items):
        for test_item in test_items:
            # Step 1 uses plain [out]; later incremental steps use [outN].
            if (incremental_step < 2 and test_item.id == "out") or (
                incremental_step >= 2 and test_item.id == f"out{incremental_step}"
            ):
                yield DataFileFix(
                    lineno=testcase.line + test_item.line - 1,
                    end_lineno=testcase.line + test_item.end_line - 1,
                    # Re-append blank lines that the parser trimmed from the end.
                    lines=actual + [""] * test_item.trimmed_newlines,
                )

        return

    # Update assertion comments within the sections
    for test_item in test_items:
        if test_item.id == "case":
            source_lines = test_item.data
            file_path = "main"
        elif test_item.id == "file":
            source_lines = test_item.data
            file_path = f"tmp/{test_item.arg}"
        else:
            continue  # other sections we don't touch

        fix_lines = []
        for lineno, source_line in enumerate(source_lines, start=1):
            reports = reports_by_line.get((file_path, lineno))
            # Existing "# E:"-style assertion comment (with its indent), if any.
            comment_match = re.search(r"(?P<indent>\s+)(?P<comment># [EWN]: .+)$", source_line)
            if comment_match:
                source_line = source_line[: comment_match.start("indent")]  # strip old comment
            if reports:
                # Keep the original comment's indent when there was one.
                indent = comment_match.group("indent") if comment_match else " "
                # multiline comments are on the first line and then on subsequent lines empty lines
                # with a continuation backslash
                for j, (severity, msg) in enumerate(reports):
                    # Later reports are padded with spaces to align under the code.
                    out_l = source_line if j == 0 else " " * len(source_line)
                    is_last = j == len(reports) - 1
                    severity_char = severity[0].upper()
                    continuation = "" if is_last else " \\"
                    fix_lines.append(f"{out_l}{indent}# {severity_char}: {msg}{continuation}")
            else:
                fix_lines.append(source_line)

        yield DataFileFix(
            lineno=testcase.line + test_item.line - 1,
            end_lineno=testcase.line + test_item.end_line - 1,
            lines=fix_lines + [""] * test_item.trimmed_newlines,
        )
|
||||
Binary file not shown.
63
venv/lib/python3.12/site-packages/mypy/test/visitors.py
Normal file
63
venv/lib/python3.12/site-packages/mypy/test/visitors.py
Normal file
@@ -0,0 +1,63 @@
|
||||
"""Visitor classes pulled out from different tests
|
||||
|
||||
These are here because we don't currently support having interpreted
|
||||
classes subtype compiled ones but pytest grabs the python file
|
||||
even if the test was compiled.
|
||||
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from mypy.nodes import AssignmentStmt, CallExpr, Expression, IntExpr, NameExpr, Node, TypeVarExpr
|
||||
from mypy.traverser import TraverserVisitor
|
||||
from mypy.treetransform import TransformVisitor
|
||||
from mypy.types import Type
|
||||
|
||||
|
||||
# from testtypegen
|
||||
class SkippedNodeSearcher(TraverserVisitor):
    """Collect AST nodes to be omitted from type-export test output.

    Originally pulled out of testtypegen.  Nodes accumulated in ``nodes``
    are skipped when dumping inferred types; when ``ignore_file`` is set,
    every name and integer literal visited is skipped wholesale.
    """

    def __init__(self) -> None:
        self.nodes: set[Node] = set()
        self.ignore_file = False

    def visit_assignment_stmt(self, s: AssignmentStmt) -> None:
        # Skip lvalues of explicitly annotated assignments and of assignments
        # whose rvalue is itself ignorable (e.g. TypeVar(...) calls).
        if s.type or ignore_node(s.rvalue):
            self.nodes.update(lv for lv in s.lvalues if isinstance(lv, NameExpr))
        super().visit_assignment_stmt(s)

    def visit_name_expr(self, n: NameExpr) -> None:
        self._maybe_skip(n)
        super().visit_name_expr(n)

    def visit_int_expr(self, n: IntExpr) -> None:
        self._maybe_skip(n)
        super().visit_int_expr(n)

    def _maybe_skip(self, node: Node) -> None:
        # In an ignored file, every visited leaf expression is skipped.
        if self.ignore_file:
            self.nodes.add(node)
|
||||
|
||||
|
||||
def ignore_node(node: Expression) -> bool:
    """Return True if node is to be omitted from test case output.

    We want to get rid of object() expressions in the typing module stub
    and also TypeVar(...) expressions.  Since detecting whether a node comes
    from the typing module is not easy, we just strip them all away.
    """
    if isinstance(node, TypeVarExpr):
        return True
    if isinstance(node, NameExpr):
        # object() constructors and the None name from the stubs.
        return node.fullname in ("builtins.object", "builtins.None")
    if isinstance(node, CallExpr):
        # Calls of ignorable callees, and calls already replaced by semantic
        # analysis (node.analyzed is set), are ignored too.
        return bool(node.analyzed) or ignore_node(node.callee)
    return False
|
||||
|
||||
|
||||
# from testtransform
class TypeAssertTransformVisitor(TransformVisitor):
    """TransformVisitor that requires every transformed node to carry a type."""

    def type(self, type: Type) -> Type:
        # Fail loudly if a node is missing its type instead of silently
        # propagating None through the transformed tree.
        assert type is not None
        return type
|
||||
Reference in New Issue
Block a user