Major fixes and new features
All checks were successful
continuous-integration/drone/push Build is passing
All checks were successful
continuous-integration/drone/push Build is passing
This commit is contained in:
429
venv/lib/python3.12/site-packages/mypyc/test/test_run.py
Normal file
429
venv/lib/python3.12/site-packages/mypyc/test/test_run.py
Normal file
@@ -0,0 +1,429 @@
|
||||
"""Test cases for building an C extension and running it."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import ast
|
||||
import contextlib
|
||||
import glob
|
||||
import os.path
|
||||
import re
|
||||
import shutil
|
||||
import subprocess
|
||||
import sys
|
||||
import time
|
||||
from typing import Any, Iterator
|
||||
|
||||
from mypy import build
|
||||
from mypy.errors import CompileError
|
||||
from mypy.options import TYPE_VAR_TUPLE, UNPACK, Options
|
||||
from mypy.test.config import test_temp_dir
|
||||
from mypy.test.data import DataDrivenTestCase
|
||||
from mypy.test.helpers import assert_module_equivalence, perform_file_operations
|
||||
from mypyc.build import construct_groups
|
||||
from mypyc.codegen import emitmodule
|
||||
from mypyc.errors import Errors
|
||||
from mypyc.options import CompilerOptions
|
||||
from mypyc.test.test_serialization import check_serialization_roundtrip
|
||||
from mypyc.test.testutil import (
|
||||
ICODE_GEN_BUILTINS,
|
||||
TESTUTIL_PATH,
|
||||
MypycDataSuite,
|
||||
assert_test_output,
|
||||
fudge_dir_mtimes,
|
||||
show_c,
|
||||
use_custom_builtins,
|
||||
)
|
||||
|
||||
# Data-driven test case description files (found under mypyc/test-data/).
# Each entry groups the "run" tests for one feature area; every case in them
# is compiled to a C extension and executed (see TestRun below).
files = [
    "run-async.test",
    "run-misc.test",
    "run-functions.test",
    # Native integer and float primitives
    "run-integers.test",
    "run-i64.test",
    "run-i32.test",
    "run-i16.test",
    "run-u8.test",
    "run-floats.test",
    "run-math.test",
    "run-bools.test",
    # Built-in container and string types
    "run-strings.test",
    "run-bytes.test",
    "run-tuples.test",
    "run-lists.test",
    "run-dicts.test",
    "run-sets.test",
    "run-primitives.test",
    "run-loops.test",
    "run-exceptions.test",
    "run-imports.test",
    # Classes, traits and other object-model features
    "run-classes.test",
    "run-traits.test",
    "run-generators.test",
    "run-multimodule.test",
    "run-bench.test",
    "run-mypy-sim.test",
    "run-dunders.test",
    "run-singledispatch.test",
    "run-attrs.test",
    "run-python37.test",
    "run-python38.test",
]

# match statements need the 3.10+ parser, so only collect those cases there.
if sys.version_info >= (3, 10):
    files.append("run-match.test")
|
||||
|
||||
# Template for the generated setup.py used to build the compiled test module.
# Formatted (in order) with: module paths, separate-compilation spec, C file
# info, multi_file flag, optimization level, and debug level.
#
# Fix: the caller passes six arguments (including debug_level) but the
# template previously had only five placeholders, so debug_level was
# silently discarded by str.format. Added the debug_level placeholder.
setup_format = """\
from setuptools import setup
from mypyc.build import mypycify

setup(name='test_run_output',
      ext_modules=mypycify({}, separate={}, skip_cgen_input={!r}, strip_asserts=False,
                           multi_file={}, opt_level='{}', debug_level='{}'),
)
"""

# Directory (relative to tmp/) where build artifacts and setup.py are placed.
WORKDIR = "build"
|
||||
|
||||
|
||||
def run_setup(script_name: str, script_args: list[str]) -> bool:
    """Run a setup script in a somewhat controlled environment.

    This is adapted from code in distutils and our goal here is that is
    faster to not need to spin up a python interpreter to run it.

    We had to fork it because the real run_setup swallows errors
    and KeyboardInterrupt with no way to recover them (!).
    The real version has some extra features that we removed since
    we weren't using them.

    Returns whether the setup succeeded.
    """
    saved_argv = sys.argv.copy()
    script_globals = {"__file__": script_name}
    try:
        try:
            # Make the script see itself as the program being run.
            sys.argv[0] = script_name
            sys.argv[1:] = script_args
            with open(script_name, "rb") as script_file:
                exec(script_file.read(), script_globals)
        finally:
            # Always restore argv, whether the script succeeded or raised.
            sys.argv = saved_argv
    except SystemExit as exc:
        # distutils converts KeyboardInterrupt into a SystemExit with
        # "interrupted" as the argument. Convert it back so that
        # pytest will exit instead of just failing the test.
        if exc.code == "interrupted":
            raise KeyboardInterrupt from exc
        # Exit code 0 (or none at all) means the build succeeded.
        return exc.code == 0 or exc.code is None

    # The script ran to completion without calling sys.exit().
    return True
|
||||
|
||||
|
||||
@contextlib.contextmanager
def chdir_manager(target: str) -> Iterator[None]:
    """Temporarily change the working directory to *target*.

    The previous working directory is restored on exit, even if the
    body raises an exception.
    """
    previous_dir = os.getcwd()
    os.chdir(target)
    try:
        yield
    finally:
        os.chdir(previous_dir)
|
||||
|
||||
|
||||
class TestRun(MypycDataSuite):
    """Test cases that build a C extension and run code."""

    # Test description files to collect (module-level list above).
    files = files
    # Tests run inside the shared temporary directory.
    base_path = test_temp_dir
    # [out] sections in test cases are optional.
    optional_out = True
    # If True, each module is emitted as its own C file (see TestRunMultiFile).
    multi_file = False
    separate = False  # If True, using separate (incremental) compilation

    def run_case(self, testcase: DataDrivenTestCase) -> None:
        """Framework entry point: run one data-driven test case."""
        # setup.py wants to be run from the root directory of the package, which we accommodate
        # by chdiring into tmp/
        with use_custom_builtins(
            os.path.join(self.data_prefix, ICODE_GEN_BUILTINS), testcase
        ), chdir_manager("tmp"):
            self.run_case_inner(testcase)

    def run_case_inner(self, testcase: DataDrivenTestCase) -> None:
        """Write the test program to disk and run every incremental step."""
        if not os.path.isdir(WORKDIR):  # (one test puts something in build...)
            os.mkdir(WORKDIR)

        text = "\n".join(testcase.input)

        # The same program text is written twice: native.py gets compiled,
        # interpreted.py is available for running the same code uncompiled.
        with open("native.py", "w", encoding="utf-8") as f:
            f.write(text)
        with open("interpreted.py", "w", encoding="utf-8") as f:
            f.write(text)

        # Shared helpers used by test drivers.
        shutil.copyfile(TESTUTIL_PATH, "testutil.py")

        step = 1
        self.run_case_step(testcase, step)

        steps = testcase.find_steps()
        # find_steps() returns [[]] when there are no incremental steps.
        if steps == [[]]:
            steps = []

        for operations in steps:
            # To make sure that any new changes get picked up as being
            # new by distutils, shift the mtime of all of the
            # generated artifacts back by a second.
            fudge_dir_mtimes(WORKDIR, -1)
            # On Ubuntu, changing the mtime doesn't work reliably. As
            # a workaround, sleep.
            #
            # TODO: Figure out a better approach, since this slows down tests.
            if sys.platform == "linux":
                time.sleep(1.0)

            step += 1
            # File operations are specified relative to the test root, one
            # level above the tmp/ directory we chdir'ed into.
            with chdir_manager(".."):
                perform_file_operations(operations)
            self.run_case_step(testcase, step)

    def run_case_step(self, testcase: DataDrivenTestCase, incremental_step: int) -> None:
        """Type check, compile to C, build, and execute one step of a case."""
        # Benchmarks only actually measure when --bench is given AND the case
        # is named like a benchmark; otherwise they just run for correctness.
        bench = testcase.config.getoption("--bench", False) and "Benchmark" in testcase.name

        options = Options()
        options.use_builtins_fixtures = True
        options.show_traceback = True
        options.strict_optional = True
        options.python_version = sys.version_info[:2]
        options.export_types = True
        options.preserve_asts = True
        options.allow_empty_bodies = True
        # Incremental mode is only exercised by the "separate" suites.
        options.incremental = self.separate
        options.enable_incomplete_feature = [TYPE_VAR_TUPLE, UNPACK]

        # Avoid checking modules/packages named 'unchecked', to provide a way
        # to test interacting with code we don't have types for.
        options.per_module_options["unchecked.*"] = {"follow_imports": "error"}

        source = build.BuildSource("native.py", "native", None)
        sources = [source]
        module_names = ["native"]
        module_paths = ["native.py"]

        # Hard code another module name to compile in the same compilation unit.
        # NOTE(review): to_delete is populated below but never consumed in this
        # method -- confirm whether cleanup was intended.
        to_delete = []
        for fn, text in testcase.files:
            fn = os.path.relpath(fn, test_temp_dir)

            # Extra modules named other*.py are compiled together with native.py.
            if os.path.basename(fn).startswith("other") and fn.endswith(".py"):
                name = fn.split(".")[0].replace(os.sep, ".")
                module_names.append(name)
                sources.append(build.BuildSource(fn, name, None))
                to_delete.append(fn)
                module_paths.append(fn)

                # Keep an interpreted copy of each extra module as well.
                shutil.copyfile(fn, os.path.join(os.path.dirname(fn), name + "_interpreted.py"))

        # Mark every collected module for compilation by mypyc.
        for source in sources:
            options.per_module_options.setdefault(source.module, {})["mypyc"] = True

        separate = (
            self.get_separate("\n".join(testcase.input), incremental_step)
            if self.separate
            else False
        )

        groups = construct_groups(sources, separate, len(module_names) > 1)

        try:
            compiler_options = CompilerOptions(multi_file=self.multi_file, separate=self.separate)
            result = emitmodule.parse_and_typecheck(
                sources=sources,
                options=options,
                compiler_options=compiler_options,
                groups=groups,
                alt_lib_path=".",
            )
            errors = Errors(options)
            ir, cfiles = emitmodule.compile_modules_to_c(
                result, compiler_options=compiler_options, errors=errors, groups=groups
            )
            if errors.num_errors:
                errors.flush_errors()
                assert False, "Compile error"
        except CompileError as e:
            # Rewrite locations so failures point into the .test file.
            for line in e.messages:
                print(fix_native_line_number(line, testcase.file, testcase.line))
            assert False, "Compile error"

        # Check that serialization works on this IR. (Only on the first
        # step because the returned ir only includes updated code.)
        if incremental_step == 1:
            check_serialization_roundtrip(ir)

        # Build knobs controllable from the environment (default 0).
        opt_level = int(os.environ.get("MYPYC_OPT_LEVEL", 0))
        debug_level = int(os.environ.get("MYPYC_DEBUG_LEVEL", 0))

        setup_file = os.path.abspath(os.path.join(WORKDIR, "setup.py"))
        # We pass the C file information to the build script via setup.py unfortunately
        with open(setup_file, "w", encoding="utf-8") as f:
            # NOTE(review): confirm setup_format has placeholders for all six
            # arguments -- str.format silently ignores extra positional args.
            f.write(
                setup_format.format(
                    module_paths, separate, cfiles, self.multi_file, opt_level, debug_level
                )
            )

        if not run_setup(setup_file, ["build_ext", "--inplace"]):
            if testcase.config.getoption("--mypyc-showc"):
                show_c(cfiles)
            assert False, "Compilation failed"

        # Assert that an output file got created
        suffix = "pyd" if sys.platform == "win32" else "so"
        assert glob.glob(f"native.*.{suffix}") or glob.glob(f"native.{suffix}")

        driver_path = "driver.py"
        if not os.path.isfile(driver_path):
            # No driver.py provided by test case. Use the default one
            # (mypyc/test-data/driver/driver.py) that calls each
            # function named test_*.
            default_driver = os.path.join(
                os.path.dirname(__file__), "..", "test-data", "driver", "driver.py"
            )
            shutil.copy(default_driver, driver_path)
        env = os.environ.copy()
        env["MYPYC_RUN_BENCH"] = "1" if bench else "0"

        debugger = testcase.config.getoption("debugger")
        if debugger:
            if debugger == "lldb":
                subprocess.check_call(["lldb", "--", sys.executable, driver_path], env=env)
            elif debugger == "gdb":
                subprocess.check_call(["gdb", "--args", sys.executable, driver_path], env=env)
            else:
                assert False, "Unsupported debugger"
            # TODO: find a way to automatically disable capturing
            # stdin/stdout when in debugging mode
            assert False, (
                "Test can't pass in debugging mode. "
                "(Make sure to pass -s to pytest to interact with the debugger)"
            )
        proc = subprocess.Popen(
            [sys.executable, driver_path],
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            env=env,
        )
        if sys.version_info >= (3, 12):
            # TODO: testDecorators1 hangs on 3.12, remove this once fixed
            proc.wait(timeout=30)
        output = proc.communicate()[0].decode("utf8")
        outlines = output.splitlines()

        if testcase.config.getoption("--mypyc-showc"):
            show_c(cfiles)
        if proc.returncode != 0:
            print()
            print("*** Exit status: %d" % proc.returncode)

        # Verify output.
        if bench:
            print("Test output:")
            print(output)
        else:
            if incremental_step == 1:
                msg = "Invalid output"
                expected = testcase.output
            else:
                msg = f"Invalid output (step {incremental_step})"
                expected = testcase.output2.get(incremental_step, [])

            if not expected:
                # Tweak some line numbers, but only if the expected output is empty,
                # as tweaked output might not match expected output.
                outlines = [
                    fix_native_line_number(line, testcase.file, testcase.line) for line in outlines
                ]
            assert_test_output(testcase, outlines, msg, expected)

        # For incremental runs, check which modules mypy rechecked / found stale.
        if incremental_step > 1 and options.incremental:
            suffix = "" if incremental_step == 2 else str(incremental_step - 1)
            expected_rechecked = testcase.expected_rechecked_modules.get(incremental_step - 1)
            if expected_rechecked is not None:
                assert_module_equivalence(
                    "rechecked" + suffix, expected_rechecked, result.manager.rechecked_modules
                )
            expected_stale = testcase.expected_stale_modules.get(incremental_step - 1)
            if expected_stale is not None:
                assert_module_equivalence(
                    "stale" + suffix, expected_stale, result.manager.stale_modules
                )

        assert proc.returncode == 0

    def get_separate(self, program_text: str, incremental_step: int) -> Any:
        """Extract the separate-compilation spec from a test's source text.

        Looks for a "# separate<step>:" comment first, then a plain
        "# separate:" comment, and evaluates its bracketed literal. Returns
        True (fully separate compilation) when no such comment exists.
        """
        template = r"# separate{}: (\[.*\])$"
        m = re.search(template.format(incremental_step), program_text, flags=re.MULTILINE)
        if not m:
            m = re.search(template.format(""), program_text, flags=re.MULTILINE)
        if m:
            return ast.literal_eval(m.group(1))
        else:
            return True
|
||||
|
||||
|
||||
class TestRunMultiFile(TestRun):
    """Run the main multi-module tests in multi-file compilation mode.

    In multi-file mode each module gets compiled into a separate C file,
    but all modules (C files) are compiled together.
    """

    # Emit one C file per module instead of one combined file.
    multi_file = True
    # Distinguishes these cases from the base TestRun runs in test names.
    test_name_suffix = "_multi"
    # Only the multi-module suites are meaningful in this mode.
    files = ["run-multimodule.test", "run-mypy-sim.test"]
|
||||
|
||||
|
||||
class TestRunSeparate(TestRun):
    """Run the main multi-module tests in separate compilation mode.

    In this mode there are multiple compilation groups, which are compiled
    incrementally. Each group is compiled to a separate C file, and these C
    files are compiled separately.

    Each compiled module is placed into a separate compilation group, unless
    overridden by a special comment. Consider this example:

      # separate: [(["other.py", "other_b.py"], "stuff")]

    This puts other.py and other_b.py into a compilation group named "stuff".
    Any files not mentioned in the comment will get single-file groups.
    """

    # Enables incremental/separate compilation (see TestRun.separate).
    separate = True
    # Distinguishes these cases from the base TestRun runs in test names.
    test_name_suffix = "_separate"
    # Only the multi-module suites are meaningful in this mode.
    files = ["run-multimodule.test", "run-mypy-sim.test"]
|
||||
|
||||
|
||||
def fix_native_line_number(message: str, fnam: str, delta: int) -> str:
    """Update code locations in test case output to point to the .test file.

    The description of the test case is written to native.py, and line numbers
    in test case output often are relative to native.py. This translates the
    line numbers to be relative to the .test file that contains the test case
    description, and also updates the file name to the .test file name.

    Args:
        message: message to update
        fnam: path of the .test file
        delta: line number of the beginning of the test case in the .test file

    Returns updated message (or original message if we couldn't find anything).
    """
    fnam = os.path.basename(fnam)
    # Mypy-style location prefix: "native.py:<line>:".
    message = re.sub(
        r"native\.py:([0-9]+):", lambda m: "%s:%d:" % (fnam, int(m.group(1)) + delta), message
    )
    # Traceback-style location: '"native.py", line <line>,'.
    # Fix: the dot was previously unescaped, so the pattern could also match
    # e.g. "nativeXpy"; escape it to match the literal file name only
    # (consistent with the first pattern above).
    message = re.sub(
        r'"native\.py", line ([0-9]+),',
        lambda m: '"%s", line %d,' % (fnam, int(m.group(1)) + delta),
        message,
    )
    return message
|
||||
Reference in New Issue
Block a user