new models, frontend functions, public pages
276  .venv/lib/python3.10/site-packages/jsonschema/tests/_suite.py  (new file)
@@ -0,0 +1,276 @@
"""
Python representations of the JSON Schema Test Suite tests.
"""
from __future__ import annotations

from contextlib import suppress
from functools import partial
from pathlib import Path
from typing import TYPE_CHECKING, Any
import json
import os
import re
import subprocess
import sys
import unittest

from attrs import field, frozen
from referencing import Registry
import referencing.jsonschema

if TYPE_CHECKING:
    from collections.abc import Iterable, Mapping, Sequence

    import pyperf

from jsonschema.validators import _VALIDATORS
import jsonschema

_DELIMITERS = re.compile(r"[\W\- ]+")


def _find_suite():
    root = os.environ.get("JSON_SCHEMA_TEST_SUITE")
    if root is not None:
        return Path(root)

    root = Path(jsonschema.__file__).parent.parent / "json"
    if not root.is_dir():  # pragma: no cover
        raise ValueError(
            (
                "Can't find the JSON-Schema-Test-Suite directory. "
                "Set the 'JSON_SCHEMA_TEST_SUITE' environment "
                "variable or run the tests from alongside a checkout "
                "of the suite."
            ),
        )
    return root


@frozen
class Suite:

    _root: Path = field(factory=_find_suite)
    _remotes: referencing.jsonschema.SchemaRegistry = field(init=False)

    def __attrs_post_init__(self):
        jsonschema_suite = self._root.joinpath("bin", "jsonschema_suite")
        argv = [sys.executable, str(jsonschema_suite), "remotes"]
        remotes = subprocess.check_output(argv).decode("utf-8")

        resources = json.loads(remotes)

        li = "http://localhost:1234/locationIndependentIdentifierPre2019.json"
        li4 = "http://localhost:1234/locationIndependentIdentifierDraft4.json"

        registry = Registry().with_resources(
            [
                (
                    li,
                    referencing.jsonschema.DRAFT7.create_resource(
                        contents=resources.pop(li),
                    ),
                ),
                (
                    li4,
                    referencing.jsonschema.DRAFT4.create_resource(
                        contents=resources.pop(li4),
                    ),
                ),
            ],
        ).with_contents(
            resources.items(),
            default_specification=referencing.jsonschema.DRAFT202012,
        )
        object.__setattr__(self, "_remotes", registry)

    def benchmark(self, runner: pyperf.Runner):  # pragma: no cover
        for name, Validator in _VALIDATORS.items():
            self.version(name=name).benchmark(
                runner=runner,
                Validator=Validator,
            )

    def version(self, name) -> Version:
        return Version(
            name=name,
            path=self._root / "tests" / name,
            remotes=self._remotes,
        )


@frozen
class Version:

    _path: Path
    _remotes: referencing.jsonschema.SchemaRegistry

    name: str

    def benchmark(self, **kwargs):  # pragma: no cover
        for case in self.cases():
            case.benchmark(**kwargs)

    def cases(self) -> Iterable[_Case]:
        return self._cases_in(paths=self._path.glob("*.json"))

    def format_cases(self) -> Iterable[_Case]:
        return self._cases_in(paths=self._path.glob("optional/format/*.json"))

    def optional_cases_of(self, name: str) -> Iterable[_Case]:
        return self._cases_in(paths=[self._path / "optional" / f"{name}.json"])

    def to_unittest_testcase(self, *groups, **kwargs):
        name = kwargs.pop("name", "Test" + self.name.title().replace("-", ""))
        methods = {
            method.__name__: method
            for method in (
                test.to_unittest_method(**kwargs)
                for group in groups
                for case in group
                for test in case.tests
            )
        }
        cls = type(name, (unittest.TestCase,), methods)

        # We're doing crazy things, so if they go wrong, like a function
        # behaving differently on some other interpreter, just make them
        # not happen.
        with suppress(Exception):
            cls.__module__ = _someone_save_us_the_module_of_the_caller()

        return cls

    def _cases_in(self, paths: Iterable[Path]) -> Iterable[_Case]:
        for path in paths:
            for case in json.loads(path.read_text(encoding="utf-8")):
                yield _Case.from_dict(
                    case,
                    version=self,
                    subject=path.stem,
                    remotes=self._remotes,
                )


@frozen
class _Case:

    version: Version

    subject: str
    description: str
    schema: Mapping[str, Any] | bool
    tests: list[_Test]
    comment: str | None = None
    specification: Sequence[dict[str, str]] = ()

    @classmethod
    def from_dict(cls, data, remotes, **kwargs):
        data.update(kwargs)
        tests = [
            _Test(
                version=data["version"],
                subject=data["subject"],
                case_description=data["description"],
                schema=data["schema"],
                remotes=remotes,
                **test,
            ) for test in data.pop("tests")
        ]
        return cls(tests=tests, **data)

    def benchmark(self, runner: pyperf.Runner, **kwargs):  # pragma: no cover
        for test in self.tests:
            runner.bench_func(
                test.fully_qualified_name,
                partial(test.validate_ignoring_errors, **kwargs),
            )


@frozen(repr=False)
class _Test:

    version: Version

    subject: str
    case_description: str
    description: str

    data: Any
    schema: Mapping[str, Any] | bool

    valid: bool

    _remotes: referencing.jsonschema.SchemaRegistry

    comment: str | None = None

    def __repr__(self):  # pragma: no cover
        return f"<Test {self.fully_qualified_name}>"

    @property
    def fully_qualified_name(self):  # pragma: no cover
        return " > ".join(  # noqa: FLY002
            [
                self.version.name,
                self.subject,
                self.case_description,
                self.description,
            ],
        )

    def to_unittest_method(self, skip=lambda test: None, **kwargs):
        if self.valid:
            def fn(this):
                self.validate(**kwargs)
        else:
            def fn(this):
                with this.assertRaises(jsonschema.ValidationError):
                    self.validate(**kwargs)

        fn.__name__ = "_".join(
            [
                "test",
                _DELIMITERS.sub("_", self.subject),
                _DELIMITERS.sub("_", self.case_description),
                _DELIMITERS.sub("_", self.description),
            ],
        )
        reason = skip(self)
        if reason is None or os.environ.get("JSON_SCHEMA_DEBUG", "0") != "0":
            return fn
        elif os.environ.get("JSON_SCHEMA_EXPECTED_FAILURES", "0") != "0":  # pragma: no cover  # noqa: E501
            return unittest.expectedFailure(fn)
        else:
            return unittest.skip(reason)(fn)

    def validate(self, Validator, **kwargs):
        Validator.check_schema(self.schema)
        validator = Validator(
            schema=self.schema,
            registry=self._remotes,
            **kwargs,
        )
        if os.environ.get("JSON_SCHEMA_DEBUG", "0") != "0":  # pragma: no cover
            breakpoint()  # noqa: T100
        validator.validate(instance=self.data)

    def validate_ignoring_errors(self, Validator):  # pragma: no cover
        with suppress(jsonschema.ValidationError):
            self.validate(Validator=Validator)


def _someone_save_us_the_module_of_the_caller():
    """
    The FQON of the module 2nd stack frames up from here.

    This is intended to allow us to dynamically return test case classes that
    are indistinguishable from being defined in the module that wants them.

    Otherwise, trial will mis-print the FQON, and copy pasting it won't re-run
    the class that really is running.

    Save us all, this is all so so so so so terrible.
    """

    return sys._getframe(2).f_globals["__name__"]
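For orientation, a minimal sketch of how the harness above is typically consumed (mirroring jsonschema's own test modules); the draft name and Validator pairing are illustrative assumptions, not part of this file:

# Illustrative usage only: turn one draft's suite files into unittest cases.
SUITE = Suite()
DRAFT7 = SUITE.version(name="draft7")

# Every test in tests/draft7/*.json becomes a test_* method on this class,
# validated against the draft 7 validator with the suite's remote registry.
TestDraft7 = DRAFT7.to_unittest_testcase(
    DRAFT7.cases(),
    Validator=jsonschema.Draft7Validator,
)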
@@ -0,0 +1,50 @@
"""
Fuzzing setup for OSS-Fuzz.

See https://github.com/google/oss-fuzz/tree/master/projects/jsonschema for the
other half of the setup here.
"""
import sys

from hypothesis import given, strategies

import jsonschema

PRIM = strategies.one_of(
    strategies.booleans(),
    strategies.integers(),
    strategies.floats(allow_nan=False, allow_infinity=False),
    strategies.text(),
)
DICT = strategies.recursive(
    base=strategies.one_of(
        strategies.booleans(),
        strategies.dictionaries(strategies.text(), PRIM),
    ),
    extend=lambda inner: strategies.dictionaries(strategies.text(), inner),
)


@given(obj1=DICT, obj2=DICT)
def test_schemas(obj1, obj2):
    try:
        jsonschema.validate(instance=obj1, schema=obj2)
    except jsonschema.exceptions.ValidationError:
        pass
    except jsonschema.exceptions.SchemaError:
        pass


def main():
    atheris.instrument_all()
    atheris.Setup(
        sys.argv,
        test_schemas.hypothesis.fuzz_one_input,
        enable_python_coverage=True,
    )
    atheris.Fuzz()


if __name__ == "__main__":
    import atheris
    main()
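The @given-decorated property above also runs as an ordinary Hypothesis test; a minimal sketch of exercising it without the atheris/OSS-Fuzz half, assuming only hypothesis and jsonschema are installed:

# Illustrative: Hypothesis drives the property directly, no fuzzer required.
test_schemas()  # Hypothesis supplies (obj1, obj2); only unexpected exceptions escape.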
907  .venv/lib/python3.10/site-packages/jsonschema/tests/test_cli.py  (new file)
@@ -0,0 +1,907 @@
|
||||
from contextlib import redirect_stderr, redirect_stdout
|
||||
from importlib import metadata
|
||||
from io import StringIO
|
||||
from json import JSONDecodeError
|
||||
from pathlib import Path
|
||||
from textwrap import dedent
|
||||
from unittest import TestCase
|
||||
import json
|
||||
import os
|
||||
import subprocess
|
||||
import sys
|
||||
import tempfile
|
||||
import warnings
|
||||
|
||||
from jsonschema import Draft4Validator, Draft202012Validator
|
||||
from jsonschema.exceptions import (
|
||||
SchemaError,
|
||||
ValidationError,
|
||||
_RefResolutionError,
|
||||
)
|
||||
from jsonschema.validators import _LATEST_VERSION, validate
|
||||
|
||||
with warnings.catch_warnings():
|
||||
warnings.simplefilter("ignore")
|
||||
from jsonschema import cli
|
||||
|
||||
|
||||
def fake_validator(*errors):
|
||||
errors = list(reversed(errors))
|
||||
|
||||
class FakeValidator:
|
||||
def __init__(self, *args, **kwargs):
|
||||
pass
|
||||
|
||||
def iter_errors(self, instance):
|
||||
if errors:
|
||||
return errors.pop()
|
||||
return [] # pragma: no cover
|
||||
|
||||
@classmethod
|
||||
def check_schema(self, schema):
|
||||
pass
|
||||
|
||||
return FakeValidator
|
||||
|
||||
|
||||
def fake_open(all_contents):
|
||||
def open(path):
|
||||
contents = all_contents.get(path)
|
||||
if contents is None:
|
||||
raise FileNotFoundError(path)
|
||||
return StringIO(contents)
|
||||
return open
|
||||
|
||||
|
||||
def _message_for(non_json):
|
||||
try:
|
||||
json.loads(non_json)
|
||||
except JSONDecodeError as error:
|
||||
return str(error)
|
||||
else: # pragma: no cover
|
||||
raise RuntimeError("Tried and failed to capture a JSON dump error.")
|
||||
|
||||
|
||||
class TestCLI(TestCase):
|
||||
def run_cli(
|
||||
self, argv, files=None, stdin=StringIO(), exit_code=0, **override,
|
||||
):
|
||||
arguments = cli.parse_args(argv)
|
||||
arguments.update(override)
|
||||
|
||||
self.assertFalse(hasattr(cli, "open"))
|
||||
cli.open = fake_open(files or {})
|
||||
try:
|
||||
stdout, stderr = StringIO(), StringIO()
|
||||
actual_exit_code = cli.run(
|
||||
arguments,
|
||||
stdin=stdin,
|
||||
stdout=stdout,
|
||||
stderr=stderr,
|
||||
)
|
||||
finally:
|
||||
del cli.open
|
||||
|
||||
self.assertEqual(
|
||||
actual_exit_code, exit_code, msg=dedent(
|
||||
f"""
|
||||
Expected an exit code of {exit_code} != {actual_exit_code}.
|
||||
|
||||
stdout: {stdout.getvalue()}
|
||||
|
||||
stderr: {stderr.getvalue()}
|
||||
""",
|
||||
),
|
||||
)
|
||||
return stdout.getvalue(), stderr.getvalue()
|
||||
|
||||
def assertOutputs(self, stdout="", stderr="", **kwargs):
|
||||
self.assertEqual(
|
||||
self.run_cli(**kwargs),
|
||||
(dedent(stdout), dedent(stderr)),
|
||||
)
|
||||
|
||||
def test_invalid_instance(self):
|
||||
error = ValidationError("I am an error!", instance=12)
|
||||
self.assertOutputs(
|
||||
files=dict(
|
||||
some_schema='{"does not": "matter since it is stubbed"}',
|
||||
some_instance=json.dumps(error.instance),
|
||||
),
|
||||
validator=fake_validator([error]),
|
||||
|
||||
argv=["-i", "some_instance", "some_schema"],
|
||||
|
||||
exit_code=1,
|
||||
stderr="12: I am an error!\n",
|
||||
)
|
||||
|
||||
def test_invalid_instance_pretty_output(self):
|
||||
error = ValidationError("I am an error!", instance=12)
|
||||
self.assertOutputs(
|
||||
files=dict(
|
||||
some_schema='{"does not": "matter since it is stubbed"}',
|
||||
some_instance=json.dumps(error.instance),
|
||||
),
|
||||
validator=fake_validator([error]),
|
||||
|
||||
argv=["-i", "some_instance", "--output", "pretty", "some_schema"],
|
||||
|
||||
exit_code=1,
|
||||
stderr="""\
|
||||
===[ValidationError]===(some_instance)===
|
||||
|
||||
I am an error!
|
||||
-----------------------------
|
||||
""",
|
||||
)
|
||||
|
||||
def test_invalid_instance_explicit_plain_output(self):
|
||||
error = ValidationError("I am an error!", instance=12)
|
||||
self.assertOutputs(
|
||||
files=dict(
|
||||
some_schema='{"does not": "matter since it is stubbed"}',
|
||||
some_instance=json.dumps(error.instance),
|
||||
),
|
||||
validator=fake_validator([error]),
|
||||
|
||||
argv=["--output", "plain", "-i", "some_instance", "some_schema"],
|
||||
|
||||
exit_code=1,
|
||||
stderr="12: I am an error!\n",
|
||||
)
|
||||
|
||||
def test_invalid_instance_multiple_errors(self):
|
||||
instance = 12
|
||||
first = ValidationError("First error", instance=instance)
|
||||
second = ValidationError("Second error", instance=instance)
|
||||
|
||||
self.assertOutputs(
|
||||
files=dict(
|
||||
some_schema='{"does not": "matter since it is stubbed"}',
|
||||
some_instance=json.dumps(instance),
|
||||
),
|
||||
validator=fake_validator([first, second]),
|
||||
|
||||
argv=["-i", "some_instance", "some_schema"],
|
||||
|
||||
exit_code=1,
|
||||
stderr="""\
|
||||
12: First error
|
||||
12: Second error
|
||||
""",
|
||||
)
|
||||
|
||||
def test_invalid_instance_multiple_errors_pretty_output(self):
|
||||
instance = 12
|
||||
first = ValidationError("First error", instance=instance)
|
||||
second = ValidationError("Second error", instance=instance)
|
||||
|
||||
self.assertOutputs(
|
||||
files=dict(
|
||||
some_schema='{"does not": "matter since it is stubbed"}',
|
||||
some_instance=json.dumps(instance),
|
||||
),
|
||||
validator=fake_validator([first, second]),
|
||||
|
||||
argv=["-i", "some_instance", "--output", "pretty", "some_schema"],
|
||||
|
||||
exit_code=1,
|
||||
stderr="""\
|
||||
===[ValidationError]===(some_instance)===
|
||||
|
||||
First error
|
||||
-----------------------------
|
||||
===[ValidationError]===(some_instance)===
|
||||
|
||||
Second error
|
||||
-----------------------------
|
||||
""",
|
||||
)
|
||||
|
||||
def test_multiple_invalid_instances(self):
|
||||
first_instance = 12
|
||||
first_errors = [
|
||||
ValidationError("An error", instance=first_instance),
|
||||
ValidationError("Another error", instance=first_instance),
|
||||
]
|
||||
second_instance = "foo"
|
||||
second_errors = [ValidationError("BOOM", instance=second_instance)]
|
||||
|
||||
self.assertOutputs(
|
||||
files=dict(
|
||||
some_schema='{"does not": "matter since it is stubbed"}',
|
||||
some_first_instance=json.dumps(first_instance),
|
||||
some_second_instance=json.dumps(second_instance),
|
||||
),
|
||||
validator=fake_validator(first_errors, second_errors),
|
||||
|
||||
argv=[
|
||||
"-i", "some_first_instance",
|
||||
"-i", "some_second_instance",
|
||||
"some_schema",
|
||||
],
|
||||
|
||||
exit_code=1,
|
||||
stderr="""\
|
||||
12: An error
|
||||
12: Another error
|
||||
foo: BOOM
|
||||
""",
|
||||
)
|
||||
|
||||
def test_multiple_invalid_instances_pretty_output(self):
|
||||
first_instance = 12
|
||||
first_errors = [
|
||||
ValidationError("An error", instance=first_instance),
|
||||
ValidationError("Another error", instance=first_instance),
|
||||
]
|
||||
second_instance = "foo"
|
||||
second_errors = [ValidationError("BOOM", instance=second_instance)]
|
||||
|
||||
self.assertOutputs(
|
||||
files=dict(
|
||||
some_schema='{"does not": "matter since it is stubbed"}',
|
||||
some_first_instance=json.dumps(first_instance),
|
||||
some_second_instance=json.dumps(second_instance),
|
||||
),
|
||||
validator=fake_validator(first_errors, second_errors),
|
||||
|
||||
argv=[
|
||||
"--output", "pretty",
|
||||
"-i", "some_first_instance",
|
||||
"-i", "some_second_instance",
|
||||
"some_schema",
|
||||
],
|
||||
|
||||
exit_code=1,
|
||||
stderr="""\
|
||||
===[ValidationError]===(some_first_instance)===
|
||||
|
||||
An error
|
||||
-----------------------------
|
||||
===[ValidationError]===(some_first_instance)===
|
||||
|
||||
Another error
|
||||
-----------------------------
|
||||
===[ValidationError]===(some_second_instance)===
|
||||
|
||||
BOOM
|
||||
-----------------------------
|
||||
""",
|
||||
)
|
||||
|
||||
def test_custom_error_format(self):
|
||||
first_instance = 12
|
||||
first_errors = [
|
||||
ValidationError("An error", instance=first_instance),
|
||||
ValidationError("Another error", instance=first_instance),
|
||||
]
|
||||
second_instance = "foo"
|
||||
second_errors = [ValidationError("BOOM", instance=second_instance)]
|
||||
|
||||
self.assertOutputs(
|
||||
files=dict(
|
||||
some_schema='{"does not": "matter since it is stubbed"}',
|
||||
some_first_instance=json.dumps(first_instance),
|
||||
some_second_instance=json.dumps(second_instance),
|
||||
),
|
||||
validator=fake_validator(first_errors, second_errors),
|
||||
|
||||
argv=[
|
||||
"--error-format", ":{error.message}._-_.{error.instance}:",
|
||||
"-i", "some_first_instance",
|
||||
"-i", "some_second_instance",
|
||||
"some_schema",
|
||||
],
|
||||
|
||||
exit_code=1,
|
||||
stderr=":An error._-_.12::Another error._-_.12::BOOM._-_.foo:",
|
||||
)
|
||||
|
||||
def test_invalid_schema(self):
|
||||
self.assertOutputs(
|
||||
files=dict(some_schema='{"type": 12}'),
|
||||
argv=["some_schema"],
|
||||
|
||||
exit_code=1,
|
||||
stderr="""\
|
||||
12: 12 is not valid under any of the given schemas
|
||||
""",
|
||||
)
|
||||
|
||||
def test_invalid_schema_pretty_output(self):
|
||||
schema = {"type": 12}
|
||||
|
||||
with self.assertRaises(SchemaError) as e:
|
||||
validate(schema=schema, instance="")
|
||||
error = str(e.exception)
|
||||
|
||||
self.assertOutputs(
|
||||
files=dict(some_schema=json.dumps(schema)),
|
||||
argv=["--output", "pretty", "some_schema"],
|
||||
|
||||
exit_code=1,
|
||||
stderr=(
|
||||
"===[SchemaError]===(some_schema)===\n\n"
|
||||
+ str(error)
|
||||
+ "\n-----------------------------\n"
|
||||
),
|
||||
)
|
||||
|
||||
def test_invalid_schema_multiple_errors(self):
|
||||
self.assertOutputs(
|
||||
files=dict(some_schema='{"type": 12, "items": 57}'),
|
||||
argv=["some_schema"],
|
||||
|
||||
exit_code=1,
|
||||
stderr="""\
|
||||
57: 57 is not of type 'object', 'boolean'
|
||||
""",
|
||||
)
|
||||
|
||||
def test_invalid_schema_multiple_errors_pretty_output(self):
|
||||
schema = {"type": 12, "items": 57}
|
||||
|
||||
with self.assertRaises(SchemaError) as e:
|
||||
validate(schema=schema, instance="")
|
||||
error = str(e.exception)
|
||||
|
||||
self.assertOutputs(
|
||||
files=dict(some_schema=json.dumps(schema)),
|
||||
argv=["--output", "pretty", "some_schema"],
|
||||
|
||||
exit_code=1,
|
||||
stderr=(
|
||||
"===[SchemaError]===(some_schema)===\n\n"
|
||||
+ str(error)
|
||||
+ "\n-----------------------------\n"
|
||||
),
|
||||
)
|
||||
|
||||
def test_invalid_schema_with_invalid_instance(self):
|
||||
"""
|
||||
"Validating" an instance that's invalid under an invalid schema
|
||||
just shows the schema error.
|
||||
"""
|
||||
self.assertOutputs(
|
||||
files=dict(
|
||||
some_schema='{"type": 12, "minimum": 30}',
|
||||
some_instance="13",
|
||||
),
|
||||
argv=["-i", "some_instance", "some_schema"],
|
||||
|
||||
exit_code=1,
|
||||
stderr="""\
|
||||
12: 12 is not valid under any of the given schemas
|
||||
""",
|
||||
)
|
||||
|
||||
def test_invalid_schema_with_invalid_instance_pretty_output(self):
|
||||
instance, schema = 13, {"type": 12, "minimum": 30}
|
||||
|
||||
with self.assertRaises(SchemaError) as e:
|
||||
validate(schema=schema, instance=instance)
|
||||
error = str(e.exception)
|
||||
|
||||
self.assertOutputs(
|
||||
files=dict(
|
||||
some_schema=json.dumps(schema),
|
||||
some_instance=json.dumps(instance),
|
||||
),
|
||||
argv=["--output", "pretty", "-i", "some_instance", "some_schema"],
|
||||
|
||||
exit_code=1,
|
||||
stderr=(
|
||||
"===[SchemaError]===(some_schema)===\n\n"
|
||||
+ str(error)
|
||||
+ "\n-----------------------------\n"
|
||||
),
|
||||
)
|
||||
|
||||
def test_invalid_instance_continues_with_the_rest(self):
|
||||
self.assertOutputs(
|
||||
files=dict(
|
||||
some_schema='{"minimum": 30}',
|
||||
first_instance="not valid JSON!",
|
||||
second_instance="12",
|
||||
),
|
||||
argv=[
|
||||
"-i", "first_instance",
|
||||
"-i", "second_instance",
|
||||
"some_schema",
|
||||
],
|
||||
|
||||
exit_code=1,
|
||||
stderr="""\
|
||||
Failed to parse 'first_instance': {}
|
||||
12: 12 is less than the minimum of 30
|
||||
""".format(_message_for("not valid JSON!")),
|
||||
)
|
||||
|
||||
def test_custom_error_format_applies_to_schema_errors(self):
|
||||
instance, schema = 13, {"type": 12, "minimum": 30}
|
||||
|
||||
with self.assertRaises(SchemaError):
|
||||
validate(schema=schema, instance=instance)
|
||||
|
||||
self.assertOutputs(
|
||||
files=dict(some_schema=json.dumps(schema)),
|
||||
|
||||
argv=[
|
||||
"--error-format", ":{error.message}._-_.{error.instance}:",
|
||||
"some_schema",
|
||||
],
|
||||
|
||||
exit_code=1,
|
||||
stderr=":12 is not valid under any of the given schemas._-_.12:",
|
||||
)
|
||||
|
||||
def test_instance_is_invalid_JSON(self):
|
||||
instance = "not valid JSON!"
|
||||
|
||||
self.assertOutputs(
|
||||
files=dict(some_schema="{}", some_instance=instance),
|
||||
argv=["-i", "some_instance", "some_schema"],
|
||||
|
||||
exit_code=1,
|
||||
stderr=f"""\
|
||||
Failed to parse 'some_instance': {_message_for(instance)}
|
||||
""",
|
||||
)
|
||||
|
||||
def test_instance_is_invalid_JSON_pretty_output(self):
|
||||
stdout, stderr = self.run_cli(
|
||||
files=dict(
|
||||
some_schema="{}",
|
||||
some_instance="not valid JSON!",
|
||||
),
|
||||
|
||||
argv=["--output", "pretty", "-i", "some_instance", "some_schema"],
|
||||
|
||||
exit_code=1,
|
||||
)
|
||||
self.assertFalse(stdout)
|
||||
self.assertIn(
|
||||
"(some_instance)===\n\nTraceback (most recent call last):\n",
|
||||
stderr,
|
||||
)
|
||||
self.assertNotIn("some_schema", stderr)
|
||||
|
||||
def test_instance_is_invalid_JSON_on_stdin(self):
|
||||
instance = "not valid JSON!"
|
||||
|
||||
self.assertOutputs(
|
||||
files=dict(some_schema="{}"),
|
||||
stdin=StringIO(instance),
|
||||
|
||||
argv=["some_schema"],
|
||||
|
||||
exit_code=1,
|
||||
stderr=f"""\
|
||||
Failed to parse <stdin>: {_message_for(instance)}
|
||||
""",
|
||||
)
|
||||
|
||||
def test_instance_is_invalid_JSON_on_stdin_pretty_output(self):
|
||||
stdout, stderr = self.run_cli(
|
||||
files=dict(some_schema="{}"),
|
||||
stdin=StringIO("not valid JSON!"),
|
||||
|
||||
argv=["--output", "pretty", "some_schema"],
|
||||
|
||||
exit_code=1,
|
||||
)
|
||||
self.assertFalse(stdout)
|
||||
self.assertIn(
|
||||
"(<stdin>)===\n\nTraceback (most recent call last):\n",
|
||||
stderr,
|
||||
)
|
||||
self.assertNotIn("some_schema", stderr)
|
||||
|
||||
def test_schema_is_invalid_JSON(self):
|
||||
schema = "not valid JSON!"
|
||||
|
||||
self.assertOutputs(
|
||||
files=dict(some_schema=schema),
|
||||
|
||||
argv=["some_schema"],
|
||||
|
||||
exit_code=1,
|
||||
stderr=f"""\
|
||||
Failed to parse 'some_schema': {_message_for(schema)}
|
||||
""",
|
||||
)
|
||||
|
||||
def test_schema_is_invalid_JSON_pretty_output(self):
|
||||
stdout, stderr = self.run_cli(
|
||||
files=dict(some_schema="not valid JSON!"),
|
||||
|
||||
argv=["--output", "pretty", "some_schema"],
|
||||
|
||||
exit_code=1,
|
||||
)
|
||||
self.assertFalse(stdout)
|
||||
self.assertIn(
|
||||
"(some_schema)===\n\nTraceback (most recent call last):\n",
|
||||
stderr,
|
||||
)
|
||||
|
||||
def test_schema_and_instance_are_both_invalid_JSON(self):
|
||||
"""
|
||||
Only the schema error is reported, as we abort immediately.
|
||||
"""
|
||||
schema, instance = "not valid JSON!", "also not valid JSON!"
|
||||
self.assertOutputs(
|
||||
files=dict(some_schema=schema, some_instance=instance),
|
||||
|
||||
argv=["some_schema"],
|
||||
|
||||
exit_code=1,
|
||||
stderr=f"""\
|
||||
Failed to parse 'some_schema': {_message_for(schema)}
|
||||
""",
|
||||
)
|
||||
|
||||
def test_schema_and_instance_are_both_invalid_JSON_pretty_output(self):
|
||||
"""
|
||||
Only the schema error is reported, as we abort immediately.
|
||||
"""
|
||||
stdout, stderr = self.run_cli(
|
||||
files=dict(
|
||||
some_schema="not valid JSON!",
|
||||
some_instance="also not valid JSON!",
|
||||
),
|
||||
|
||||
argv=["--output", "pretty", "-i", "some_instance", "some_schema"],
|
||||
|
||||
exit_code=1,
|
||||
)
|
||||
self.assertFalse(stdout)
|
||||
self.assertIn(
|
||||
"(some_schema)===\n\nTraceback (most recent call last):\n",
|
||||
stderr,
|
||||
)
|
||||
self.assertNotIn("some_instance", stderr)
|
||||
|
||||
def test_instance_does_not_exist(self):
|
||||
self.assertOutputs(
|
||||
files=dict(some_schema="{}"),
|
||||
argv=["-i", "nonexisting_instance", "some_schema"],
|
||||
|
||||
exit_code=1,
|
||||
stderr="""\
|
||||
'nonexisting_instance' does not exist.
|
||||
""",
|
||||
)
|
||||
|
||||
def test_instance_does_not_exist_pretty_output(self):
|
||||
self.assertOutputs(
|
||||
files=dict(some_schema="{}"),
|
||||
argv=[
|
||||
"--output", "pretty",
|
||||
"-i", "nonexisting_instance",
|
||||
"some_schema",
|
||||
],
|
||||
|
||||
exit_code=1,
|
||||
stderr="""\
|
||||
===[FileNotFoundError]===(nonexisting_instance)===
|
||||
|
||||
'nonexisting_instance' does not exist.
|
||||
-----------------------------
|
||||
""",
|
||||
)
|
||||
|
||||
def test_schema_does_not_exist(self):
|
||||
self.assertOutputs(
|
||||
argv=["nonexisting_schema"],
|
||||
|
||||
exit_code=1,
|
||||
stderr="'nonexisting_schema' does not exist.\n",
|
||||
)
|
||||
|
||||
def test_schema_does_not_exist_pretty_output(self):
|
||||
self.assertOutputs(
|
||||
argv=["--output", "pretty", "nonexisting_schema"],
|
||||
|
||||
exit_code=1,
|
||||
stderr="""\
|
||||
===[FileNotFoundError]===(nonexisting_schema)===
|
||||
|
||||
'nonexisting_schema' does not exist.
|
||||
-----------------------------
|
||||
""",
|
||||
)
|
||||
|
||||
def test_neither_instance_nor_schema_exist(self):
|
||||
self.assertOutputs(
|
||||
argv=["-i", "nonexisting_instance", "nonexisting_schema"],
|
||||
|
||||
exit_code=1,
|
||||
stderr="'nonexisting_schema' does not exist.\n",
|
||||
)
|
||||
|
||||
def test_neither_instance_nor_schema_exist_pretty_output(self):
|
||||
self.assertOutputs(
|
||||
argv=[
|
||||
"--output", "pretty",
|
||||
"-i", "nonexisting_instance",
|
||||
"nonexisting_schema",
|
||||
],
|
||||
|
||||
exit_code=1,
|
||||
stderr="""\
|
||||
===[FileNotFoundError]===(nonexisting_schema)===
|
||||
|
||||
'nonexisting_schema' does not exist.
|
||||
-----------------------------
|
||||
""",
|
||||
)
|
||||
|
||||
def test_successful_validation(self):
|
||||
self.assertOutputs(
|
||||
files=dict(some_schema="{}", some_instance="{}"),
|
||||
argv=["-i", "some_instance", "some_schema"],
|
||||
stdout="",
|
||||
stderr="",
|
||||
)
|
||||
|
||||
def test_successful_validation_pretty_output(self):
|
||||
self.assertOutputs(
|
||||
files=dict(some_schema="{}", some_instance="{}"),
|
||||
argv=["--output", "pretty", "-i", "some_instance", "some_schema"],
|
||||
stdout="===[SUCCESS]===(some_instance)===\n",
|
||||
stderr="",
|
||||
)
|
||||
|
||||
def test_successful_validation_of_stdin(self):
|
||||
self.assertOutputs(
|
||||
files=dict(some_schema="{}"),
|
||||
stdin=StringIO("{}"),
|
||||
argv=["some_schema"],
|
||||
stdout="",
|
||||
stderr="",
|
||||
)
|
||||
|
||||
def test_successful_validation_of_stdin_pretty_output(self):
|
||||
self.assertOutputs(
|
||||
files=dict(some_schema="{}"),
|
||||
stdin=StringIO("{}"),
|
||||
argv=["--output", "pretty", "some_schema"],
|
||||
stdout="===[SUCCESS]===(<stdin>)===\n",
|
||||
stderr="",
|
||||
)
|
||||
|
||||
def test_successful_validation_of_just_the_schema(self):
|
||||
self.assertOutputs(
|
||||
files=dict(some_schema="{}", some_instance="{}"),
|
||||
argv=["-i", "some_instance", "some_schema"],
|
||||
stdout="",
|
||||
stderr="",
|
||||
)
|
||||
|
||||
def test_successful_validation_of_just_the_schema_pretty_output(self):
|
||||
self.assertOutputs(
|
||||
files=dict(some_schema="{}", some_instance="{}"),
|
||||
argv=["--output", "pretty", "-i", "some_instance", "some_schema"],
|
||||
stdout="===[SUCCESS]===(some_instance)===\n",
|
||||
stderr="",
|
||||
)
|
||||
|
||||
def test_successful_validation_via_explicit_base_uri(self):
|
||||
ref_schema_file = tempfile.NamedTemporaryFile(delete=False)
|
||||
ref_schema_file.close()
|
||||
self.addCleanup(os.remove, ref_schema_file.name)
|
||||
|
||||
ref_path = Path(ref_schema_file.name)
|
||||
ref_path.write_text('{"definitions": {"num": {"type": "integer"}}}')
|
||||
|
||||
schema = f'{{"$ref": "{ref_path.name}#/definitions/num"}}'
|
||||
|
||||
self.assertOutputs(
|
||||
files=dict(some_schema=schema, some_instance="1"),
|
||||
argv=[
|
||||
"-i", "some_instance",
|
||||
"--base-uri", ref_path.parent.as_uri() + "/",
|
||||
"some_schema",
|
||||
],
|
||||
stdout="",
|
||||
stderr="",
|
||||
)
|
||||
|
||||
def test_unsuccessful_validation_via_explicit_base_uri(self):
|
||||
ref_schema_file = tempfile.NamedTemporaryFile(delete=False)
|
||||
ref_schema_file.close()
|
||||
self.addCleanup(os.remove, ref_schema_file.name)
|
||||
|
||||
ref_path = Path(ref_schema_file.name)
|
||||
ref_path.write_text('{"definitions": {"num": {"type": "integer"}}}')
|
||||
|
||||
schema = f'{{"$ref": "{ref_path.name}#/definitions/num"}}'
|
||||
|
||||
self.assertOutputs(
|
||||
files=dict(some_schema=schema, some_instance='"1"'),
|
||||
argv=[
|
||||
"-i", "some_instance",
|
||||
"--base-uri", ref_path.parent.as_uri() + "/",
|
||||
"some_schema",
|
||||
],
|
||||
exit_code=1,
|
||||
stdout="",
|
||||
stderr="1: '1' is not of type 'integer'\n",
|
||||
)
|
||||
|
||||
def test_nonexistent_file_with_explicit_base_uri(self):
|
||||
schema = '{"$ref": "someNonexistentFile.json#definitions/num"}'
|
||||
instance = "1"
|
||||
|
||||
with self.assertRaises(_RefResolutionError) as e:
|
||||
self.assertOutputs(
|
||||
files=dict(
|
||||
some_schema=schema,
|
||||
some_instance=instance,
|
||||
),
|
||||
argv=[
|
||||
"-i", "some_instance",
|
||||
"--base-uri", Path.cwd().as_uri(),
|
||||
"some_schema",
|
||||
],
|
||||
)
|
||||
error = str(e.exception)
|
||||
self.assertIn(f"{os.sep}someNonexistentFile.json'", error)
|
||||
|
||||
def test_invalid_explicit_base_uri(self):
|
||||
schema = '{"$ref": "foo.json#definitions/num"}'
|
||||
instance = "1"
|
||||
|
||||
with self.assertRaises(_RefResolutionError) as e:
|
||||
self.assertOutputs(
|
||||
files=dict(
|
||||
some_schema=schema,
|
||||
some_instance=instance,
|
||||
),
|
||||
argv=[
|
||||
"-i", "some_instance",
|
||||
"--base-uri", "not@UR1",
|
||||
"some_schema",
|
||||
],
|
||||
)
|
||||
error = str(e.exception)
|
||||
self.assertEqual(
|
||||
error, "unknown url type: 'foo.json'",
|
||||
)
|
||||
|
||||
def test_it_validates_using_the_latest_validator_when_unspecified(self):
|
||||
# There isn't a better way I can think of right now to ensure that the
|
||||
# latest version was used, given that the call to validator_for
|
||||
# is hidden inside the CLI, so guard that that's the case, and
|
||||
# this test will have to be updated when versions change until
|
||||
# we can think of a better way to ensure this behavior.
|
||||
self.assertIs(Draft202012Validator, _LATEST_VERSION)
|
||||
|
||||
self.assertOutputs(
|
||||
files=dict(some_schema='{"const": "check"}', some_instance='"a"'),
|
||||
argv=["-i", "some_instance", "some_schema"],
|
||||
exit_code=1,
|
||||
stdout="",
|
||||
stderr="a: 'check' was expected\n",
|
||||
)
|
||||
|
||||
def test_it_validates_using_draft7_when_specified(self):
|
||||
"""
|
||||
Specifically, `const` validation applies for Draft 7.
|
||||
"""
|
||||
schema = """
|
||||
{
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"const": "check"
|
||||
}
|
||||
"""
|
||||
instance = '"foo"'
|
||||
self.assertOutputs(
|
||||
files=dict(some_schema=schema, some_instance=instance),
|
||||
argv=["-i", "some_instance", "some_schema"],
|
||||
exit_code=1,
|
||||
stdout="",
|
||||
stderr="foo: 'check' was expected\n",
|
||||
)
|
||||
|
||||
def test_it_validates_using_draft4_when_specified(self):
|
||||
"""
|
||||
Specifically, `const` validation *does not* apply for Draft 4.
|
||||
"""
|
||||
schema = """
|
||||
{
|
||||
"$schema": "http://json-schema.org/draft-04/schema#",
|
||||
"const": "check"
|
||||
}
|
||||
"""
|
||||
instance = '"foo"'
|
||||
self.assertOutputs(
|
||||
files=dict(some_schema=schema, some_instance=instance),
|
||||
argv=["-i", "some_instance", "some_schema"],
|
||||
stdout="",
|
||||
stderr="",
|
||||
)
|
||||
|
||||
|
||||
class TestParser(TestCase):
|
||||
|
||||
FakeValidator = fake_validator()
|
||||
|
||||
def test_find_validator_by_fully_qualified_object_name(self):
|
||||
arguments = cli.parse_args(
|
||||
[
|
||||
"--validator",
|
||||
"jsonschema.tests.test_cli.TestParser.FakeValidator",
|
||||
"--instance", "mem://some/instance",
|
||||
"mem://some/schema",
|
||||
],
|
||||
)
|
||||
self.assertIs(arguments["validator"], self.FakeValidator)
|
||||
|
||||
def test_find_validator_in_jsonschema(self):
|
||||
arguments = cli.parse_args(
|
||||
[
|
||||
"--validator", "Draft4Validator",
|
||||
"--instance", "mem://some/instance",
|
||||
"mem://some/schema",
|
||||
],
|
||||
)
|
||||
self.assertIs(arguments["validator"], Draft4Validator)
|
||||
|
||||
def cli_output_for(self, *argv):
|
||||
stdout, stderr = StringIO(), StringIO()
|
||||
with redirect_stdout(stdout), redirect_stderr(stderr): # noqa: SIM117
|
||||
with self.assertRaises(SystemExit):
|
||||
cli.parse_args(argv)
|
||||
return stdout.getvalue(), stderr.getvalue()
|
||||
|
||||
def test_unknown_output(self):
|
||||
stdout, stderr = self.cli_output_for(
|
||||
"--output", "foo",
|
||||
"mem://some/schema",
|
||||
)
|
||||
self.assertIn("invalid choice: 'foo'", stderr)
|
||||
self.assertFalse(stdout)
|
||||
|
||||
def test_useless_error_format(self):
|
||||
stdout, stderr = self.cli_output_for(
|
||||
"--output", "pretty",
|
||||
"--error-format", "foo",
|
||||
"mem://some/schema",
|
||||
)
|
||||
self.assertIn(
|
||||
"--error-format can only be used with --output plain",
|
||||
stderr,
|
||||
)
|
||||
self.assertFalse(stdout)
|
||||
|
||||
|
||||
class TestCLIIntegration(TestCase):
|
||||
def test_license(self):
|
||||
output = subprocess.check_output(
|
||||
[sys.executable, "-m", "pip", "show", "jsonschema"],
|
||||
stderr=subprocess.STDOUT,
|
||||
)
|
||||
self.assertIn(b"License: MIT", output)
|
||||
|
||||
def test_version(self):
|
||||
version = subprocess.check_output(
|
||||
[sys.executable, "-W", "ignore", "-m", "jsonschema", "--version"],
|
||||
stderr=subprocess.STDOUT,
|
||||
)
|
||||
version = version.decode("utf-8").strip()
|
||||
self.assertEqual(version, metadata.version("jsonschema"))
|
||||
|
||||
def test_no_arguments_shows_usage_notes(self):
|
||||
output = subprocess.check_output(
|
||||
[sys.executable, "-m", "jsonschema"],
|
||||
stderr=subprocess.STDOUT,
|
||||
)
|
||||
output_for_help = subprocess.check_output(
|
||||
[sys.executable, "-m", "jsonschema", "--help"],
|
||||
stderr=subprocess.STDOUT,
|
||||
)
|
||||
self.assertEqual(output, output_for_help)
|
||||
@@ -0,0 +1,432 @@
|
||||
from contextlib import contextmanager
|
||||
from io import BytesIO
|
||||
from unittest import TestCase, mock
|
||||
import importlib.metadata
|
||||
import json
|
||||
import subprocess
|
||||
import sys
|
||||
import urllib.request
|
||||
|
||||
import referencing.exceptions
|
||||
|
||||
from jsonschema import FormatChecker, exceptions, protocols, validators
|
||||
|
||||
|
||||
class TestDeprecations(TestCase):
|
||||
def test_version(self):
|
||||
"""
|
||||
As of v4.0.0, __version__ is deprecated in favor of importlib.metadata.
|
||||
"""
|
||||
|
||||
message = "Accessing jsonschema.__version__ is deprecated"
|
||||
with self.assertWarnsRegex(DeprecationWarning, message) as w:
|
||||
from jsonschema import __version__
|
||||
|
||||
self.assertEqual(__version__, importlib.metadata.version("jsonschema"))
|
||||
self.assertEqual(w.filename, __file__)
|
||||
|
||||
def test_validators_ErrorTree(self):
|
||||
"""
|
||||
As of v4.0.0, importing ErrorTree from jsonschema.validators is
|
||||
deprecated in favor of doing so from jsonschema.exceptions.
|
||||
"""
|
||||
|
||||
message = "Importing ErrorTree from jsonschema.validators is "
|
||||
with self.assertWarnsRegex(DeprecationWarning, message) as w:
|
||||
from jsonschema.validators import ErrorTree
|
||||
|
||||
self.assertEqual(ErrorTree, exceptions.ErrorTree)
|
||||
self.assertEqual(w.filename, __file__)
|
||||
|
||||
def test_import_ErrorTree(self):
|
||||
"""
|
||||
As of v4.18.0, importing ErrorTree from the package root is
|
||||
deprecated in favor of doing so from jsonschema.exceptions.
|
||||
"""
|
||||
|
||||
message = "Importing ErrorTree directly from the jsonschema package "
|
||||
with self.assertWarnsRegex(DeprecationWarning, message) as w:
|
||||
from jsonschema import ErrorTree
|
||||
|
||||
self.assertEqual(ErrorTree, exceptions.ErrorTree)
|
||||
self.assertEqual(w.filename, __file__)
|
||||
|
||||
def test_ErrorTree_setitem(self):
|
||||
"""
|
||||
As of v4.20.0, setting items on an ErrorTree is deprecated.
|
||||
"""
|
||||
|
||||
e = exceptions.ValidationError("some error", path=["foo"])
|
||||
tree = exceptions.ErrorTree()
|
||||
subtree = exceptions.ErrorTree(errors=[e])
|
||||
|
||||
message = "ErrorTree.__setitem__ is "
|
||||
with self.assertWarnsRegex(DeprecationWarning, message) as w:
|
||||
tree["foo"] = subtree
|
||||
|
||||
self.assertEqual(tree["foo"], subtree)
|
||||
self.assertEqual(w.filename, __file__)
|
||||
|
||||
def test_import_FormatError(self):
|
||||
"""
|
||||
As of v4.18.0, importing FormatError from the package root is
|
||||
deprecated in favor of doing so from jsonschema.exceptions.
|
||||
"""
|
||||
|
||||
message = "Importing FormatError directly from the jsonschema package "
|
||||
with self.assertWarnsRegex(DeprecationWarning, message) as w:
|
||||
from jsonschema import FormatError
|
||||
|
||||
self.assertEqual(FormatError, exceptions.FormatError)
|
||||
self.assertEqual(w.filename, __file__)
|
||||
|
||||
def test_import_Validator(self):
|
||||
"""
|
||||
As of v4.19.0, importing Validator from the package root is
|
||||
deprecated in favor of doing so from jsonschema.protocols.
|
||||
"""
|
||||
|
||||
message = "Importing Validator directly from the jsonschema package "
|
||||
with self.assertWarnsRegex(DeprecationWarning, message) as w:
|
||||
from jsonschema import Validator
|
||||
|
||||
self.assertEqual(Validator, protocols.Validator)
|
||||
self.assertEqual(w.filename, __file__)
|
||||
|
||||
def test_validators_validators(self):
|
||||
"""
|
||||
As of v4.0.0, accessing jsonschema.validators.validators is
|
||||
deprecated.
|
||||
"""
|
||||
|
||||
message = "Accessing jsonschema.validators.validators is deprecated"
|
||||
with self.assertWarnsRegex(DeprecationWarning, message) as w:
|
||||
value = validators.validators
|
||||
|
||||
self.assertEqual(value, validators._VALIDATORS)
|
||||
self.assertEqual(w.filename, __file__)
|
||||
|
||||
def test_validators_meta_schemas(self):
|
||||
"""
|
||||
As of v4.0.0, accessing jsonschema.validators.meta_schemas is
|
||||
deprecated.
|
||||
"""
|
||||
|
||||
message = "Accessing jsonschema.validators.meta_schemas is deprecated"
|
||||
with self.assertWarnsRegex(DeprecationWarning, message) as w:
|
||||
value = validators.meta_schemas
|
||||
|
||||
self.assertEqual(value, validators._META_SCHEMAS)
|
||||
self.assertEqual(w.filename, __file__)
|
||||
|
||||
def test_RefResolver_in_scope(self):
|
||||
"""
|
||||
As of v4.0.0, RefResolver.in_scope is deprecated.
|
||||
"""
|
||||
|
||||
resolver = validators._RefResolver.from_schema({})
|
||||
message = "jsonschema.RefResolver.in_scope is deprecated "
|
||||
with self.assertWarnsRegex(DeprecationWarning, message) as w: # noqa: SIM117
|
||||
with resolver.in_scope("foo"):
|
||||
pass
|
||||
|
||||
self.assertEqual(w.filename, __file__)
|
||||
|
||||
def test_Validator_is_valid_two_arguments(self):
|
||||
"""
|
||||
As of v4.0.0, calling is_valid with two arguments (to provide a
|
||||
different schema) is deprecated.
|
||||
"""
|
||||
|
||||
validator = validators.Draft7Validator({})
|
||||
message = "Passing a schema to Validator.is_valid is deprecated "
|
||||
with self.assertWarnsRegex(DeprecationWarning, message) as w:
|
||||
result = validator.is_valid("foo", {"type": "number"})
|
||||
|
||||
self.assertFalse(result)
|
||||
self.assertEqual(w.filename, __file__)
|
||||
|
||||
def test_Validator_iter_errors_two_arguments(self):
|
||||
"""
|
||||
As of v4.0.0, calling iter_errors with two arguments (to provide a
|
||||
different schema) is deprecated.
|
||||
"""
|
||||
|
||||
validator = validators.Draft7Validator({})
|
||||
message = "Passing a schema to Validator.iter_errors is deprecated "
|
||||
with self.assertWarnsRegex(DeprecationWarning, message) as w:
|
||||
error, = validator.iter_errors("foo", {"type": "number"})
|
||||
|
||||
self.assertEqual(error.validator, "type")
|
||||
self.assertEqual(w.filename, __file__)
|
||||
|
||||
def test_Validator_resolver(self):
|
||||
"""
|
||||
As of v4.18.0, accessing Validator.resolver is deprecated.
|
||||
"""
|
||||
|
||||
validator = validators.Draft7Validator({})
|
||||
message = "Accessing Draft7Validator.resolver is "
|
||||
with self.assertWarnsRegex(DeprecationWarning, message) as w:
|
||||
self.assertIsInstance(validator.resolver, validators._RefResolver)
|
||||
|
||||
self.assertEqual(w.filename, __file__)
|
||||
|
||||
def test_RefResolver(self):
|
||||
"""
|
||||
As of v4.18.0, RefResolver is fully deprecated.
|
||||
"""
|
||||
|
||||
message = "jsonschema.RefResolver is deprecated"
|
||||
with self.assertWarnsRegex(DeprecationWarning, message) as w:
|
||||
from jsonschema import RefResolver
|
||||
self.assertEqual(w.filename, __file__)
|
||||
|
||||
with self.assertWarnsRegex(DeprecationWarning, message) as w:
|
||||
from jsonschema.validators import RefResolver # noqa: F401, F811
|
||||
self.assertEqual(w.filename, __file__)
|
||||
|
||||
def test_RefResolutionError(self):
|
||||
"""
|
||||
As of v4.18.0, RefResolutionError is deprecated in favor of directly
|
||||
catching errors from the referencing library.
|
||||
"""
|
||||
|
||||
message = "jsonschema.exceptions.RefResolutionError is deprecated"
|
||||
with self.assertWarnsRegex(DeprecationWarning, message) as w:
|
||||
from jsonschema import RefResolutionError
|
||||
|
||||
self.assertEqual(RefResolutionError, exceptions._RefResolutionError)
|
||||
self.assertEqual(w.filename, __file__)
|
||||
|
||||
with self.assertWarnsRegex(DeprecationWarning, message) as w:
|
||||
from jsonschema.exceptions import RefResolutionError
|
||||
|
||||
self.assertEqual(RefResolutionError, exceptions._RefResolutionError)
|
||||
self.assertEqual(w.filename, __file__)
|
||||
|
||||
def test_catching_Unresolvable_directly(self):
|
||||
"""
|
||||
This behavior is the intended behavior (i.e. it's not deprecated), but
|
||||
given we do "tricksy" things in the interim to wrap exceptions in a
|
||||
multiple inheritance subclass, we need to be extra sure it works and
|
||||
stays working.
|
||||
"""
|
||||
validator = validators.Draft202012Validator({"$ref": "urn:nothing"})
|
||||
|
||||
with self.assertRaises(referencing.exceptions.Unresolvable) as e:
|
||||
validator.validate(12)
|
||||
|
||||
expected = referencing.exceptions.Unresolvable(ref="urn:nothing")
|
||||
self.assertEqual(
|
||||
(e.exception, str(e.exception)),
|
||||
(expected, "Unresolvable: urn:nothing"),
|
||||
)
|
||||
|
||||
def test_catching_Unresolvable_via_RefResolutionError(self):
|
||||
"""
|
||||
Until RefResolutionError is removed, it is still possible to catch
|
||||
exceptions from reference resolution using it, even though they may
|
||||
have been raised by referencing.
|
||||
"""
|
||||
with self.assertWarns(DeprecationWarning):
|
||||
from jsonschema import RefResolutionError
|
||||
|
||||
validator = validators.Draft202012Validator({"$ref": "urn:nothing"})
|
||||
|
||||
with self.assertRaises(referencing.exceptions.Unresolvable) as u:
|
||||
validator.validate(12)
|
||||
|
||||
with self.assertRaises(RefResolutionError) as e:
|
||||
validator.validate(12)
|
||||
|
||||
self.assertEqual(
|
||||
(e.exception, str(e.exception)),
|
||||
(u.exception, "Unresolvable: urn:nothing"),
|
||||
)
|
||||
|
||||
def test_WrappedReferencingError_hashability(self):
|
||||
"""
|
||||
Ensure the wrapped referencing errors are hashable when possible.
|
||||
"""
|
||||
with self.assertWarns(DeprecationWarning):
|
||||
from jsonschema import RefResolutionError
|
||||
|
||||
validator = validators.Draft202012Validator({"$ref": "urn:nothing"})
|
||||
|
||||
with self.assertRaises(referencing.exceptions.Unresolvable) as u:
|
||||
validator.validate(12)
|
||||
|
||||
with self.assertRaises(RefResolutionError) as e:
|
||||
validator.validate(12)
|
||||
|
||||
self.assertIn(e.exception, {u.exception})
|
||||
self.assertIn(u.exception, {e.exception})
|
||||
|
||||
def test_Validator_subclassing(self):
|
||||
"""
|
||||
As of v4.12.0, subclassing a validator class produces an explicit
|
||||
deprecation warning.
|
||||
|
||||
This was never intended to be public API (and some comments over the
|
||||
years in issues said so, but obviously that's not a great way to make
|
||||
sure it's followed).
|
||||
|
||||
A future version will explicitly raise an error.
|
||||
"""
|
||||
|
||||
message = "Subclassing validator classes is "
|
||||
with self.assertWarnsRegex(DeprecationWarning, message) as w:
|
||||
class Subclass(validators.Draft202012Validator):
|
||||
pass
|
||||
|
||||
self.assertEqual(w.filename, __file__)
|
||||
|
||||
with self.assertWarnsRegex(DeprecationWarning, message) as w:
|
||||
class AnotherSubclass(validators.create(meta_schema={})):
|
||||
pass
|
||||
|
||||
def test_FormatChecker_cls_checks(self):
|
||||
"""
|
||||
As of v4.14.0, FormatChecker.cls_checks is deprecated without
|
||||
replacement.
|
||||
"""
|
||||
|
||||
self.addCleanup(FormatChecker.checkers.pop, "boom", None)
|
||||
|
||||
message = "FormatChecker.cls_checks "
|
||||
with self.assertWarnsRegex(DeprecationWarning, message) as w:
|
||||
FormatChecker.cls_checks("boom")
|
||||
|
||||
self.assertEqual(w.filename, __file__)
|
||||
|
||||
def test_draftN_format_checker(self):
|
||||
"""
|
||||
As of v4.16.0, accessing jsonschema.draftn_format_checker is deprecated
|
||||
in favor of Validator.FORMAT_CHECKER.
|
||||
"""
|
||||
|
||||
message = "Accessing jsonschema.draft202012_format_checker is "
|
||||
with self.assertWarnsRegex(DeprecationWarning, message) as w:
|
||||
from jsonschema import draft202012_format_checker
|
||||
|
||||
self.assertIs(
|
||||
draft202012_format_checker,
|
||||
validators.Draft202012Validator.FORMAT_CHECKER,
|
||||
)
|
||||
self.assertEqual(w.filename, __file__)
|
||||
|
||||
message = "Accessing jsonschema.draft201909_format_checker is "
|
||||
with self.assertWarnsRegex(DeprecationWarning, message) as w:
|
||||
from jsonschema import draft201909_format_checker
|
||||
|
||||
self.assertIs(
|
||||
draft201909_format_checker,
|
||||
validators.Draft201909Validator.FORMAT_CHECKER,
|
||||
)
|
||||
self.assertEqual(w.filename, __file__)
|
||||
|
||||
message = "Accessing jsonschema.draft7_format_checker is "
|
||||
with self.assertWarnsRegex(DeprecationWarning, message) as w:
|
||||
from jsonschema import draft7_format_checker
|
||||
|
||||
self.assertIs(
|
||||
draft7_format_checker,
|
||||
validators.Draft7Validator.FORMAT_CHECKER,
|
||||
)
|
||||
self.assertEqual(w.filename, __file__)
|
||||
|
||||
message = "Accessing jsonschema.draft6_format_checker is "
|
||||
with self.assertWarnsRegex(DeprecationWarning, message) as w:
|
||||
from jsonschema import draft6_format_checker
|
||||
|
||||
self.assertIs(
|
||||
draft6_format_checker,
|
||||
validators.Draft6Validator.FORMAT_CHECKER,
|
||||
)
|
||||
self.assertEqual(w.filename, __file__)
|
||||
|
||||
message = "Accessing jsonschema.draft4_format_checker is "
|
||||
with self.assertWarnsRegex(DeprecationWarning, message) as w:
|
||||
from jsonschema import draft4_format_checker
|
||||
|
||||
self.assertIs(
|
||||
draft4_format_checker,
|
||||
validators.Draft4Validator.FORMAT_CHECKER,
|
||||
)
|
||||
self.assertEqual(w.filename, __file__)
|
||||
|
||||
message = "Accessing jsonschema.draft3_format_checker is "
|
||||
with self.assertWarnsRegex(DeprecationWarning, message) as w:
|
||||
from jsonschema import draft3_format_checker
|
||||
|
||||
self.assertIs(
|
||||
draft3_format_checker,
|
||||
validators.Draft3Validator.FORMAT_CHECKER,
|
||||
)
|
||||
self.assertEqual(w.filename, __file__)
|
||||
|
||||
with self.assertRaises(ImportError):
|
||||
from jsonschema import draft1234_format_checker # noqa: F401
|
||||
|
||||
def test_import_cli(self):
|
||||
"""
|
||||
As of v4.17.0, importing jsonschema.cli is deprecated.
|
||||
"""
|
||||
|
||||
message = "The jsonschema CLI is deprecated and will be removed "
|
||||
with self.assertWarnsRegex(DeprecationWarning, message) as w:
|
||||
import jsonschema.cli
|
||||
importlib.reload(jsonschema.cli)
|
||||
|
||||
self.assertEqual(w.filename, importlib.__file__)
|
||||
|
||||
def test_cli(self):
|
||||
"""
|
||||
As of v4.17.0, the jsonschema CLI is deprecated.
|
||||
"""
|
||||
|
||||
process = subprocess.run(
|
||||
[sys.executable, "-m", "jsonschema"],
|
||||
capture_output=True,
|
||||
check=True,
|
||||
)
|
||||
self.assertIn(b"The jsonschema CLI is deprecated ", process.stderr)
|
||||
|
||||
def test_automatic_remote_retrieval(self):
|
||||
"""
|
||||
Automatic retrieval of remote references is deprecated as of v4.18.0.
|
||||
"""
|
||||
ref = "http://bar#/$defs/baz"
|
||||
schema = {"$defs": {"baz": {"type": "integer"}}}
|
||||
|
||||
if "requests" in sys.modules: # pragma: no cover
|
||||
self.addCleanup(
|
||||
sys.modules.__setitem__, "requests", sys.modules["requests"],
|
||||
)
|
||||
sys.modules["requests"] = None
|
||||
|
||||
@contextmanager
|
||||
def fake_urlopen(request):
|
||||
self.assertIsInstance(request, urllib.request.Request)
|
||||
self.assertEqual(request.full_url, "http://bar")
|
||||
|
||||
# Ha ha urllib.request.Request "normalizes" header names and
|
||||
# Request.get_header does not also normalize them...
|
||||
(header, value), = request.header_items()
|
||||
self.assertEqual(header.lower(), "user-agent")
|
||||
self.assertEqual(
|
||||
value, "python-jsonschema (deprecated $ref resolution)",
|
||||
)
|
||||
yield BytesIO(json.dumps(schema).encode("utf8"))
|
||||
|
||||
validator = validators.Draft202012Validator({"$ref": ref})
|
||||
|
||||
message = "Automatically retrieving remote references "
|
||||
patch = mock.patch.object(urllib.request, "urlopen", new=fake_urlopen)
|
||||
|
||||
with patch, self.assertWarnsRegex(DeprecationWarning, message):
|
||||
self.assertEqual(
|
||||
(validator.is_valid({}), validator.is_valid(37)),
|
||||
(False, True),
|
||||
)
|
||||
@@ -0,0 +1,702 @@
|
||||
from unittest import TestCase
|
||||
import textwrap
|
||||
|
||||
from jsonschema import exceptions
|
||||
from jsonschema.validators import _LATEST_VERSION
|
||||
|
||||
|
||||
class TestBestMatch(TestCase):
|
||||
def best_match_of(self, instance, schema):
|
||||
errors = list(_LATEST_VERSION(schema).iter_errors(instance))
|
||||
msg = f"No errors found for {instance} under {schema!r}!"
|
||||
self.assertTrue(errors, msg=msg)
|
||||
|
||||
best = exceptions.best_match(iter(errors))
|
||||
reversed_best = exceptions.best_match(reversed(errors))
|
||||
|
||||
self.assertEqual(
|
||||
best._contents(),
|
||||
reversed_best._contents(),
|
||||
f"No consistent best match!\nGot: {best}\n\nThen: {reversed_best}",
|
||||
)
|
||||
return best
|
||||
|
||||
def test_shallower_errors_are_better_matches(self):
|
||||
schema = {
|
||||
"properties": {
|
||||
"foo": {
|
||||
"minProperties": 2,
|
||||
"properties": {"bar": {"type": "object"}},
|
||||
},
|
||||
},
|
||||
}
|
||||
best = self.best_match_of(instance={"foo": {"bar": []}}, schema=schema)
|
||||
self.assertEqual(best.validator, "minProperties")
|
||||
|
||||
def test_oneOf_and_anyOf_are_weak_matches(self):
|
||||
"""
|
||||
A property you *must* match is probably better than one you have to
|
||||
match a part of.
|
||||
"""
|
||||
|
||||
schema = {
|
||||
"minProperties": 2,
|
||||
"anyOf": [{"type": "string"}, {"type": "number"}],
|
||||
"oneOf": [{"type": "string"}, {"type": "number"}],
|
||||
}
|
||||
best = self.best_match_of(instance={}, schema=schema)
|
||||
self.assertEqual(best.validator, "minProperties")
|
||||
|
||||
def test_if_the_most_relevant_error_is_anyOf_it_is_traversed(self):
|
||||
"""
|
||||
If the most relevant error is an anyOf, then we traverse its context
|
||||
and select the otherwise *least* relevant error, since in this case
|
||||
that means the most specific, deep, error inside the instance.
|
||||
|
||||
I.e. since only one of the schemas must match, we look for the most
|
||||
relevant one.
|
||||
"""
|
||||
|
||||
schema = {
|
||||
"properties": {
|
||||
"foo": {
|
||||
"anyOf": [
|
||||
{"type": "string"},
|
||||
{"properties": {"bar": {"type": "array"}}},
|
||||
],
|
||||
},
|
||||
},
|
||||
}
|
||||
best = self.best_match_of(instance={"foo": {"bar": 12}}, schema=schema)
|
||||
self.assertEqual(best.validator_value, "array")
|
||||
|
||||
def test_no_anyOf_traversal_for_equally_relevant_errors(self):
|
||||
"""
|
||||
We don't traverse into an anyOf (as above) if all of its context errors
|
||||
seem to be equally "wrong" against the instance.
|
||||
"""
|
||||
|
||||
schema = {
|
||||
"anyOf": [
|
||||
{"type": "string"},
|
||||
{"type": "integer"},
|
||||
{"type": "object"},
|
||||
],
|
||||
}
|
||||
best = self.best_match_of(instance=[], schema=schema)
|
||||
self.assertEqual(best.validator, "anyOf")
|
||||
|
||||
def test_anyOf_traversal_for_single_equally_relevant_error(self):
|
||||
"""
|
||||
We *do* traverse anyOf with a single nested error, even though it is
|
||||
vacuously equally relevant to itself.
|
||||
"""
|
||||
|
||||
schema = {
|
||||
"anyOf": [
|
||||
{"type": "string"},
|
||||
],
|
||||
}
|
||||
best = self.best_match_of(instance=[], schema=schema)
|
||||
self.assertEqual(best.validator, "type")
|
||||
|
||||
def test_anyOf_traversal_for_single_sibling_errors(self):
|
||||
"""
|
||||
We *do* traverse anyOf with a single subschema that fails multiple
|
||||
times (e.g. on multiple items).
|
||||
"""
|
||||
|
||||
schema = {
|
||||
"anyOf": [
|
||||
{"items": {"const": 37}},
|
||||
],
|
||||
}
|
||||
best = self.best_match_of(instance=[12, 12], schema=schema)
|
||||
self.assertEqual(best.validator, "const")
|
||||
|
||||
def test_anyOf_traversal_for_non_type_matching_sibling_errors(self):
|
||||
"""
|
||||
We *do* traverse anyOf with multiple subschemas when one does not type
|
||||
match.
|
||||
"""
|
||||
|
||||
schema = {
|
||||
"anyOf": [
|
||||
{"type": "object"},
|
||||
{"items": {"const": 37}},
|
||||
],
|
||||
}
|
||||
best = self.best_match_of(instance=[12, 12], schema=schema)
|
||||
self.assertEqual(best.validator, "const")
|
||||
|
||||
def test_if_the_most_relevant_error_is_oneOf_it_is_traversed(self):
|
||||
"""
|
||||
If the most relevant error is an oneOf, then we traverse its context
|
||||
and select the otherwise *least* relevant error, since in this case
|
||||
that means the most specific, deep, error inside the instance.
|
||||
|
||||
I.e. since only one of the schemas must match, we look for the most
|
||||
relevant one.
|
||||
"""
|
||||
|
||||
schema = {
|
||||
"properties": {
|
||||
"foo": {
|
||||
"oneOf": [
|
||||
{"type": "string"},
|
||||
{"properties": {"bar": {"type": "array"}}},
|
||||
],
|
||||
},
|
||||
},
|
||||
}
|
||||
best = self.best_match_of(instance={"foo": {"bar": 12}}, schema=schema)
|
||||
self.assertEqual(best.validator_value, "array")
|
||||
|
||||
def test_no_oneOf_traversal_for_equally_relevant_errors(self):
|
||||
"""
|
||||
We don't traverse into an oneOf (as above) if all of its context errors
|
||||
seem to be equally "wrong" against the instance.
|
||||
"""
|
||||
|
||||
schema = {
|
||||
"oneOf": [
|
||||
{"type": "string"},
|
||||
{"type": "integer"},
|
||||
{"type": "object"},
|
||||
],
|
||||
}
|
||||
best = self.best_match_of(instance=[], schema=schema)
|
||||
self.assertEqual(best.validator, "oneOf")
|
||||
|
||||
def test_oneOf_traversal_for_single_equally_relevant_error(self):
|
||||
"""
|
||||
We *do* traverse oneOf with a single nested error, even though it is
|
||||
vacuously equally relevant to itself.
|
||||
"""
|
||||
|
||||
schema = {
|
||||
"oneOf": [
|
||||
{"type": "string"},
|
||||
],
|
||||
}
|
||||
best = self.best_match_of(instance=[], schema=schema)
|
||||
self.assertEqual(best.validator, "type")
|
||||
|
||||
def test_oneOf_traversal_for_single_sibling_errors(self):
|
||||
"""
|
||||
We *do* traverse oneOf with a single subschema that fails multiple
|
||||
times (e.g. on multiple items).
|
||||
"""
|
||||
|
||||
schema = {
|
||||
"oneOf": [
|
||||
{"items": {"const": 37}},
|
||||
],
|
||||
}
|
||||
best = self.best_match_of(instance=[12, 12], schema=schema)
|
||||
self.assertEqual(best.validator, "const")
|
||||
|
||||
def test_oneOf_traversal_for_non_type_matching_sibling_errors(self):
|
||||
"""
|
||||
We *do* traverse oneOf with multiple subschemas when one does not type
|
||||
match.
|
||||
"""
|
||||
|
||||
schema = {
|
||||
"oneOf": [
|
||||
{"type": "object"},
|
||||
{"items": {"const": 37}},
|
||||
],
|
||||
}
|
||||
best = self.best_match_of(instance=[12, 12], schema=schema)
|
||||
self.assertEqual(best.validator, "const")
|
||||
|
||||
def test_if_the_most_relevant_error_is_allOf_it_is_traversed(self):
|
||||
"""
|
||||
Now, if the error is allOf, we traverse but select the *most* relevant
|
||||
error from the context, because all schemas here must match anyways.
|
||||
"""
|
||||
|
||||
schema = {
|
||||
"properties": {
|
||||
"foo": {
|
||||
"allOf": [
|
||||
{"type": "string"},
|
||||
{"properties": {"bar": {"type": "array"}}},
|
||||
],
|
||||
},
|
||||
},
|
||||
}
|
||||
best = self.best_match_of(instance={"foo": {"bar": 12}}, schema=schema)
|
||||
self.assertEqual(best.validator_value, "string")
|
||||
|
||||
def test_nested_context_for_oneOf(self):
|
||||
"""
|
||||
We traverse into nested contexts (a oneOf containing an error in a
|
||||
nested oneOf here).
|
||||
"""
|
||||
|
||||
schema = {
|
||||
"properties": {
|
||||
"foo": {
|
||||
"oneOf": [
|
||||
{"type": "string"},
|
||||
{
|
||||
"oneOf": [
|
||||
{"type": "string"},
|
||||
{
|
||||
"properties": {
|
||||
"bar": {"type": "array"},
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
}
|
||||
best = self.best_match_of(instance={"foo": {"bar": 12}}, schema=schema)
|
||||
self.assertEqual(best.validator_value, "array")
|
||||
|
||||
def test_it_prioritizes_matching_types(self):
|
||||
schema = {
|
||||
"properties": {
|
||||
"foo": {
|
||||
"anyOf": [
|
||||
{"type": "array", "minItems": 2},
|
||||
{"type": "string", "minLength": 10},
|
||||
],
|
||||
},
|
||||
},
|
||||
}
|
||||
best = self.best_match_of(instance={"foo": "bar"}, schema=schema)
|
||||
self.assertEqual(best.validator, "minLength")
|
||||
|
||||
reordered = {
|
||||
"properties": {
|
||||
"foo": {
|
||||
"anyOf": [
|
||||
{"type": "string", "minLength": 10},
|
||||
{"type": "array", "minItems": 2},
|
||||
],
|
||||
},
|
||||
},
|
||||
}
|
||||
best = self.best_match_of(instance={"foo": "bar"}, schema=reordered)
|
||||
self.assertEqual(best.validator, "minLength")
|
||||
|
||||
def test_it_prioritizes_matching_union_types(self):
|
||||
schema = {
|
||||
"properties": {
|
||||
"foo": {
|
||||
"anyOf": [
|
||||
{"type": ["array", "object"], "minItems": 2},
|
||||
{"type": ["integer", "string"], "minLength": 10},
|
||||
],
|
||||
},
|
||||
},
|
||||
}
|
||||
best = self.best_match_of(instance={"foo": "bar"}, schema=schema)
|
||||
self.assertEqual(best.validator, "minLength")
|
||||
|
||||
reordered = {
|
||||
"properties": {
|
||||
"foo": {
|
||||
"anyOf": [
|
||||
{"type": "string", "minLength": 10},
|
||||
{"type": "array", "minItems": 2},
|
||||
],
|
||||
},
|
||||
},
|
||||
}
|
||||
best = self.best_match_of(instance={"foo": "bar"}, schema=reordered)
|
||||
self.assertEqual(best.validator, "minLength")
|
||||
|
||||
def test_boolean_schemas(self):
|
||||
schema = {"properties": {"foo": False}}
|
||||
best = self.best_match_of(instance={"foo": "bar"}, schema=schema)
|
||||
self.assertIsNone(best.validator)
|
||||
|
||||
def test_one_error(self):
|
||||
validator = _LATEST_VERSION({"minProperties": 2})
|
||||
error, = validator.iter_errors({})
|
||||
self.assertEqual(
|
||||
exceptions.best_match(validator.iter_errors({})).validator,
|
||||
"minProperties",
|
||||
)
|
||||
|
||||
def test_no_errors(self):
|
||||
validator = _LATEST_VERSION({})
|
||||
self.assertIsNone(exceptions.best_match(validator.iter_errors({})))
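# Editor's note: an illustrative sketch, not part of the vendored test file.
# It condenses what the best_match tests above assert: when the most relevant
# error is an anyOf/oneOf, best_match descends into its context and returns
# the deepest, most specific error.  Only the public jsonschema API already
# used in this diff is assumed; the underscore-prefixed names are invented
# for this example.
from jsonschema import Draft202012Validator
from jsonschema.exceptions import best_match

_example_validator = Draft202012Validator(
    {
        "properties": {
            "foo": {
                "anyOf": [
                    {"type": "string"},
                    {"properties": {"bar": {"type": "array"}}},
                ],
            },
        },
    },
)
_example_error = best_match(_example_validator.iter_errors({"foo": {"bar": 12}}))
assert _example_error.validator == "type"
assert _example_error.validator_value == "array"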
|
||||
|
||||
|
||||
class TestByRelevance(TestCase):
|
||||
def test_short_paths_are_better_matches(self):
|
||||
shallow = exceptions.ValidationError("Oh no!", path=["baz"])
|
||||
deep = exceptions.ValidationError("Oh yes!", path=["foo", "bar"])
|
||||
match = max([shallow, deep], key=exceptions.relevance)
|
||||
self.assertIs(match, shallow)
|
||||
|
||||
match = max([deep, shallow], key=exceptions.relevance)
|
||||
self.assertIs(match, shallow)
|
||||
|
||||
def test_global_errors_are_even_better_matches(self):
|
||||
shallow = exceptions.ValidationError("Oh no!", path=[])
|
||||
deep = exceptions.ValidationError("Oh yes!", path=["foo"])
|
||||
|
||||
errors = sorted([shallow, deep], key=exceptions.relevance)
|
||||
self.assertEqual(
|
||||
[list(error.path) for error in errors],
|
||||
[["foo"], []],
|
||||
)
|
||||
|
||||
errors = sorted([deep, shallow], key=exceptions.relevance)
|
||||
self.assertEqual(
|
||||
[list(error.path) for error in errors],
|
||||
[["foo"], []],
|
||||
)
|
||||
|
||||
def test_weak_keywords_are_lower_priority(self):
|
||||
weak = exceptions.ValidationError("Oh no!", path=[], validator="a")
|
||||
normal = exceptions.ValidationError("Oh yes!", path=[], validator="b")
|
||||
|
||||
best_match = exceptions.by_relevance(weak="a")
|
||||
|
||||
match = max([weak, normal], key=best_match)
|
||||
self.assertIs(match, normal)
|
||||
|
||||
match = max([normal, weak], key=best_match)
|
||||
self.assertIs(match, normal)
|
||||
|
||||
def test_strong_keywords_are_higher_priority(self):
|
||||
weak = exceptions.ValidationError("Oh no!", path=[], validator="a")
|
||||
normal = exceptions.ValidationError("Oh yes!", path=[], validator="b")
|
||||
strong = exceptions.ValidationError("Oh fine!", path=[], validator="c")
|
||||
|
||||
best_match = exceptions.by_relevance(weak="a", strong="c")
|
||||
|
||||
match = max([weak, normal, strong], key=best_match)
|
||||
self.assertIs(match, strong)
|
||||
|
||||
match = max([strong, normal, weak], key=best_match)
|
||||
self.assertIs(match, strong)
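# Editor's note: an illustrative sketch, not part of the vendored file.
# by_relevance builds the key function best_match uses, so it can be passed
# directly to max()/sorted().  The keyword names "format" (weak) and
# "required" (strong) are arbitrary choices made for this example.
from jsonschema import exceptions as _exceptions

_errs = [
    _exceptions.ValidationError("bad format", validator="format", path=[]),
    _exceptions.ValidationError("missing key", validator="required", path=[]),
]
_most_relevant = max(
    _errs,
    key=_exceptions.by_relevance(weak="format", strong="required"),
)
assert _most_relevant.validator == "required"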
|
||||
|
||||
|
||||
class TestErrorTree(TestCase):
|
||||
def test_it_knows_how_many_total_errors_it_contains(self):
|
||||
# FIXME: #442
|
||||
errors = [
|
||||
exceptions.ValidationError("Something", validator=i)
|
||||
for i in range(8)
|
||||
]
|
||||
tree = exceptions.ErrorTree(errors)
|
||||
self.assertEqual(tree.total_errors, 8)
|
||||
|
||||
def test_it_contains_an_item_if_the_item_had_an_error(self):
|
||||
errors = [exceptions.ValidationError("a message", path=["bar"])]
|
||||
tree = exceptions.ErrorTree(errors)
|
||||
self.assertIn("bar", tree)
|
||||
|
||||
def test_it_does_not_contain_an_item_if_the_item_had_no_error(self):
|
||||
errors = [exceptions.ValidationError("a message", path=["bar"])]
|
||||
tree = exceptions.ErrorTree(errors)
|
||||
self.assertNotIn("foo", tree)
|
||||
|
||||
def test_keywords_that_failed_appear_in_errors_dict(self):
|
||||
error = exceptions.ValidationError("a message", validator="foo")
|
||||
tree = exceptions.ErrorTree([error])
|
||||
self.assertEqual(tree.errors, {"foo": error})
|
||||
|
||||
def test_it_creates_a_child_tree_for_each_nested_path(self):
|
||||
errors = [
|
||||
exceptions.ValidationError("a bar message", path=["bar"]),
|
||||
exceptions.ValidationError("a bar -> 0 message", path=["bar", 0]),
|
||||
]
|
||||
tree = exceptions.ErrorTree(errors)
|
||||
self.assertIn(0, tree["bar"])
|
||||
self.assertNotIn(1, tree["bar"])
|
||||
|
||||
def test_children_have_their_errors_dicts_built(self):
|
||||
e1, e2 = (
|
||||
exceptions.ValidationError("1", validator="foo", path=["bar", 0]),
|
||||
exceptions.ValidationError("2", validator="quux", path=["bar", 0]),
|
||||
)
|
||||
tree = exceptions.ErrorTree([e1, e2])
|
||||
self.assertEqual(tree["bar"][0].errors, {"foo": e1, "quux": e2})
|
||||
|
||||
def test_multiple_errors_with_instance(self):
|
||||
e1, e2 = (
|
||||
exceptions.ValidationError(
|
||||
"1",
|
||||
validator="foo",
|
||||
path=["bar", "bar2"],
|
||||
instance="i1"),
|
||||
exceptions.ValidationError(
|
||||
"2",
|
||||
validator="quux",
|
||||
path=["foobar", 2],
|
||||
instance="i2"),
|
||||
)
|
||||
exceptions.ErrorTree([e1, e2])
|
||||
|
||||
def test_it_does_not_contain_subtrees_that_are_not_in_the_instance(self):
|
||||
error = exceptions.ValidationError("123", validator="foo", instance=[])
|
||||
tree = exceptions.ErrorTree([error])
|
||||
|
||||
with self.assertRaises(IndexError):
|
||||
tree[0]
|
||||
|
||||
def test_if_its_in_the_tree_anyhow_it_does_not_raise_an_error(self):
|
||||
"""
|
||||
If a keyword refers to a path that isn't in the instance, the
|
||||
tree still properly returns a subtree for that path.
|
||||
"""
|
||||
|
||||
error = exceptions.ValidationError(
|
||||
"a message", validator="foo", instance={}, path=["foo"],
|
||||
)
|
||||
tree = exceptions.ErrorTree([error])
|
||||
self.assertIsInstance(tree["foo"], exceptions.ErrorTree)
|
||||
|
||||
def test_iter(self):
|
||||
e1, e2 = (
|
||||
exceptions.ValidationError(
|
||||
"1",
|
||||
validator="foo",
|
||||
path=["bar", "bar2"],
|
||||
instance="i1"),
|
||||
exceptions.ValidationError(
|
||||
"2",
|
||||
validator="quux",
|
||||
path=["foobar", 2],
|
||||
instance="i2"),
|
||||
)
|
||||
tree = exceptions.ErrorTree([e1, e2])
|
||||
self.assertEqual(set(tree), {"bar", "foobar"})
|
||||
|
||||
def test_repr_single(self):
|
||||
error = exceptions.ValidationError(
|
||||
"1",
|
||||
validator="foo",
|
||||
path=["bar", "bar2"],
|
||||
instance="i1",
|
||||
)
|
||||
tree = exceptions.ErrorTree([error])
|
||||
self.assertEqual(repr(tree), "<ErrorTree (1 total error)>")
|
||||
|
||||
def test_repr_multiple(self):
|
||||
e1, e2 = (
|
||||
exceptions.ValidationError(
|
||||
"1",
|
||||
validator="foo",
|
||||
path=["bar", "bar2"],
|
||||
instance="i1"),
|
||||
exceptions.ValidationError(
|
||||
"2",
|
||||
validator="quux",
|
||||
path=["foobar", 2],
|
||||
instance="i2"),
|
||||
)
|
||||
tree = exceptions.ErrorTree([e1, e2])
|
||||
self.assertEqual(repr(tree), "<ErrorTree (2 total errors)>")
|
||||
|
||||
def test_repr_empty(self):
|
||||
tree = exceptions.ErrorTree([])
|
||||
self.assertEqual(repr(tree), "<ErrorTree (0 total errors)>")
|
||||
|
||||
|
||||
class TestErrorInitReprStr(TestCase):
|
||||
def make_error(self, **kwargs):
|
||||
defaults = dict(
|
||||
message="hello",
|
||||
validator="type",
|
||||
validator_value="string",
|
||||
instance=5,
|
||||
schema={"type": "string"},
|
||||
)
|
||||
defaults.update(kwargs)
|
||||
return exceptions.ValidationError(**defaults)
|
||||
|
||||
def assertShows(self, expected, **kwargs):
|
||||
expected = textwrap.dedent(expected).rstrip("\n")
|
||||
|
||||
error = self.make_error(**kwargs)
|
||||
message_line, _, rest = str(error).partition("\n")
|
||||
self.assertEqual(message_line, error.message)
|
||||
self.assertEqual(rest, expected)
|
||||
|
||||
def test_it_calls_super_and_sets_args(self):
|
||||
error = self.make_error()
|
||||
self.assertGreater(len(error.args), 1)
|
||||
|
||||
def test_repr(self):
|
||||
self.assertEqual(
|
||||
repr(exceptions.ValidationError(message="Hello!")),
|
||||
"<ValidationError: 'Hello!'>",
|
||||
)
|
||||
|
||||
def test_unset_error(self):
|
||||
error = exceptions.ValidationError("message")
|
||||
self.assertEqual(str(error), "message")
|
||||
|
||||
kwargs = {
|
||||
"validator": "type",
|
||||
"validator_value": "string",
|
||||
"instance": 5,
|
||||
"schema": {"type": "string"},
|
||||
}
|
||||
# Just the message should show if any of the attributes are unset
|
||||
for attr in kwargs:
|
||||
k = dict(kwargs)
|
||||
del k[attr]
|
||||
error = exceptions.ValidationError("message", **k)
|
||||
self.assertEqual(str(error), "message")
|
||||
|
||||
def test_empty_paths(self):
|
||||
self.assertShows(
|
||||
"""
|
||||
Failed validating 'type' in schema:
|
||||
{'type': 'string'}
|
||||
|
||||
On instance:
|
||||
5
|
||||
""",
|
||||
path=[],
|
||||
schema_path=[],
|
||||
)
|
||||
|
||||
def test_one_item_paths(self):
|
||||
self.assertShows(
|
||||
"""
|
||||
Failed validating 'type' in schema:
|
||||
{'type': 'string'}
|
||||
|
||||
On instance[0]:
|
||||
5
|
||||
""",
|
||||
path=[0],
|
||||
schema_path=["items"],
|
||||
)
|
||||
|
||||
def test_multiple_item_paths(self):
|
||||
self.assertShows(
|
||||
"""
|
||||
Failed validating 'type' in schema['items'][0]:
|
||||
{'type': 'string'}
|
||||
|
||||
On instance[0]['a']:
|
||||
5
|
||||
""",
|
||||
path=[0, "a"],
|
||||
schema_path=["items", 0, 1],
|
||||
)
|
||||
|
||||
def test_uses_pprint(self):
|
||||
self.assertShows(
|
||||
"""
|
||||
Failed validating 'maxLength' in schema:
|
||||
{0: 0,
|
||||
1: 1,
|
||||
2: 2,
|
||||
3: 3,
|
||||
4: 4,
|
||||
5: 5,
|
||||
6: 6,
|
||||
7: 7,
|
||||
8: 8,
|
||||
9: 9,
|
||||
10: 10,
|
||||
11: 11,
|
||||
12: 12,
|
||||
13: 13,
|
||||
14: 14,
|
||||
15: 15,
|
||||
16: 16,
|
||||
17: 17,
|
||||
18: 18,
|
||||
19: 19}
|
||||
|
||||
On instance:
|
||||
[0,
|
||||
1,
|
||||
2,
|
||||
3,
|
||||
4,
|
||||
5,
|
||||
6,
|
||||
7,
|
||||
8,
|
||||
9,
|
||||
10,
|
||||
11,
|
||||
12,
|
||||
13,
|
||||
14,
|
||||
15,
|
||||
16,
|
||||
17,
|
||||
18,
|
||||
19,
|
||||
20,
|
||||
21,
|
||||
22,
|
||||
23,
|
||||
24]
|
||||
""",
|
||||
instance=list(range(25)),
|
||||
schema=dict(zip(range(20), range(20))),
|
||||
validator="maxLength",
|
||||
)
|
||||
|
||||
def test_does_not_reorder_dicts(self):
|
||||
self.assertShows(
|
||||
"""
|
||||
Failed validating 'type' in schema:
|
||||
{'do': 3, 'not': 7, 'sort': 37, 'me': 73}
|
||||
|
||||
On instance:
|
||||
{'here': 73, 'too': 37, 'no': 7, 'sorting': 3}
|
||||
""",
|
||||
schema={
|
||||
"do": 3,
|
||||
"not": 7,
|
||||
"sort": 37,
|
||||
"me": 73,
|
||||
},
|
||||
instance={
|
||||
"here": 73,
|
||||
"too": 37,
|
||||
"no": 7,
|
||||
"sorting": 3,
|
||||
},
|
||||
)
|
||||
|
||||
def test_str_works_with_instances_having_overriden_eq_operator(self):
|
||||
"""
|
||||
Check for #164 which rendered exceptions unusable when a
|
||||
`ValidationError` involved instances with an `__eq__` method
|
||||
that returned truthy values.
|
||||
"""
|
||||
|
||||
class DontEQMeBro:
|
||||
def __eq__(this, other): # pragma: no cover
|
||||
self.fail("Don't!")
|
||||
|
||||
def __ne__(this, other): # pragma: no cover
|
||||
self.fail("Don't!")
|
||||
|
||||
instance = DontEQMeBro()
|
||||
error = exceptions.ValidationError(
|
||||
"a message",
|
||||
validator="foo",
|
||||
instance=instance,
|
||||
validator_value="some",
|
||||
schema="schema",
|
||||
)
|
||||
self.assertIn(repr(instance), str(error))
|
||||
|
||||
|
||||
class TestHashable(TestCase):
|
||||
def test_hashable(self):
|
||||
{exceptions.ValidationError("")}
|
||||
{exceptions.SchemaError("")}
|
||||
@@ -0,0 +1,91 @@
|
||||
"""
|
||||
Tests for the parts of jsonschema related to the :kw:`format` keyword.
|
||||
"""
|
||||
|
||||
from unittest import TestCase
|
||||
|
||||
from jsonschema import FormatChecker, ValidationError
|
||||
from jsonschema.exceptions import FormatError
|
||||
from jsonschema.validators import Draft4Validator
|
||||
|
||||
BOOM = ValueError("Boom!")
|
||||
BANG = ZeroDivisionError("Bang!")
|
||||
|
||||
|
||||
def boom(thing):
|
||||
if thing == "bang":
|
||||
raise BANG
|
||||
raise BOOM
|
||||
|
||||
|
||||
class TestFormatChecker(TestCase):
|
||||
def test_it_can_validate_no_formats(self):
|
||||
checker = FormatChecker(formats=())
|
||||
self.assertFalse(checker.checkers)
|
||||
|
||||
def test_it_raises_a_key_error_for_unknown_formats(self):
|
||||
with self.assertRaises(KeyError):
|
||||
FormatChecker(formats=["o noes"])
|
||||
|
||||
def test_it_can_register_cls_checkers(self):
|
||||
original = dict(FormatChecker.checkers)
|
||||
self.addCleanup(FormatChecker.checkers.pop, "boom")
|
||||
with self.assertWarns(DeprecationWarning):
|
||||
FormatChecker.cls_checks("boom")(boom)
|
||||
self.assertEqual(
|
||||
FormatChecker.checkers,
|
||||
dict(original, boom=(boom, ())),
|
||||
)
|
||||
|
||||
def test_it_can_register_checkers(self):
|
||||
checker = FormatChecker()
|
||||
checker.checks("boom")(boom)
|
||||
self.assertEqual(
|
||||
checker.checkers,
|
||||
dict(FormatChecker.checkers, boom=(boom, ())),
|
||||
)
|
||||
|
||||
def test_it_catches_registered_errors(self):
|
||||
checker = FormatChecker()
|
||||
checker.checks("boom", raises=type(BOOM))(boom)
|
||||
|
||||
with self.assertRaises(FormatError) as cm:
|
||||
checker.check(instance=12, format="boom")
|
||||
|
||||
self.assertIs(cm.exception.cause, BOOM)
|
||||
self.assertIs(cm.exception.__cause__, BOOM)
|
||||
self.assertEqual(str(cm.exception), "12 is not a 'boom'")
|
||||
|
||||
# Unregistered errors should not be caught
|
||||
with self.assertRaises(type(BANG)):
|
||||
checker.check(instance="bang", format="boom")
|
||||
|
||||
def test_format_error_causes_become_validation_error_causes(self):
|
||||
checker = FormatChecker()
|
||||
checker.checks("boom", raises=ValueError)(boom)
|
||||
validator = Draft4Validator({"format": "boom"}, format_checker=checker)
|
||||
|
||||
with self.assertRaises(ValidationError) as cm:
|
||||
validator.validate("BOOM")
|
||||
|
||||
self.assertIs(cm.exception.cause, BOOM)
|
||||
self.assertIs(cm.exception.__cause__, BOOM)
|
||||
|
||||
def test_format_checkers_come_with_defaults(self):
|
||||
# This is bad :/ but relied upon.
|
||||
# The docs for quite a while recommended people do things like
|
||||
# validate(..., format_checker=FormatChecker())
|
||||
# We should change that, but we can't without deprecation...
|
||||
checker = FormatChecker()
|
||||
with self.assertRaises(FormatError):
|
||||
checker.check(instance="not-an-ipv4", format="ipv4")
|
||||
|
||||
def test_repr(self):
|
||||
checker = FormatChecker(formats=())
|
||||
checker.checks("foo")(lambda thing: True) # pragma: no cover
|
||||
checker.checks("bar")(lambda thing: True) # pragma: no cover
|
||||
checker.checks("baz")(lambda thing: True) # pragma: no cover
|
||||
self.assertEqual(
|
||||
repr(checker),
|
||||
"<FormatChecker checkers=['bar', 'baz', 'foo']>",
|
||||
)
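# Editor's note: an illustrative sketch, not part of the vendored file.  It
# shows the per-instance registration pattern the tests above exercise with
# `boom`: a custom format plus `raises=` turns raised exceptions into format
# (and hence validation) errors.  The "even" format name and the helpers
# below are invented for this example.
from jsonschema import Draft202012Validator as _V202012
from jsonschema import FormatChecker as _FormatChecker

_checker = _FormatChecker(formats=())  # start with no built-in formats

@_checker.checks("even", raises=ValueError)
def _is_even(value):
    # A raised ValueError is caught and reported as a format error.
    if isinstance(value, int) and value % 2:
        raise ValueError(f"{value} is odd")
    return True

_even_validator = _V202012({"format": "even"}, format_checker=_checker)
_even_validator.validate(4)  # passes
assert [e.validator for e in _even_validator.iter_errors(3)] == ["format"]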
|
||||
@@ -0,0 +1,269 @@
|
||||
"""
|
||||
Test runner for the JSON Schema official test suite
|
||||
|
||||
Tests comprehensive correctness of each draft's validator.
|
||||
|
||||
See https://github.com/json-schema-org/JSON-Schema-Test-Suite for details.
|
||||
"""
|
||||
|
||||
import sys
|
||||
|
||||
from jsonschema.tests._suite import Suite
|
||||
import jsonschema
|
||||
|
||||
SUITE = Suite()
|
||||
DRAFT3 = SUITE.version(name="draft3")
|
||||
DRAFT4 = SUITE.version(name="draft4")
|
||||
DRAFT6 = SUITE.version(name="draft6")
|
||||
DRAFT7 = SUITE.version(name="draft7")
|
||||
DRAFT201909 = SUITE.version(name="draft2019-09")
|
||||
DRAFT202012 = SUITE.version(name="draft2020-12")
|
||||
|
||||
|
||||
def skip(message, **kwargs):
|
||||
def skipper(test):
|
||||
if all(value == getattr(test, attr) for attr, value in kwargs.items()):
|
||||
return message
|
||||
return skipper
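# Editor's note: an illustrative sketch, not part of the vendored file.
# skip() returns a callable that yields its message only when every given
# attribute matches the test case, so several skips can be chained with
# `or`, as the helpers below do.  `_FakeTest` is a stand-in invented here.
from collections import namedtuple as _namedtuple

_FakeTest = _namedtuple("_FakeTest", ["subject", "description"])
_skipper = skip(
    message="unsupported",
    subject="time",
    description="leap second",
)
assert _skipper(_FakeTest("time", "leap second")) == "unsupported"
assert _skipper(_FakeTest("date-time", "leap second")) is None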
|
||||
|
||||
|
||||
def missing_format(Validator):
|
||||
def missing_format(test): # pragma: no cover
|
||||
schema = test.schema
|
||||
if (
|
||||
schema is True
|
||||
or schema is False
|
||||
or "format" not in schema
|
||||
or schema["format"] in Validator.FORMAT_CHECKER.checkers
|
||||
or test.valid
|
||||
):
|
||||
return
|
||||
|
||||
return f"Format checker {schema['format']!r} not found."
|
||||
return missing_format
|
||||
|
||||
|
||||
def complex_email_validation(test):
|
||||
if test.subject != "email":
|
||||
return
|
||||
|
||||
message = "Complex email validation is (intentionally) unsupported."
|
||||
return skip(
|
||||
message=message,
|
||||
description="an invalid domain",
|
||||
)(test) or skip(
|
||||
message=message,
|
||||
description="an invalid IPv4-address-literal",
|
||||
)(test) or skip(
|
||||
message=message,
|
||||
description="dot after local part is not valid",
|
||||
)(test) or skip(
|
||||
message=message,
|
||||
description="dot before local part is not valid",
|
||||
)(test) or skip(
|
||||
message=message,
|
||||
description="two subsequent dots inside local part are not valid",
|
||||
)(test)
|
||||
|
||||
|
||||
if sys.version_info < (3, 9): # pragma: no cover
|
||||
message = "Rejecting leading zeros is 3.9+"
|
||||
allowed_leading_zeros = skip(
|
||||
message=message,
|
||||
subject="ipv4",
|
||||
description="invalid leading zeroes, as they are treated as octals",
|
||||
)
|
||||
else:
|
||||
def allowed_leading_zeros(test): # pragma: no cover
|
||||
return
|
||||
|
||||
|
||||
def leap_second(test):
|
||||
message = "Leap seconds are unsupported."
|
||||
return skip(
|
||||
message=message,
|
||||
subject="time",
|
||||
description="a valid time string with leap second",
|
||||
)(test) or skip(
|
||||
message=message,
|
||||
subject="time",
|
||||
description="a valid time string with leap second, Zulu",
|
||||
)(test) or skip(
|
||||
message=message,
|
||||
subject="time",
|
||||
description="a valid time string with leap second with offset",
|
||||
)(test) or skip(
|
||||
message=message,
|
||||
subject="time",
|
||||
description="valid leap second, positive time-offset",
|
||||
)(test) or skip(
|
||||
message=message,
|
||||
subject="time",
|
||||
description="valid leap second, negative time-offset",
|
||||
)(test) or skip(
|
||||
message=message,
|
||||
subject="time",
|
||||
description="valid leap second, large positive time-offset",
|
||||
)(test) or skip(
|
||||
message=message,
|
||||
subject="time",
|
||||
description="valid leap second, large negative time-offset",
|
||||
)(test) or skip(
|
||||
message=message,
|
||||
subject="time",
|
||||
description="valid leap second, zero time-offset",
|
||||
)(test) or skip(
|
||||
message=message,
|
||||
subject="date-time",
|
||||
description="a valid date-time with a leap second, UTC",
|
||||
)(test) or skip(
|
||||
message=message,
|
||||
subject="date-time",
|
||||
description="a valid date-time with a leap second, with minus offset",
|
||||
)(test)
|
||||
|
||||
|
||||
TestDraft3 = DRAFT3.to_unittest_testcase(
|
||||
DRAFT3.cases(),
|
||||
DRAFT3.format_cases(),
|
||||
DRAFT3.optional_cases_of(name="bignum"),
|
||||
DRAFT3.optional_cases_of(name="non-bmp-regex"),
|
||||
DRAFT3.optional_cases_of(name="zeroTerminatedFloats"),
|
||||
Validator=jsonschema.Draft3Validator,
|
||||
format_checker=jsonschema.Draft3Validator.FORMAT_CHECKER,
|
||||
skip=lambda test: (
|
||||
missing_format(jsonschema.Draft3Validator)(test)
|
||||
or complex_email_validation(test)
|
||||
),
|
||||
)
|
||||
|
||||
|
||||
TestDraft4 = DRAFT4.to_unittest_testcase(
|
||||
DRAFT4.cases(),
|
||||
DRAFT4.format_cases(),
|
||||
DRAFT4.optional_cases_of(name="bignum"),
|
||||
DRAFT4.optional_cases_of(name="float-overflow"),
|
||||
DRAFT4.optional_cases_of(name="id"),
|
||||
DRAFT4.optional_cases_of(name="non-bmp-regex"),
|
||||
DRAFT4.optional_cases_of(name="zeroTerminatedFloats"),
|
||||
Validator=jsonschema.Draft4Validator,
|
||||
format_checker=jsonschema.Draft4Validator.FORMAT_CHECKER,
|
||||
skip=lambda test: (
|
||||
allowed_leading_zeros(test)
|
||||
or leap_second(test)
|
||||
or missing_format(jsonschema.Draft4Validator)(test)
|
||||
or complex_email_validation(test)
|
||||
),
|
||||
)
|
||||
|
||||
|
||||
TestDraft6 = DRAFT6.to_unittest_testcase(
|
||||
DRAFT6.cases(),
|
||||
DRAFT6.format_cases(),
|
||||
DRAFT6.optional_cases_of(name="bignum"),
|
||||
DRAFT6.optional_cases_of(name="float-overflow"),
|
||||
DRAFT6.optional_cases_of(name="id"),
|
||||
DRAFT6.optional_cases_of(name="non-bmp-regex"),
|
||||
Validator=jsonschema.Draft6Validator,
|
||||
format_checker=jsonschema.Draft6Validator.FORMAT_CHECKER,
|
||||
skip=lambda test: (
|
||||
allowed_leading_zeros(test)
|
||||
or leap_second(test)
|
||||
or missing_format(jsonschema.Draft6Validator)(test)
|
||||
or complex_email_validation(test)
|
||||
),
|
||||
)
|
||||
|
||||
|
||||
TestDraft7 = DRAFT7.to_unittest_testcase(
|
||||
DRAFT7.cases(),
|
||||
DRAFT7.format_cases(),
|
||||
DRAFT7.optional_cases_of(name="bignum"),
|
||||
DRAFT7.optional_cases_of(name="cross-draft"),
|
||||
DRAFT7.optional_cases_of(name="float-overflow"),
|
||||
DRAFT6.optional_cases_of(name="id"),
|
||||
DRAFT7.optional_cases_of(name="non-bmp-regex"),
|
||||
DRAFT7.optional_cases_of(name="unknownKeyword"),
|
||||
Validator=jsonschema.Draft7Validator,
|
||||
format_checker=jsonschema.Draft7Validator.FORMAT_CHECKER,
|
||||
skip=lambda test: (
|
||||
allowed_leading_zeros(test)
|
||||
or leap_second(test)
|
||||
or missing_format(jsonschema.Draft7Validator)(test)
|
||||
or complex_email_validation(test)
|
||||
),
|
||||
)
|
||||
|
||||
|
||||
TestDraft201909 = DRAFT201909.to_unittest_testcase(
|
||||
DRAFT201909.cases(),
|
||||
DRAFT201909.optional_cases_of(name="anchor"),
|
||||
DRAFT201909.optional_cases_of(name="bignum"),
|
||||
DRAFT201909.optional_cases_of(name="cross-draft"),
|
||||
DRAFT201909.optional_cases_of(name="float-overflow"),
|
||||
DRAFT201909.optional_cases_of(name="id"),
|
||||
DRAFT201909.optional_cases_of(name="no-schema"),
|
||||
DRAFT201909.optional_cases_of(name="non-bmp-regex"),
|
||||
DRAFT201909.optional_cases_of(name="refOfUnknownKeyword"),
|
||||
DRAFT201909.optional_cases_of(name="unknownKeyword"),
|
||||
Validator=jsonschema.Draft201909Validator,
|
||||
skip=skip(
|
||||
message="Vocabulary support is still in-progress.",
|
||||
subject="vocabulary",
|
||||
description=(
|
||||
"no validation: invalid number, but it still validates"
|
||||
),
|
||||
),
|
||||
)
|
||||
|
||||
|
||||
TestDraft201909Format = DRAFT201909.to_unittest_testcase(
|
||||
DRAFT201909.format_cases(),
|
||||
name="TestDraft201909Format",
|
||||
Validator=jsonschema.Draft201909Validator,
|
||||
format_checker=jsonschema.Draft201909Validator.FORMAT_CHECKER,
|
||||
skip=lambda test: (
|
||||
complex_email_validation(test)
|
||||
or allowed_leading_zeros(test)
|
||||
or leap_second(test)
|
||||
or missing_format(jsonschema.Draft201909Validator)(test)
|
||||
|
||||
),
|
||||
)
|
||||
|
||||
|
||||
TestDraft202012 = DRAFT202012.to_unittest_testcase(
|
||||
DRAFT202012.cases(),
|
||||
DRAFT201909.optional_cases_of(name="anchor"),
|
||||
DRAFT202012.optional_cases_of(name="bignum"),
|
||||
DRAFT202012.optional_cases_of(name="cross-draft"),
|
||||
DRAFT202012.optional_cases_of(name="float-overflow"),
|
||||
DRAFT202012.optional_cases_of(name="id"),
|
||||
DRAFT202012.optional_cases_of(name="no-schema"),
|
||||
DRAFT202012.optional_cases_of(name="non-bmp-regex"),
|
||||
DRAFT202012.optional_cases_of(name="refOfUnknownKeyword"),
|
||||
DRAFT202012.optional_cases_of(name="unknownKeyword"),
|
||||
Validator=jsonschema.Draft202012Validator,
|
||||
skip=skip(
|
||||
message="Vocabulary support is still in-progress.",
|
||||
subject="vocabulary",
|
||||
description=(
|
||||
"no validation: invalid number, but it still validates"
|
||||
),
|
||||
),
|
||||
)
|
||||
|
||||
|
||||
TestDraft202012Format = DRAFT202012.to_unittest_testcase(
|
||||
DRAFT202012.format_cases(),
|
||||
name="TestDraft202012Format",
|
||||
Validator=jsonschema.Draft202012Validator,
|
||||
format_checker=jsonschema.Draft202012Validator.FORMAT_CHECKER,
|
||||
skip=lambda test: (
|
||||
complex_email_validation(test)
|
||||
or allowed_leading_zeros(test)
|
||||
or leap_second(test)
|
||||
or missing_format(jsonschema.Draft202012Validator)(test)
|
||||
|
||||
),
|
||||
)
|
||||
@@ -0,0 +1,221 @@
|
||||
"""
|
||||
Tests for the `TypeChecker`-based type interface.
|
||||
|
||||
The actual correctness of the type checking is handled in
|
||||
`test_jsonschema_test_suite`; these tests check that TypeChecker
|
||||
functions correctly at a more granular level.
|
||||
"""
|
||||
from collections import namedtuple
|
||||
from unittest import TestCase
|
||||
|
||||
from jsonschema import ValidationError, _keywords
|
||||
from jsonschema._types import TypeChecker
|
||||
from jsonschema.exceptions import UndefinedTypeCheck, UnknownType
|
||||
from jsonschema.validators import Draft202012Validator, extend
|
||||
|
||||
|
||||
def equals_2(checker, instance):
|
||||
return instance == 2
|
||||
|
||||
|
||||
def is_namedtuple(instance):
|
||||
return isinstance(instance, tuple) and getattr(instance, "_fields", None)
|
||||
|
||||
|
||||
def is_object_or_named_tuple(checker, instance):
|
||||
if Draft202012Validator.TYPE_CHECKER.is_type(instance, "object"):
|
||||
return True
|
||||
return is_namedtuple(instance)
|
||||
|
||||
|
||||
class TestTypeChecker(TestCase):
|
||||
def test_is_type(self):
|
||||
checker = TypeChecker({"two": equals_2})
|
||||
self.assertEqual(
|
||||
(
|
||||
checker.is_type(instance=2, type="two"),
|
||||
checker.is_type(instance="bar", type="two"),
|
||||
),
|
||||
(True, False),
|
||||
)
|
||||
|
||||
def test_is_unknown_type(self):
|
||||
with self.assertRaises(UndefinedTypeCheck) as e:
|
||||
TypeChecker().is_type(4, "foobar")
|
||||
self.assertIn(
|
||||
"'foobar' is unknown to this type checker",
|
||||
str(e.exception),
|
||||
)
|
||||
self.assertTrue(
|
||||
e.exception.__suppress_context__,
|
||||
msg="Expected the internal KeyError to be hidden.",
|
||||
)
|
||||
|
||||
def test_checks_can_be_added_at_init(self):
|
||||
checker = TypeChecker({"two": equals_2})
|
||||
self.assertEqual(checker, TypeChecker().redefine("two", equals_2))
|
||||
|
||||
def test_redefine_existing_type(self):
|
||||
self.assertEqual(
|
||||
TypeChecker().redefine("two", object()).redefine("two", equals_2),
|
||||
TypeChecker().redefine("two", equals_2),
|
||||
)
|
||||
|
||||
def test_remove(self):
|
||||
self.assertEqual(
|
||||
TypeChecker({"two": equals_2}).remove("two"),
|
||||
TypeChecker(),
|
||||
)
|
||||
|
||||
def test_remove_unknown_type(self):
|
||||
with self.assertRaises(UndefinedTypeCheck) as context:
|
||||
TypeChecker().remove("foobar")
|
||||
self.assertIn("foobar", str(context.exception))
|
||||
|
||||
def test_redefine_many(self):
|
||||
self.assertEqual(
|
||||
TypeChecker().redefine_many({"foo": int, "bar": str}),
|
||||
TypeChecker().redefine("foo", int).redefine("bar", str),
|
||||
)
|
||||
|
||||
def test_remove_multiple(self):
|
||||
self.assertEqual(
|
||||
TypeChecker({"foo": int, "bar": str}).remove("foo", "bar"),
|
||||
TypeChecker(),
|
||||
)
|
||||
|
||||
def test_type_check_can_raise_key_error(self):
|
||||
"""
|
||||
Make sure no one writes:
|
||||
|
||||
try:
|
||||
self._type_checkers[type](...)
|
||||
except KeyError:
|
||||
|
||||
ignoring the fact that the function itself can raise that.
|
||||
"""
|
||||
|
||||
error = KeyError("Stuff")
|
||||
|
||||
def raises_keyerror(checker, instance):
|
||||
raise error
|
||||
|
||||
with self.assertRaises(KeyError) as context:
|
||||
TypeChecker({"foo": raises_keyerror}).is_type(4, "foo")
|
||||
|
||||
self.assertIs(context.exception, error)
|
||||
|
||||
def test_repr(self):
|
||||
checker = TypeChecker({"foo": is_namedtuple, "bar": is_namedtuple})
|
||||
self.assertEqual(repr(checker), "<TypeChecker types={'bar', 'foo'}>")
|
||||
|
||||
|
||||
class TestCustomTypes(TestCase):
|
||||
def test_simple_type_can_be_extended(self):
|
||||
def int_or_str_int(checker, instance):
|
||||
if not isinstance(instance, (int, str)):
|
||||
return False
|
||||
try:
|
||||
int(instance)
|
||||
except ValueError:
|
||||
return False
|
||||
return True
|
||||
|
||||
CustomValidator = extend(
|
||||
Draft202012Validator,
|
||||
type_checker=Draft202012Validator.TYPE_CHECKER.redefine(
|
||||
"integer", int_or_str_int,
|
||||
),
|
||||
)
|
||||
validator = CustomValidator({"type": "integer"})
|
||||
|
||||
validator.validate(4)
|
||||
validator.validate("4")
|
||||
|
||||
with self.assertRaises(ValidationError):
|
||||
validator.validate(4.4)
|
||||
|
||||
with self.assertRaises(ValidationError):
|
||||
validator.validate("foo")
|
||||
|
||||
def test_object_can_be_extended(self):
|
||||
schema = {"type": "object"}
|
||||
|
||||
Point = namedtuple("Point", ["x", "y"])
|
||||
|
||||
type_checker = Draft202012Validator.TYPE_CHECKER.redefine(
|
||||
"object", is_object_or_named_tuple,
|
||||
)
|
||||
|
||||
CustomValidator = extend(
|
||||
Draft202012Validator,
|
||||
type_checker=type_checker,
|
||||
)
|
||||
validator = CustomValidator(schema)
|
||||
|
||||
validator.validate(Point(x=4, y=5))
|
||||
|
||||
def test_object_extensions_require_custom_validators(self):
|
||||
schema = {"type": "object", "required": ["x"]}
|
||||
|
||||
type_checker = Draft202012Validator.TYPE_CHECKER.redefine(
|
||||
"object", is_object_or_named_tuple,
|
||||
)
|
||||
|
||||
CustomValidator = extend(
|
||||
Draft202012Validator,
|
||||
type_checker=type_checker,
|
||||
)
|
||||
validator = CustomValidator(schema)
|
||||
|
||||
Point = namedtuple("Point", ["x", "y"])
|
||||
# Cannot handle required
|
||||
with self.assertRaises(ValidationError):
|
||||
validator.validate(Point(x=4, y=5))
|
||||
|
||||
def test_object_extensions_can_handle_custom_validators(self):
|
||||
schema = {
|
||||
"type": "object",
|
||||
"required": ["x"],
|
||||
"properties": {"x": {"type": "integer"}},
|
||||
}
|
||||
|
||||
type_checker = Draft202012Validator.TYPE_CHECKER.redefine(
|
||||
"object", is_object_or_named_tuple,
|
||||
)
|
||||
|
||||
def coerce_named_tuple(fn):
|
||||
def coerced(validator, value, instance, schema):
|
||||
if is_namedtuple(instance):
|
||||
instance = instance._asdict()
|
||||
return fn(validator, value, instance, schema)
|
||||
return coerced
|
||||
|
||||
required = coerce_named_tuple(_keywords.required)
|
||||
properties = coerce_named_tuple(_keywords.properties)
|
||||
|
||||
CustomValidator = extend(
|
||||
Draft202012Validator,
|
||||
type_checker=type_checker,
|
||||
validators={"required": required, "properties": properties},
|
||||
)
|
||||
|
||||
validator = CustomValidator(schema)
|
||||
|
||||
Point = namedtuple("Point", ["x", "y"])
|
||||
# Can now process required and properties
|
||||
validator.validate(Point(x=4, y=5))
|
||||
|
||||
with self.assertRaises(ValidationError):
|
||||
validator.validate(Point(x="not an integer", y=5))
|
||||
|
||||
# As well as still handle objects.
|
||||
validator.validate({"x": 4, "y": 5})
|
||||
|
||||
with self.assertRaises(ValidationError):
|
||||
validator.validate({"x": "not an integer", "y": 5})
|
||||
|
||||
def test_unknown_type(self):
|
||||
with self.assertRaises(UnknownType) as e:
|
||||
Draft202012Validator({}).is_type(12, "some unknown type")
|
||||
self.assertIn("'some unknown type'", str(e.exception))
|
||||
@@ -0,0 +1,138 @@
|
||||
from math import nan
|
||||
from unittest import TestCase
|
||||
|
||||
from jsonschema._utils import equal
|
||||
|
||||
|
||||
class TestEqual(TestCase):
|
||||
def test_none(self):
|
||||
self.assertTrue(equal(None, None))
|
||||
|
||||
def test_nan(self):
|
||||
self.assertTrue(equal(nan, nan))
|
||||
|
||||
|
||||
class TestDictEqual(TestCase):
|
||||
def test_equal_dictionaries(self):
|
||||
dict_1 = {"a": "b", "c": "d"}
|
||||
dict_2 = {"c": "d", "a": "b"}
|
||||
self.assertTrue(equal(dict_1, dict_2))
|
||||
|
||||
def test_equal_dictionaries_with_nan(self):
|
||||
dict_1 = {"a": nan, "c": "d"}
|
||||
dict_2 = {"c": "d", "a": nan}
|
||||
self.assertTrue(equal(dict_1, dict_2))
|
||||
|
||||
def test_missing_key(self):
|
||||
dict_1 = {"a": "b", "c": "d"}
|
||||
dict_2 = {"c": "d", "x": "b"}
|
||||
self.assertFalse(equal(dict_1, dict_2))
|
||||
|
||||
def test_additional_key(self):
|
||||
dict_1 = {"a": "b", "c": "d"}
|
||||
dict_2 = {"c": "d", "a": "b", "x": "x"}
|
||||
self.assertFalse(equal(dict_1, dict_2))
|
||||
|
||||
def test_missing_value(self):
|
||||
dict_1 = {"a": "b", "c": "d"}
|
||||
dict_2 = {"c": "d", "a": "x"}
|
||||
self.assertFalse(equal(dict_1, dict_2))
|
||||
|
||||
def test_empty_dictionaries(self):
|
||||
dict_1 = {}
|
||||
dict_2 = {}
|
||||
self.assertTrue(equal(dict_1, dict_2))
|
||||
|
||||
def test_one_none(self):
|
||||
dict_1 = None
|
||||
dict_2 = {"a": "b", "c": "d"}
|
||||
self.assertFalse(equal(dict_1, dict_2))
|
||||
|
||||
def test_same_item(self):
|
||||
dict_1 = {"a": "b", "c": "d"}
|
||||
self.assertTrue(equal(dict_1, dict_1))
|
||||
|
||||
def test_nested_equal(self):
|
||||
dict_1 = {"a": {"a": "b", "c": "d"}, "c": "d"}
|
||||
dict_2 = {"c": "d", "a": {"a": "b", "c": "d"}}
|
||||
self.assertTrue(equal(dict_1, dict_2))
|
||||
|
||||
def test_nested_dict_unequal(self):
|
||||
dict_1 = {"a": {"a": "b", "c": "d"}, "c": "d"}
|
||||
dict_2 = {"c": "d", "a": {"a": "b", "c": "x"}}
|
||||
self.assertFalse(equal(dict_1, dict_2))
|
||||
|
||||
def test_mixed_nested_equal(self):
|
||||
dict_1 = {"a": ["a", "b", "c", "d"], "c": "d"}
|
||||
dict_2 = {"c": "d", "a": ["a", "b", "c", "d"]}
|
||||
self.assertTrue(equal(dict_1, dict_2))
|
||||
|
||||
def test_nested_list_unequal(self):
|
||||
dict_1 = {"a": ["a", "b", "c", "d"], "c": "d"}
|
||||
dict_2 = {"c": "d", "a": ["b", "c", "d", "a"]}
|
||||
self.assertFalse(equal(dict_1, dict_2))
|
||||
|
||||
|
||||
class TestListEqual(TestCase):
|
||||
def test_equal_lists(self):
|
||||
list_1 = ["a", "b", "c"]
|
||||
list_2 = ["a", "b", "c"]
|
||||
self.assertTrue(equal(list_1, list_2))
|
||||
|
||||
def test_equal_lists_with_nan(self):
|
||||
list_1 = ["a", nan, "c"]
|
||||
list_2 = ["a", nan, "c"]
|
||||
self.assertTrue(equal(list_1, list_2))
|
||||
|
||||
def test_unsorted_lists(self):
|
||||
list_1 = ["a", "b", "c"]
|
||||
list_2 = ["b", "b", "a"]
|
||||
self.assertFalse(equal(list_1, list_2))
|
||||
|
||||
def test_first_list_larger(self):
|
||||
list_1 = ["a", "b", "c"]
|
||||
list_2 = ["a", "b"]
|
||||
self.assertFalse(equal(list_1, list_2))
|
||||
|
||||
def test_second_list_larger(self):
|
||||
list_1 = ["a", "b"]
|
||||
list_2 = ["a", "b", "c"]
|
||||
self.assertFalse(equal(list_1, list_2))
|
||||
|
||||
def test_list_with_none_unequal(self):
|
||||
list_1 = ["a", "b", None]
|
||||
list_2 = ["a", "b", "c"]
|
||||
self.assertFalse(equal(list_1, list_2))
|
||||
|
||||
list_1 = ["a", "b", None]
|
||||
list_2 = [None, "b", "c"]
|
||||
self.assertFalse(equal(list_1, list_2))
|
||||
|
||||
def test_list_with_none_equal(self):
|
||||
list_1 = ["a", None, "c"]
|
||||
list_2 = ["a", None, "c"]
|
||||
self.assertTrue(equal(list_1, list_2))
|
||||
|
||||
def test_empty_list(self):
|
||||
list_1 = []
|
||||
list_2 = []
|
||||
self.assertTrue(equal(list_1, list_2))
|
||||
|
||||
def test_one_none(self):
|
||||
list_1 = None
|
||||
list_2 = []
|
||||
self.assertFalse(equal(list_1, list_2))
|
||||
|
||||
def test_same_list(self):
|
||||
list_1 = ["a", "b", "c"]
|
||||
self.assertTrue(equal(list_1, list_1))
|
||||
|
||||
def test_equal_nested_lists(self):
|
||||
list_1 = ["a", ["b", "c"], "d"]
|
||||
list_2 = ["a", ["b", "c"], "d"]
|
||||
self.assertTrue(equal(list_1, list_2))
|
||||
|
||||
def test_unequal_nested_lists(self):
|
||||
list_1 = ["a", ["b", "c"], "d"]
|
||||
list_2 = ["a", [], "c"]
|
||||
self.assertFalse(equal(list_1, list_2))
|
||||
File diff suppressed because it is too large