new models, frontend functions, public pages
This commit is contained in:
120
.venv/lib/python3.10/site-packages/jsonschema/__init__.py
Normal file
120
.venv/lib/python3.10/site-packages/jsonschema/__init__.py
Normal file
@@ -0,0 +1,120 @@
|
||||
"""
|
||||
An implementation of JSON Schema for Python.
|
||||
|
||||
The main functionality is provided by the validator classes for each of the
|
||||
supported JSON Schema versions.
|
||||
|
||||
Most commonly, `jsonschema.validators.validate` is the quickest way to simply
|
||||
validate a given instance under a schema, and will create a validator
|
||||
for you.
|
||||
"""
|
||||
import warnings
|
||||
|
||||
from jsonschema._format import FormatChecker
|
||||
from jsonschema._types import TypeChecker
|
||||
from jsonschema.exceptions import SchemaError, ValidationError
|
||||
from jsonschema.validators import (
|
||||
Draft3Validator,
|
||||
Draft4Validator,
|
||||
Draft6Validator,
|
||||
Draft7Validator,
|
||||
Draft201909Validator,
|
||||
Draft202012Validator,
|
||||
validate,
|
||||
)
|
||||
|
||||
|
||||
def __getattr__(name):
    """
    Lazily resolve deprecated module attributes.

    Every name handled here is deprecated: accessing it emits a
    ``DeprecationWarning`` pointing at the replacement, then returns the
    object from its new location.  Unknown names raise ``AttributeError``.
    """
    if name == "__version__":
        warnings.warn(
            "Accessing jsonschema.__version__ is deprecated and will be "
            "removed in a future release. Use importlib.metadata directly "
            "to query for jsonschema's version.",
            DeprecationWarning,
            stacklevel=2,
        )

        from importlib import metadata
        return metadata.version("jsonschema")

    if name == "RefResolver":
        from jsonschema.validators import _RefResolver
        warnings.warn(
            _RefResolver._DEPRECATION_MESSAGE,
            DeprecationWarning,
            stacklevel=2,
        )
        return _RefResolver

    if name == "ErrorTree":
        warnings.warn(
            "Importing ErrorTree directly from the jsonschema package "
            "is deprecated and will become an ImportError. Import it from "
            "jsonschema.exceptions instead.",
            DeprecationWarning,
            stacklevel=2,
        )
        from jsonschema.exceptions import ErrorTree
        return ErrorTree

    if name == "FormatError":
        warnings.warn(
            "Importing FormatError directly from the jsonschema package "
            "is deprecated and will become an ImportError. Import it from "
            "jsonschema.exceptions instead.",
            DeprecationWarning,
            stacklevel=2,
        )
        from jsonschema.exceptions import FormatError
        return FormatError

    if name == "Validator":
        warnings.warn(
            "Importing Validator directly from the jsonschema package "
            "is deprecated and will become an ImportError. Import it from "
            "jsonschema.protocols instead.",
            DeprecationWarning,
            stacklevel=2,
        )
        from jsonschema.protocols import Validator
        return Validator

    if name == "RefResolutionError":
        from jsonschema.exceptions import _RefResolutionError
        warnings.warn(
            _RefResolutionError._DEPRECATION_MESSAGE,
            DeprecationWarning,
            stacklevel=2,
        )
        return _RefResolutionError

    # Deprecated per-draft format-checker attributes; each resolves to the
    # FORMAT_CHECKER of the corresponding validator class.
    format_checkers = {
        "draft3_format_checker": Draft3Validator,
        "draft4_format_checker": Draft4Validator,
        "draft6_format_checker": Draft6Validator,
        "draft7_format_checker": Draft7Validator,
        "draft201909_format_checker": Draft201909Validator,
        "draft202012_format_checker": Draft202012Validator,
    }
    ValidatorForFormat = format_checkers.get(name)
    if ValidatorForFormat is not None:
        warnings.warn(
            f"Accessing jsonschema.{name} is deprecated and will be "
            "removed in a future release. Instead, use the FORMAT_CHECKER "
            "attribute on the corresponding Validator.",
            DeprecationWarning,
            stacklevel=2,
        )
        return ValidatorForFormat.FORMAT_CHECKER

    raise AttributeError(f"module {__name__} has no attribute {name}")
|
||||
|
||||
|
||||
# Names re-exported at the package root; everything else is reached via a
# submodule or via the deprecation shims in ``__getattr__`` above.
__all__ = [
    "Draft201909Validator",
    "Draft202012Validator",
    "Draft3Validator",
    "Draft4Validator",
    "Draft6Validator",
    "Draft7Validator",
    "FormatChecker",
    "SchemaError",
    "TypeChecker",
    "ValidationError",
    "validate",
]
|
||||
@@ -0,0 +1,6 @@
|
||||
"""
|
||||
The jsonschema CLI is now deprecated in favor of check-jsonschema.
|
||||
"""
|
||||
from jsonschema.cli import main
|
||||
|
||||
main()
|
||||
519
.venv/lib/python3.10/site-packages/jsonschema/_format.py
Normal file
519
.venv/lib/python3.10/site-packages/jsonschema/_format.py
Normal file
@@ -0,0 +1,519 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from contextlib import suppress
|
||||
from datetime import date, datetime
|
||||
from uuid import UUID
|
||||
import ipaddress
|
||||
import re
|
||||
import typing
|
||||
import warnings
|
||||
|
||||
from jsonschema.exceptions import FormatError
|
||||
|
||||
_FormatCheckCallable = typing.Callable[[object], bool]
|
||||
#: A format checker callable.
|
||||
_F = typing.TypeVar("_F", bound=_FormatCheckCallable)
|
||||
_RaisesType = typing.Union[
|
||||
typing.Type[Exception], typing.Tuple[typing.Type[Exception], ...],
|
||||
]
|
||||
|
||||
_RE_DATE = re.compile(r"^\d{4}-\d{2}-\d{2}$", re.ASCII)
|
||||
|
||||
|
||||
class FormatChecker:
    """
    A ``format`` property checker.

    JSON Schema does not mandate that the ``format`` property actually do any
    validation. If validation is desired however, instances of this class can
    be hooked into validators to enable format validation.

    `FormatChecker` objects always return ``True`` when asked about
    formats that they do not know how to validate.

    To add a check for a custom format use the `FormatChecker.checks`
    decorator.

    Arguments:

        formats:

            The known formats to validate. This argument can be used to
            limit which formats will be used during validation.

    """

    # Registered checks keyed by format name; each value pairs the check
    # callable with the exception type(s) it may raise for bad input.
    checkers: dict[
        str,
        tuple[_FormatCheckCallable, _RaisesType],
    ] = {}  # noqa: RUF012

    def __init__(self, formats: typing.Iterable[str] | None = None):
        wanted = self.checkers if formats is None else formats
        # Copy onto the instance so that per-instance registration via
        # ``checks`` never mutates the shared class-level registry.
        self.checkers = {format: self.checkers[format] for format in wanted}

    def __repr__(self):
        return f"<FormatChecker checkers={sorted(self.checkers)}>"

    def checks(
        self, format: str, raises: _RaisesType = (),
    ) -> typing.Callable[[_F], _F]:
        """
        Register a decorated function as validating a new format.

        Arguments:

            format:

                The format that the decorated function will check.

            raises:

                The exception(s) raised by the decorated function when an
                invalid instance is found.

                The exception object will be accessible as the
                `jsonschema.exceptions.ValidationError.cause` attribute of the
                resulting validation error.

        """

        def _checks(func: _F) -> _F:
            self.checkers[format] = (func, raises)
            return func

        return _checks

    @classmethod
    def cls_checks(
        cls, format: str, raises: _RaisesType = (),
    ) -> typing.Callable[[_F], _F]:
        """Deprecated: register on the class-level registry (warns)."""
        warnings.warn(
            (
                "FormatChecker.cls_checks is deprecated. Call "
                "FormatChecker.checks on a specific FormatChecker instance "
                "instead."
            ),
            DeprecationWarning,
            stacklevel=2,
        )
        return cls._cls_checks(format=format, raises=raises)

    @classmethod
    def _cls_checks(
        cls, format: str, raises: _RaisesType = (),
    ) -> typing.Callable[[_F], _F]:
        # Internal, warning-free variant used by ``_checks_drafts``.
        def _checks(func: _F) -> _F:
            cls.checkers[format] = (func, raises)
            return func

        return _checks

    def check(self, instance: object, format: str) -> None:
        """
        Check whether the instance conforms to the given format.

        Arguments:

            instance (*any primitive type*, i.e. str, number, bool):

                The instance to check

            format:

                The format that instance should conform to

        Raises:

            FormatError:

                if the instance does not conform to ``format``

        """
        registered = self.checkers.get(format)
        if registered is None:
            # Unknown formats always pass.
            return

        func, raises = registered
        result, cause = None, None
        try:
            result = func(instance)
        except raises as error:
            cause = error
        if not result:
            raise FormatError(f"{instance!r} is not a {format!r}", cause=cause)

    def conforms(self, instance: object, format: str) -> bool:
        """
        Check whether the instance conforms to the given format.

        Arguments:

            instance (*any primitive type*, i.e. str, number, bool):

                The instance to check

            format:

                The format that instance should conform to

        Returns:

            bool: whether it conformed

        """
        try:
            self.check(instance, format)
        except FormatError:
            return False
        return True
|
||||
|
||||
|
||||
# One pre-built checker per supported draft.  These instances back the
# deprecated ``jsonschema.draft*_format_checker`` module attributes.
draft3_format_checker = FormatChecker()
draft4_format_checker = FormatChecker()
draft6_format_checker = FormatChecker()
draft7_format_checker = FormatChecker()
draft201909_format_checker = FormatChecker()
draft202012_format_checker = FormatChecker()

_draft_checkers: dict[str, FormatChecker] = {
    "draft3": draft3_format_checker,
    "draft4": draft4_format_checker,
    "draft6": draft6_format_checker,
    "draft7": draft7_format_checker,
    "draft201909": draft201909_format_checker,
    "draft202012": draft202012_format_checker,
}
|
||||
|
||||
|
||||
def _checks_drafts(
    name=None,
    draft3=None,
    draft4=None,
    draft6=None,
    draft7=None,
    draft201909=None,
    draft202012=None,
    raises=(),
) -> typing.Callable[[_F], _F]:
    """
    Register a check with every draft's checker, under per-draft names.

    ``name`` is the default format name; each ``draftN`` argument overrides
    it for that draft, and a falsy value skips that draft entirely.
    """
    # Resolve the effective format name per draft, oldest to newest, so
    # registration order matches the historical behavior.
    per_draft = {
        "draft3": draft3 or name,
        "draft4": draft4 or name,
        "draft6": draft6 or name,
        "draft7": draft7 or name,
        "draft201909": draft201909 or name,
        "draft202012": draft202012 or name,
    }

    def wrap(func: _F) -> _F:
        for draft, format in per_draft.items():
            if format:
                func = _draft_checkers[draft].checks(format, raises)(func)

        # Oy. This is bad global state, but relied upon for now, until
        # deprecation. See #519 and test_format_checkers_come_with_defaults
        newest_name = (
            per_draft["draft202012"]
            or per_draft["draft201909"]
            or per_draft["draft7"]
            or per_draft["draft6"]
            or per_draft["draft4"]
            or per_draft["draft3"]
        )
        FormatChecker._cls_checks(newest_name, raises)(func)
        return func

    return wrap
|
||||
|
||||
|
||||
@_checks_drafts(name="idn-email")
|
||||
@_checks_drafts(name="email")
|
||||
def is_email(instance: object) -> bool:
|
||||
if not isinstance(instance, str):
|
||||
return True
|
||||
return "@" in instance
|
||||
|
||||
|
||||
@_checks_drafts(
|
||||
draft3="ip-address",
|
||||
draft4="ipv4",
|
||||
draft6="ipv4",
|
||||
draft7="ipv4",
|
||||
draft201909="ipv4",
|
||||
draft202012="ipv4",
|
||||
raises=ipaddress.AddressValueError,
|
||||
)
|
||||
def is_ipv4(instance: object) -> bool:
|
||||
if not isinstance(instance, str):
|
||||
return True
|
||||
return bool(ipaddress.IPv4Address(instance))
|
||||
|
||||
|
||||
@_checks_drafts(name="ipv6", raises=ipaddress.AddressValueError)
|
||||
def is_ipv6(instance: object) -> bool:
|
||||
if not isinstance(instance, str):
|
||||
return True
|
||||
address = ipaddress.IPv6Address(instance)
|
||||
return not getattr(address, "scope_id", "")
|
||||
|
||||
|
||||
with suppress(ImportError):
    from fqdn import FQDN

    @_checks_drafts(
        draft3="host-name",
        draft4="hostname",
        draft6="hostname",
        draft7="hostname",
        draft201909="hostname",
        draft202012="hostname",
    )
    def is_host_name(instance: object) -> bool:
        """Check for a valid hostname, via the optional ``fqdn`` package."""
        if not isinstance(instance, str):
            return True
        return FQDN(instance, min_labels=1).is_valid


with suppress(ImportError):
    # The built-in `idna` codec only implements RFC 3890, so we go elsewhere.
    import idna

    @_checks_drafts(
        draft7="idn-hostname",
        draft201909="idn-hostname",
        draft202012="idn-hostname",
        raises=(idna.IDNAError, UnicodeError),
    )
    def is_idn_host_name(instance: object) -> bool:
        """Check for an internationalized hostname via the ``idna`` package."""
        if not isinstance(instance, str):
            return True
        # Validity is determined solely by whether encoding succeeds.
        idna.encode(instance)
        return True
|
||||
|
||||
|
||||
# URI/IRI formats: prefer ``rfc3987`` (which also handles IRIs); fall back
# to ``rfc3986_validator`` for plain URIs when it is absent.
try:
    import rfc3987
except ImportError:
    with suppress(ImportError):
        from rfc3986_validator import validate_rfc3986

        @_checks_drafts(name="uri")
        def is_uri(instance: object) -> bool:
            """Check for a valid URI via ``rfc3986_validator``."""
            if not isinstance(instance, str):
                return True
            return validate_rfc3986(instance, rule="URI")

        @_checks_drafts(
            draft6="uri-reference",
            draft7="uri-reference",
            draft201909="uri-reference",
            draft202012="uri-reference",
            raises=ValueError,
        )
        def is_uri_reference(instance: object) -> bool:
            """Check for a valid URI reference via ``rfc3986_validator``."""
            if not isinstance(instance, str):
                return True
            return validate_rfc3986(instance, rule="URI_reference")

else:

    @_checks_drafts(
        draft7="iri",
        draft201909="iri",
        draft202012="iri",
        raises=ValueError,
    )
    def is_iri(instance: object) -> bool:
        """Check for a valid IRI via ``rfc3987``."""
        if not isinstance(instance, str):
            return True
        return rfc3987.parse(instance, rule="IRI")

    @_checks_drafts(
        draft7="iri-reference",
        draft201909="iri-reference",
        draft202012="iri-reference",
        raises=ValueError,
    )
    def is_iri_reference(instance: object) -> bool:
        """Check for a valid IRI reference via ``rfc3987``."""
        if not isinstance(instance, str):
            return True
        return rfc3987.parse(instance, rule="IRI_reference")

    @_checks_drafts(name="uri", raises=ValueError)
    def is_uri(instance: object) -> bool:
        """Check for a valid URI via ``rfc3987``."""
        if not isinstance(instance, str):
            return True
        return rfc3987.parse(instance, rule="URI")

    @_checks_drafts(
        draft6="uri-reference",
        draft7="uri-reference",
        draft201909="uri-reference",
        draft202012="uri-reference",
        raises=ValueError,
    )
    def is_uri_reference(instance: object) -> bool:
        """Check for a valid URI reference via ``rfc3987``."""
        if not isinstance(instance, str):
            return True
        return rfc3987.parse(instance, rule="URI_reference")
|
||||
|
||||
|
||||
with suppress(ImportError):
    from rfc3339_validator import validate_rfc3339

    @_checks_drafts(name="date-time")
    def is_datetime(instance: object) -> bool:
        """Check for an RFC 3339 date-time via ``rfc3339_validator``."""
        if not isinstance(instance, str):
            return True
        # Upper-case first: RFC 3339 permits lowercase 't'/'z' separators.
        return validate_rfc3339(instance.upper())

    @_checks_drafts(
        draft7="time",
        draft201909="time",
        draft202012="time",
    )
    def is_time(instance: object) -> bool:
        """Check for an RFC 3339 full-time by prefixing a dummy date."""
        if not isinstance(instance, str):
            return True
        return is_datetime("1970-01-01T" + instance)


@_checks_drafts(name="regex", raises=re.error)
def is_regex(instance: object) -> bool:
    """Check that the string compiles as a Python regular expression."""
    if not isinstance(instance, str):
        return True
    return bool(re.compile(instance))


@_checks_drafts(
    draft3="date",
    draft7="date",
    draft201909="date",
    draft202012="date",
    raises=ValueError,
)
def is_date(instance: object) -> bool:
    """Check for a full-date (``YYYY-MM-DD``) that is a real calendar date."""
    if not isinstance(instance, str):
        return True
    # The regex pins the exact digit layout (``fromisoformat`` alone can be
    # laxer on newer Pythons); ``fromisoformat`` then validates the date.
    return bool(_RE_DATE.fullmatch(instance) and date.fromisoformat(instance))


@_checks_drafts(draft3="time", raises=ValueError)
def is_draft3_time(instance: object) -> bool:
    """Check for a draft-3 ``time``: ``HH:MM:SS``."""
    if not isinstance(instance, str):
        return True
    return bool(datetime.strptime(instance, "%H:%M:%S"))  # noqa: DTZ007
|
||||
|
||||
|
||||
with suppress(ImportError):
    import webcolors

    @_checks_drafts(draft3="color", raises=(ValueError, TypeError))
    def is_css21_color(instance: object) -> bool:
        """Check for a CSS 2.1 color: a recognized name or a hex triplet."""
        if isinstance(instance, str):
            try:
                webcolors.name_to_hex(instance)
            except ValueError:
                # Not a known color name -- it must then be valid hex
                # (normalize_hex raises otherwise).
                webcolors.normalize_hex(instance.lower())
        return True


with suppress(ImportError):
    import jsonpointer

    @_checks_drafts(
        draft6="json-pointer",
        draft7="json-pointer",
        draft201909="json-pointer",
        draft202012="json-pointer",
        raises=jsonpointer.JsonPointerException,
    )
    def is_json_pointer(instance: object) -> bool:
        """Check for a valid JSON Pointer via the ``jsonpointer`` package."""
        if not isinstance(instance, str):
            return True
        return bool(jsonpointer.JsonPointer(instance))

    # TODO: I don't want to maintain this, so it
    # needs to go either into jsonpointer (pending
    # https://github.com/stefankoegl/python-json-pointer/issues/34) or
    # into a new external library.
    @_checks_drafts(
        draft7="relative-json-pointer",
        draft201909="relative-json-pointer",
        draft202012="relative-json-pointer",
        raises=jsonpointer.JsonPointerException,
    )
    def is_relative_json_pointer(instance: object) -> bool:
        # Definition taken from:
        # https://tools.ietf.org/html/draft-handrews-relative-json-pointer-01#section-3
        if not isinstance(instance, str):
            return True
        if not instance:
            return False

        # Split the leading non-negative integer from the pointer remainder.
        non_negative_integer, rest = [], ""
        for i, character in enumerate(instance):
            if character.isdigit():
                # digits with a leading "0" are not allowed
                if i > 0 and int(instance[i - 1]) == 0:
                    return False

                non_negative_integer.append(character)
                continue

            if not non_negative_integer:
                return False

            rest = instance[i:]
            break
        # A bare "#" suffix is valid; anything else must be a JSON Pointer.
        return (rest == "#") or bool(jsonpointer.JsonPointer(rest))
|
||||
|
||||
|
||||
with suppress(ImportError):
    import uri_template

    @_checks_drafts(
        draft6="uri-template",
        draft7="uri-template",
        draft201909="uri-template",
        draft202012="uri-template",
    )
    def is_uri_template(instance: object) -> bool:
        """Check for a valid URI template via the ``uri_template`` package."""
        if not isinstance(instance, str):
            return True
        return uri_template.validate(instance)


with suppress(ImportError):
    import isoduration

    @_checks_drafts(
        draft201909="duration",
        draft202012="duration",
        raises=isoduration.DurationParsingException,
    )
    def is_duration(instance: object) -> bool:
        """Check for an ISO 8601 duration via the ``isoduration`` package."""
        if not isinstance(instance, str):
            return True
        isoduration.parse_duration(instance)
        # FIXME: See bolsote/isoduration#25 and bolsote/isoduration#21
        return instance.endswith(tuple("DMYWHMS"))


@_checks_drafts(
    draft201909="uuid",
    draft202012="uuid",
    raises=ValueError,
)
def is_uuid(instance: object) -> bool:
    """Check for a canonically-hyphenated UUID."""
    if not isinstance(instance, str):
        return True
    UUID(instance)
    # UUID() accepts several spellings (braces, urn:, missing dashes); the
    # format requires the canonical 8-4-4-4-12 hyphenation, so verify it.
    return all(instance[position] == "-" for position in (8, 13, 18, 23))
|
||||
449
.venv/lib/python3.10/site-packages/jsonschema/_keywords.py
Normal file
449
.venv/lib/python3.10/site-packages/jsonschema/_keywords.py
Normal file
@@ -0,0 +1,449 @@
|
||||
from fractions import Fraction
|
||||
import re
|
||||
|
||||
from jsonschema._utils import (
|
||||
ensure_list,
|
||||
equal,
|
||||
extras_msg,
|
||||
find_additional_properties,
|
||||
find_evaluated_item_indexes_by_schema,
|
||||
find_evaluated_property_keys_by_schema,
|
||||
uniq,
|
||||
)
|
||||
from jsonschema.exceptions import FormatError, ValidationError
|
||||
|
||||
|
||||
def patternProperties(validator, patternProperties, instance, schema):
    """Validate ``patternProperties``: apply subschemas to matching keys."""
    if not validator.is_type(instance, "object"):
        return

    for pattern, subschema in patternProperties.items():
        for key, value in instance.items():
            # ``re.search``: per the spec, patterns are unanchored.
            if re.search(pattern, key):
                yield from validator.descend(
                    value, subschema, path=key, schema_path=pattern,
                )


def propertyNames(validator, propertyNames, instance, schema):
    """Validate ``propertyNames``: every key must satisfy the subschema."""
    if not validator.is_type(instance, "object"):
        return

    for key in instance:
        yield from validator.descend(instance=key, schema=propertyNames)
|
||||
|
||||
|
||||
def additionalProperties(validator, aP, instance, schema):
    """
    Validate ``additionalProperties``.

    ``aP`` is either a schema (applied to every extra property) or a
    boolean; ``False`` makes any extra property an outright error.
    """
    if not validator.is_type(instance, "object"):
        return

    extras = set(find_additional_properties(instance, schema))

    if validator.is_type(aP, "object"):
        for extra in extras:
            yield from validator.descend(instance[extra], aP, path=extra)
    elif not aP and extras:
        if "patternProperties" in schema:
            # Mention the patterns the extras failed to match, for a more
            # actionable message.
            verb = "does" if len(extras) == 1 else "do"
            joined = ", ".join(repr(each) for each in sorted(extras))
            patterns = ", ".join(
                repr(each) for each in sorted(schema["patternProperties"])
            )
            error = f"{joined} {verb} not match any of the regexes: {patterns}"
            yield ValidationError(error)
        else:
            error = "Additional properties are not allowed (%s %s unexpected)"
            yield ValidationError(error % extras_msg(sorted(extras, key=str)))
|
||||
|
||||
|
||||
def items(validator, items, instance, schema):
    """
    Validate draft 2020-12 ``items``: constrain the elements that fall
    beyond any ``prefixItems``.
    """
    if not validator.is_type(instance, "array"):
        return

    prefix = len(schema.get("prefixItems", []))
    total = len(instance)
    extra = total - prefix
    if extra <= 0:
        # Nothing past the prefix, so nothing for ``items`` to check.
        return

    if items is False:
        rest = instance[prefix:] if extra != 1 else instance[prefix]
        item = "items" if prefix != 1 else "item"
        yield ValidationError(
            f"Expected at most {prefix} {item} but found {extra} "
            f"extra: {rest!r}",
        )
    else:
        for index in range(prefix, total):
            yield from validator.descend(
                instance=instance[index],
                schema=items,
                path=index,
            )
|
||||
|
||||
|
||||
def const(validator, const, instance, schema):
    """Validate ``const``: the instance must equal the given value."""
    if not equal(instance, const):
        yield ValidationError(f"{const!r} was expected")


def contains(validator, contains, instance, schema):
    """
    Validate ``contains`` together with ``minContains``/``maxContains``.

    Counts elements matching the subschema and errors when the count falls
    outside the configured bounds.
    """
    if not validator.is_type(instance, "array"):
        return

    matches = 0
    min_contains = schema.get("minContains", 1)
    max_contains = schema.get("maxContains", len(instance))

    contains_validator = validator.evolve(schema=contains)

    for each in instance:
        if contains_validator.is_valid(each):
            matches += 1
            if matches > max_contains:
                # Exceeding the cap is reported once and ends the scan.
                yield ValidationError(
                    "Too many items match the given schema "
                    f"(expected at most {max_contains})",
                    validator="maxContains",
                    validator_value=max_contains,
                )
                return

    if matches < min_contains:
        if not matches:
            yield ValidationError(
                f"{instance!r} does not contain items "
                "matching the given schema",
            )
        else:
            yield ValidationError(
                "Too few items match the given schema (expected at least "
                f"{min_contains} but only {matches} matched)",
                validator="minContains",
                validator_value=min_contains,
            )
|
||||
|
||||
|
||||
def exclusiveMinimum(validator, minimum, instance, schema):
    """Validate ``exclusiveMinimum``: the instance must be strictly greater."""
    if not validator.is_type(instance, "number"):
        return

    if instance <= minimum:
        yield ValidationError(
            f"{instance!r} is less than or equal to "
            f"the minimum of {minimum!r}",
        )


def exclusiveMaximum(validator, maximum, instance, schema):
    """Validate ``exclusiveMaximum``: the instance must be strictly smaller."""
    if not validator.is_type(instance, "number"):
        return

    if instance >= maximum:
        yield ValidationError(
            f"{instance!r} is greater than or equal "
            f"to the maximum of {maximum!r}",
        )


def minimum(validator, minimum, instance, schema):
    """Validate ``minimum``: the instance must be at least the bound."""
    if not validator.is_type(instance, "number"):
        return

    if instance < minimum:
        message = f"{instance!r} is less than the minimum of {minimum!r}"
        yield ValidationError(message)


def maximum(validator, maximum, instance, schema):
    """Validate ``maximum``: the instance must be at most the bound."""
    if not validator.is_type(instance, "number"):
        return

    if instance > maximum:
        message = f"{instance!r} is greater than the maximum of {maximum!r}"
        yield ValidationError(message)
|
||||
|
||||
|
||||
def multipleOf(validator, dB, instance, schema):
    """Validate ``multipleOf``: the instance must be an exact multiple."""
    if not validator.is_type(instance, "number"):
        return

    if isinstance(dB, float):
        quotient = instance / dB
        try:
            failed = int(quotient) != quotient
        except OverflowError:
            # When `instance` is large and `dB` is less than one,
            # quotient can overflow to infinity; and then casting to int
            # raises an error.
            #
            # In this case we fall back to Fraction logic, which is
            # exact and cannot overflow. The performance is also
            # acceptable: we try the fast all-float option first, and
            # we know that fraction(dB) can have at most a few hundred
            # digits in each part. The worst-case slowdown is therefore
            # for already-slow enormous integers or Decimals.
            failed = (Fraction(instance) / Fraction(dB)).denominator != 1
    else:
        # Integer divisor: a nonzero remainder is truthy and marks failure.
        failed = instance % dB

    if failed:
        yield ValidationError(f"{instance!r} is not a multiple of {dB}")
|
||||
|
||||
|
||||
def minItems(validator, mI, instance, schema):
    """Validate ``minItems``."""
    if validator.is_type(instance, "array") and len(instance) < mI:
        message = "should be non-empty" if mI == 1 else "is too short"
        yield ValidationError(f"{instance!r} {message}")


def maxItems(validator, mI, instance, schema):
    """Validate ``maxItems``."""
    if validator.is_type(instance, "array") and len(instance) > mI:
        message = "is expected to be empty" if mI == 0 else "is too long"
        yield ValidationError(f"{instance!r} {message}")


def uniqueItems(validator, uI, instance, schema):
    """Validate ``uniqueItems`` (only enforced when ``uI`` is true)."""
    if (
        uI
        and validator.is_type(instance, "array")
        and not uniq(instance)
    ):
        yield ValidationError(f"{instance!r} has non-unique elements")


def pattern(validator, patrn, instance, schema):
    """Validate ``pattern``: the string must match the (unanchored) regex."""
    if (
        validator.is_type(instance, "string")
        and not re.search(patrn, instance)
    ):
        yield ValidationError(f"{instance!r} does not match {patrn!r}")
|
||||
|
||||
|
||||
def format(validator, format, instance, schema):
    """Validate ``format`` via the validator's attached format checker."""
    # A validator without a format checker treats ``format`` as
    # annotation-only, per the specification.
    if validator.format_checker is not None:
        try:
            validator.format_checker.check(instance, format)
        except FormatError as error:
            yield ValidationError(error.message, cause=error.cause)


def minLength(validator, mL, instance, schema):
    """Validate ``minLength``."""
    if validator.is_type(instance, "string") and len(instance) < mL:
        message = "should be non-empty" if mL == 1 else "is too short"
        yield ValidationError(f"{instance!r} {message}")


def maxLength(validator, mL, instance, schema):
    """Validate ``maxLength``."""
    if validator.is_type(instance, "string") and len(instance) > mL:
        message = "is expected to be empty" if mL == 0 else "is too long"
        yield ValidationError(f"{instance!r} {message}")
|
||||
|
||||
|
||||
def dependentRequired(validator, dependentRequired, instance, schema):
    """Validate ``dependentRequired``: one key's presence requires others."""
    if not validator.is_type(instance, "object"):
        return

    for property, dependency in dependentRequired.items():
        if property not in instance:
            continue

        for each in dependency:
            if each not in instance:
                message = f"{each!r} is a dependency of {property!r}"
                yield ValidationError(message)


def dependentSchemas(validator, dependentSchemas, instance, schema):
    """Validate ``dependentSchemas``: a key's presence triggers a schema."""
    if not validator.is_type(instance, "object"):
        return

    for property, dependency in dependentSchemas.items():
        if property not in instance:
            continue
        yield from validator.descend(
            instance, dependency, schema_path=property,
        )


def enum(validator, enums, instance, schema):
    """Validate ``enum``: the instance must equal one of the given values."""
    if all(not equal(each, instance) for each in enums):
        yield ValidationError(f"{instance!r} is not one of {enums!r}")
|
||||
|
||||
|
||||
def ref(validator, ref, instance, schema):
    """Validate ``$ref`` by delegating to the validator's reference logic."""
    yield from validator._validate_reference(ref=ref, instance=instance)


def dynamicRef(validator, dynamicRef, instance, schema):
    """Validate ``$dynamicRef`` by delegating to the validator's reference logic."""
    yield from validator._validate_reference(ref=dynamicRef, instance=instance)


def type(validator, types, instance, schema):
    """Validate ``type``: the instance must match at least one named type."""
    types = ensure_list(types)

    if not any(validator.is_type(instance, type) for type in types):
        reprs = ", ".join(repr(type) for type in types)
        yield ValidationError(f"{instance!r} is not of type {reprs}")
|
||||
|
||||
|
||||
def properties(validator, properties, instance, schema):
    """Validate ``properties``: apply each subschema to its named key."""
    if not validator.is_type(instance, "object"):
        return

    for property, subschema in properties.items():
        if property in instance:
            yield from validator.descend(
                instance[property],
                subschema,
                path=property,
                schema_path=property,
            )


def required(validator, required, instance, schema):
    """Validate ``required``: every listed property must be present."""
    if not validator.is_type(instance, "object"):
        return
    for property in required:
        if property not in instance:
            yield ValidationError(f"{property!r} is a required property")
|
||||
|
||||
|
||||
def minProperties(validator, mP, instance, schema):
    """Validate ``minProperties``: object must have at least ``mP`` keys."""
    if validator.is_type(instance, "object") and len(instance) < mP:
        message = (
            "should be non-empty" if mP == 1
            else "does not have enough properties"
        )
        yield ValidationError(f"{instance!r} {message}")


def maxProperties(validator, mP, instance, schema):
    """Validate ``maxProperties``: object must have at most ``mP`` keys."""
    # The original body tested is_type twice (an early return immediately
    # followed by the identical check); a single guard suffices and matches
    # the shape of ``minProperties``.
    if not validator.is_type(instance, "object"):
        return
    if len(instance) > mP:
        message = (
            "is expected to be empty" if mP == 0
            else "has too many properties"
        )
        yield ValidationError(f"{instance!r} {message}")
|
||||
|
||||
|
||||
def allOf(validator, allOf, instance, schema):
    """Validate ``allOf``: the instance must satisfy every subschema."""
    for index, subschema in enumerate(allOf):
        yield from validator.descend(instance, subschema, schema_path=index)


def anyOf(validator, anyOf, instance, schema):
    """Validate ``anyOf``: the instance must satisfy at least one subschema."""
    all_errors = []
    for index, subschema in enumerate(anyOf):
        errs = list(validator.descend(instance, subschema, schema_path=index))
        if not errs:
            break
        all_errors.extend(errs)
    else:
        # No subschema accepted the instance; surface every failure as
        # context on a single error.
        yield ValidationError(
            f"{instance!r} is not valid under any of the given schemas",
            context=all_errors,
        )
|
||||
|
||||
|
||||
def oneOf(validator, oneOf, instance, schema):
    """Validate that the instance matches exactly one subschema of ``oneOf``."""
    subschemas = enumerate(oneOf)
    all_errors = []
    for index, subschema in subschemas:
        errs = list(validator.descend(instance, subschema, schema_path=index))
        if not errs:
            first_valid = subschema
            break
        all_errors.extend(errs)
    else:
        # No subschema validated at all.
        yield ValidationError(
            f"{instance!r} is not valid under any of the given schemas",
            context=all_errors,
        )

    # ``subschemas`` is a shared iterator: this comprehension resumes *after*
    # the first valid subschema, so only the remaining ones are re-checked.
    # When nothing validated, the iterator is already exhausted, so
    # ``more_valid`` is empty and the unbound ``first_valid`` is never read.
    more_valid = [
        each for _, each in subschemas
        if validator.evolve(schema=each).is_valid(instance)
    ]
    if more_valid:
        more_valid.append(first_valid)
        reprs = ", ".join(repr(schema) for schema in more_valid)
        yield ValidationError(f"{instance!r} is valid under each of {reprs}")
|
||||
|
||||
|
||||
def not_(validator, not_schema, instance, schema):
    """Error when the instance *does* satisfy the ``not`` subschema."""
    matches = validator.evolve(schema=not_schema).is_valid(instance)
    if matches:
        yield ValidationError(
            f"{instance!r} should not be valid under {not_schema!r}",
        )
|
||||
|
||||
|
||||
def if_(validator, if_schema, instance, schema):
    """Apply ``then`` or ``else`` depending on whether ``if`` matches."""
    matches_if = validator.evolve(schema=if_schema).is_valid(instance)
    if matches_if:
        if "then" in schema:
            yield from validator.descend(
                instance, schema["then"], schema_path="then",
            )
    elif "else" in schema:
        yield from validator.descend(
            instance, schema["else"], schema_path="else",
        )
|
||||
|
||||
|
||||
def unevaluatedItems(validator, unevaluatedItems, instance, schema):
    """Reject (or validate) array items no other keyword evaluated."""
    if not validator.is_type(instance, "array"):
        return
    evaluated = find_evaluated_item_indexes_by_schema(
        validator, instance, schema,
    )
    leftovers = [
        item
        for index, item in enumerate(instance)
        if index not in evaluated
    ]
    if leftovers:
        error = "Unevaluated items are not allowed (%s %s unexpected)"
        yield ValidationError(error % extras_msg(leftovers))
|
||||
|
||||
|
||||
def unevaluatedProperties(validator, unevaluatedProperties, instance, schema):
    """Validate object properties no other keyword evaluated."""
    if not validator.is_type(instance, "object"):
        return
    evaluated_keys = find_evaluated_property_keys_by_schema(
        validator, instance, schema,
    )
    unevaluated_keys = []
    for key in instance:
        if key in evaluated_keys:
            continue
        # One append per error, mirroring the historical behavior.
        for _ in validator.descend(
            instance[key],
            unevaluatedProperties,
            path=key,
            schema_path=key,
        ):
            # FIXME: Include context for each unevaluated property
            # indicating why it's invalid under the subschema.
            unevaluated_keys.append(key)  # noqa: PERF401

    if not unevaluated_keys:
        return
    if unevaluatedProperties is False:
        error = "Unevaluated properties are not allowed (%s %s unexpected)"
        extras = sorted(unevaluated_keys, key=str)
        yield ValidationError(error % extras_msg(extras))
    else:
        error = (
            "Unevaluated properties are not valid under "
            "the given schema (%s %s unevaluated and invalid)"
        )
        yield ValidationError(error % extras_msg(unevaluated_keys))
|
||||
|
||||
|
||||
def prefixItems(validator, prefixItems, instance, schema):
    """Validate leading array items against their positional subschemas."""
    if not validator.is_type(instance, "array"):
        return

    for index, (item, subschema) in enumerate(zip(instance, prefixItems)):
        yield from validator.descend(
            instance=item,
            schema=subschema,
            schema_path=index,
            path=index,
        )
|
||||
@@ -0,0 +1,449 @@
|
||||
import re
|
||||
|
||||
from referencing.jsonschema import lookup_recursive_ref
|
||||
|
||||
from jsonschema import _utils
|
||||
from jsonschema.exceptions import ValidationError
|
||||
|
||||
|
||||
def ignore_ref_siblings(schema):
    """
    Ignore siblings of ``$ref`` if it is present.

    Otherwise, return all keywords.

    Suitable for use with `create`'s ``applicable_validators`` argument.
    """
    ref = schema.get("$ref")
    if ref is None:
        return schema.items()
    return [("$ref", ref)]
|
||||
|
||||
|
||||
def dependencies_draft3(validator, dependencies, instance, schema):
    """
    Draft 3 ``dependencies``: a subschema, one property name, or a list.
    """
    if not validator.is_type(instance, "object"):
        return

    for property, dependency in dependencies.items():
        if property not in instance:
            continue

        if validator.is_type(dependency, "object"):
            # Schema dependency: the whole instance must satisfy it.
            yield from validator.descend(
                instance, dependency, schema_path=property,
            )
        elif validator.is_type(dependency, "string"):
            # Single property-name dependency.
            if dependency not in instance:
                message = f"{dependency!r} is a dependency of {property!r}"
                yield ValidationError(message)
        else:
            # Otherwise treated as an iterable of required property names.
            for each in dependency:
                if each not in instance:
                    message = f"{each!r} is a dependency of {property!r}"
                    yield ValidationError(message)
|
||||
|
||||
|
||||
def dependencies_draft4_draft6_draft7(
    validator,
    dependencies,
    instance,
    schema,
):
    """
    Support for the ``dependencies`` keyword from pre-draft 2019-09.

    In later drafts, the keyword was split into separate
    ``dependentRequired`` and ``dependentSchemas`` validators.
    """
    if not validator.is_type(instance, "object"):
        return

    for name, dependency in dependencies.items():
        if name not in instance:
            continue

        if validator.is_type(dependency, "array"):
            missing = (each for each in dependency if each not in instance)
            for each in missing:
                yield ValidationError(
                    f"{each!r} is a dependency of {name!r}",
                )
        else:
            yield from validator.descend(
                instance, dependency, schema_path=name,
            )
|
||||
|
||||
|
||||
def disallow_draft3(validator, disallow, instance, schema):
    """Draft 3 ``disallow``: error for each listed type the instance matches."""
    for disallowed in _utils.ensure_list(disallow):
        matches = validator.evolve(
            schema={"type": [disallowed]},
        ).is_valid(instance)
        if matches:
            yield ValidationError(
                f"{disallowed!r} is disallowed for {instance!r}",
            )
|
||||
|
||||
|
||||
def extends_draft3(validator, extends, instance, schema):
    """Draft 3 ``extends``: validate against one schema or a list of them."""
    if not validator.is_type(extends, "object"):
        for index, subschema in enumerate(extends):
            yield from validator.descend(
                instance, subschema, schema_path=index,
            )
        return
    yield from validator.descend(instance, extends)
|
||||
|
||||
|
||||
def items_draft3_draft4(validator, items, instance, schema):
    """Draft 3/4 ``items``: one schema for every item, or positional schemas."""
    if not validator.is_type(instance, "array"):
        return

    if not validator.is_type(items, "object"):
        # Positional form: pair each leading item with its own subschema.
        for index, (item, subschema) in enumerate(zip(instance, items)):
            yield from validator.descend(
                item, subschema, path=index, schema_path=index,
            )
    else:
        for index, item in enumerate(instance):
            yield from validator.descend(item, items, path=index)
|
||||
|
||||
|
||||
def additionalItems(validator, aI, instance, schema):
    """
    Validate array items beyond those covered by a positional ``items``.

    Skipped entirely when ``items`` is a single object schema, since that
    form already applies to every item.
    """
    if (
        not validator.is_type(instance, "array")
        or validator.is_type(schema.get("items", {}), "object")
    ):
        return

    # ``items`` is a positional array (or absent) here; compute its length
    # once instead of re-evaluating ``len(schema.get("items", []))`` in
    # each branch as the original did.
    len_items = len(schema.get("items", []))
    if validator.is_type(aI, "object"):
        for index, item in enumerate(instance[len_items:], start=len_items):
            yield from validator.descend(item, aI, path=index)
    elif not aI and len(instance) > len_items:
        error = "Additional items are not allowed (%s %s unexpected)"
        yield ValidationError(
            error % _utils.extras_msg(instance[len_items:]),
        )
|
||||
|
||||
|
||||
def items_draft6_draft7_draft201909(validator, items, instance, schema):
    """Draft 6 to 2019-09 ``items``: positional schema list or one schema."""
    if not validator.is_type(instance, "array"):
        return

    if validator.is_type(items, "array"):
        for index, (item, subschema) in enumerate(zip(instance, items)):
            yield from validator.descend(
                item, subschema, path=index, schema_path=index,
            )
    else:
        for index, item in enumerate(instance):
            yield from validator.descend(item, items, path=index)
|
||||
|
||||
|
||||
def minimum_draft3_draft4(validator, minimum, instance, schema):
    """Draft 3/4 ``minimum`` with its boolean ``exclusiveMinimum`` modifier."""
    if not validator.is_type(instance, "number"):
        return

    if schema.get("exclusiveMinimum", False):
        failed, cmp = instance <= minimum, "less than or equal to"
    else:
        failed, cmp = instance < minimum, "less than"

    if failed:
        yield ValidationError(
            f"{instance!r} is {cmp} the minimum of {minimum!r}",
        )
|
||||
|
||||
|
||||
def maximum_draft3_draft4(validator, maximum, instance, schema):
    """Draft 3/4 ``maximum`` with its boolean ``exclusiveMaximum`` modifier."""
    if not validator.is_type(instance, "number"):
        return

    if schema.get("exclusiveMaximum", False):
        failed, cmp = instance >= maximum, "greater than or equal to"
    else:
        failed, cmp = instance > maximum, "greater than"

    if failed:
        yield ValidationError(
            f"{instance!r} is {cmp} the maximum of {maximum!r}",
        )
|
||||
|
||||
|
||||
def properties_draft3(validator, properties, instance, schema):
    """Draft 3 ``properties``, where subschemas may carry ``required``."""
    if not validator.is_type(instance, "object"):
        return

    for property, subschema in properties.items():
        if property in instance:
            yield from validator.descend(
                instance[property],
                subschema,
                path=property,
                schema_path=property,
            )
        elif subschema.get("required", False):
            # Draft 3 spells "required" as a boolean inside each property's
            # subschema; synthesize the error the later ``required`` keyword
            # would have produced, with matching metadata and paths.
            error = ValidationError(f"{property!r} is a required property")
            error._set(
                validator="required",
                validator_value=subschema["required"],
                instance=instance,
                schema=schema,
            )
            error.path.appendleft(property)
            error.schema_path.extend([property, "required"])
            yield error
|
||||
|
||||
|
||||
def type_draft3(validator, types, instance, schema):
    """Draft 3 ``type``: type names and/or subschemas; one match suffices."""
    types = _utils.ensure_list(types)

    all_errors = []
    for index, type in enumerate(types):
        if validator.is_type(type, "object"):
            # A subschema entry: the instance matches if it yields no errors.
            errors = list(validator.descend(instance, type, schema_path=index))
            if not errors:
                return
            all_errors.extend(errors)
        elif validator.is_type(instance, type):
            return

    # Nothing matched; build a readable name for each candidate type.
    reprs = []
    for type in types:
        try:
            # Subschema entries may advertise a human-readable "name".
            reprs.append(repr(type["name"]))
        except Exception:  # noqa: BLE001
            reprs.append(repr(type))
    yield ValidationError(
        f"{instance!r} is not of type {', '.join(reprs)}",
        context=all_errors,
    )
|
||||
|
||||
|
||||
def contains_draft6_draft7(validator, contains, instance, schema):
    """Draft 6/7 ``contains``: at least one item must match the subschema."""
    if not validator.is_type(instance, "array"):
        return

    matcher = validator.evolve(schema=contains)
    if not any(matcher.is_valid(element) for element in instance):
        yield ValidationError(
            f"None of {instance!r} are valid under the given schema",
        )
|
||||
|
||||
|
||||
def recursiveRef(validator, recursiveRef, instance, schema):
    """Validate via ``$recursiveRef``, resolved against the dynamic scope."""
    resolved = lookup_recursive_ref(validator._resolver)
    yield from validator.descend(
        instance,
        resolved.contents,
        resolver=resolved.resolver,
    )
|
||||
|
||||
|
||||
def find_evaluated_item_indexes_by_schema(validator, instance, schema):
    """
    Get all indexes of items that get evaluated under the current schema.

    Covers all keywords related to unevaluatedItems: items, prefixItems, if,
    then, else, contains, unevaluatedItems, allOf, oneOf, anyOf
    """
    if validator.is_type(schema, "boolean"):
        return []
    evaluated_indexes = []

    # Follow $ref and collect whatever the referenced schema evaluates.
    ref = schema.get("$ref")
    if ref is not None:
        resolved = validator._resolver.lookup(ref)
        evaluated_indexes.extend(
            find_evaluated_item_indexes_by_schema(
                validator.evolve(
                    schema=resolved.contents,
                    _resolver=resolved.resolver,
                ),
                instance,
                resolved.contents,
            ),
        )

    if "$recursiveRef" in schema:
        resolved = lookup_recursive_ref(validator._resolver)
        evaluated_indexes.extend(
            find_evaluated_item_indexes_by_schema(
                validator.evolve(
                    schema=resolved.contents,
                    _resolver=resolved.resolver,
                ),
                instance,
                resolved.contents,
            ),
        )

    if "items" in schema:
        # With additionalItems present, or a single-schema ``items``, every
        # index of the instance counts as evaluated.
        if "additionalItems" in schema:
            return list(range(len(instance)))

        if validator.is_type(schema["items"], "object"):
            return list(range(len(instance)))
        # Positional ``items``: only its leading indexes are evaluated.
        evaluated_indexes += list(range(len(schema["items"])))

    if "if" in schema:
        if validator.evolve(schema=schema["if"]).is_valid(instance):
            evaluated_indexes += find_evaluated_item_indexes_by_schema(
                validator, instance, schema["if"],
            )
            if "then" in schema:
                evaluated_indexes += find_evaluated_item_indexes_by_schema(
                    validator, instance, schema["then"],
                )
        elif "else" in schema:
            evaluated_indexes += find_evaluated_item_indexes_by_schema(
                validator, instance, schema["else"],
            )

    # Items these keywords successfully match are considered evaluated.
    for keyword in ["contains", "unevaluatedItems"]:
        if keyword in schema:
            for k, v in enumerate(instance):
                if validator.evolve(schema=schema[keyword]).is_valid(v):
                    evaluated_indexes.append(k)

    # Only subschemas that validate cleanly contribute evaluated indexes.
    for keyword in ["allOf", "oneOf", "anyOf"]:
        if keyword in schema:
            for subschema in schema[keyword]:
                errs = next(validator.descend(instance, subschema), None)
                if errs is None:
                    evaluated_indexes += find_evaluated_item_indexes_by_schema(
                        validator, instance, subschema,
                    )

    return evaluated_indexes
|
||||
|
||||
|
||||
def unevaluatedItems_draft2019(validator, unevaluatedItems, instance, schema):
    """2019-09 ``unevaluatedItems``: reject items no other keyword evaluated."""
    if not validator.is_type(instance, "array"):
        return
    evaluated = find_evaluated_item_indexes_by_schema(
        validator, instance, schema,
    )
    leftovers = [
        item
        for index, item in enumerate(instance)
        if index not in evaluated
    ]
    if leftovers:
        error = "Unevaluated items are not allowed (%s %s unexpected)"
        yield ValidationError(error % _utils.extras_msg(leftovers))
|
||||
|
||||
|
||||
def find_evaluated_property_keys_by_schema(validator, instance, schema):
    """
    Get all property keys that get evaluated under the current schema.

    Covers the keywords relevant to ``unevaluatedProperties``: properties,
    additionalProperties, unevaluatedProperties, patternProperties,
    dependentSchemas, allOf, oneOf, anyOf, if/then/else, $ref, $recursiveRef.
    """
    if validator.is_type(schema, "boolean"):
        return []
    evaluated_keys = []

    # Follow $ref and collect whatever the referenced schema evaluates.
    ref = schema.get("$ref")
    if ref is not None:
        resolved = validator._resolver.lookup(ref)
        evaluated_keys.extend(
            find_evaluated_property_keys_by_schema(
                validator.evolve(
                    schema=resolved.contents,
                    _resolver=resolved.resolver,
                ),
                instance,
                resolved.contents,
            ),
        )

    if "$recursiveRef" in schema:
        resolved = lookup_recursive_ref(validator._resolver)
        evaluated_keys.extend(
            find_evaluated_property_keys_by_schema(
                validator.evolve(
                    schema=resolved.contents,
                    _resolver=resolved.resolver,
                ),
                instance,
                resolved.contents,
            ),
        )

    for keyword in [
        "properties", "additionalProperties", "unevaluatedProperties",
    ]:
        if keyword in schema:
            schema_value = schema[keyword]
            # A ``true`` boolean schema evaluates every instance key.
            if validator.is_type(schema_value, "boolean") and schema_value:
                evaluated_keys += instance.keys()

            elif validator.is_type(schema_value, "object"):
                for property in schema_value:
                    if property in instance:
                        evaluated_keys.append(property)

    if "patternProperties" in schema:
        for property in instance:
            for pattern in schema["patternProperties"]:
                if re.search(pattern, property):
                    evaluated_keys.append(property)

    if "dependentSchemas" in schema:
        for property, subschema in schema["dependentSchemas"].items():
            if property not in instance:
                continue
            evaluated_keys += find_evaluated_property_keys_by_schema(
                validator, instance, subschema,
            )

    # Only subschemas that validate cleanly contribute evaluated keys.
    for keyword in ["allOf", "oneOf", "anyOf"]:
        if keyword in schema:
            for subschema in schema[keyword]:
                errs = next(validator.descend(instance, subschema), None)
                if errs is None:
                    evaluated_keys += find_evaluated_property_keys_by_schema(
                        validator, instance, subschema,
                    )

    if "if" in schema:
        if validator.evolve(schema=schema["if"]).is_valid(instance):
            evaluated_keys += find_evaluated_property_keys_by_schema(
                validator, instance, schema["if"],
            )
            if "then" in schema:
                evaluated_keys += find_evaluated_property_keys_by_schema(
                    validator, instance, schema["then"],
                )
        elif "else" in schema:
            evaluated_keys += find_evaluated_property_keys_by_schema(
                validator, instance, schema["else"],
            )

    return evaluated_keys
|
||||
|
||||
|
||||
def unevaluatedProperties_draft2019(validator, uP, instance, schema):
    """2019-09 ``unevaluatedProperties``: validate keys nothing else covered."""
    if not validator.is_type(instance, "object"):
        return
    evaluated_keys = find_evaluated_property_keys_by_schema(
        validator, instance, schema,
    )
    bad_keys = []
    for key in instance:
        if key in evaluated_keys:
            continue
        # One append per error, mirroring the historical behavior.
        for _ in validator.descend(
            instance[key],
            uP,
            path=key,
            schema_path=key,
        ):
            # FIXME: Include context for each unevaluated property
            # indicating why it's invalid under the subschema.
            bad_keys.append(key)  # noqa: PERF401

    if not bad_keys:
        return
    if uP is False:
        error = "Unevaluated properties are not allowed (%s %s unexpected)"
        extras = sorted(bad_keys, key=str)
        yield ValidationError(error % _utils.extras_msg(extras))
    else:
        error = (
            "Unevaluated properties are not valid under "
            "the given schema (%s %s unevaluated and invalid)"
        )
        yield ValidationError(error % _utils.extras_msg(bad_keys))
|
||||
200
.venv/lib/python3.10/site-packages/jsonschema/_types.py
Normal file
200
.venv/lib/python3.10/site-packages/jsonschema/_types.py
Normal file
@@ -0,0 +1,200 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any, Callable, Mapping
|
||||
import numbers
|
||||
|
||||
from attrs import evolve, field, frozen
|
||||
from rpds import HashTrieMap
|
||||
|
||||
from jsonschema.exceptions import UndefinedTypeCheck
|
||||
|
||||
|
||||
# unfortunately, the type of HashTrieMap is generic, and if used as an attrs
|
||||
# converter, the generic type is presented to mypy, which then fails to match
|
||||
# the concrete type of a type checker mapping
|
||||
# this "do nothing" wrapper presents the correct information to mypy
|
||||
def _typed_map_converter(
    init_val: Mapping[str, Callable[[TypeChecker, Any], bool]],
) -> HashTrieMap[str, Callable[[TypeChecker, Any], bool]]:
    """Convert a plain mapping of type checkers into an immutable HashTrieMap."""
    return HashTrieMap.convert(init_val)
|
||||
|
||||
|
||||
def is_array(checker, instance):
    """Check whether *instance* is a JSON ``array`` (a Python ``list``)."""
    return isinstance(instance, list)
|
||||
|
||||
|
||||
def is_bool(checker, instance):
    """Check whether *instance* is a JSON ``boolean`` (a Python ``bool``)."""
    return isinstance(instance, bool)
|
||||
|
||||
|
||||
def is_integer(checker, instance):
    """Check for JSON ``integer``: a Python ``int``, excluding ``bool``."""
    # bool inherits from int, so ensure bools aren't reported as ints
    return isinstance(instance, int) and not isinstance(instance, bool)
|
||||
|
||||
|
||||
def is_null(checker, instance):
    """Check whether *instance* is JSON ``null`` (Python ``None``)."""
    return instance is None
|
||||
|
||||
|
||||
def is_number(checker, instance):
    """Check for JSON ``number``: any ``numbers.Number`` except ``bool``."""
    # bool inherits from int, so ensure bools aren't reported as numbers
    return not isinstance(instance, bool) and isinstance(
        instance, numbers.Number,
    )
|
||||
|
||||
|
||||
def is_object(checker, instance):
    """Check whether *instance* is a JSON ``object`` (a Python ``dict``)."""
    return isinstance(instance, dict)
|
||||
|
||||
|
||||
def is_string(checker, instance):
    """Check whether *instance* is a JSON ``string`` (a Python ``str``)."""
    return isinstance(instance, str)
|
||||
|
||||
|
||||
def is_any(checker, instance):
    """Accept any instance — backs draft 3's ``any`` type."""
    return True
|
||||
|
||||
|
||||
@frozen(repr=False)
class TypeChecker:
    """
    A :kw:`type` property checker.

    A `TypeChecker` performs type checking for a `Validator`, converting
    between the defined JSON Schema types and some associated Python types or
    objects.

    Modifying the behavior just mentioned by redefining which Python objects
    are considered to be of which JSON Schema types can be done using
    `TypeChecker.redefine` or `TypeChecker.redefine_many`, and types can be
    removed via `TypeChecker.remove`. Each of these return a new `TypeChecker`.

    Arguments:

        type_checkers:

            The initial mapping of types to their checking functions.

    """

    # Immutable mapping from JSON Schema type name to its predicate; the
    # converter normalizes any plain mapping into a HashTrieMap.
    _type_checkers: HashTrieMap[
        str, Callable[[TypeChecker, Any], bool],
    ] = field(default=HashTrieMap(), converter=_typed_map_converter)

    def __repr__(self):
        types = ", ".join(repr(k) for k in sorted(self._type_checkers))
        return f"<{self.__class__.__name__} types={{{types}}}>"

    def is_type(self, instance, type: str) -> bool:
        """
        Check if the instance is of the appropriate type.

        Arguments:

            instance:

                The instance to check

            type:

                The name of the type that is expected.

        Raises:

            `jsonschema.exceptions.UndefinedTypeCheck`:

                if ``type`` is unknown to this object.

        """
        try:
            fn = self._type_checkers[type]
        except KeyError:
            raise UndefinedTypeCheck(type) from None

        return fn(self, instance)

    def redefine(self, type: str, fn) -> TypeChecker:
        """
        Produce a new checker with the given type redefined.

        Arguments:

            type:

                The name of the type to check.

            fn (collections.abc.Callable):

                A callable taking exactly two parameters - the type
                checker calling the function and the instance to check.
                The function should return true if instance is of this
                type and false otherwise.

        """
        return self.redefine_many({type: fn})

    def redefine_many(self, definitions=()) -> TypeChecker:
        """
        Produce a new checker with the given types redefined.

        Arguments:

            definitions (dict):

                A dictionary mapping types to their checking functions.

        """
        type_checkers = self._type_checkers.update(definitions)
        return evolve(self, type_checkers=type_checkers)

    def remove(self, *types) -> TypeChecker:
        """
        Produce a new checker with the given types forgotten.

        Arguments:

            types:

                the names of the types to remove.

        Raises:

            `jsonschema.exceptions.UndefinedTypeCheck`:

                if any given type is unknown to this object

        """
        type_checkers = self._type_checkers
        for each in types:
            try:
                type_checkers = type_checkers.remove(each)
            except KeyError:
                raise UndefinedTypeCheck(each) from None
        return evolve(self, type_checkers=type_checkers)
|
||||
|
||||
|
||||
# Draft 3 recognizes the full primitive set, including the "any" type.
draft3_type_checker = TypeChecker(
    {
        "any": is_any,
        "array": is_array,
        "boolean": is_bool,
        "integer": is_integer,
        "object": is_object,
        "null": is_null,
        "number": is_number,
        "string": is_string,
    },
)
# Draft 4 dropped the "any" type.
draft4_type_checker = draft3_type_checker.remove("any")
# From draft 6 on, a float with a zero fractional part also counts as integer.
draft6_type_checker = draft4_type_checker.redefine(
    "integer",
    lambda checker, instance: (
        is_integer(checker, instance)
        or isinstance(instance, float) and instance.is_integer()
    ),
)
# Later drafts keep draft 6's type semantics unchanged.
draft7_type_checker = draft6_type_checker
draft201909_type_checker = draft7_type_checker
draft202012_type_checker = draft201909_type_checker
|
||||
28
.venv/lib/python3.10/site-packages/jsonschema/_typing.py
Normal file
28
.venv/lib/python3.10/site-packages/jsonschema/_typing.py
Normal file
@@ -0,0 +1,28 @@
|
||||
"""
|
||||
Some (initially private) typing helpers for jsonschema's types.
|
||||
"""
|
||||
from typing import Any, Callable, Iterable, Protocol, Tuple, Union
|
||||
|
||||
import referencing.jsonschema
|
||||
|
||||
from jsonschema.protocols import Validator
|
||||
|
||||
|
||||
class SchemaKeywordValidator(Protocol):
    """Structural type for a single JSON Schema keyword validator callable."""

    def __call__(
        self,
        validator: Validator,
        value: Any,
        instance: Any,
        schema: referencing.jsonschema.Schema,
    ) -> None:
        ...
|
||||
|
||||
|
||||
# Callable mapping a schema to its id (URI string), or None when it has none.
id_of = Callable[[referencing.jsonschema.Schema], Union[str, None]]


# Callable selecting which (keyword, value) pairs of a schema apply —
# see e.g. ``ignore_ref_siblings`` for an implementation of this shape.
ApplicableValidators = Callable[
    [referencing.jsonschema.Schema],
    Iterable[Tuple[str, Any]],
]
|
||||
351
.venv/lib/python3.10/site-packages/jsonschema/_utils.py
Normal file
351
.venv/lib/python3.10/site-packages/jsonschema/_utils.py
Normal file
@@ -0,0 +1,351 @@
|
||||
from collections.abc import Mapping, MutableMapping, Sequence
|
||||
from urllib.parse import urlsplit
|
||||
import itertools
|
||||
import re
|
||||
|
||||
|
||||
class URIDict(MutableMapping):
    """
    Dictionary which uses normalized URIs as keys.
    """

    def normalize(self, uri):
        # Round-trip through urlsplit to obtain the canonical key form.
        return urlsplit(uri).geturl()

    def __init__(self, *args, **kwargs):
        # NOTE(review): initial entries are inserted directly, *without*
        # normalization — presumably deliberate; confirm before relying on it.
        self.store = dict()
        self.store.update(*args, **kwargs)

    def __getitem__(self, uri):
        return self.store[self.normalize(uri)]

    def __setitem__(self, uri, value):
        self.store[self.normalize(uri)] = value

    def __delitem__(self, uri):
        del self.store[self.normalize(uri)]

    def __iter__(self):
        return iter(self.store)

    def __len__(self):  # pragma: no cover -- untested, but to be removed
        return len(self.store)

    def __repr__(self):  # pragma: no cover -- untested, but to be removed
        return repr(self.store)
|
||||
|
||||
|
||||
class Unset:
    """
    An as-of-yet unset attribute or unprovided default parameter.
    """

    # Instances are sentinels; only the repr is defined.
    def __repr__(self):  # pragma: no cover
        return "<unset>"
|
||||
|
||||
|
||||
def format_as_index(container, indices):
    """
    Construct a single string containing indexing operations for the indices.

    For example for a container ``bar``, [1, 2, "foo"] -> bar[1][2]["foo"]

    Arguments:

        container (str):

            A word to use for the thing being indexed

        indices (sequence):

            The indices to format.

    """
    if not indices:
        return container
    suffix = "".join(f"[{index!r}]" for index in indices)
    return f"{container}{suffix}"
|
||||
|
||||
|
||||
def find_additional_properties(instance, schema):
    """
    Return the set of additional properties for the given ``instance``.

    Weeds out properties that should have been validated by ``properties`` and
    / or ``patternProperties``.

    Assumes ``instance`` is dict-like already.
    """
    known = schema.get("properties", {})
    pattern = "|".join(schema.get("patternProperties", {}))
    for name in instance:
        if name in known:
            continue
        if pattern and re.search(pattern, name):
            continue
        yield name
|
||||
|
||||
|
||||
def extras_msg(extras):
    """
    Create an error message for extra items or properties.
    """
    listed = ", ".join(repr(extra) for extra in extras)
    return listed, "was" if len(extras) == 1 else "were"
|
||||
|
||||
|
||||
def ensure_list(thing):
    """
    Wrap ``thing`` in a list if it's a single str.

    Otherwise, return it unchanged.
    """
    return [thing] if isinstance(thing, str) else thing
|
||||
|
||||
|
||||
def _mapping_equal(one, two):
    """
    Check if two mappings are equal using the semantics of `equal`.
    """
    return len(one) == len(two) and all(
        key in two and equal(value, two[key])
        for key, value in one.items()
    )
|
||||
|
||||
|
||||
def _sequence_equal(one, two):
    """
    Check if two sequences are equal using the semantics of `equal`.
    """
    return len(one) == len(two) and all(
        equal(i, j) for i, j in zip(one, two)
    )
|
||||
|
||||
|
||||
def equal(one, two):
    """
    Check if two things are equal evading some Python type hierarchy semantics.

    Specifically in JSON Schema, evade `bool` inheriting from `int`,
    recursing into sequences to do the same.
    """
    if one is two:
        return True
    # str is itself a Sequence, so it must be handled before the branch below.
    if isinstance(one, str) or isinstance(two, str):
        return one == two
    if isinstance(one, Sequence) and isinstance(two, Sequence):
        return _sequence_equal(one, two)
    if isinstance(one, Mapping) and isinstance(two, Mapping):
        return _mapping_equal(one, two)
    return unbool(one) == unbool(two)
|
||||
|
||||
|
||||
def unbool(element, true=object(), false=object()):
    """
    A hack to make True and 1 and False and 0 unique for ``uniq``.

    The default sentinels are created once at definition time, so every
    call maps ``True``/``False`` to the same stable stand-in objects.
    """
    if element is True:
        return true
    if element is False:
        return false
    return element
|
||||
|
||||
|
||||
def uniq(container):
    """
    Check if all of a container's elements are unique.

    Tries to rely on the container being recursively sortable, or otherwise
    falls back on (slow) brute force.
    """
    try:
        # Fast path: sort, then any duplicate must be adjacent.
        sort = sorted(unbool(i) for i in container)
        sliced = itertools.islice(sort, 1, None)

        for i, j in zip(sort, sliced):
            if equal(i, j):
                return False

    except (NotImplementedError, TypeError):
        # Unsortable elements: O(n^2) pairwise comparison via `equal`.
        seen = []
        for e in container:
            e = unbool(e)

            for i in seen:
                if equal(i, e):
                    return False

            seen.append(e)
    return True
|
||||
|
||||
|
||||
def find_evaluated_item_indexes_by_schema(validator, instance, schema):
    """
    Get all indexes of items that get evaluated under the current schema.

    Covers all keywords related to unevaluatedItems: items, prefixItems, if,
    then, else, contains, unevaluatedItems, allOf, oneOf, anyOf
    """
    if validator.is_type(schema, "boolean"):
        return []
    evaluated_indexes = []

    # A single-schema ``items`` applies to every index, so the whole
    # instance counts as evaluated and no further keywords matter.
    if "items" in schema:
        return list(range(len(instance)))

    # Follow $ref and collect whatever the referenced schema evaluates.
    ref = schema.get("$ref")
    if ref is not None:
        resolved = validator._resolver.lookup(ref)
        evaluated_indexes.extend(
            find_evaluated_item_indexes_by_schema(
                validator.evolve(
                    schema=resolved.contents,
                    _resolver=resolved.resolver,
                ),
                instance,
                resolved.contents,
            ),
        )

    dynamicRef = schema.get("$dynamicRef")
    if dynamicRef is not None:
        resolved = validator._resolver.lookup(dynamicRef)
        evaluated_indexes.extend(
            find_evaluated_item_indexes_by_schema(
                validator.evolve(
                    schema=resolved.contents,
                    _resolver=resolved.resolver,
                ),
                instance,
                resolved.contents,
            ),
        )

    # Positional prefix schemas evaluate their leading indexes.
    if "prefixItems" in schema:
        evaluated_indexes += list(range(len(schema["prefixItems"])))

    if "if" in schema:
        if validator.evolve(schema=schema["if"]).is_valid(instance):
            evaluated_indexes += find_evaluated_item_indexes_by_schema(
                validator, instance, schema["if"],
            )
            if "then" in schema:
                evaluated_indexes += find_evaluated_item_indexes_by_schema(
                    validator, instance, schema["then"],
                )
        elif "else" in schema:
            evaluated_indexes += find_evaluated_item_indexes_by_schema(
                validator, instance, schema["else"],
            )

    # Items these keywords successfully match are considered evaluated.
    for keyword in ["contains", "unevaluatedItems"]:
        if keyword in schema:
            for k, v in enumerate(instance):
                if validator.evolve(schema=schema[keyword]).is_valid(v):
                    evaluated_indexes.append(k)

    # Only subschemas that validate cleanly contribute evaluated indexes.
    for keyword in ["allOf", "oneOf", "anyOf"]:
        if keyword in schema:
            for subschema in schema[keyword]:
                errs = next(validator.descend(instance, subschema), None)
                if errs is None:
                    evaluated_indexes += find_evaluated_item_indexes_by_schema(
                        validator, instance, subschema,
                    )

    return evaluated_indexes
|
||||
|
||||
|
||||
def find_evaluated_property_keys_by_schema(validator, instance, schema):
    """
    Get all keys of items that get evaluated under the current schema.

    Covers all keywords related to unevaluatedProperties: properties,
    additionalProperties, unevaluatedProperties, patternProperties,
    dependentSchemas, allOf, oneOf, anyOf, if, then, else
    """
    # A boolean schema contains no keywords, so it evaluates no keys.
    if validator.is_type(schema, "boolean"):
        return []
    evaluated_keys = []

    # Keys evaluated by a referenced schema count as evaluated here, so
    # recurse into the $ref target using the resolver the lookup produced.
    ref = schema.get("$ref")
    if ref is not None:
        resolved = validator._resolver.lookup(ref)
        evaluated_keys.extend(
            find_evaluated_property_keys_by_schema(
                validator.evolve(
                    schema=resolved.contents,
                    _resolver=resolved.resolver,
                ),
                instance,
                resolved.contents,
            ),
        )

    # Same treatment for $dynamicRef as for $ref above.
    dynamicRef = schema.get("$dynamicRef")
    if dynamicRef is not None:
        resolved = validator._resolver.lookup(dynamicRef)
        evaluated_keys.extend(
            find_evaluated_property_keys_by_schema(
                validator.evolve(
                    schema=resolved.contents,
                    _resolver=resolved.resolver,
                ),
                instance,
                resolved.contents,
            ),
        )

    for keyword in [
        "properties", "additionalProperties", "unevaluatedProperties",
    ]:
        if keyword in schema:
            schema_value = schema[keyword]
            # A `true` schema evaluates every present key.
            if validator.is_type(schema_value, "boolean") and schema_value:
                evaluated_keys += instance.keys()

            elif validator.is_type(schema_value, "object"):
                # NOTE: `property` shadows the builtin here (kept as-is).
                for property in schema_value:
                    if property in instance:
                        evaluated_keys.append(property)

    # patternProperties evaluates every instance key matching any pattern.
    if "patternProperties" in schema:
        for property in instance:
            for pattern in schema["patternProperties"]:
                if re.search(pattern, property):
                    evaluated_keys.append(property)

    # dependentSchemas applies a subschema only when its trigger key exists.
    if "dependentSchemas" in schema:
        for property, subschema in schema["dependentSchemas"].items():
            if property not in instance:
                continue
            evaluated_keys += find_evaluated_property_keys_by_schema(
                validator, instance, subschema,
            )

    for keyword in ["allOf", "oneOf", "anyOf"]:
        if keyword in schema:
            for subschema in schema[keyword]:
                # Only subschemas that the instance actually satisfies
                # (no first error from descend) contribute evaluated keys.
                errs = next(validator.descend(instance, subschema), None)
                if errs is None:
                    evaluated_keys += find_evaluated_property_keys_by_schema(
                        validator, instance, subschema,
                    )

    # if/then/else: when `if` matches, both `if` and `then` evaluate keys;
    # otherwise only `else` (when present) does.
    if "if" in schema:
        if validator.evolve(schema=schema["if"]).is_valid(instance):
            evaluated_keys += find_evaluated_property_keys_by_schema(
                validator, instance, schema["if"],
            )
            if "then" in schema:
                evaluated_keys += find_evaluated_property_keys_by_schema(
                    validator, instance, schema["then"],
                )
        elif "else" in schema:
            evaluated_keys += find_evaluated_property_keys_by_schema(
                validator, instance, schema["else"],
            )

    return evaluated_keys
|
||||
@@ -0,0 +1,5 @@
|
||||
"""
|
||||
Benchmarks for validation.
|
||||
|
||||
This package is *not* public API.
|
||||
"""
|
||||
@@ -0,0 +1,30 @@
|
||||
"""
A benchmark for comparing equivalent validation of `const` and `enum`.
"""

from pyperf import Runner

from jsonschema import Draft202012Validator

value = [37] * 100
const_schema = {"const": list(value)}
enum_schema = {"enum": [list(value)]}

valid = list(value)
invalid = valid + [73]

const = Draft202012Validator(const_schema)
enum = Draft202012Validator(enum_schema)

# Sanity-check that both validators agree before timing them.
for each in (const, enum):
    assert each.is_valid(valid)
    assert not each.is_valid(invalid)


if __name__ == "__main__":
    runner = Runner()
    for name, checker in (("const", const), ("enum", enum)):
        runner.bench_func(f"{name} valid", lambda c=checker: c.is_valid(valid))
        runner.bench_func(
            f"{name} invalid", lambda c=checker: c.is_valid(invalid),
        )
|
||||
@@ -0,0 +1,28 @@
|
||||
"""
A benchmark for validation of the `contains` keyword.
"""

from pyperf import Runner

from jsonschema import Draft202012Validator

schema = {
    "type": "array",
    "contains": {"const": 37},
}
validator = Draft202012Validator(schema)

size = 1000

# Matching element placed at the start, middle, end, or nowhere.
beginning = [0] * size
beginning[0] = 37
middle = [0] * (size + 1)
middle[size // 2] = 37
end = [0] * size
end[-1] = 37
invalid = [0] * size


if __name__ == "__main__":
    runner = Runner()
    cases = (
        ("baseline", []),
        ("beginning", beginning),
        ("middle", middle),
        ("end", end),
        ("invalid", invalid),
    )
    for name, instance in cases:
        runner.bench_func(
            name, lambda instance=instance: validator.is_valid(instance),
        )
|
||||
@@ -0,0 +1,25 @@
|
||||
"""
A performance benchmark using the example from issue #232.

See https://github.com/python-jsonschema/jsonschema/pull/232.
"""
from pathlib import Path

from pyperf import Runner
from referencing import Registry

from jsonschema.tests._suite import Version
import jsonschema

HERE = Path(__file__).parent

issue232 = Version(
    name="issue232",
    path=HERE / "issue232",
    remotes=Registry(),
)


if __name__ == "__main__":
    issue232.benchmark(runner=Runner(), Validator=jsonschema.Draft4Validator)
|
||||
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,12 @@
|
||||
"""
A performance benchmark using the official test suite.

This benchmarks jsonschema using every valid example in the
JSON-Schema-Test-Suite. It will take some time to complete.
"""
from pyperf import Runner

from jsonschema.tests._suite import Suite

if __name__ == "__main__":
    runner = Runner()
    Suite().benchmark(runner=runner)
|
||||
@@ -0,0 +1,56 @@
|
||||
"""
Validating highly nested schemas shouldn't cause exponential time blowups.

See https://github.com/python-jsonschema/jsonschema/issues/1097.
"""
from itertools import cycle

from jsonschema.validators import validator_for

metaschemaish = {
    "$id": "https://example.com/draft/2020-12/schema/strict",
    "$schema": "https://json-schema.org/draft/2020-12/schema",

    "$vocabulary": {
        "https://json-schema.org/draft/2020-12/vocab/core": True,
        "https://json-schema.org/draft/2020-12/vocab/applicator": True,
        "https://json-schema.org/draft/2020-12/vocab/unevaluated": True,
        "https://json-schema.org/draft/2020-12/vocab/validation": True,
        "https://json-schema.org/draft/2020-12/vocab/meta-data": True,
        "https://json-schema.org/draft/2020-12/vocab/format-annotation": True,
        "https://json-schema.org/draft/2020-12/vocab/content": True,
    },
    "$dynamicAnchor": "meta",

    "$ref": "https://json-schema.org/draft/2020-12/schema",
    "unevaluatedProperties": False,
}


def nested_schema(levels):
    """
    Produce a schema which validates deeply nested objects and arrays.
    """

    property_names = cycle(["foo", "bar", "baz", "quux", "spam", "eggs"])
    innermost = {"type": "object", "properties": {"ham": {"type": "string"}}}
    current = innermost
    # Wrap the innermost schema `levels - 1` times, cycling property names.
    for _, outer_name in zip(range(levels - 1), property_names):
        current = {"type": "object", "properties": {outer_name: current}}
    return current


validator = validator_for(metaschemaish)(metaschemaish)

if __name__ == "__main__":
    from pyperf import Runner
    runner = Runner()

    not_nested = nested_schema(levels=1)
    runner.bench_func("not nested", lambda: validator.is_valid(not_nested))

    for levels in range(1, 11, 3):
        schema = nested_schema(levels=levels)
        runner.bench_func(
            f"nested * {levels}",
            lambda schema=schema: validator.is_valid(schema),
        )
|
||||
@@ -0,0 +1,42 @@
|
||||
"""
A benchmark which tries to compare the possible slow subparts of validation.
"""
from referencing import Registry
from referencing.jsonschema import DRAFT202012
from rpds import HashTrieMap, HashTrieSet

from jsonschema import Draft202012Validator

schema = {
    "type": "array",
    "minLength": 1,
    "maxLength": 1,
    "items": {"type": "integer"},
}

hmap = HashTrieMap()
hset = HashTrieSet()

registry = Registry()

v = Draft202012Validator(schema)


def registry_data_structures():
    """Insert into the persistent map and set backing the registry."""
    return hmap.insert("foo", "bar"), hset.insert("foo")


def registry_add():
    """Add one resource to an (immutable) referencing Registry."""
    resource = DRAFT202012.create_resource(schema)
    return registry.with_resource(uri="urn:example", resource=resource)


if __name__ == "__main__":
    from pyperf import Runner
    runner = Runner()

    benchmarks = (
        ("HashMap/HashSet insertion", registry_data_structures),
        ("Registry insertion", registry_add),
        ("Success", lambda: v.is_valid([1])),
        ("Failure", lambda: v.is_valid(["foo"])),
        ("Metaschema validation", lambda: v.check_schema(schema)),
    )
    for name, func in benchmarks:
        runner.bench_func(name, func)
|
||||
@@ -0,0 +1,35 @@
|
||||
"""
An unused schema registry should not cause slower validation.

"Unused" here means one where no reference resolution is occurring anyhow.

See https://github.com/python-jsonschema/jsonschema/issues/1088.
"""
from pyperf import Runner
from referencing import Registry
from referencing.jsonschema import DRAFT201909

from jsonschema import Draft201909Validator

registry = Registry().with_resource(
    "urn:example:foo",
    DRAFT201909.create_resource({}),
)

schema = {"$ref": "https://json-schema.org/draft/2019-09/schema"}
instance = {"maxLength": 4}

no_registry = Draft201909Validator(schema)
with_useless_registry = Draft201909Validator(schema, registry=registry)

if __name__ == "__main__":
    runner = Runner()

    for name, benched in (
        ("no registry", no_registry),
        ("useless registry", with_useless_registry),
    ):
        runner.bench_func(name, lambda v=benched: v.is_valid(instance))
|
||||
@@ -0,0 +1,106 @@
|
||||
|
||||
"""
A benchmark for validation of applicators containing lots of useless schemas.

Signals a small possible optimization to remove all such schemas ahead of time.
"""

from pyperf import Runner

from jsonschema import Draft202012Validator as Validator

NUM_USELESS = 100000

subschema = {"const": 37}

valid = 37
invalid = 12

baseline = Validator(subschema)


def _validator(applicator, subschemas):
    """Build a validator for ``{applicator: subschemas}``."""
    return Validator({applicator: subschemas})


_ALL_TRUE = [True] * NUM_USELESS
_ALL_FALSE = [False] * NUM_USELESS
_HALF_TRUE = [True] * (NUM_USELESS // 2)
_HALF_FALSE = [False] * (NUM_USELESS // 2)

# These should be indistinguishable from just `subschema`
by_name = {
    "single subschema": {
        each: _validator(each, [subschema])
        for each in ("anyOf", "allOf", "oneOf")
    },
    "redundant subschemas": {
        each: _validator(each, [subschema] * NUM_USELESS)
        for each in ("anyOf", "allOf")
    },
    "useless successful subschemas (beginning)": {
        each: _validator(each, [subschema, *_ALL_TRUE])
        for each in ("anyOf", "allOf")
    },
    "useless successful subschemas (middle)": {
        each: _validator(each, [*_HALF_TRUE, subschema, *_HALF_TRUE])
        for each in ("anyOf", "allOf")
    },
    "useless successful subschemas (end)": {
        each: _validator(each, [*_ALL_TRUE, subschema])
        for each in ("anyOf", "allOf")
    },
    "useless failing subschemas (beginning)": {
        each: _validator(each, [subschema, *_ALL_FALSE])
        for each in ("anyOf", "oneOf")
    },
    "useless failing subschemas (middle)": {
        each: _validator(each, [*_HALF_FALSE, subschema, *_HALF_FALSE])
        for each in ("anyOf", "oneOf")
    },
    "useless failing subschemas (end)": {
        each: _validator(each, [*_ALL_FALSE, subschema])
        for each in ("anyOf", "oneOf")
    },
}

if __name__ == "__main__":
    runner = Runner()

    runner.bench_func("baseline valid", lambda: baseline.is_valid(valid))
    runner.bench_func("baseline invalid", lambda: baseline.is_valid(invalid))

    for group, applicators in by_name.items():
        for applicator, each in applicators.items():
            for label, instance in (("valid", valid), ("invalid", invalid)):
                runner.bench_func(
                    f"{group}: {applicator} {label}",
                    lambda v=each, i=instance: v.is_valid(i),
                )
|
||||
@@ -0,0 +1,32 @@
|
||||
"""
A benchmark for validation of schemas containing lots of useless keywords.

Checks we filter them out once, ahead of time.
"""

from pyperf import Runner

from jsonschema import Draft202012Validator

NUM_USELESS = 100000
# Meaningful keywords ("not", "type", "minimum") are separated by two
# distinct batches of NUM_USELESS unknown keywords, so each benchmark below
# exercises a different position within the schema.  The second batch must
# use fresh key names (NUM_USELESS..2*NUM_USELESS) — the original
# ``range(NUM_USELESS, NUM_USELESS)`` was empty, which left "type" and
# "minimum" adjacent and made "end of schema" measure nothing extra.
schema = dict(
    [
        ("not", {"const": 42}),
        *((str(i), i) for i in range(NUM_USELESS)),
        ("type", "integer"),
        *((str(i), i) for i in range(NUM_USELESS, 2 * NUM_USELESS)),
        ("minimum", 37),
    ],
)
validator = Draft202012Validator(schema)

valid = 3737
invalid = 12


if __name__ == "__main__":
    runner = Runner()
    # 42 fails the leading "not"; "foo" fails "type" in the middle;
    # `invalid` fails the trailing "minimum"; `valid` passes everything.
    runner.bench_func("beginning of schema", lambda: validator.is_valid(42))
    runner.bench_func("middle of schema", lambda: validator.is_valid("foo"))
    runner.bench_func("end of schema", lambda: validator.is_valid(invalid))
    runner.bench_func("valid", lambda: validator.is_valid(valid))
|
||||
@@ -0,0 +1,14 @@
|
||||
"""
A benchmark of bare validator construction.
"""
from pyperf import Runner

from jsonschema import Draft202012Validator

schema = {
    "type": "array",
    "minLength": 1,
    "maxLength": 1,
    "items": {"type": "integer"},
}


if __name__ == "__main__":
    runner = Runner()
    # bench_func forwards trailing arguments to the benched callable.
    runner.bench_func("validator creation", Draft202012Validator, schema)
|
||||
296
.venv/lib/python3.10/site-packages/jsonschema/cli.py
Normal file
296
.venv/lib/python3.10/site-packages/jsonschema/cli.py
Normal file
@@ -0,0 +1,296 @@
|
||||
"""
|
||||
The ``jsonschema`` command line.
|
||||
"""
|
||||
|
||||
from importlib import metadata
|
||||
from json import JSONDecodeError
|
||||
from textwrap import dedent
|
||||
import argparse
|
||||
import json
|
||||
import sys
|
||||
import traceback
|
||||
import warnings
|
||||
|
||||
try:
|
||||
from pkgutil import resolve_name
|
||||
except ImportError:
|
||||
from pkgutil_resolve_name import resolve_name # type: ignore[no-redef]
|
||||
|
||||
from attrs import define, field
|
||||
|
||||
from jsonschema.exceptions import SchemaError
|
||||
from jsonschema.validators import _RefResolver, validator_for
|
||||
|
||||
# Emitted at import time: the whole CLI module is deprecated in favor of
# the external check-jsonschema tool.
warnings.warn(
    (
        "The jsonschema CLI is deprecated and will be removed in a future "
        "version. Please use check-jsonschema instead, which can be installed "
        "from https://pypi.org/project/check-jsonschema/"
    ),
    DeprecationWarning,
    stacklevel=2,
)
|
||||
|
||||
|
||||
class _CannotLoadFile(Exception):
    """
    A schema or instance file could not be read.

    Raised after the failure has already been reported to the outputter,
    so callers only need to adjust the exit code.
    """

    pass
|
||||
|
||||
|
||||
@define
class _Outputter:
    """
    Dispatches CLI results to a formatter and the right output stream.

    Errors go to the stderr-like stream, successes to the stdout-like one.
    """

    _formatter = field()
    _stdout = field()
    _stderr = field()

    @classmethod
    def from_arguments(cls, arguments, stdout, stderr):
        # arguments["output"] is restricted by argparse `choices` to
        # "plain" or "pretty", so `formatter` is always bound below.
        if arguments["output"] == "plain":
            formatter = _PlainFormatter(arguments["error_format"])
        elif arguments["output"] == "pretty":
            formatter = _PrettyFormatter()
        return cls(formatter=formatter, stdout=stdout, stderr=stderr)

    def load(self, path):
        """
        Load JSON from *path*, reporting failures via the formatter.

        Raises _CannotLoadFile when the file is missing or not valid JSON.
        """
        try:
            file = open(path)  # noqa: SIM115, PTH123
        except FileNotFoundError as error:
            self.filenotfound_error(path=path, exc_info=sys.exc_info())
            raise _CannotLoadFile() from error

        with file:
            try:
                return json.load(file)
            except JSONDecodeError as error:
                self.parsing_error(path=path, exc_info=sys.exc_info())
                raise _CannotLoadFile() from error

    def filenotfound_error(self, **kwargs):
        self._stderr.write(self._formatter.filenotfound_error(**kwargs))

    def parsing_error(self, **kwargs):
        self._stderr.write(self._formatter.parsing_error(**kwargs))

    def validation_error(self, **kwargs):
        self._stderr.write(self._formatter.validation_error(**kwargs))

    def validation_success(self, **kwargs):
        # Success is the only message written to stdout.
        self._stdout.write(self._formatter.validation_success(**kwargs))
|
||||
|
||||
|
||||
@define
class _PrettyFormatter:
    """Multi-line, human-oriented rendering of CLI results."""

    _ERROR_MSG = dedent(
        """\
        ===[{type}]===({path})===

        {body}
        -----------------------------
        """,
    )
    _SUCCESS_MSG = "===[SUCCESS]===({path})===\n"

    def _error(self, path, type, body):
        # Shared banner layout for every error variety.
        return self._ERROR_MSG.format(path=path, type=type, body=body)

    def filenotfound_error(self, path, exc_info):
        return self._error(
            path, "FileNotFoundError", f"{path!r} does not exist.",
        )

    def parsing_error(self, path, exc_info):
        body = "".join(traceback.format_exception(*exc_info))
        return self._error(path, exc_info[0].__name__, body)

    def validation_error(self, instance_path, error):
        return self._error(instance_path, error.__class__.__name__, error)

    def validation_success(self, instance_path):
        return self._SUCCESS_MSG.format(path=instance_path)
|
||||
|
||||
|
||||
@define
class _PlainFormatter:
    """Minimal one-line-per-event rendering of CLI results."""

    _error_format = field()

    def filenotfound_error(self, path, exc_info):
        return f"{path!r} does not exist.\n"

    def parsing_error(self, path, exc_info):
        shown = "<stdin>" if path == "<stdin>" else repr(path)
        return f"Failed to parse {shown}: {exc_info[1]}\n"

    def validation_error(self, instance_path, error):
        # _error_format is user-supplied (or the parse_args default) and
        # may reference {file_name} and {error}.
        return self._error_format.format(file_name=instance_path, error=error)

    def validation_success(self, instance_path):
        # Plain output stays silent on success.
        return ""
|
||||
|
||||
|
||||
def _resolve_name_with_default(name):
|
||||
if "." not in name:
|
||||
name = "jsonschema." + name
|
||||
return resolve_name(name)
|
||||
|
||||
|
||||
# Command-line interface definition.  Built at import time so `--help`
# and `--version` work without any other setup.
parser = argparse.ArgumentParser(
    description="JSON Schema Validation CLI",
)
parser.add_argument(
    "-i", "--instance",
    action="append",
    dest="instances",
    help="""
        a path to a JSON instance (i.e. filename.json) to validate (may
        be specified multiple times). If no instances are provided via this
        option, one will be expected on standard input.
    """,
)
parser.add_argument(
    "-F", "--error-format",
    help="""
        the format to use for each validation error message, specified
        in a form suitable for str.format. This string will be passed
        one formatted object named 'error' for each ValidationError.
        Only provide this option when using --output=plain, which is the
        default. If this argument is unprovided and --output=plain is
        used, a simple default representation will be used.
    """,
)
parser.add_argument(
    "-o", "--output",
    choices=["plain", "pretty"],
    default="plain",
    help="""
        an output format to use. 'plain' (default) will produce minimal
        text with one line for each error, while 'pretty' will produce
        more detailed human-readable output on multiple lines.
    """,
)
parser.add_argument(
    "-V", "--validator",
    # Resolves e.g. "Draft7Validator" or a fully dotted path to an object.
    type=_resolve_name_with_default,
    help="""
        the fully qualified object name of a validator to use, or, for
        validators that are registered with jsonschema, simply the name
        of the class.
    """,
)
parser.add_argument(
    "--base-uri",
    help="""
        a base URI to assign to the provided schema, even if it does not
        declare one (via e.g. $id). This option can be used if you wish to
        resolve relative references to a particular URI (or local path)
    """,
)
parser.add_argument(
    "--version",
    action="version",
    version=metadata.version("jsonschema"),
)
parser.add_argument(
    "schema",
    help="the path to a JSON Schema to validate with (i.e. schema.json)",
)
|
||||
|
||||
|
||||
def parse_args(args):
    """Parse CLI arguments into a dict, applying cross-option defaults."""
    arguments = vars(parser.parse_args(args=args or ["--help"]))
    plain = arguments["output"] == "plain"
    if arguments["error_format"] and not plain:
        raise parser.error(
            "--error-format can only be used with --output plain",
        )
    if plain and arguments["error_format"] is None:
        arguments["error_format"] = "{error.instance}: {error.message}\n"
    return arguments
|
||||
|
||||
|
||||
def _validate_instance(instance_path, instance, validator, outputter):
|
||||
invalid = False
|
||||
for error in validator.iter_errors(instance):
|
||||
invalid = True
|
||||
outputter.validation_error(instance_path=instance_path, error=error)
|
||||
|
||||
if not invalid:
|
||||
outputter.validation_success(instance_path=instance_path)
|
||||
return invalid
|
||||
|
||||
|
||||
def main(args=None):
    """
    Run the jsonschema CLI and exit with its status code.

    *args* defaults to ``sys.argv[1:]`` read at call time — the previous
    ``args=sys.argv[1:]`` default was captured once at import time, so
    later changes to ``sys.argv`` were silently ignored.
    """
    if args is None:
        args = sys.argv[1:]
    sys.exit(run(arguments=parse_args(args=args)))
|
||||
|
||||
|
||||
def run(arguments, stdout=sys.stdout, stderr=sys.stderr, stdin=sys.stdin):
    """
    Validate the parsed CLI *arguments*; return a process exit code.

    The stream parameters exist for test injection.  Returns 0 when every
    instance validated, 1 otherwise.
    """
    outputter = _Outputter.from_arguments(
        arguments=arguments,
        stdout=stdout,
        stderr=stderr,
    )

    # The load itself already reported the failure to the outputter.
    try:
        schema = outputter.load(arguments["schema"])
    except _CannotLoadFile:
        return 1

    Validator = arguments["validator"]
    if Validator is None:
        # Pick the validator class from the schema's $schema keyword.
        Validator = validator_for(schema)

    try:
        Validator.check_schema(schema)
    except SchemaError as error:
        outputter.validation_error(
            instance_path=arguments["schema"],
            error=error,
        )
        return 1

    if arguments["instances"]:
        load, instances = outputter.load, arguments["instances"]
    else:
        # No -i options: read a single instance from standard input.
        def load(_):
            try:
                return json.load(stdin)
            except JSONDecodeError as error:
                outputter.parsing_error(
                    path="<stdin>", exc_info=sys.exc_info(),
                )
                raise _CannotLoadFile() from error
        instances = ["<stdin>"]

    # Only build the (deprecated) resolver when an explicit base URI was
    # requested; otherwise the validator uses its default resolution.
    resolver = _RefResolver(
        base_uri=arguments["base_uri"],
        referrer=schema,
    ) if arguments["base_uri"] is not None else None

    validator = Validator(schema, resolver=resolver)
    exit_code = 0
    for each in instances:
        try:
            instance = load(each)
        except _CannotLoadFile:
            exit_code = 1
        else:
            # |= keeps the failure sticky across multiple instances.
            exit_code |= _validate_instance(
                instance_path=each,
                instance=instance,
                validator=validator,
                outputter=outputter,
            )

    return exit_code
|
||||
487
.venv/lib/python3.10/site-packages/jsonschema/exceptions.py
Normal file
487
.venv/lib/python3.10/site-packages/jsonschema/exceptions.py
Normal file
@@ -0,0 +1,487 @@
|
||||
"""
|
||||
Validation errors, and some surrounding helpers.
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
from collections import defaultdict, deque
|
||||
from pprint import pformat
|
||||
from textwrap import dedent, indent
|
||||
from typing import TYPE_CHECKING, Any, ClassVar
|
||||
import heapq
|
||||
import itertools
|
||||
import warnings
|
||||
|
||||
from attrs import define
|
||||
from referencing.exceptions import Unresolvable as _Unresolvable
|
||||
|
||||
from jsonschema import _utils
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from collections.abc import Iterable, Mapping, MutableMapping, Sequence
|
||||
|
||||
from jsonschema import _types
|
||||
|
||||
WEAK_MATCHES: frozenset[str] = frozenset(["anyOf", "oneOf"])
|
||||
STRONG_MATCHES: frozenset[str] = frozenset()
|
||||
|
||||
_unset = _utils.Unset()
|
||||
|
||||
|
||||
def _pretty(thing: Any, prefix: str):
|
||||
"""
|
||||
Format something for an error message as prettily as we currently can.
|
||||
"""
|
||||
return indent(pformat(thing, width=72, sort_dicts=False), prefix).lstrip()
|
||||
|
||||
|
||||
def __getattr__(name):
    """Serve the deprecated ``RefResolutionError`` alias with a warning."""
    if name != "RefResolutionError":
        raise AttributeError(f"module {__name__} has no attribute {name}")
    warnings.warn(
        _RefResolutionError._DEPRECATION_MESSAGE,
        DeprecationWarning,
        stacklevel=2,
    )
    return _RefResolutionError
|
||||
|
||||
|
||||
class _Error(Exception):
    """
    Shared implementation behind ValidationError and SchemaError.

    Subclasses only differ in the words used when rendering the message.
    """

    # Filled in by subclasses; used by __str__ below.
    _word_for_schema_in_error_message: ClassVar[str]
    _word_for_instance_in_error_message: ClassVar[str]

    def __init__(
        self,
        message: str,
        validator: str = _unset,  # type: ignore[assignment]
        path: Iterable[str | int] = (),
        cause: Exception | None = None,
        context=(),
        validator_value: Any = _unset,
        instance: Any = _unset,
        schema: Mapping[str, Any] | bool = _unset,  # type: ignore[assignment]
        schema_path: Iterable[str | int] = (),
        parent: _Error | None = None,
        type_checker: _types.TypeChecker = _unset,  # type: ignore[assignment]
    ) -> None:
        # Pass everything to Exception so pickling/args round-trips work.
        super().__init__(
            message,
            validator,
            path,
            cause,
            context,
            validator_value,
            instance,
            schema,
            schema_path,
            parent,
        )
        self.message = message
        # `path`/`schema_path` start out relative; the absolute_* properties
        # below walk the parent chain to produce full paths.
        self.path = self.relative_path = deque(path)
        self.schema_path = self.relative_schema_path = deque(schema_path)
        self.context = list(context)
        self.cause = self.__cause__ = cause
        self.validator = validator
        self.validator_value = validator_value
        self.instance = instance
        self.schema = schema
        self.parent = parent
        self._type_checker = type_checker

        # Adopt any context errors so their absolute paths resolve via us.
        for error in context:
            error.parent = self

    def __repr__(self) -> str:
        return f"<{self.__class__.__name__}: {self.message!r}>"

    def __str__(self) -> str:
        # Without all four of these we cannot render the verbose form,
        # so fall back to the bare message.
        essential_for_verbose = (
            self.validator, self.validator_value, self.instance, self.schema,
        )
        if any(m is _unset for m in essential_for_verbose):
            return self.message

        schema_path = _utils.format_as_index(
            container=self._word_for_schema_in_error_message,
            indices=list(self.relative_schema_path)[:-1],
        )
        instance_path = _utils.format_as_index(
            container=self._word_for_instance_in_error_message,
            indices=self.relative_path,
        )
        # Matches the indentation of the _pretty() lines inside the
        # f-string below (before dedent strips the common 12 spaces).
        prefix = 16 * " "

        return dedent(
            f"""\
            {self.message}

            Failed validating {self.validator!r} in {schema_path}:
                {_pretty(self.schema, prefix=prefix)}

            On {instance_path}:
                {_pretty(self.instance, prefix=prefix)}
            """.rstrip(),
        )

    @classmethod
    def create_from(cls, other: _Error):
        # Re-wrap an error as this class (e.g. ValidationError -> SchemaError).
        return cls(**other._contents())

    @property
    def absolute_path(self) -> Sequence[str | int]:
        parent = self.parent
        if parent is None:
            return self.relative_path

        path = deque(self.relative_path)
        path.extendleft(reversed(parent.absolute_path))
        return path

    @property
    def absolute_schema_path(self) -> Sequence[str | int]:
        parent = self.parent
        if parent is None:
            return self.relative_schema_path

        path = deque(self.relative_schema_path)
        path.extendleft(reversed(parent.absolute_schema_path))
        return path

    @property
    def json_path(self) -> str:
        # Renders the absolute path in (simplified) JSON Path notation.
        path = "$"
        for elem in self.absolute_path:
            if isinstance(elem, int):
                path += "[" + str(elem) + "]"
            else:
                path += "." + elem
        return path

    def _set(
        self,
        type_checker: _types.TypeChecker | None = None,
        **kwargs: Any,
    ) -> None:
        # Fill in attributes that are still unset, without clobbering
        # values provided closer to where the error occurred.
        if type_checker is not None and self._type_checker is _unset:
            self._type_checker = type_checker

        for k, v in kwargs.items():
            if getattr(self, k) is _unset:
                setattr(self, k, v)

    def _contents(self):
        attrs = (
            "message", "cause", "context", "validator", "validator_value",
            "path", "schema_path", "instance", "schema", "parent",
        )
        return {attr: getattr(self, attr) for attr in attrs}

    def _matches_type(self) -> bool:
        try:
            # We ignore this as we want to simply crash if this happens
            expected = self.schema["type"]  # type: ignore[index]
        except (KeyError, TypeError):
            return False

        if isinstance(expected, str):
            return self._type_checker.is_type(self.instance, expected)

        # "type" may also be a list of type names.
        return any(
            self._type_checker.is_type(self.instance, expected_type)
            for expected_type in expected
        )
|
||||
|
||||
|
||||
class ValidationError(_Error):
    """
    An instance was invalid under a provided schema.
    """

    # Wording used by _Error.__str__ when rendering the failure.
    _word_for_schema_in_error_message = "schema"
    _word_for_instance_in_error_message = "instance"
|
||||
|
||||
|
||||
class SchemaError(_Error):
    """
    A schema was invalid under its corresponding metaschema.
    """

    # Wording used by _Error.__str__ when rendering the failure.
    _word_for_schema_in_error_message = "metaschema"
    _word_for_instance_in_error_message = "schema"
|
||||
|
||||
|
||||
# slots=False so the _WrappedReferencingError subclass below can attach
# extra attributes via object.__setattr__.
@define(slots=False)
class _RefResolutionError(Exception):
    """
    A ref could not be resolved.

    Deprecated shim; see _DEPRECATION_MESSAGE.
    """

    _DEPRECATION_MESSAGE = (
        "jsonschema.exceptions.RefResolutionError is deprecated as of version "
        "4.18.0. If you wish to catch potential reference resolution errors, "
        "directly catch referencing.exceptions.Unresolvable."
    )

    _cause: Exception

    def __eq__(self, other):
        if self.__class__ is not other.__class__:
            return NotImplemented  # pragma: no cover -- uncovered but deprecated  # noqa: E501
        return self._cause == other._cause

    def __str__(self) -> str:
        return str(self._cause)
|
||||
|
||||
|
||||
class _WrappedReferencingError(_RefResolutionError, _Unresolvable):  # pragma: no cover -- partially uncovered but to be removed  # noqa: E501
    """Shim presenting a `referencing` error as the deprecated error type."""

    def __init__(self, cause: _Unresolvable):
        # Bypass any attrs-generated __setattr__ on the bases.
        object.__setattr__(self, "_wrapped", cause)

    def __eq__(self, other):
        # Equal to another wrapper over an equal error, or to the bare
        # underlying error itself.
        if other.__class__ is self.__class__:
            return self._wrapped == other._wrapped
        if other.__class__ is self._wrapped.__class__:
            return self._wrapped == other
        return NotImplemented

    def __getattr__(self, attr):
        # Delegate everything else to the wrapped error.
        return getattr(self._wrapped, attr)

    def __hash__(self):
        return hash(self._wrapped)

    def __repr__(self):
        return f"<WrappedReferencingError {self._wrapped!r}>"

    def __str__(self):
        return f"{type(self._wrapped).__name__}: {self._wrapped}"
|
||||
|
||||
|
||||
class UndefinedTypeCheck(Exception):
    """
    A type checker was asked to check a type it did not have registered.
    """

    def __init__(self, type: str) -> None:
        # The name of the unregistered type.
        self.type = type

    def __str__(self) -> str:
        return "Type {!r} is unknown to this type checker".format(self.type)
|
||||
|
||||
|
||||
class UnknownType(Exception):
    """
    A validator was asked to validate an instance against an unknown type.
    """

    def __init__(self, type, instance, schema):
        self.type = type
        self.instance = instance
        self.schema = schema

    def __str__(self):
        pad = 16 * " "

        return dedent(
            f"""\
                Unknown type {self.type!r} for validator with schema:
                    {_pretty(self.schema, prefix=pad)}

                While checking instance:
                    {_pretty(self.instance, prefix=pad)}
                """.rstrip(),
        )
|
||||
|
||||
|
||||
class FormatError(Exception):
    """
    Validating a format failed.
    """

    def __init__(self, message, cause=None):
        super().__init__(message, cause)
        # The human-readable failure message.
        self.message = message
        # Expose the underlying failure both as ``.cause`` and via the
        # standard exception-chaining attribute.
        self.__cause__ = cause
        self.cause = cause

    def __str__(self):
        return self.message
|
||||
|
||||
|
||||
class ErrorTree:
    """
    ErrorTrees make it easier to check which validations failed.
    """

    # Sentinel: replaced with the actual (sub-)instance once an error for
    # this level of the tree has been seen.
    _instance = _unset

    def __init__(self, errors: Iterable[ValidationError] = ()):
        # Errors at *this* level, keyed by validation keyword.
        self.errors: MutableMapping[str, ValidationError] = {}
        # Child trees, keyed by index/property of the instance; the
        # defaultdict lazily creates empty subtrees on traversal.
        self._contents: Mapping[str, ErrorTree] = defaultdict(self.__class__)

        for error in errors:
            # Walk down the error's path, creating intermediate subtrees,
            # and attach the error at the deepest level it refers to.
            container = self
            for element in error.path:
                container = container[element]
            container.errors[error.validator] = error

            container._instance = error.instance

    def __contains__(self, index: str | int):
        """
        Check whether ``instance[index]`` has any errors.
        """
        return index in self._contents

    def __getitem__(self, index):
        """
        Retrieve the child tree one level down at the given ``index``.

        If the index is not in the instance that this tree corresponds
        to and is not known by this tree, whatever error would be raised
        by ``instance.__getitem__`` will be propagated (usually this is
        some subclass of `LookupError`.
        """
        # Deliberately index the instance (discarding the result) so its
        # own lookup error propagates for unknown indices.
        if self._instance is not _unset and index not in self:
            self._instance[index]
        return self._contents[index]

    def __setitem__(self, index: str | int, value: ErrorTree):
        """
        Add an error to the tree at the given ``index``.

        .. deprecated:: v4.20.0

            Setting items on an `ErrorTree` is deprecated without replacement.
            To populate a tree, provide all of its sub-errors when you
            construct the tree.
        """
        warnings.warn(
            "ErrorTree.__setitem__ is deprecated without replacement.",
            DeprecationWarning,
            stacklevel=2,
        )
        self._contents[index] = value  # type: ignore[index]

    def __iter__(self):
        """
        Iterate (non-recursively) over the indices in the instance with errors.
        """
        return iter(self._contents)

    def __len__(self):
        """
        Return the `total_errors`.
        """
        return self.total_errors

    def __repr__(self):
        total = len(self)
        errors = "error" if total == 1 else "errors"
        return f"<{self.__class__.__name__} ({total} total {errors})>"

    @property
    def total_errors(self):
        """
        The total number of errors in the entire tree, including children.
        """
        # len(tree) recurses, so this counts the whole subtree.
        child_errors = sum(len(tree) for _, tree in self._contents.items())
        return len(self.errors) + child_errors
|
||||
|
||||
|
||||
def by_relevance(weak=WEAK_MATCHES, strong=STRONG_MATCHES):
    """
    Create a key function that can be used to sort errors by relevance.

    Arguments:
        weak (set):
            a collection of validation keywords to consider to be
            "weak". If there are two errors at the same level of the
            instance and one is in the set of weak validation keywords,
            the other error will take priority. By default, :kw:`anyOf`
            and :kw:`oneOf` are considered weak keywords and will be
            superseded by other same-level validation errors.

        strong (set):
            a collection of validation keywords to consider to be
            "strong"

    """

    def relevance(error):
        keyword = error.validator
        # Tuple compares left-to-right; earlier elements dominate.
        return (
            -len(error.path),           # prefer 'deeper', more specific errors
            error.path,                 # then earlier sibling errors
            keyword not in weak,        # then non-low-priority keywords
            keyword in strong,          # then high-priority keywords
            not error._matches_type(),  # then errors matching the instance type
        )

    return relevance
|
||||
|
||||
|
||||
# Module-level default key function built with `by_relevance`'s defaults.
relevance = by_relevance()
"""
A key function (e.g. to use with `sorted`) which sorts errors by relevance.

Example:

.. code:: python

    sorted(validator.iter_errors(12), key=jsonschema.exceptions.relevance)
"""
|
||||
|
||||
|
||||
def best_match(errors, key=relevance):
    """
    Try to find an error that appears to be the best match among given errors.

    In general, errors that are higher up in the instance (i.e. for which
    `ValidationError.path` is shorter) are considered better matches,
    since they indicate "more" is wrong with the instance.

    If the resulting match is either :kw:`oneOf` or :kw:`anyOf`, the
    *opposite* assumption is made -- i.e. the deepest error is picked,
    since these keywords only need to match once, and any other errors
    may not be relevant.

    Arguments:
        errors (collections.abc.Iterable):

            the errors to select from. Do not provide a mixture of
            errors from different validation attempts (i.e. from
            different instances or schemas), since it won't produce
            sensical output.

        key (collections.abc.Callable):

            the key to use when sorting errors. See `relevance` and
            transitively `by_relevance` for more details (the default is
            to sort with the defaults of that function). Changing the
            default is only useful if you want to change the function
            that rates errors but still want the error context descent
            done by this function.

    Returns:
        the best matching error, or ``None`` if the iterable was empty

    .. note::

        This function is a heuristic. Its return value may change for a given
        set of inputs from version to version if better heuristics are added.

    """
    errors = iter(errors)
    # Peek at one error so an empty iterable short-circuits to None.
    best = next(errors, None)
    if best is None:
        return
    best = max(itertools.chain([best], errors), key=key)

    # Descend into sub-errors (``context``) as long as there is a single
    # clearly-most-relevant one; stop when the top two candidates tie.
    while best.context:
        # Calculate the minimum via nsmallest, because we don't recurse if
        # all nested errors have the same relevance (i.e. if min == max == all)
        smallest = heapq.nsmallest(2, best.context, key=key)
        if len(smallest) == 2 and key(smallest[0]) == key(smallest[1]): # noqa: PLR2004
            return best
        best = smallest[0]
    return best
|
||||
236
.venv/lib/python3.10/site-packages/jsonschema/protocols.py
Normal file
236
.venv/lib/python3.10/site-packages/jsonschema/protocols.py
Normal file
@@ -0,0 +1,236 @@
|
||||
"""
|
||||
typing.Protocol classes for jsonschema interfaces.
|
||||
"""
|
||||
|
||||
# for reference material on Protocols, see
|
||||
# https://www.python.org/dev/peps/pep-0544/
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import (
|
||||
TYPE_CHECKING,
|
||||
Any,
|
||||
ClassVar,
|
||||
Iterable,
|
||||
Protocol,
|
||||
runtime_checkable,
|
||||
)
|
||||
|
||||
# in order for Sphinx to resolve references accurately from type annotations,
|
||||
# it needs to see names like `jsonschema.TypeChecker`
|
||||
# therefore, only import at type-checking time (to avoid circular references),
|
||||
# but use `jsonschema` for any types which will otherwise not be resolvable
|
||||
if TYPE_CHECKING:
|
||||
from collections.abc import Mapping
|
||||
|
||||
import referencing.jsonschema
|
||||
|
||||
from jsonschema import _typing
|
||||
from jsonschema.exceptions import ValidationError
|
||||
import jsonschema
|
||||
import jsonschema.validators
|
||||
|
||||
# For code authors working on the validator protocol, these are the three
|
||||
# use-cases which should be kept in mind:
|
||||
#
|
||||
# 1. As a protocol class, it can be used in type annotations to describe the
|
||||
# available methods and attributes of a validator
|
||||
# 2. It is the source of autodoc for the validator documentation
|
||||
# 3. It is runtime_checkable, meaning that it can be used in isinstance()
|
||||
# checks.
|
||||
#
|
||||
# Since protocols are not base classes, isinstance() checking is limited in
|
||||
# its capabilities. See docs on runtime_checkable for detail
|
||||
|
||||
|
||||
@runtime_checkable
class Validator(Protocol):
    """
    The protocol to which all validator classes adhere.

    Arguments:

        schema:

            The schema that the validator object will validate with.
            It is assumed to be valid, and providing
            an invalid schema can lead to undefined behavior. See
            `Validator.check_schema` to validate a schema first.

        registry:

            a schema registry that will be used for looking up JSON references

        resolver:

            a resolver that will be used to resolve :kw:`$ref`
            properties (JSON references). If unprovided, one will be created.

            .. deprecated:: v4.18.0

                `RefResolver <_RefResolver>` has been deprecated in favor of
                `referencing`, and with it, this argument.

        format_checker:

            if provided, a checker which will be used to assert about
            :kw:`format` properties present in the schema. If unprovided,
            *no* format validation is done, and the presence of format
            within schemas is strictly informational. Certain formats
            require additional packages to be installed in order to assert
            against instances. Ensure you've installed `jsonschema` with
            its `extra (optional) dependencies <index:extras>` when
            invoking ``pip``.

    .. deprecated:: v4.12.0

        Subclassing validator classes now explicitly warns this is not part of
        their public API.

    """

    #: An object representing the validator's meta schema (the schema that
    #: describes valid schemas in the given version).
    META_SCHEMA: ClassVar[Mapping]

    #: A mapping of validation keywords (`str`\s) to functions that
    #: validate the keyword with that name. For more information see
    #: `creating-validators`.
    VALIDATORS: ClassVar[Mapping]

    #: A `jsonschema.TypeChecker` that will be used when validating
    #: :kw:`type` keywords in JSON schemas.
    TYPE_CHECKER: ClassVar[jsonschema.TypeChecker]

    #: A `jsonschema.FormatChecker` that will be used when validating
    #: :kw:`format` keywords in JSON schemas.
    FORMAT_CHECKER: ClassVar[jsonschema.FormatChecker]

    #: A function which given a schema returns its ID.
    ID_OF: _typing.id_of

    #: The schema that will be used to validate instances
    schema: Mapping | bool

    # NOTE: the method bodies below are intentionally empty (or ``...``) --
    # this is a Protocol, so only the signatures and docstrings matter.

    def __init__(
        self,
        schema: Mapping | bool,
        registry: referencing.jsonschema.SchemaRegistry,
        format_checker: jsonschema.FormatChecker | None = None,
    ) -> None:
        ...

    @classmethod
    def check_schema(cls, schema: Mapping | bool) -> None:
        """
        Validate the given schema against the validator's `META_SCHEMA`.

        Raises:

            `jsonschema.exceptions.SchemaError`:

                if the schema is invalid

        """

    def is_type(self, instance: Any, type: str) -> bool:
        """
        Check if the instance is of the given (JSON Schema) type.

        Arguments:

            instance:

                the value to check

            type:

                the name of a known (JSON Schema) type

        Returns:

            whether the instance is of the given type

        Raises:

            `jsonschema.exceptions.UnknownType`:

                if ``type`` is not a known type

        """

    def is_valid(self, instance: Any) -> bool:
        """
        Check if the instance is valid under the current `schema`.

        Returns:

            whether the instance is valid or not

        >>> schema = {"maxItems" : 2}
        >>> Draft202012Validator(schema).is_valid([2, 3, 4])
        False

        """

    def iter_errors(self, instance: Any) -> Iterable[ValidationError]:
        r"""
        Lazily yield each of the validation errors in the given instance.

        >>> schema = {
        ...     "type" : "array",
        ...     "items" : {"enum" : [1, 2, 3]},
        ...     "maxItems" : 2,
        ... }
        >>> v = Draft202012Validator(schema)
        >>> for error in sorted(v.iter_errors([2, 3, 4]), key=str):
        ...     print(error.message)
        4 is not one of [1, 2, 3]
        [2, 3, 4] is too long

        .. deprecated:: v4.0.0

            Calling this function with a second schema argument is deprecated.
            Use `Validator.evolve` instead.
        """

    def validate(self, instance: Any) -> None:
        """
        Check if the instance is valid under the current `schema`.

        Raises:

            `jsonschema.exceptions.ValidationError`:

                if the instance is invalid

        >>> schema = {"maxItems" : 2}
        >>> Draft202012Validator(schema).validate([2, 3, 4])
        Traceback (most recent call last):
            ...
        ValidationError: [2, 3, 4] is too long

        """

    def evolve(self, **kwargs) -> Validator:
        """
        Create a new validator like this one, but with given changes.

        Preserves all other attributes, so can be used to e.g. create a
        validator with a different schema but with the same :kw:`$ref`
        resolution behavior.

        >>> validator = Draft202012Validator({})
        >>> validator.evolve(schema={"type": "number"})
        Draft202012Validator(schema={'type': 'number'}, format_checker=None)

        The returned object satisfies the validator protocol, but may not
        be of the same concrete class! In particular this occurs
        when a :kw:`$ref` occurs to a schema with a different
        :kw:`$schema` than this one (i.e. for a different draft).

        >>> validator.evolve(
        ...     schema={"$schema": Draft7Validator.META_SCHEMA["$id"]}
        ... )
        Draft7Validator(schema=..., format_checker=None)
        """
|
||||
276
.venv/lib/python3.10/site-packages/jsonschema/tests/_suite.py
Normal file
276
.venv/lib/python3.10/site-packages/jsonschema/tests/_suite.py
Normal file
@@ -0,0 +1,276 @@
|
||||
"""
|
||||
Python representations of the JSON Schema Test Suite tests.
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
from contextlib import suppress
|
||||
from functools import partial
|
||||
from pathlib import Path
|
||||
from typing import TYPE_CHECKING, Any
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
import subprocess
|
||||
import sys
|
||||
import unittest
|
||||
|
||||
from attrs import field, frozen
|
||||
from referencing import Registry
|
||||
import referencing.jsonschema
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from collections.abc import Iterable, Mapping, Sequence
|
||||
|
||||
import pyperf
|
||||
|
||||
from jsonschema.validators import _VALIDATORS
|
||||
import jsonschema
|
||||
|
||||
_DELIMITERS = re.compile(r"[\W\- ]+")
|
||||
|
||||
|
||||
def _find_suite():
|
||||
root = os.environ.get("JSON_SCHEMA_TEST_SUITE")
|
||||
if root is not None:
|
||||
return Path(root)
|
||||
|
||||
root = Path(jsonschema.__file__).parent.parent / "json"
|
||||
if not root.is_dir(): # pragma: no cover
|
||||
raise ValueError(
|
||||
(
|
||||
"Can't find the JSON-Schema-Test-Suite directory. "
|
||||
"Set the 'JSON_SCHEMA_TEST_SUITE' environment "
|
||||
"variable or run the tests from alongside a checkout "
|
||||
"of the suite."
|
||||
),
|
||||
)
|
||||
return root
|
||||
|
||||
|
||||
@frozen
class Suite:
    """A checkout of the official JSON Schema Test Suite."""

    # Root directory of the suite checkout.
    _root: Path = field(factory=_find_suite)
    # Registry of the suite's "remote" schemas; built in __attrs_post_init__.
    _remotes: referencing.jsonschema.SchemaRegistry = field(init=False)

    def __attrs_post_init__(self):
        # The suite ships a helper script which dumps its remote schemas as
        # JSON on stdout; invoked as an argv list (shell=False), trusted input.
        jsonschema_suite = self._root.joinpath("bin", "jsonschema_suite")
        argv = [sys.executable, str(jsonschema_suite), "remotes"]
        remotes = subprocess.check_output(argv).decode("utf-8")

        resources = json.loads(remotes)

        # These two remotes need a specific dialect, so register them
        # explicitly before bulk-loading the rest.
        li = "http://localhost:1234/locationIndependentIdentifierPre2019.json"
        li4 = "http://localhost:1234/locationIndependentIdentifierDraft4.json"

        registry = Registry().with_resources(
            [
                (
                    li,
                    referencing.jsonschema.DRAFT7.create_resource(
                        contents=resources.pop(li),
                    ),
                ),
                (
                    li4,
                    referencing.jsonschema.DRAFT4.create_resource(
                        contents=resources.pop(li4),
                    ),
                ),
            ],
        ).with_contents(
            resources.items(),
            default_specification=referencing.jsonschema.DRAFT202012,
        )
        # object.__setattr__ because the class is @frozen.
        object.__setattr__(self, "_remotes", registry)

    def benchmark(self, runner: pyperf.Runner):  # pragma: no cover
        """Benchmark every draft's cases against its validator."""
        for name, Validator in _VALIDATORS.items():
            self.version(name=name).benchmark(
                runner=runner,
                Validator=Validator,
            )

    def version(self, name) -> Version:
        """Return the `Version` for the named draft (e.g. ``draft7``)."""
        return Version(
            name=name,
            path=self._root / "tests" / name,
            remotes=self._remotes,
        )
|
||||
|
||||
|
||||
@frozen
class Version:
    """A single draft/version directory within the test suite."""

    # Directory holding this version's test files.
    _path: Path
    _remotes: referencing.jsonschema.SchemaRegistry

    # The draft's name, e.g. ``draft2020-12``.
    name: str

    def benchmark(self, **kwargs):  # pragma: no cover
        """Benchmark each of this version's cases."""
        for case in self.cases():
            case.benchmark(**kwargs)

    def cases(self) -> Iterable[_Case]:
        """The required (non-optional) cases for this version."""
        return self._cases_in(paths=self._path.glob("*.json"))

    def format_cases(self) -> Iterable[_Case]:
        """The optional ``format`` cases for this version."""
        return self._cases_in(paths=self._path.glob("optional/format/*.json"))

    def optional_cases_of(self, name: str) -> Iterable[_Case]:
        """The optional cases from the named file for this version."""
        return self._cases_in(paths=[self._path / "optional" / f"{name}.json"])

    def to_unittest_testcase(self, *groups, **kwargs):
        """
        Dynamically build a ``unittest.TestCase`` from the given case groups.
        """
        name = kwargs.pop("name", "Test" + self.name.title().replace("-", ""))
        methods = {
            method.__name__: method
            for method in (
                test.to_unittest_method(**kwargs)
                for group in groups
                for case in group
                for test in case.tests
            )
        }
        cls = type(name, (unittest.TestCase,), methods)

        # We're doing crazy things, so if they go wrong, like a function
        # behaving differently on some other interpreter, just make them
        # not happen.
        with suppress(Exception):
            cls.__module__ = _someone_save_us_the_module_of_the_caller()

        return cls

    def _cases_in(self, paths: Iterable[Path]) -> Iterable[_Case]:
        # Each file contains a JSON array of case objects.
        for path in paths:
            for case in json.loads(path.read_text(encoding="utf-8")):
                yield _Case.from_dict(
                    case,
                    version=self,
                    subject=path.stem,
                    remotes=self._remotes,
                )
|
||||
|
||||
|
||||
@frozen
class _Case:
    """One test case (a schema plus its tests) from a suite file."""

    # The draft/version this case belongs to.
    version: Version

    # The file stem the case came from, and its suite-provided metadata.
    subject: str
    description: str
    schema: Mapping[str, Any] | bool
    tests: list[_Test]
    comment: str | None = None
    specification: Sequence[dict[str, str]] = ()

    @classmethod
    def from_dict(cls, data, remotes, **kwargs):
        """Build a case (and its `_Test`\\ s) from a suite JSON object."""
        data.update(kwargs)
        tests = [
            _Test(
                version=data["version"],
                subject=data["subject"],
                case_description=data["description"],
                schema=data["schema"],
                remotes=remotes,
                **test,
            ) for test in data.pop("tests")
        ]
        return cls(tests=tests, **data)

    def benchmark(self, runner: pyperf.Runner, **kwargs):  # pragma: no cover
        """Register each of this case's tests with the benchmark runner."""
        for test in self.tests:
            runner.bench_func(
                test.fully_qualified_name,
                partial(test.validate_ignoring_errors, **kwargs),
            )
|
||||
|
||||
|
||||
@frozen(repr=False)
class _Test:
    """A single instance-vs-schema check from the test suite."""

    # The draft/version this test belongs to.
    version: Version

    # Identification of the test within the suite.
    subject: str
    case_description: str
    description: str

    # The instance to validate and the schema to validate it against.
    data: Any
    schema: Mapping[str, Any] | bool

    # Whether the suite says validation should succeed.
    valid: bool

    _remotes: referencing.jsonschema.SchemaRegistry

    comment: str | None = None

    def __repr__(self):  # pragma: no cover
        return f"<Test {self.fully_qualified_name}>"

    @property
    def fully_qualified_name(self):  # pragma: no cover
        return " > ".join(  # noqa: FLY002
            [
                self.version.name,
                self.subject,
                self.case_description,
                self.description,
            ],
        )

    def to_unittest_method(self, skip=lambda test: None, **kwargs):
        """
        Turn this suite test into a ``unittest`` test method.

        ``skip`` may return a reason string to skip the test; the
        JSON_SCHEMA_DEBUG / JSON_SCHEMA_EXPECTED_FAILURES environment
        variables override skipping, for debugging.
        """
        if self.valid:
            def fn(this):
                self.validate(**kwargs)
        else:
            def fn(this):
                with this.assertRaises(jsonschema.ValidationError):
                    self.validate(**kwargs)

        fn.__name__ = "_".join(
            [
                "test",
                _DELIMITERS.sub("_", self.subject),
                _DELIMITERS.sub("_", self.case_description),
                _DELIMITERS.sub("_", self.description),
            ],
        )
        reason = skip(self)
        if reason is None or os.environ.get("JSON_SCHEMA_DEBUG", "0") != "0":
            return fn
        elif os.environ.get("JSON_SCHEMA_EXPECTED_FAILURES", "0") != "0":  # pragma: no cover  # noqa: E501
            return unittest.expectedFailure(fn)
        else:
            return unittest.skip(reason)(fn)

    def validate(self, Validator, **kwargs):
        """Check the schema, then validate ``data`` with the given class."""
        Validator.check_schema(self.schema)
        validator = Validator(
            schema=self.schema,
            registry=self._remotes,
            **kwargs,
        )
        if os.environ.get("JSON_SCHEMA_DEBUG", "0") != "0":  # pragma: no cover
            breakpoint()  # noqa: T100
        validator.validate(instance=self.data)

    def validate_ignoring_errors(self, Validator):  # pragma: no cover
        """Run `validate`, swallowing validation failures (for benchmarks)."""
        with suppress(jsonschema.ValidationError):
            self.validate(Validator=Validator)
|
||||
|
||||
|
||||
def _someone_save_us_the_module_of_the_caller():
|
||||
"""
|
||||
The FQON of the module 2nd stack frames up from here.
|
||||
|
||||
This is intended to allow us to dynamically return test case classes that
|
||||
are indistinguishable from being defined in the module that wants them.
|
||||
|
||||
Otherwise, trial will mis-print the FQON, and copy pasting it won't re-run
|
||||
the class that really is running.
|
||||
|
||||
Save us all, this is all so so so so so terrible.
|
||||
"""
|
||||
|
||||
return sys._getframe(2).f_globals["__name__"]
|
||||
@@ -0,0 +1,50 @@
|
||||
"""
|
||||
Fuzzing setup for OSS-Fuzz.
|
||||
|
||||
See https://github.com/google/oss-fuzz/tree/master/projects/jsonschema for the
|
||||
other half of the setup here.
|
||||
"""
|
||||
import sys
|
||||
|
||||
from hypothesis import given, strategies
|
||||
|
||||
import jsonschema
|
||||
|
||||
# Primitive (scalar) JSON-ish values; NaN/inf excluded since they are not
# valid JSON.
PRIM = strategies.one_of(
    strategies.booleans(),
    strategies.integers(),
    strategies.floats(allow_nan=False, allow_infinity=False),
    strategies.text(),
)
# Arbitrarily nested dicts of primitives; booleans are also allowed at the
# base since a bare boolean is a valid schema.
DICT = strategies.recursive(
    base=strategies.one_of(
        strategies.booleans(),
        strategies.dictionaries(strategies.text(), PRIM),
    ),
    extend=lambda inner: strategies.dictionaries(strategies.text(), inner),
)
|
||||
|
||||
|
||||
@given(obj1=DICT, obj2=DICT)
def test_schemas(obj1, obj2):
    """
    Fuzz `jsonschema.validate` with arbitrary instance/schema pairs.

    Invalid instances and invalid schemas are expected outcomes, not
    crashes, so both exception types are swallowed together; anything
    else propagating is a genuine bug.
    """
    try:
        jsonschema.validate(instance=obj1, schema=obj2)
    except (
        jsonschema.exceptions.ValidationError,
        jsonschema.exceptions.SchemaError,
    ):
        pass
|
||||
|
||||
|
||||
def main():
    """Instrument the target and hand control to the atheris fuzzer."""
    # NOTE: ``atheris`` is imported by the ``__main__`` guard below before
    # this function is called, which is what makes the bare name resolvable.
    atheris.instrument_all()
    atheris.Setup(
        sys.argv,
        # Hypothesis exposes a fuzz-harness entry point for @given tests.
        test_schemas.hypothesis.fuzz_one_input,
        enable_python_coverage=True,
    )
    atheris.Fuzz()
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Imported lazily so that merely importing this module (e.g. during test
    # collection) does not require atheris to be installed.
    import atheris
    main()
|
||||
907
.venv/lib/python3.10/site-packages/jsonschema/tests/test_cli.py
Normal file
907
.venv/lib/python3.10/site-packages/jsonschema/tests/test_cli.py
Normal file
@@ -0,0 +1,907 @@
|
||||
from contextlib import redirect_stderr, redirect_stdout
|
||||
from importlib import metadata
|
||||
from io import StringIO
|
||||
from json import JSONDecodeError
|
||||
from pathlib import Path
|
||||
from textwrap import dedent
|
||||
from unittest import TestCase
|
||||
import json
|
||||
import os
|
||||
import subprocess
|
||||
import sys
|
||||
import tempfile
|
||||
import warnings
|
||||
|
||||
from jsonschema import Draft4Validator, Draft202012Validator
|
||||
from jsonschema.exceptions import (
|
||||
SchemaError,
|
||||
ValidationError,
|
||||
_RefResolutionError,
|
||||
)
|
||||
from jsonschema.validators import _LATEST_VERSION, validate
|
||||
|
||||
with warnings.catch_warnings():
|
||||
warnings.simplefilter("ignore")
|
||||
from jsonschema import cli
|
||||
|
||||
|
||||
def fake_validator(*errors):
    """
    Create a fake validator class which emits the given error batches.

    Each positional argument is the list of errors which one successive
    ``iter_errors`` call will return; once all batches are exhausted,
    further calls return no errors.
    """
    # Reverse so successive calls can cheaply pop() from the end.
    errors = list(reversed(errors))

    class FakeValidator:
        def __init__(self, *args, **kwargs):
            pass

        def iter_errors(self, instance):
            if errors:
                return errors.pop()
            return []  # pragma: no cover

        @classmethod
        def check_schema(cls, schema):
            # Fix: first parameter of a @classmethod is conventionally
            # ``cls``, not ``self`` (it receives the class object).
            # Deliberately a no-op: the fake never rejects a schema.
            pass

    return FakeValidator
|
||||
|
||||
|
||||
def fake_open(all_contents):
    """Return an ``open``-alike reading from an in-memory path->text mapping."""
    def _opener(requested_path):
        text = all_contents.get(requested_path)
        if text is None:
            raise FileNotFoundError(requested_path)
        return StringIO(text)
    return _opener
|
||||
|
||||
|
||||
def _message_for(non_json):
|
||||
try:
|
||||
json.loads(non_json)
|
||||
except JSONDecodeError as error:
|
||||
return str(error)
|
||||
else: # pragma: no cover
|
||||
raise RuntimeError("Tried and failed to capture a JSON dump error.")
|
||||
|
||||
|
||||
class TestCLI(TestCase):
|
||||
def run_cli(
    self, argv, files=None, stdin=StringIO(), exit_code=0, **override,
):
    """
    Run the jsonschema CLI with stubbed file access and captured streams.

    Returns ``(stdout, stderr)`` after asserting the exit code matched.
    NOTE(review): the ``StringIO()`` default is evaluated once and shared
    across calls, but it is never written to here, so this is harmless.
    """
    arguments = cli.parse_args(argv)
    arguments.update(override)

    # The CLI reads files through the (injected) ``open`` name on the cli
    # module; ensure we are not clobbering a real attribute, stub it with
    # the in-memory fake, and always restore afterwards.
    self.assertFalse(hasattr(cli, "open"))
    cli.open = fake_open(files or {})
    try:
        stdout, stderr = StringIO(), StringIO()
        actual_exit_code = cli.run(
            arguments,
            stdin=stdin,
            stdout=stdout,
            stderr=stderr,
        )
    finally:
        del cli.open

    self.assertEqual(
        actual_exit_code, exit_code, msg=dedent(
            f"""
                Expected an exit code of {exit_code} != {actual_exit_code}.

                stdout: {stdout.getvalue()}

                stderr: {stderr.getvalue()}
            """,
        ),
    )
    return stdout.getvalue(), stderr.getvalue()
|
||||
|
||||
def assertOutputs(self, stdout="", stderr="", **kwargs):
    """Assert ``run_cli(**kwargs)`` emits exactly the dedented streams."""
    expected = (dedent(stdout), dedent(stderr))
    self.assertEqual(self.run_cli(**kwargs), expected)
|
||||
|
||||
def test_invalid_instance(self):
    # One stubbed error: the default (plain) output is one
    # "<instance>: <message>" line on stderr, and the CLI exits non-zero.
    error = ValidationError("I am an error!", instance=12)
    self.assertOutputs(
        files=dict(
            some_schema='{"does not": "matter since it is stubbed"}',
            some_instance=json.dumps(error.instance),
        ),
        validator=fake_validator([error]),

        argv=["-i", "some_instance", "some_schema"],

        exit_code=1,
        stderr="12: I am an error!\n",
    )
|
||||
|
||||
def test_invalid_instance_pretty_output(self):
    # Same single stubbed error, but "--output pretty" uses the framed
    # header/footer format naming the error type and the instance file.
    error = ValidationError("I am an error!", instance=12)
    self.assertOutputs(
        files=dict(
            some_schema='{"does not": "matter since it is stubbed"}',
            some_instance=json.dumps(error.instance),
        ),
        validator=fake_validator([error]),

        argv=["-i", "some_instance", "--output", "pretty", "some_schema"],

        exit_code=1,
        stderr="""\
            ===[ValidationError]===(some_instance)===

            I am an error!
            -----------------------------
        """,
    )
|
||||
|
||||
def test_invalid_instance_explicit_plain_output(self):
    # Explicit "--output plain" matches the default output format.
    error = ValidationError("I am an error!", instance=12)
    self.assertOutputs(
        files=dict(
            some_schema='{"does not": "matter since it is stubbed"}',
            some_instance=json.dumps(error.instance),
        ),
        validator=fake_validator([error]),

        argv=["--output", "plain", "-i", "some_instance", "some_schema"],

        exit_code=1,
        stderr="12: I am an error!\n",
    )
|
||||
|
||||
def test_invalid_instance_multiple_errors(self):
    # Two errors for one instance: each is reported on its own line.
    instance = 12
    first = ValidationError("First error", instance=instance)
    second = ValidationError("Second error", instance=instance)

    self.assertOutputs(
        files=dict(
            some_schema='{"does not": "matter since it is stubbed"}',
            some_instance=json.dumps(instance),
        ),
        validator=fake_validator([first, second]),

        argv=["-i", "some_instance", "some_schema"],

        exit_code=1,
        stderr="""\
            12: First error
            12: Second error
        """,
    )
|
||||
|
||||
def test_invalid_instance_multiple_errors_pretty_output(self):
    # Two errors for one instance with pretty output: each gets its own
    # framed section.
    instance = 12
    first = ValidationError("First error", instance=instance)
    second = ValidationError("Second error", instance=instance)

    self.assertOutputs(
        files=dict(
            some_schema='{"does not": "matter since it is stubbed"}',
            some_instance=json.dumps(instance),
        ),
        validator=fake_validator([first, second]),

        argv=["-i", "some_instance", "--output", "pretty", "some_schema"],

        exit_code=1,
        stderr="""\
            ===[ValidationError]===(some_instance)===

            First error
            -----------------------------
            ===[ValidationError]===(some_instance)===

            Second error
            -----------------------------
        """,
    )
|
||||
|
||||
def test_multiple_invalid_instances(self):
    """Errors from several -i instances are concatenated in argv order."""
    first_instance = 12
    first_errors = [
        ValidationError("An error", instance=first_instance),
        ValidationError("Another error", instance=first_instance),
    ]
    second_instance = "foo"
    second_errors = [ValidationError("BOOM", instance=second_instance)]

    self.assertOutputs(
        files=dict(
            some_schema='{"does not": "matter since it is stubbed"}',
            some_first_instance=json.dumps(first_instance),
            some_second_instance=json.dumps(second_instance),
        ),
        # fake_validator returns each error batch for successive instances.
        validator=fake_validator(first_errors, second_errors),

        argv=[
            "-i", "some_first_instance",
            "-i", "some_second_instance",
            "some_schema",
        ],

        exit_code=1,
        stderr="""\
12: An error
12: Another error
foo: BOOM
""",
    )
|
||||
|
||||
def test_multiple_invalid_instances_pretty_output(self):
    """Pretty output labels each banner with the originating file name."""
    first_instance = 12
    first_errors = [
        ValidationError("An error", instance=first_instance),
        ValidationError("Another error", instance=first_instance),
    ]
    second_instance = "foo"
    second_errors = [ValidationError("BOOM", instance=second_instance)]

    self.assertOutputs(
        files=dict(
            some_schema='{"does not": "matter since it is stubbed"}',
            some_first_instance=json.dumps(first_instance),
            some_second_instance=json.dumps(second_instance),
        ),
        validator=fake_validator(first_errors, second_errors),

        argv=[
            "--output", "pretty",
            "-i", "some_first_instance",
            "-i", "some_second_instance",
            "some_schema",
        ],

        exit_code=1,
        stderr="""\
===[ValidationError]===(some_first_instance)===

An error
-----------------------------
===[ValidationError]===(some_first_instance)===

Another error
-----------------------------
===[ValidationError]===(some_second_instance)===

BOOM
-----------------------------
""",
    )
|
||||
|
||||
def test_custom_error_format(self):
    """--error-format formats each error via str.format on the error object."""
    first_instance = 12
    first_errors = [
        ValidationError("An error", instance=first_instance),
        ValidationError("Another error", instance=first_instance),
    ]
    second_instance = "foo"
    second_errors = [ValidationError("BOOM", instance=second_instance)]

    self.assertOutputs(
        files=dict(
            some_schema='{"does not": "matter since it is stubbed"}',
            some_first_instance=json.dumps(first_instance),
            some_second_instance=json.dumps(second_instance),
        ),
        validator=fake_validator(first_errors, second_errors),

        argv=[
            # No newline in the format, so output arrives unseparated.
            "--error-format", ":{error.message}._-_.{error.instance}:",
            "-i", "some_first_instance",
            "-i", "some_second_instance",
            "some_schema",
        ],

        exit_code=1,
        stderr=":An error._-_.12::Another error._-_.12::BOOM._-_.foo:",
    )
|
||||
|
||||
def test_invalid_schema(self):
    """A schema invalid under its metaschema is itself reported as an error."""
    self.assertOutputs(
        files=dict(some_schema='{"type": 12}'),
        argv=["some_schema"],

        exit_code=1,
        stderr="""\
12: 12 is not valid under any of the given schemas
""",
    )
|
||||
|
||||
def test_invalid_schema_pretty_output(self):
    """Pretty output for schema errors matches what validate() itself raises."""
    schema = {"type": 12}

    # Capture the exact SchemaError message the library produces.
    with self.assertRaises(SchemaError) as e:
        validate(schema=schema, instance="")
    error = str(e.exception)

    self.assertOutputs(
        files=dict(some_schema=json.dumps(schema)),
        argv=["--output", "pretty", "some_schema"],

        exit_code=1,
        stderr=(
            "===[SchemaError]===(some_schema)===\n\n"
            # NOTE(review): error is already a str; str() here is redundant.
            + str(error)
            + "\n-----------------------------\n"
        ),
    )
|
||||
|
||||
def test_invalid_schema_multiple_errors(self):
    """Plain output shows only the 'best' schema error, not all of them."""
    self.assertOutputs(
        files=dict(some_schema='{"type": 12, "items": 57}'),
        argv=["some_schema"],

        exit_code=1,
        stderr="""\
57: 57 is not of type 'object', 'boolean'
""",
    )
|
||||
|
||||
def test_invalid_schema_multiple_errors_pretty_output(self):
    """Pretty output for a multiply-invalid schema mirrors validate()'s error."""
    schema = {"type": 12, "items": 57}

    with self.assertRaises(SchemaError) as e:
        validate(schema=schema, instance="")
    error = str(e.exception)

    self.assertOutputs(
        files=dict(some_schema=json.dumps(schema)),
        argv=["--output", "pretty", "some_schema"],

        exit_code=1,
        stderr=(
            "===[SchemaError]===(some_schema)===\n\n"
            + str(error)
            + "\n-----------------------------\n"
        ),
    )
|
||||
|
||||
def test_invalid_schema_with_invalid_instance(self):
    """
    "Validating" an instance that's invalid under an invalid schema
    just shows the schema error.
    """
    self.assertOutputs(
        files=dict(
            some_schema='{"type": 12, "minimum": 30}',
            # 13 < 30 would fail "minimum", but the schema error wins.
            some_instance="13",
        ),
        argv=["-i", "some_instance", "some_schema"],

        exit_code=1,
        stderr="""\
12: 12 is not valid under any of the given schemas
""",
    )
|
||||
|
||||
def test_invalid_schema_with_invalid_instance_pretty_output(self):
    """Pretty output also reports only the schema error in this case."""
    instance, schema = 13, {"type": 12, "minimum": 30}

    with self.assertRaises(SchemaError) as e:
        validate(schema=schema, instance=instance)
    error = str(e.exception)

    self.assertOutputs(
        files=dict(
            some_schema=json.dumps(schema),
            some_instance=json.dumps(instance),
        ),
        argv=["--output", "pretty", "-i", "some_instance", "some_schema"],

        exit_code=1,
        stderr=(
            "===[SchemaError]===(some_schema)===\n\n"
            + str(error)
            + "\n-----------------------------\n"
        ),
    )
|
||||
|
||||
def test_invalid_instance_continues_with_the_rest(self):
    """An unparseable instance doesn't stop validation of later instances."""
    self.assertOutputs(
        files=dict(
            some_schema='{"minimum": 30}',
            first_instance="not valid JSON!",
            second_instance="12",
        ),
        argv=[
            "-i", "first_instance",
            "-i", "second_instance",
            "some_schema",
        ],

        exit_code=1,
        stderr="""\
Failed to parse 'first_instance': {}
12: 12 is less than the minimum of 30
""".format(_message_for("not valid JSON!")),
    )
|
||||
|
||||
def test_custom_error_format_applies_to_schema_errors(self):
    """--error-format formats schema errors the same way as instance errors."""
    instance, schema = 13, {"type": 12, "minimum": 30}

    # Sanity check: this schema really does raise a SchemaError.
    with self.assertRaises(SchemaError):
        validate(schema=schema, instance=instance)

    self.assertOutputs(
        files=dict(some_schema=json.dumps(schema)),

        argv=[
            "--error-format", ":{error.message}._-_.{error.instance}:",
            "some_schema",
        ],

        exit_code=1,
        stderr=":12 is not valid under any of the given schemas._-_.12:",
    )
|
||||
|
||||
def test_instance_is_invalid_JSON(self):
    """A JSON-unparseable instance file produces a parse failure message."""
    instance = "not valid JSON!"

    self.assertOutputs(
        files=dict(some_schema="{}", some_instance=instance),
        argv=["-i", "some_instance", "some_schema"],

        exit_code=1,
        stderr=f"""\
Failed to parse 'some_instance': {_message_for(instance)}
""",
    )
|
||||
|
||||
def test_instance_is_invalid_JSON_pretty_output(self):
    """Pretty output shows a traceback banner for the bad instance only."""
    stdout, stderr = self.run_cli(
        files=dict(
            some_schema="{}",
            some_instance="not valid JSON!",
        ),

        argv=["--output", "pretty", "-i", "some_instance", "some_schema"],

        exit_code=1,
    )
    self.assertFalse(stdout)
    self.assertIn(
        "(some_instance)===\n\nTraceback (most recent call last):\n",
        stderr,
    )
    # The (valid) schema must not be blamed.
    self.assertNotIn("some_schema", stderr)
|
||||
|
||||
def test_instance_is_invalid_JSON_on_stdin(self):
    """Unparseable stdin is reported under the <stdin> pseudo-name."""
    instance = "not valid JSON!"

    self.assertOutputs(
        files=dict(some_schema="{}"),
        stdin=StringIO(instance),

        argv=["some_schema"],

        exit_code=1,
        stderr=f"""\
Failed to parse <stdin>: {_message_for(instance)}
""",
    )
|
||||
|
||||
def test_instance_is_invalid_JSON_on_stdin_pretty_output(self):
    """Pretty output for bad stdin shows a <stdin> traceback banner."""
    stdout, stderr = self.run_cli(
        files=dict(some_schema="{}"),
        stdin=StringIO("not valid JSON!"),

        argv=["--output", "pretty", "some_schema"],

        exit_code=1,
    )
    self.assertFalse(stdout)
    self.assertIn(
        "(<stdin>)===\n\nTraceback (most recent call last):\n",
        stderr,
    )
    self.assertNotIn("some_schema", stderr)
|
||||
|
||||
def test_schema_is_invalid_JSON(self):
    """A JSON-unparseable schema file produces a parse failure message."""
    schema = "not valid JSON!"

    self.assertOutputs(
        files=dict(some_schema=schema),

        argv=["some_schema"],

        exit_code=1,
        stderr=f"""\
Failed to parse 'some_schema': {_message_for(schema)}
""",
    )
|
||||
|
||||
def test_schema_is_invalid_JSON_pretty_output(self):
    """Pretty output shows a traceback banner naming the bad schema."""
    stdout, stderr = self.run_cli(
        files=dict(some_schema="not valid JSON!"),

        argv=["--output", "pretty", "some_schema"],

        exit_code=1,
    )
    self.assertFalse(stdout)
    self.assertIn(
        "(some_schema)===\n\nTraceback (most recent call last):\n",
        stderr,
    )
|
||||
|
||||
def test_schema_and_instance_are_both_invalid_JSON(self):
    """
    Only the schema error is reported, as we abort immediately.
    """
    schema, instance = "not valid JSON!", "also not valid JSON!"
    self.assertOutputs(
        files=dict(some_schema=schema, some_instance=instance),

        # Note: no -i here, so the instance file is never even read.
        argv=["some_schema"],

        exit_code=1,
        stderr=f"""\
Failed to parse 'some_schema': {_message_for(schema)}
""",
    )
|
||||
|
||||
def test_schema_and_instance_are_both_invalid_JSON_pretty_output(self):
    """
    Only the schema error is reported, as we abort immediately.
    """
    stdout, stderr = self.run_cli(
        files=dict(
            some_schema="not valid JSON!",
            some_instance="also not valid JSON!",
        ),

        argv=["--output", "pretty", "-i", "some_instance", "some_schema"],

        exit_code=1,
    )
    self.assertFalse(stdout)
    self.assertIn(
        "(some_schema)===\n\nTraceback (most recent call last):\n",
        stderr,
    )
    # The instance is never reached, so it must not appear.
    self.assertNotIn("some_instance", stderr)
|
||||
|
||||
def test_instance_does_not_exist(self):
    """A missing instance file is reported plainly, not as a traceback."""
    self.assertOutputs(
        files=dict(some_schema="{}"),
        argv=["-i", "nonexisting_instance", "some_schema"],

        exit_code=1,
        stderr="""\
'nonexisting_instance' does not exist.
""",
    )
|
||||
|
||||
def test_instance_does_not_exist_pretty_output(self):
    """Pretty output wraps the missing-file error in a FileNotFoundError banner."""
    self.assertOutputs(
        files=dict(some_schema="{}"),
        argv=[
            "--output", "pretty",
            "-i", "nonexisting_instance",
            "some_schema",
        ],

        exit_code=1,
        stderr="""\
===[FileNotFoundError]===(nonexisting_instance)===

'nonexisting_instance' does not exist.
-----------------------------
""",
    )
|
||||
|
||||
def test_schema_does_not_exist(self):
    """A missing schema file is reported plainly on stderr."""
    expected = "'nonexisting_schema' does not exist.\n"
    self.assertOutputs(
        argv=["nonexisting_schema"],
        exit_code=1,
        stderr=expected,
    )
|
||||
|
||||
def test_schema_does_not_exist_pretty_output(self):
    """Pretty output for a missing schema uses the FileNotFoundError banner."""
    self.assertOutputs(
        argv=["--output", "pretty", "nonexisting_schema"],

        exit_code=1,
        stderr="""\
===[FileNotFoundError]===(nonexisting_schema)===

'nonexisting_schema' does not exist.
-----------------------------
""",
    )
|
||||
|
||||
def test_neither_instance_nor_schema_exist(self):
    """When both files are missing, only the schema failure is reported."""
    expected = "'nonexisting_schema' does not exist.\n"
    self.assertOutputs(
        argv=["-i", "nonexisting_instance", "nonexisting_schema"],
        exit_code=1,
        stderr=expected,
    )
|
||||
|
||||
def test_neither_instance_nor_schema_exist_pretty_output(self):
    """Pretty output likewise mentions only the missing schema."""
    self.assertOutputs(
        argv=[
            "--output", "pretty",
            "-i", "nonexisting_instance",
            "nonexisting_schema",
        ],

        exit_code=1,
        stderr="""\
===[FileNotFoundError]===(nonexisting_schema)===

'nonexisting_schema' does not exist.
-----------------------------
""",
    )
|
||||
|
||||
def test_successful_validation(self):
    """A valid instance against a valid schema is silent and exits 0."""
    fixture = dict(some_schema="{}", some_instance="{}")
    self.assertOutputs(
        files=fixture,
        argv=["-i", "some_instance", "some_schema"],
        stdout="",
        stderr="",
    )
|
||||
|
||||
def test_successful_validation_pretty_output(self):
    """Pretty output announces success per validated instance."""
    fixture = dict(some_schema="{}", some_instance="{}")
    self.assertOutputs(
        files=fixture,
        argv=["--output", "pretty", "-i", "some_instance", "some_schema"],
        stdout="===[SUCCESS]===(some_instance)===\n",
        stderr="",
    )
|
||||
|
||||
def test_successful_validation_of_stdin(self):
    """With no -i arguments, the instance is read (silently) from stdin."""
    piped = StringIO("{}")
    self.assertOutputs(
        files=dict(some_schema="{}"),
        stdin=piped,
        argv=["some_schema"],
        stdout="",
        stderr="",
    )
|
||||
|
||||
def test_successful_validation_of_stdin_pretty_output(self):
    """Pretty output labels a stdin-sourced instance as <stdin>."""
    piped = StringIO("{}")
    self.assertOutputs(
        files=dict(some_schema="{}"),
        stdin=piped,
        argv=["--output", "pretty", "some_schema"],
        stdout="===[SUCCESS]===(<stdin>)===\n",
        stderr="",
    )
|
||||
|
||||
def test_successful_validation_of_just_the_schema(self):
    """A well-formed schema validates cleanly with no output."""
    fixture = dict(some_schema="{}", some_instance="{}")
    self.assertOutputs(
        files=fixture,
        argv=["-i", "some_instance", "some_schema"],
        stdout="",
        stderr="",
    )
|
||||
|
||||
def test_successful_validation_of_just_the_schema_pretty_output(self):
    """Pretty output reports success for the validated instance."""
    fixture = dict(some_schema="{}", some_instance="{}")
    self.assertOutputs(
        files=fixture,
        argv=["--output", "pretty", "-i", "some_instance", "some_schema"],
        stdout="===[SUCCESS]===(some_instance)===\n",
        stderr="",
    )
|
||||
|
||||
def test_successful_validation_via_explicit_base_uri(self):
    """--base-uri lets a relative $ref resolve against a real directory."""
    # delete=False + close so the path can be re-written on every platform.
    ref_schema_file = tempfile.NamedTemporaryFile(delete=False)
    ref_schema_file.close()
    self.addCleanup(os.remove, ref_schema_file.name)

    ref_path = Path(ref_schema_file.name)
    ref_path.write_text('{"definitions": {"num": {"type": "integer"}}}')

    schema = f'{{"$ref": "{ref_path.name}#/definitions/num"}}'

    self.assertOutputs(
        files=dict(some_schema=schema, some_instance="1"),
        argv=[
            "-i", "some_instance",
            # Trailing slash makes the parent directory a proper base URI.
            "--base-uri", ref_path.parent.as_uri() + "/",
            "some_schema",
        ],
        stdout="",
        stderr="",
    )
|
||||
|
||||
def test_unsuccessful_validation_via_explicit_base_uri(self):
    """Errors from a $ref resolved via --base-uri are reported normally."""
    ref_schema_file = tempfile.NamedTemporaryFile(delete=False)
    ref_schema_file.close()
    self.addCleanup(os.remove, ref_schema_file.name)

    ref_path = Path(ref_schema_file.name)
    ref_path.write_text('{"definitions": {"num": {"type": "integer"}}}')

    schema = f'{{"$ref": "{ref_path.name}#/definitions/num"}}'

    self.assertOutputs(
        # '"1"' is a JSON string, not an integer, so validation fails.
        files=dict(some_schema=schema, some_instance='"1"'),
        argv=[
            "-i", "some_instance",
            "--base-uri", ref_path.parent.as_uri() + "/",
            "some_schema",
        ],
        exit_code=1,
        stdout="",
        stderr="1: '1' is not of type 'integer'\n",
    )
|
||||
|
||||
def test_nonexistent_file_with_explicit_base_uri(self):
    """A $ref to a missing file surfaces as a resolution error."""
    schema = '{"$ref": "someNonexistentFile.json#definitions/num"}'
    instance = "1"

    with self.assertRaises(_RefResolutionError) as e:
        self.assertOutputs(
            files=dict(
                some_schema=schema,
                some_instance=instance,
            ),
            argv=[
                "-i", "some_instance",
                "--base-uri", Path.cwd().as_uri(),
                "some_schema",
            ],
        )
    error = str(e.exception)
    # os.sep keeps the path assertion platform-independent.
    self.assertIn(f"{os.sep}someNonexistentFile.json'", error)
|
||||
|
||||
def test_invalid_explicit_base_uri(self):
    """An unusable --base-uri leaves the $ref unresolvable."""
    schema = '{"$ref": "foo.json#definitions/num"}'
    instance = "1"

    with self.assertRaises(_RefResolutionError) as e:
        self.assertOutputs(
            files=dict(
                some_schema=schema,
                some_instance=instance,
            ),
            argv=[
                "-i", "some_instance",
                "--base-uri", "not@UR1",
                "some_schema",
            ],
        )
    error = str(e.exception)
    self.assertEqual(
        error, "unknown url type: 'foo.json'",
    )
|
||||
|
||||
def test_it_validates_using_the_latest_validator_when_unspecified(self):
    """Without $schema or --validator, the newest draft is used."""
    # There isn't a better way now I can think of to ensure that the
    # latest version was used, given that the call to validator_for
    # is hidden inside the CLI, so guard that that's the case, and
    # this test will have to be updated when versions change until
    # we can think of a better way to ensure this behavior.
    self.assertIs(Draft202012Validator, _LATEST_VERSION)

    self.assertOutputs(
        # "const" only validates on drafts >= 6, proving a modern draft ran.
        files=dict(some_schema='{"const": "check"}', some_instance='"a"'),
        argv=["-i", "some_instance", "some_schema"],
        exit_code=1,
        stdout="",
        stderr="a: 'check' was expected\n",
    )
|
||||
|
||||
def test_it_validates_using_draft7_when_specified(self):
    """
    Specifically, `const` validation applies for Draft 7.
    """
    schema = """
        {
            "$schema": "http://json-schema.org/draft-07/schema#",
            "const": "check"
        }
    """
    instance = '"foo"'
    self.assertOutputs(
        files=dict(some_schema=schema, some_instance=instance),
        argv=["-i", "some_instance", "some_schema"],
        exit_code=1,
        stdout="",
        stderr="foo: 'check' was expected\n",
    )
|
||||
|
||||
def test_it_validates_using_draft4_when_specified(self):
    """
    Specifically, `const` validation *does not* apply for Draft 4.
    """
    schema = """
        {
            "$schema": "http://json-schema.org/draft-04/schema#",
            "const": "check"
        }
    """
    instance = '"foo"'
    # No error output: Draft 4 silently ignores the unknown "const" keyword.
    self.assertOutputs(
        files=dict(some_schema=schema, some_instance=instance),
        argv=["-i", "some_instance", "some_schema"],
        stdout="",
        stderr="",
    )
|
||||
|
||||
|
||||
class TestParser(TestCase):
    """Tests for cli.parse_args: validator resolution and argument errors."""

    # A stub validator class importable by its fully qualified name below.
    FakeValidator = fake_validator()

    def test_find_validator_by_fully_qualified_object_name(self):
        """--validator accepts a dotted import path to any validator class."""
        arguments = cli.parse_args(
            [
                "--validator",
                "jsonschema.tests.test_cli.TestParser.FakeValidator",
                "--instance", "mem://some/instance",
                "mem://some/schema",
            ],
        )
        self.assertIs(arguments["validator"], self.FakeValidator)

    def test_find_validator_in_jsonschema(self):
        """A bare class name is looked up inside the jsonschema package."""
        arguments = cli.parse_args(
            [
                "--validator", "Draft4Validator",
                "--instance", "mem://some/instance",
                "mem://some/schema",
            ],
        )
        self.assertIs(arguments["validator"], Draft4Validator)

    def cli_output_for(self, *argv):
        """Run parse_args expecting SystemExit; return captured (out, err)."""
        stdout, stderr = StringIO(), StringIO()
        with redirect_stdout(stdout), redirect_stderr(stderr):  # noqa: SIM117
            with self.assertRaises(SystemExit):
                cli.parse_args(argv)
        return stdout.getvalue(), stderr.getvalue()

    def test_unknown_output(self):
        """An unknown --output value is rejected by argparse."""
        stdout, stderr = self.cli_output_for(
            "--output", "foo",
            "mem://some/schema",
        )
        self.assertIn("invalid choice: 'foo'", stderr)
        self.assertFalse(stdout)

    def test_useless_error_format(self):
        """--error-format is only meaningful for plain output."""
        stdout, stderr = self.cli_output_for(
            "--output", "pretty",
            "--error-format", "foo",
            "mem://some/schema",
        )
        self.assertIn(
            "--error-format can only be used with --output plain",
            stderr,
        )
        self.assertFalse(stdout)
|
||||
|
||||
|
||||
class TestCLIIntegration(TestCase):
    """Smoke tests that shell out to a real interpreter / pip."""

    def test_license(self):
        # The installed distribution metadata should advertise the MIT license.
        output = subprocess.check_output(
            [sys.executable, "-m", "pip", "show", "jsonschema"],
            stderr=subprocess.STDOUT,
        )
        self.assertIn(b"License: MIT", output)

    def test_version(self):
        # -W ignore keeps warnings out of the captured combined output.
        version = subprocess.check_output(
            [sys.executable, "-W", "ignore", "-m", "jsonschema", "--version"],
            stderr=subprocess.STDOUT,
        )
        version = version.decode("utf-8").strip()
        self.assertEqual(version, metadata.version("jsonschema"))

    def test_no_arguments_shows_usage_notes(self):
        # Running with no arguments should behave exactly like --help.
        output = subprocess.check_output(
            [sys.executable, "-m", "jsonschema"],
            stderr=subprocess.STDOUT,
        )
        output_for_help = subprocess.check_output(
            [sys.executable, "-m", "jsonschema", "--help"],
            stderr=subprocess.STDOUT,
        )
        self.assertEqual(output, output_for_help)
|
||||
@@ -0,0 +1,432 @@
|
||||
from contextlib import contextmanager
|
||||
from io import BytesIO
|
||||
from unittest import TestCase, mock
|
||||
import importlib.metadata
|
||||
import json
|
||||
import subprocess
|
||||
import sys
|
||||
import urllib.request
|
||||
|
||||
import referencing.exceptions
|
||||
|
||||
from jsonschema import FormatChecker, exceptions, protocols, validators
|
||||
|
||||
|
||||
class TestDeprecations(TestCase):
|
||||
def test_version(self):
    """
    As of v4.0.0, __version__ is deprecated in favor of importlib.metadata.
    """
    # The package-level __getattr__ answers this attribute dynamically and
    # warns with stacklevel=2, which is why w.filename is this test file.
    message = "Accessing jsonschema.__version__ is deprecated"
    with self.assertWarnsRegex(DeprecationWarning, message) as w:
        from jsonschema import __version__

    self.assertEqual(__version__, importlib.metadata.version("jsonschema"))
    self.assertEqual(w.filename, __file__)
|
||||
|
||||
def test_validators_ErrorTree(self):
    """
    As of v4.0.0, importing ErrorTree from jsonschema.validators is
    deprecated in favor of doing so from jsonschema.exceptions.
    """
    message = "Importing ErrorTree from jsonschema.validators is "
    with self.assertWarnsRegex(DeprecationWarning, message) as w:
        from jsonschema.validators import ErrorTree

    # The deprecated alias must still be the real class.
    self.assertEqual(ErrorTree, exceptions.ErrorTree)
    self.assertEqual(w.filename, __file__)
|
||||
|
||||
def test_import_ErrorTree(self):
    """
    As of v4.18.0, importing ErrorTree from the package root is
    deprecated in favor of doing so from jsonschema.exceptions.
    """
    message = "Importing ErrorTree directly from the jsonschema package "
    with self.assertWarnsRegex(DeprecationWarning, message) as w:
        from jsonschema import ErrorTree

    # Still the same object, just reachable via a deprecated path.
    self.assertEqual(ErrorTree, exceptions.ErrorTree)
    self.assertEqual(w.filename, __file__)
|
||||
|
||||
def test_ErrorTree_setitem(self):
    """
    As of v4.20.0, setting items on an ErrorTree is deprecated.
    """
    e = exceptions.ValidationError("some error", path=["foo"])
    tree = exceptions.ErrorTree()
    subtree = exceptions.ErrorTree(errors=[e])

    message = "ErrorTree.__setitem__ is "
    with self.assertWarnsRegex(DeprecationWarning, message) as w:
        tree["foo"] = subtree

    # The assignment still takes effect despite the warning.
    self.assertEqual(tree["foo"], subtree)
    self.assertEqual(w.filename, __file__)
|
||||
|
||||
def test_import_FormatError(self):
    """
    As of v4.18.0, importing FormatError from the package root is
    deprecated in favor of doing so from jsonschema.exceptions.
    """
    message = "Importing FormatError directly from the jsonschema package "
    with self.assertWarnsRegex(DeprecationWarning, message) as w:
        from jsonschema import FormatError

    self.assertEqual(FormatError, exceptions.FormatError)
    # filename attribution proves the warning targets the importer.
    self.assertEqual(w.filename, __file__)
|
||||
|
||||
def test_import_Validator(self):
    """
    As of v4.19.0, importing Validator from the package root is
    deprecated in favor of doing so from jsonschema.protocols.
    """
    message = "Importing Validator directly from the jsonschema package "
    with self.assertWarnsRegex(DeprecationWarning, message) as w:
        from jsonschema import Validator

    self.assertEqual(Validator, protocols.Validator)
    self.assertEqual(w.filename, __file__)
|
||||
|
||||
def test_validators_validators(self):
    """
    As of v4.0.0, accessing jsonschema.validators.validators is
    deprecated.
    """
    message = "Accessing jsonschema.validators.validators is deprecated"
    with self.assertWarnsRegex(DeprecationWarning, message) as w:
        value = validators.validators

    # The deprecated attribute still aliases the private registry.
    self.assertEqual(value, validators._VALIDATORS)
    self.assertEqual(w.filename, __file__)
|
||||
|
||||
def test_validators_meta_schemas(self):
    """
    As of v4.0.0, accessing jsonschema.validators.meta_schemas is
    deprecated.
    """
    message = "Accessing jsonschema.validators.meta_schemas is deprecated"
    with self.assertWarnsRegex(DeprecationWarning, message) as w:
        value = validators.meta_schemas

    self.assertEqual(value, validators._META_SCHEMAS)
    self.assertEqual(w.filename, __file__)
|
||||
|
||||
def test_RefResolver_in_scope(self):
    """
    As of v4.0.0, RefResolver.in_scope is deprecated.
    """
    resolver = validators._RefResolver.from_schema({})
    message = "jsonschema.RefResolver.in_scope is deprecated "
    # The context manager must still work while emitting the warning.
    with self.assertWarnsRegex(DeprecationWarning, message) as w:  # noqa: SIM117
        with resolver.in_scope("foo"):
            pass

    self.assertEqual(w.filename, __file__)
|
||||
|
||||
def test_Validator_is_valid_two_arguments(self):
    """
    As of v4.0.0, calling is_valid with two arguments (to provide a
    different schema) is deprecated.
    """
    validator = validators.Draft7Validator({})
    message = "Passing a schema to Validator.is_valid is deprecated "
    with self.assertWarnsRegex(DeprecationWarning, message) as w:
        result = validator.is_valid("foo", {"type": "number"})

    # The override schema is still honored: "foo" is not a number.
    self.assertFalse(result)
    self.assertEqual(w.filename, __file__)
|
||||
|
||||
def test_Validator_iter_errors_two_arguments(self):
    """
    As of v4.0.0, calling iter_errors with two arguments (to provide a
    different schema) is deprecated.
    """
    validator = validators.Draft7Validator({})
    message = "Passing a schema to Validator.iter_errors is deprecated "
    with self.assertWarnsRegex(DeprecationWarning, message) as w:
        # Tuple-unpacking asserts exactly one error is produced.
        error, = validator.iter_errors("foo", {"type": "number"})

    self.assertEqual(error.validator, "type")
    self.assertEqual(w.filename, __file__)
|
||||
|
||||
def test_Validator_resolver(self):
    """
    As of v4.18.0, accessing Validator.resolver is deprecated.
    """
    validator = validators.Draft7Validator({})
    message = "Accessing Draft7Validator.resolver is "
    with self.assertWarnsRegex(DeprecationWarning, message) as w:
        # The attribute still returns a working (legacy) resolver.
        self.assertIsInstance(validator.resolver, validators._RefResolver)

    self.assertEqual(w.filename, __file__)
|
||||
|
||||
def test_RefResolver(self):
    """
    As of v4.18.0, RefResolver is fully deprecated.
    """
    message = "jsonschema.RefResolver is deprecated"
    # Both the package root and jsonschema.validators must warn.
    with self.assertWarnsRegex(DeprecationWarning, message) as w:
        from jsonschema import RefResolver
    self.assertEqual(w.filename, __file__)

    with self.assertWarnsRegex(DeprecationWarning, message) as w:
        from jsonschema.validators import RefResolver  # noqa: F401, F811
    self.assertEqual(w.filename, __file__)
|
||||
|
||||
def test_RefResolutionError(self):
    """
    As of v4.18.0, RefResolutionError is deprecated in favor of directly
    catching errors from the referencing library.
    """
    message = "jsonschema.exceptions.RefResolutionError is deprecated"
    with self.assertWarnsRegex(DeprecationWarning, message) as w:
        from jsonschema import RefResolutionError

    self.assertEqual(RefResolutionError, exceptions._RefResolutionError)
    self.assertEqual(w.filename, __file__)

    # The same deprecation fires when importing from jsonschema.exceptions.
    with self.assertWarnsRegex(DeprecationWarning, message) as w:
        from jsonschema.exceptions import RefResolutionError

    self.assertEqual(RefResolutionError, exceptions._RefResolutionError)
    self.assertEqual(w.filename, __file__)
|
||||
|
||||
def test_catching_Unresolvable_directly(self):
    """
    This behavior is the intended behavior (i.e. it's not deprecated), but
    given we do "tricksy" things in the iterim to wrap exceptions in a
    multiple inheritance subclass, we need to be extra sure it works and
    stays working.
    """
    validator = validators.Draft202012Validator({"$ref": "urn:nothing"})

    with self.assertRaises(referencing.exceptions.Unresolvable) as e:
        validator.validate(12)

    # The wrapped exception compares equal to a plain referencing one.
    expected = referencing.exceptions.Unresolvable(ref="urn:nothing")
    self.assertEqual(
        (e.exception, str(e.exception)),
        (expected, "Unresolvable: urn:nothing"),
    )
|
||||
|
||||
def test_catching_Unresolvable_via_RefResolutionError(self):
    """
    Until RefResolutionError is removed, it is still possible to catch
    exceptions from reference resolution using it, even though they may
    have been raised by referencing.
    """
    with self.assertWarns(DeprecationWarning):
        from jsonschema import RefResolutionError

    validator = validators.Draft202012Validator({"$ref": "urn:nothing"})

    # Same failure is catchable both as Unresolvable...
    with self.assertRaises(referencing.exceptions.Unresolvable) as u:
        validator.validate(12)

    # ...and as the deprecated RefResolutionError.
    with self.assertRaises(RefResolutionError) as e:
        validator.validate(12)

    self.assertEqual(
        (e.exception, str(e.exception)),
        (u.exception, "Unresolvable: urn:nothing"),
    )
|
||||
|
||||
def test_WrappedReferencingError_hashability(self):
    """
    Ensure the wrapped referencing errors are hashable when possible.
    """
    with self.assertWarns(DeprecationWarning):
        from jsonschema import RefResolutionError

    validator = validators.Draft202012Validator({"$ref": "urn:nothing"})

    with self.assertRaises(referencing.exceptions.Unresolvable) as u:
        validator.validate(12)

    with self.assertRaises(RefResolutionError) as e:
        validator.validate(12)

    # Set membership exercises both __hash__ and __eq__ in each direction.
    self.assertIn(e.exception, {u.exception})
    self.assertIn(u.exception, {e.exception})
|
||||
|
||||
def test_Validator_subclassing(self):
    """
    As of v4.12.0, subclassing a validator class produces an explicit
    deprecation warning.

    This was never intended to be public API (and some comments over the
    years in issues said so, but obviously that's not a great way to make
    sure it's followed).

    A future version will explicitly raise an error.
    """
    message = "Subclassing validator classes is "
    with self.assertWarnsRegex(DeprecationWarning, message) as w:
        class Subclass(validators.Draft202012Validator):
            pass

    self.assertEqual(w.filename, __file__)

    # Dynamically created validator classes must warn on subclassing too.
    with self.assertWarnsRegex(DeprecationWarning, message) as w:
        class AnotherSubclass(validators.create(meta_schema={})):
            pass
|
||||
|
||||
def test_FormatChecker_cls_checks(self):
|
||||
"""
|
||||
As of v4.14.0, FormatChecker.cls_checks is deprecated without
|
||||
replacement.
|
||||
"""
|
||||
|
||||
self.addCleanup(FormatChecker.checkers.pop, "boom", None)
|
||||
|
||||
message = "FormatChecker.cls_checks "
|
||||
with self.assertWarnsRegex(DeprecationWarning, message) as w:
|
||||
FormatChecker.cls_checks("boom")
|
||||
|
||||
self.assertEqual(w.filename, __file__)
|
||||
|
||||
def test_draftN_format_checker(self):
|
||||
"""
|
||||
As of v4.16.0, accessing jsonschema.draftn_format_checker is deprecated
|
||||
in favor of Validator.FORMAT_CHECKER.
|
||||
"""
|
||||
|
||||
message = "Accessing jsonschema.draft202012_format_checker is "
|
||||
with self.assertWarnsRegex(DeprecationWarning, message) as w:
|
||||
from jsonschema import draft202012_format_checker
|
||||
|
||||
self.assertIs(
|
||||
draft202012_format_checker,
|
||||
validators.Draft202012Validator.FORMAT_CHECKER,
|
||||
)
|
||||
self.assertEqual(w.filename, __file__)
|
||||
|
||||
message = "Accessing jsonschema.draft201909_format_checker is "
|
||||
with self.assertWarnsRegex(DeprecationWarning, message) as w:
|
||||
from jsonschema import draft201909_format_checker
|
||||
|
||||
self.assertIs(
|
||||
draft201909_format_checker,
|
||||
validators.Draft201909Validator.FORMAT_CHECKER,
|
||||
)
|
||||
self.assertEqual(w.filename, __file__)
|
||||
|
||||
message = "Accessing jsonschema.draft7_format_checker is "
|
||||
with self.assertWarnsRegex(DeprecationWarning, message) as w:
|
||||
from jsonschema import draft7_format_checker
|
||||
|
||||
self.assertIs(
|
||||
draft7_format_checker,
|
||||
validators.Draft7Validator.FORMAT_CHECKER,
|
||||
)
|
||||
self.assertEqual(w.filename, __file__)
|
||||
|
||||
message = "Accessing jsonschema.draft6_format_checker is "
|
||||
with self.assertWarnsRegex(DeprecationWarning, message) as w:
|
||||
from jsonschema import draft6_format_checker
|
||||
|
||||
self.assertIs(
|
||||
draft6_format_checker,
|
||||
validators.Draft6Validator.FORMAT_CHECKER,
|
||||
)
|
||||
self.assertEqual(w.filename, __file__)
|
||||
|
||||
message = "Accessing jsonschema.draft4_format_checker is "
|
||||
with self.assertWarnsRegex(DeprecationWarning, message) as w:
|
||||
from jsonschema import draft4_format_checker
|
||||
|
||||
self.assertIs(
|
||||
draft4_format_checker,
|
||||
validators.Draft4Validator.FORMAT_CHECKER,
|
||||
)
|
||||
self.assertEqual(w.filename, __file__)
|
||||
|
||||
message = "Accessing jsonschema.draft3_format_checker is "
|
||||
with self.assertWarnsRegex(DeprecationWarning, message) as w:
|
||||
from jsonschema import draft3_format_checker
|
||||
|
||||
self.assertIs(
|
||||
draft3_format_checker,
|
||||
validators.Draft3Validator.FORMAT_CHECKER,
|
||||
)
|
||||
self.assertEqual(w.filename, __file__)
|
||||
|
||||
with self.assertRaises(ImportError):
|
||||
from jsonschema import draft1234_format_checker # noqa: F401
|
||||
|
||||
def test_import_cli(self):
|
||||
"""
|
||||
As of v4.17.0, importing jsonschema.cli is deprecated.
|
||||
"""
|
||||
|
||||
message = "The jsonschema CLI is deprecated and will be removed "
|
||||
with self.assertWarnsRegex(DeprecationWarning, message) as w:
|
||||
import jsonschema.cli
|
||||
importlib.reload(jsonschema.cli)
|
||||
|
||||
self.assertEqual(w.filename, importlib.__file__)
|
||||
|
||||
def test_cli(self):
|
||||
"""
|
||||
As of v4.17.0, the jsonschema CLI is deprecated.
|
||||
"""
|
||||
|
||||
process = subprocess.run(
|
||||
[sys.executable, "-m", "jsonschema"],
|
||||
capture_output=True,
|
||||
check=True,
|
||||
)
|
||||
self.assertIn(b"The jsonschema CLI is deprecated ", process.stderr)
|
||||
|
||||
def test_automatic_remote_retrieval(self):
|
||||
"""
|
||||
Automatic retrieval of remote references is deprecated as of v4.18.0.
|
||||
"""
|
||||
ref = "http://bar#/$defs/baz"
|
||||
schema = {"$defs": {"baz": {"type": "integer"}}}
|
||||
|
||||
if "requests" in sys.modules: # pragma: no cover
|
||||
self.addCleanup(
|
||||
sys.modules.__setitem__, "requests", sys.modules["requests"],
|
||||
)
|
||||
sys.modules["requests"] = None
|
||||
|
||||
@contextmanager
|
||||
def fake_urlopen(request):
|
||||
self.assertIsInstance(request, urllib.request.Request)
|
||||
self.assertEqual(request.full_url, "http://bar")
|
||||
|
||||
# Ha ha urllib.request.Request "normalizes" header names and
|
||||
# Request.get_header does not also normalize them...
|
||||
(header, value), = request.header_items()
|
||||
self.assertEqual(header.lower(), "user-agent")
|
||||
self.assertEqual(
|
||||
value, "python-jsonschema (deprecated $ref resolution)",
|
||||
)
|
||||
yield BytesIO(json.dumps(schema).encode("utf8"))
|
||||
|
||||
validator = validators.Draft202012Validator({"$ref": ref})
|
||||
|
||||
message = "Automatically retrieving remote references "
|
||||
patch = mock.patch.object(urllib.request, "urlopen", new=fake_urlopen)
|
||||
|
||||
with patch, self.assertWarnsRegex(DeprecationWarning, message):
|
||||
self.assertEqual(
|
||||
(validator.is_valid({}), validator.is_valid(37)),
|
||||
(False, True),
|
||||
)
|
||||
@@ -0,0 +1,702 @@
|
||||
from unittest import TestCase
|
||||
import textwrap
|
||||
|
||||
from jsonschema import exceptions
|
||||
from jsonschema.validators import _LATEST_VERSION
|
||||
|
||||
|
||||
class TestBestMatch(TestCase):
|
||||
def best_match_of(self, instance, schema):
|
||||
errors = list(_LATEST_VERSION(schema).iter_errors(instance))
|
||||
msg = f"No errors found for {instance} under {schema!r}!"
|
||||
self.assertTrue(errors, msg=msg)
|
||||
|
||||
best = exceptions.best_match(iter(errors))
|
||||
reversed_best = exceptions.best_match(reversed(errors))
|
||||
|
||||
self.assertEqual(
|
||||
best._contents(),
|
||||
reversed_best._contents(),
|
||||
f"No consistent best match!\nGot: {best}\n\nThen: {reversed_best}",
|
||||
)
|
||||
return best
|
||||
|
||||
def test_shallower_errors_are_better_matches(self):
|
||||
schema = {
|
||||
"properties": {
|
||||
"foo": {
|
||||
"minProperties": 2,
|
||||
"properties": {"bar": {"type": "object"}},
|
||||
},
|
||||
},
|
||||
}
|
||||
best = self.best_match_of(instance={"foo": {"bar": []}}, schema=schema)
|
||||
self.assertEqual(best.validator, "minProperties")
|
||||
|
||||
def test_oneOf_and_anyOf_are_weak_matches(self):
|
||||
"""
|
||||
A property you *must* match is probably better than one you have to
|
||||
match a part of.
|
||||
"""
|
||||
|
||||
schema = {
|
||||
"minProperties": 2,
|
||||
"anyOf": [{"type": "string"}, {"type": "number"}],
|
||||
"oneOf": [{"type": "string"}, {"type": "number"}],
|
||||
}
|
||||
best = self.best_match_of(instance={}, schema=schema)
|
||||
self.assertEqual(best.validator, "minProperties")
|
||||
|
||||
def test_if_the_most_relevant_error_is_anyOf_it_is_traversed(self):
|
||||
"""
|
||||
If the most relevant error is an anyOf, then we traverse its context
|
||||
and select the otherwise *least* relevant error, since in this case
|
||||
that means the most specific, deep, error inside the instance.
|
||||
|
||||
I.e. since only one of the schemas must match, we look for the most
|
||||
relevant one.
|
||||
"""
|
||||
|
||||
schema = {
|
||||
"properties": {
|
||||
"foo": {
|
||||
"anyOf": [
|
||||
{"type": "string"},
|
||||
{"properties": {"bar": {"type": "array"}}},
|
||||
],
|
||||
},
|
||||
},
|
||||
}
|
||||
best = self.best_match_of(instance={"foo": {"bar": 12}}, schema=schema)
|
||||
self.assertEqual(best.validator_value, "array")
|
||||
|
||||
def test_no_anyOf_traversal_for_equally_relevant_errors(self):
|
||||
"""
|
||||
We don't traverse into an anyOf (as above) if all of its context errors
|
||||
seem to be equally "wrong" against the instance.
|
||||
"""
|
||||
|
||||
schema = {
|
||||
"anyOf": [
|
||||
{"type": "string"},
|
||||
{"type": "integer"},
|
||||
{"type": "object"},
|
||||
],
|
||||
}
|
||||
best = self.best_match_of(instance=[], schema=schema)
|
||||
self.assertEqual(best.validator, "anyOf")
|
||||
|
||||
def test_anyOf_traversal_for_single_equally_relevant_error(self):
|
||||
"""
|
||||
We *do* traverse anyOf with a single nested error, even though it is
|
||||
vacuously equally relevant to itself.
|
||||
"""
|
||||
|
||||
schema = {
|
||||
"anyOf": [
|
||||
{"type": "string"},
|
||||
],
|
||||
}
|
||||
best = self.best_match_of(instance=[], schema=schema)
|
||||
self.assertEqual(best.validator, "type")
|
||||
|
||||
def test_anyOf_traversal_for_single_sibling_errors(self):
|
||||
"""
|
||||
We *do* traverse anyOf with a single subschema that fails multiple
|
||||
times (e.g. on multiple items).
|
||||
"""
|
||||
|
||||
schema = {
|
||||
"anyOf": [
|
||||
{"items": {"const": 37}},
|
||||
],
|
||||
}
|
||||
best = self.best_match_of(instance=[12, 12], schema=schema)
|
||||
self.assertEqual(best.validator, "const")
|
||||
|
||||
def test_anyOf_traversal_for_non_type_matching_sibling_errors(self):
|
||||
"""
|
||||
We *do* traverse anyOf with multiple subschemas when one does not type
|
||||
match.
|
||||
"""
|
||||
|
||||
schema = {
|
||||
"anyOf": [
|
||||
{"type": "object"},
|
||||
{"items": {"const": 37}},
|
||||
],
|
||||
}
|
||||
best = self.best_match_of(instance=[12, 12], schema=schema)
|
||||
self.assertEqual(best.validator, "const")
|
||||
|
||||
def test_if_the_most_relevant_error_is_oneOf_it_is_traversed(self):
|
||||
"""
|
||||
If the most relevant error is an oneOf, then we traverse its context
|
||||
and select the otherwise *least* relevant error, since in this case
|
||||
that means the most specific, deep, error inside the instance.
|
||||
|
||||
I.e. since only one of the schemas must match, we look for the most
|
||||
relevant one.
|
||||
"""
|
||||
|
||||
schema = {
|
||||
"properties": {
|
||||
"foo": {
|
||||
"oneOf": [
|
||||
{"type": "string"},
|
||||
{"properties": {"bar": {"type": "array"}}},
|
||||
],
|
||||
},
|
||||
},
|
||||
}
|
||||
best = self.best_match_of(instance={"foo": {"bar": 12}}, schema=schema)
|
||||
self.assertEqual(best.validator_value, "array")
|
||||
|
||||
def test_no_oneOf_traversal_for_equally_relevant_errors(self):
|
||||
"""
|
||||
We don't traverse into an oneOf (as above) if all of its context errors
|
||||
seem to be equally "wrong" against the instance.
|
||||
"""
|
||||
|
||||
schema = {
|
||||
"oneOf": [
|
||||
{"type": "string"},
|
||||
{"type": "integer"},
|
||||
{"type": "object"},
|
||||
],
|
||||
}
|
||||
best = self.best_match_of(instance=[], schema=schema)
|
||||
self.assertEqual(best.validator, "oneOf")
|
||||
|
||||
def test_oneOf_traversal_for_single_equally_relevant_error(self):
|
||||
"""
|
||||
We *do* traverse oneOf with a single nested error, even though it is
|
||||
vacuously equally relevant to itself.
|
||||
"""
|
||||
|
||||
schema = {
|
||||
"oneOf": [
|
||||
{"type": "string"},
|
||||
],
|
||||
}
|
||||
best = self.best_match_of(instance=[], schema=schema)
|
||||
self.assertEqual(best.validator, "type")
|
||||
|
||||
def test_oneOf_traversal_for_single_sibling_errors(self):
|
||||
"""
|
||||
We *do* traverse oneOf with a single subschema that fails multiple
|
||||
times (e.g. on multiple items).
|
||||
"""
|
||||
|
||||
schema = {
|
||||
"oneOf": [
|
||||
{"items": {"const": 37}},
|
||||
],
|
||||
}
|
||||
best = self.best_match_of(instance=[12, 12], schema=schema)
|
||||
self.assertEqual(best.validator, "const")
|
||||
|
||||
def test_oneOf_traversal_for_non_type_matching_sibling_errors(self):
|
||||
"""
|
||||
We *do* traverse oneOf with multiple subschemas when one does not type
|
||||
match.
|
||||
"""
|
||||
|
||||
schema = {
|
||||
"oneOf": [
|
||||
{"type": "object"},
|
||||
{"items": {"const": 37}},
|
||||
],
|
||||
}
|
||||
best = self.best_match_of(instance=[12, 12], schema=schema)
|
||||
self.assertEqual(best.validator, "const")
|
||||
|
||||
def test_if_the_most_relevant_error_is_allOf_it_is_traversed(self):
|
||||
"""
|
||||
Now, if the error is allOf, we traverse but select the *most* relevant
|
||||
error from the context, because all schemas here must match anyways.
|
||||
"""
|
||||
|
||||
schema = {
|
||||
"properties": {
|
||||
"foo": {
|
||||
"allOf": [
|
||||
{"type": "string"},
|
||||
{"properties": {"bar": {"type": "array"}}},
|
||||
],
|
||||
},
|
||||
},
|
||||
}
|
||||
best = self.best_match_of(instance={"foo": {"bar": 12}}, schema=schema)
|
||||
self.assertEqual(best.validator_value, "string")
|
||||
|
||||
def test_nested_context_for_oneOf(self):
|
||||
"""
|
||||
We traverse into nested contexts (a oneOf containing an error in a
|
||||
nested oneOf here).
|
||||
"""
|
||||
|
||||
schema = {
|
||||
"properties": {
|
||||
"foo": {
|
||||
"oneOf": [
|
||||
{"type": "string"},
|
||||
{
|
||||
"oneOf": [
|
||||
{"type": "string"},
|
||||
{
|
||||
"properties": {
|
||||
"bar": {"type": "array"},
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
}
|
||||
best = self.best_match_of(instance={"foo": {"bar": 12}}, schema=schema)
|
||||
self.assertEqual(best.validator_value, "array")
|
||||
|
||||
def test_it_prioritizes_matching_types(self):
|
||||
schema = {
|
||||
"properties": {
|
||||
"foo": {
|
||||
"anyOf": [
|
||||
{"type": "array", "minItems": 2},
|
||||
{"type": "string", "minLength": 10},
|
||||
],
|
||||
},
|
||||
},
|
||||
}
|
||||
best = self.best_match_of(instance={"foo": "bar"}, schema=schema)
|
||||
self.assertEqual(best.validator, "minLength")
|
||||
|
||||
reordered = {
|
||||
"properties": {
|
||||
"foo": {
|
||||
"anyOf": [
|
||||
{"type": "string", "minLength": 10},
|
||||
{"type": "array", "minItems": 2},
|
||||
],
|
||||
},
|
||||
},
|
||||
}
|
||||
best = self.best_match_of(instance={"foo": "bar"}, schema=reordered)
|
||||
self.assertEqual(best.validator, "minLength")
|
||||
|
||||
def test_it_prioritizes_matching_union_types(self):
|
||||
schema = {
|
||||
"properties": {
|
||||
"foo": {
|
||||
"anyOf": [
|
||||
{"type": ["array", "object"], "minItems": 2},
|
||||
{"type": ["integer", "string"], "minLength": 10},
|
||||
],
|
||||
},
|
||||
},
|
||||
}
|
||||
best = self.best_match_of(instance={"foo": "bar"}, schema=schema)
|
||||
self.assertEqual(best.validator, "minLength")
|
||||
|
||||
reordered = {
|
||||
"properties": {
|
||||
"foo": {
|
||||
"anyOf": [
|
||||
{"type": "string", "minLength": 10},
|
||||
{"type": "array", "minItems": 2},
|
||||
],
|
||||
},
|
||||
},
|
||||
}
|
||||
best = self.best_match_of(instance={"foo": "bar"}, schema=reordered)
|
||||
self.assertEqual(best.validator, "minLength")
|
||||
|
||||
def test_boolean_schemas(self):
|
||||
schema = {"properties": {"foo": False}}
|
||||
best = self.best_match_of(instance={"foo": "bar"}, schema=schema)
|
||||
self.assertIsNone(best.validator)
|
||||
|
||||
def test_one_error(self):
|
||||
validator = _LATEST_VERSION({"minProperties": 2})
|
||||
error, = validator.iter_errors({})
|
||||
self.assertEqual(
|
||||
exceptions.best_match(validator.iter_errors({})).validator,
|
||||
"minProperties",
|
||||
)
|
||||
|
||||
def test_no_errors(self):
|
||||
validator = _LATEST_VERSION({})
|
||||
self.assertIsNone(exceptions.best_match(validator.iter_errors({})))
|
||||
|
||||
|
||||
class TestByRelevance(TestCase):
|
||||
def test_short_paths_are_better_matches(self):
|
||||
shallow = exceptions.ValidationError("Oh no!", path=["baz"])
|
||||
deep = exceptions.ValidationError("Oh yes!", path=["foo", "bar"])
|
||||
match = max([shallow, deep], key=exceptions.relevance)
|
||||
self.assertIs(match, shallow)
|
||||
|
||||
match = max([deep, shallow], key=exceptions.relevance)
|
||||
self.assertIs(match, shallow)
|
||||
|
||||
def test_global_errors_are_even_better_matches(self):
|
||||
shallow = exceptions.ValidationError("Oh no!", path=[])
|
||||
deep = exceptions.ValidationError("Oh yes!", path=["foo"])
|
||||
|
||||
errors = sorted([shallow, deep], key=exceptions.relevance)
|
||||
self.assertEqual(
|
||||
[list(error.path) for error in errors],
|
||||
[["foo"], []],
|
||||
)
|
||||
|
||||
errors = sorted([deep, shallow], key=exceptions.relevance)
|
||||
self.assertEqual(
|
||||
[list(error.path) for error in errors],
|
||||
[["foo"], []],
|
||||
)
|
||||
|
||||
def test_weak_keywords_are_lower_priority(self):
|
||||
weak = exceptions.ValidationError("Oh no!", path=[], validator="a")
|
||||
normal = exceptions.ValidationError("Oh yes!", path=[], validator="b")
|
||||
|
||||
best_match = exceptions.by_relevance(weak="a")
|
||||
|
||||
match = max([weak, normal], key=best_match)
|
||||
self.assertIs(match, normal)
|
||||
|
||||
match = max([normal, weak], key=best_match)
|
||||
self.assertIs(match, normal)
|
||||
|
||||
def test_strong_keywords_are_higher_priority(self):
|
||||
weak = exceptions.ValidationError("Oh no!", path=[], validator="a")
|
||||
normal = exceptions.ValidationError("Oh yes!", path=[], validator="b")
|
||||
strong = exceptions.ValidationError("Oh fine!", path=[], validator="c")
|
||||
|
||||
best_match = exceptions.by_relevance(weak="a", strong="c")
|
||||
|
||||
match = max([weak, normal, strong], key=best_match)
|
||||
self.assertIs(match, strong)
|
||||
|
||||
match = max([strong, normal, weak], key=best_match)
|
||||
self.assertIs(match, strong)
|
||||
|
||||
|
||||
class TestErrorTree(TestCase):
|
||||
def test_it_knows_how_many_total_errors_it_contains(self):
|
||||
# FIXME: #442
|
||||
errors = [
|
||||
exceptions.ValidationError("Something", validator=i)
|
||||
for i in range(8)
|
||||
]
|
||||
tree = exceptions.ErrorTree(errors)
|
||||
self.assertEqual(tree.total_errors, 8)
|
||||
|
||||
def test_it_contains_an_item_if_the_item_had_an_error(self):
|
||||
errors = [exceptions.ValidationError("a message", path=["bar"])]
|
||||
tree = exceptions.ErrorTree(errors)
|
||||
self.assertIn("bar", tree)
|
||||
|
||||
def test_it_does_not_contain_an_item_if_the_item_had_no_error(self):
|
||||
errors = [exceptions.ValidationError("a message", path=["bar"])]
|
||||
tree = exceptions.ErrorTree(errors)
|
||||
self.assertNotIn("foo", tree)
|
||||
|
||||
def test_keywords_that_failed_appear_in_errors_dict(self):
|
||||
error = exceptions.ValidationError("a message", validator="foo")
|
||||
tree = exceptions.ErrorTree([error])
|
||||
self.assertEqual(tree.errors, {"foo": error})
|
||||
|
||||
def test_it_creates_a_child_tree_for_each_nested_path(self):
|
||||
errors = [
|
||||
exceptions.ValidationError("a bar message", path=["bar"]),
|
||||
exceptions.ValidationError("a bar -> 0 message", path=["bar", 0]),
|
||||
]
|
||||
tree = exceptions.ErrorTree(errors)
|
||||
self.assertIn(0, tree["bar"])
|
||||
self.assertNotIn(1, tree["bar"])
|
||||
|
||||
def test_children_have_their_errors_dicts_built(self):
|
||||
e1, e2 = (
|
||||
exceptions.ValidationError("1", validator="foo", path=["bar", 0]),
|
||||
exceptions.ValidationError("2", validator="quux", path=["bar", 0]),
|
||||
)
|
||||
tree = exceptions.ErrorTree([e1, e2])
|
||||
self.assertEqual(tree["bar"][0].errors, {"foo": e1, "quux": e2})
|
||||
|
||||
def test_multiple_errors_with_instance(self):
|
||||
e1, e2 = (
|
||||
exceptions.ValidationError(
|
||||
"1",
|
||||
validator="foo",
|
||||
path=["bar", "bar2"],
|
||||
instance="i1"),
|
||||
exceptions.ValidationError(
|
||||
"2",
|
||||
validator="quux",
|
||||
path=["foobar", 2],
|
||||
instance="i2"),
|
||||
)
|
||||
exceptions.ErrorTree([e1, e2])
|
||||
|
||||
def test_it_does_not_contain_subtrees_that_are_not_in_the_instance(self):
|
||||
error = exceptions.ValidationError("123", validator="foo", instance=[])
|
||||
tree = exceptions.ErrorTree([error])
|
||||
|
||||
with self.assertRaises(IndexError):
|
||||
tree[0]
|
||||
|
||||
def test_if_its_in_the_tree_anyhow_it_does_not_raise_an_error(self):
|
||||
"""
|
||||
If a keyword refers to a path that isn't in the instance, the
|
||||
tree still properly returns a subtree for that path.
|
||||
"""
|
||||
|
||||
error = exceptions.ValidationError(
|
||||
"a message", validator="foo", instance={}, path=["foo"],
|
||||
)
|
||||
tree = exceptions.ErrorTree([error])
|
||||
self.assertIsInstance(tree["foo"], exceptions.ErrorTree)
|
||||
|
||||
def test_iter(self):
|
||||
e1, e2 = (
|
||||
exceptions.ValidationError(
|
||||
"1",
|
||||
validator="foo",
|
||||
path=["bar", "bar2"],
|
||||
instance="i1"),
|
||||
exceptions.ValidationError(
|
||||
"2",
|
||||
validator="quux",
|
||||
path=["foobar", 2],
|
||||
instance="i2"),
|
||||
)
|
||||
tree = exceptions.ErrorTree([e1, e2])
|
||||
self.assertEqual(set(tree), {"bar", "foobar"})
|
||||
|
||||
def test_repr_single(self):
|
||||
error = exceptions.ValidationError(
|
||||
"1",
|
||||
validator="foo",
|
||||
path=["bar", "bar2"],
|
||||
instance="i1",
|
||||
)
|
||||
tree = exceptions.ErrorTree([error])
|
||||
self.assertEqual(repr(tree), "<ErrorTree (1 total error)>")
|
||||
|
||||
def test_repr_multiple(self):
|
||||
e1, e2 = (
|
||||
exceptions.ValidationError(
|
||||
"1",
|
||||
validator="foo",
|
||||
path=["bar", "bar2"],
|
||||
instance="i1"),
|
||||
exceptions.ValidationError(
|
||||
"2",
|
||||
validator="quux",
|
||||
path=["foobar", 2],
|
||||
instance="i2"),
|
||||
)
|
||||
tree = exceptions.ErrorTree([e1, e2])
|
||||
self.assertEqual(repr(tree), "<ErrorTree (2 total errors)>")
|
||||
|
||||
def test_repr_empty(self):
|
||||
tree = exceptions.ErrorTree([])
|
||||
self.assertEqual(repr(tree), "<ErrorTree (0 total errors)>")
|
||||
|
||||
|
||||
class TestErrorInitReprStr(TestCase):
|
||||
def make_error(self, **kwargs):
|
||||
defaults = dict(
|
||||
message="hello",
|
||||
validator="type",
|
||||
validator_value="string",
|
||||
instance=5,
|
||||
schema={"type": "string"},
|
||||
)
|
||||
defaults.update(kwargs)
|
||||
return exceptions.ValidationError(**defaults)
|
||||
|
||||
def assertShows(self, expected, **kwargs):
|
||||
expected = textwrap.dedent(expected).rstrip("\n")
|
||||
|
||||
error = self.make_error(**kwargs)
|
||||
message_line, _, rest = str(error).partition("\n")
|
||||
self.assertEqual(message_line, error.message)
|
||||
self.assertEqual(rest, expected)
|
||||
|
||||
def test_it_calls_super_and_sets_args(self):
|
||||
error = self.make_error()
|
||||
self.assertGreater(len(error.args), 1)
|
||||
|
||||
def test_repr(self):
|
||||
self.assertEqual(
|
||||
repr(exceptions.ValidationError(message="Hello!")),
|
||||
"<ValidationError: 'Hello!'>",
|
||||
)
|
||||
|
||||
def test_unset_error(self):
|
||||
error = exceptions.ValidationError("message")
|
||||
self.assertEqual(str(error), "message")
|
||||
|
||||
kwargs = {
|
||||
"validator": "type",
|
||||
"validator_value": "string",
|
||||
"instance": 5,
|
||||
"schema": {"type": "string"},
|
||||
}
|
||||
# Just the message should show if any of the attributes are unset
|
||||
for attr in kwargs:
|
||||
k = dict(kwargs)
|
||||
del k[attr]
|
||||
error = exceptions.ValidationError("message", **k)
|
||||
self.assertEqual(str(error), "message")
|
||||
|
||||
def test_empty_paths(self):
|
||||
self.assertShows(
|
||||
"""
|
||||
Failed validating 'type' in schema:
|
||||
{'type': 'string'}
|
||||
|
||||
On instance:
|
||||
5
|
||||
""",
|
||||
path=[],
|
||||
schema_path=[],
|
||||
)
|
||||
|
||||
def test_one_item_paths(self):
|
||||
self.assertShows(
|
||||
"""
|
||||
Failed validating 'type' in schema:
|
||||
{'type': 'string'}
|
||||
|
||||
On instance[0]:
|
||||
5
|
||||
""",
|
||||
path=[0],
|
||||
schema_path=["items"],
|
||||
)
|
||||
|
||||
def test_multiple_item_paths(self):
|
||||
self.assertShows(
|
||||
"""
|
||||
Failed validating 'type' in schema['items'][0]:
|
||||
{'type': 'string'}
|
||||
|
||||
On instance[0]['a']:
|
||||
5
|
||||
""",
|
||||
path=[0, "a"],
|
||||
schema_path=["items", 0, 1],
|
||||
)
|
||||
|
||||
def test_uses_pprint(self):
|
||||
self.assertShows(
|
||||
"""
|
||||
Failed validating 'maxLength' in schema:
|
||||
{0: 0,
|
||||
1: 1,
|
||||
2: 2,
|
||||
3: 3,
|
||||
4: 4,
|
||||
5: 5,
|
||||
6: 6,
|
||||
7: 7,
|
||||
8: 8,
|
||||
9: 9,
|
||||
10: 10,
|
||||
11: 11,
|
||||
12: 12,
|
||||
13: 13,
|
||||
14: 14,
|
||||
15: 15,
|
||||
16: 16,
|
||||
17: 17,
|
||||
18: 18,
|
||||
19: 19}
|
||||
|
||||
On instance:
|
||||
[0,
|
||||
1,
|
||||
2,
|
||||
3,
|
||||
4,
|
||||
5,
|
||||
6,
|
||||
7,
|
||||
8,
|
||||
9,
|
||||
10,
|
||||
11,
|
||||
12,
|
||||
13,
|
||||
14,
|
||||
15,
|
||||
16,
|
||||
17,
|
||||
18,
|
||||
19,
|
||||
20,
|
||||
21,
|
||||
22,
|
||||
23,
|
||||
24]
|
||||
""",
|
||||
instance=list(range(25)),
|
||||
schema=dict(zip(range(20), range(20))),
|
||||
validator="maxLength",
|
||||
)
|
||||
|
||||
def test_does_not_reorder_dicts(self):
|
||||
self.assertShows(
|
||||
"""
|
||||
Failed validating 'type' in schema:
|
||||
{'do': 3, 'not': 7, 'sort': 37, 'me': 73}
|
||||
|
||||
On instance:
|
||||
{'here': 73, 'too': 37, 'no': 7, 'sorting': 3}
|
||||
""",
|
||||
schema={
|
||||
"do": 3,
|
||||
"not": 7,
|
||||
"sort": 37,
|
||||
"me": 73,
|
||||
},
|
||||
instance={
|
||||
"here": 73,
|
||||
"too": 37,
|
||||
"no": 7,
|
||||
"sorting": 3,
|
||||
},
|
||||
)
|
||||
|
||||
def test_str_works_with_instances_having_overriden_eq_operator(self):
|
||||
"""
|
||||
Check for #164 which rendered exceptions unusable when a
|
||||
`ValidationError` involved instances with an `__eq__` method
|
||||
that returned truthy values.
|
||||
"""
|
||||
|
||||
class DontEQMeBro:
|
||||
def __eq__(this, other): # pragma: no cover
|
||||
self.fail("Don't!")
|
||||
|
||||
def __ne__(this, other): # pragma: no cover
|
||||
self.fail("Don't!")
|
||||
|
||||
instance = DontEQMeBro()
|
||||
error = exceptions.ValidationError(
|
||||
"a message",
|
||||
validator="foo",
|
||||
instance=instance,
|
||||
validator_value="some",
|
||||
schema="schema",
|
||||
)
|
||||
self.assertIn(repr(instance), str(error))
|
||||
|
||||
|
||||
class TestHashable(TestCase):
|
||||
def test_hashable(self):
|
||||
{exceptions.ValidationError("")}
|
||||
{exceptions.SchemaError("")}
|
||||
@@ -0,0 +1,91 @@
|
||||
"""
|
||||
Tests for the parts of jsonschema related to the :kw:`format` keyword.
|
||||
"""
|
||||
|
||||
from unittest import TestCase
|
||||
|
||||
from jsonschema import FormatChecker, ValidationError
|
||||
from jsonschema.exceptions import FormatError
|
||||
from jsonschema.validators import Draft4Validator
|
||||
|
||||
BOOM = ValueError("Boom!")
|
||||
BANG = ZeroDivisionError("Bang!")
|
||||
|
||||
|
||||
def boom(thing):
|
||||
if thing == "bang":
|
||||
raise BANG
|
||||
raise BOOM
|
||||
|
||||
|
||||
class TestFormatChecker(TestCase):
|
||||
def test_it_can_validate_no_formats(self):
|
||||
checker = FormatChecker(formats=())
|
||||
self.assertFalse(checker.checkers)
|
||||
|
||||
def test_it_raises_a_key_error_for_unknown_formats(self):
|
||||
with self.assertRaises(KeyError):
|
||||
FormatChecker(formats=["o noes"])
|
||||
|
||||
def test_it_can_register_cls_checkers(self):
|
||||
original = dict(FormatChecker.checkers)
|
||||
self.addCleanup(FormatChecker.checkers.pop, "boom")
|
||||
with self.assertWarns(DeprecationWarning):
|
||||
FormatChecker.cls_checks("boom")(boom)
|
||||
self.assertEqual(
|
||||
FormatChecker.checkers,
|
||||
dict(original, boom=(boom, ())),
|
||||
)
|
||||
|
||||
def test_it_can_register_checkers(self):
|
||||
checker = FormatChecker()
|
||||
checker.checks("boom")(boom)
|
||||
self.assertEqual(
|
||||
checker.checkers,
|
||||
dict(FormatChecker.checkers, boom=(boom, ())),
|
||||
)
|
||||
|
||||
def test_it_catches_registered_errors(self):
|
||||
checker = FormatChecker()
|
||||
checker.checks("boom", raises=type(BOOM))(boom)
|
||||
|
||||
with self.assertRaises(FormatError) as cm:
|
||||
checker.check(instance=12, format="boom")
|
||||
|
||||
self.assertIs(cm.exception.cause, BOOM)
|
||||
self.assertIs(cm.exception.__cause__, BOOM)
|
||||
self.assertEqual(str(cm.exception), "12 is not a 'boom'")
|
||||
|
||||
# Unregistered errors should not be caught
|
||||
with self.assertRaises(type(BANG)):
|
||||
checker.check(instance="bang", format="boom")
|
||||
|
||||
def test_format_error_causes_become_validation_error_causes(self):
|
||||
checker = FormatChecker()
|
||||
checker.checks("boom", raises=ValueError)(boom)
|
||||
validator = Draft4Validator({"format": "boom"}, format_checker=checker)
|
||||
|
||||
with self.assertRaises(ValidationError) as cm:
|
||||
validator.validate("BOOM")
|
||||
|
||||
self.assertIs(cm.exception.cause, BOOM)
|
||||
self.assertIs(cm.exception.__cause__, BOOM)
|
||||
|
||||
def test_format_checkers_come_with_defaults(self):
|
||||
# This is bad :/ but relied upon.
|
||||
# The docs for quite awhile recommended people do things like
|
||||
# validate(..., format_checker=FormatChecker())
|
||||
# We should change that, but we can't without deprecation...
|
||||
checker = FormatChecker()
|
||||
with self.assertRaises(FormatError):
|
||||
checker.check(instance="not-an-ipv4", format="ipv4")
|
||||
|
||||
def test_repr(self):
|
||||
checker = FormatChecker(formats=())
|
||||
checker.checks("foo")(lambda thing: True) # pragma: no cover
|
||||
checker.checks("bar")(lambda thing: True) # pragma: no cover
|
||||
checker.checks("baz")(lambda thing: True) # pragma: no cover
|
||||
self.assertEqual(
|
||||
repr(checker),
|
||||
"<FormatChecker checkers=['bar', 'baz', 'foo']>",
|
||||
)
|
||||
@@ -0,0 +1,269 @@
|
||||
"""
|
||||
Test runner for the JSON Schema official test suite
|
||||
|
||||
Tests comprehensive correctness of each draft's validator.
|
||||
|
||||
See https://github.com/json-schema-org/JSON-Schema-Test-Suite for details.
|
||||
"""
|
||||
|
||||
import sys
|
||||
|
||||
from jsonschema.tests._suite import Suite
|
||||
import jsonschema
|
||||
|
||||
SUITE = Suite()
|
||||
DRAFT3 = SUITE.version(name="draft3")
|
||||
DRAFT4 = SUITE.version(name="draft4")
|
||||
DRAFT6 = SUITE.version(name="draft6")
|
||||
DRAFT7 = SUITE.version(name="draft7")
|
||||
DRAFT201909 = SUITE.version(name="draft2019-09")
|
||||
DRAFT202012 = SUITE.version(name="draft2020-12")
|
||||
|
||||
|
||||
def skip(message, **kwargs):
|
||||
def skipper(test):
|
||||
if all(value == getattr(test, attr) for attr, value in kwargs.items()):
|
||||
return message
|
||||
return skipper
|
||||
|
||||
|
||||
def missing_format(Validator):
|
||||
def missing_format(test): # pragma: no cover
|
||||
schema = test.schema
|
||||
if (
|
||||
schema is True
|
||||
or schema is False
|
||||
or "format" not in schema
|
||||
or schema["format"] in Validator.FORMAT_CHECKER.checkers
|
||||
or test.valid
|
||||
):
|
||||
return
|
||||
|
||||
return f"Format checker {schema['format']!r} not found."
|
||||
return missing_format
|
||||
|
||||
|
||||
def complex_email_validation(test):
|
||||
if test.subject != "email":
|
||||
return
|
||||
|
||||
message = "Complex email validation is (intentionally) unsupported."
|
||||
return skip(
|
||||
message=message,
|
||||
description="an invalid domain",
|
||||
)(test) or skip(
|
||||
message=message,
|
||||
description="an invalid IPv4-address-literal",
|
||||
)(test) or skip(
|
||||
message=message,
|
||||
description="dot after local part is not valid",
|
||||
)(test) or skip(
|
||||
message=message,
|
||||
description="dot before local part is not valid",
|
||||
)(test) or skip(
|
||||
message=message,
|
||||
description="two subsequent dots inside local part are not valid",
|
||||
)(test)
|
||||
|
||||
|
||||
if sys.version_info < (3, 9): # pragma: no cover
|
||||
message = "Rejecting leading zeros is 3.9+"
|
||||
allowed_leading_zeros = skip(
|
||||
message=message,
|
||||
subject="ipv4",
|
||||
description="invalid leading zeroes, as they are treated as octals",
|
||||
)
|
||||
else:
|
||||
def allowed_leading_zeros(test): # pragma: no cover
|
||||
return
|
||||
|
||||
|
||||
def leap_second(test):
    """
    Skip time/date-time format tests that rely on leap seconds.

    Leap-second support is unsupported by the format checkers, so every
    suite test exercising one is skipped.
    """
    message = "Leap seconds are unsupported."
    leap_second_cases = (
        ("time", "a valid time string with leap second"),
        ("time", "a valid time string with leap second, Zulu"),
        ("time", "a valid time string with leap second with offset"),
        ("time", "valid leap second, positive time-offset"),
        ("time", "valid leap second, negative time-offset"),
        ("time", "valid leap second, large positive time-offset"),
        ("time", "valid leap second, large negative time-offset"),
        ("time", "valid leap second, zero time-offset"),
        ("date-time", "a valid date-time with a leap second, UTC"),
        ("date-time", "a valid date-time with a leap second, with minus offset"),
    )
    for subject, description in leap_second_cases:
        reason = skip(
            message=message,
            subject=subject,
            description=description,
        )(test)
        if reason:
            return reason
|
||||
|
||||
|
||||
# Draft 3: core suite cases, format cases, and the optional case files
# this implementation supports for that draft.
TestDraft3 = DRAFT3.to_unittest_testcase(
    DRAFT3.cases(),
    DRAFT3.format_cases(),
    DRAFT3.optional_cases_of(name="bignum"),
    DRAFT3.optional_cases_of(name="non-bmp-regex"),
    DRAFT3.optional_cases_of(name="zeroTerminatedFloats"),
    Validator=jsonschema.Draft3Validator,
    format_checker=jsonschema.Draft3Validator.FORMAT_CHECKER,
    # Skip format tests with no registered checker and the complex email
    # validation tests that are intentionally unsupported.
    skip=lambda test: (
        missing_format(jsonschema.Draft3Validator)(test)
        or complex_email_validation(test)
    ),
)
|
||||
|
||||
|
||||
# Draft 4: core suite cases, format cases, and supported optional cases.
TestDraft4 = DRAFT4.to_unittest_testcase(
    DRAFT4.cases(),
    DRAFT4.format_cases(),
    DRAFT4.optional_cases_of(name="bignum"),
    DRAFT4.optional_cases_of(name="float-overflow"),
    DRAFT4.optional_cases_of(name="id"),
    DRAFT4.optional_cases_of(name="non-bmp-regex"),
    DRAFT4.optional_cases_of(name="zeroTerminatedFloats"),
    Validator=jsonschema.Draft4Validator,
    format_checker=jsonschema.Draft4Validator.FORMAT_CHECKER,
    # Skip tests for Python-version-dependent ipv4 behavior, leap seconds,
    # missing format checkers, and unsupported complex email validation.
    skip=lambda test: (
        allowed_leading_zeros(test)
        or leap_second(test)
        or missing_format(jsonschema.Draft4Validator)(test)
        or complex_email_validation(test)
    ),
)
|
||||
|
||||
|
||||
# Draft 6: core suite cases, format cases, and supported optional cases.
TestDraft6 = DRAFT6.to_unittest_testcase(
    DRAFT6.cases(),
    DRAFT6.format_cases(),
    DRAFT6.optional_cases_of(name="bignum"),
    DRAFT6.optional_cases_of(name="float-overflow"),
    DRAFT6.optional_cases_of(name="id"),
    DRAFT6.optional_cases_of(name="non-bmp-regex"),
    Validator=jsonschema.Draft6Validator,
    format_checker=jsonschema.Draft6Validator.FORMAT_CHECKER,
    # Skip tests for Python-version-dependent ipv4 behavior, leap seconds,
    # missing format checkers, and unsupported complex email validation.
    skip=lambda test: (
        allowed_leading_zeros(test)
        or leap_second(test)
        or missing_format(jsonschema.Draft6Validator)(test)
        or complex_email_validation(test)
    ),
)
|
||||
|
||||
|
||||
# Draft 7: core suite cases, format cases, and supported optional cases.
TestDraft7 = DRAFT7.to_unittest_testcase(
    DRAFT7.cases(),
    DRAFT7.format_cases(),
    DRAFT7.optional_cases_of(name="bignum"),
    DRAFT7.optional_cases_of(name="cross-draft"),
    DRAFT7.optional_cases_of(name="float-overflow"),
    # Fixed: this previously loaded DRAFT6's "id" optional cases into the
    # draft 7 testcase; use draft 7's own optional case file.
    DRAFT7.optional_cases_of(name="id"),
    DRAFT7.optional_cases_of(name="non-bmp-regex"),
    DRAFT7.optional_cases_of(name="unknownKeyword"),
    Validator=jsonschema.Draft7Validator,
    format_checker=jsonschema.Draft7Validator.FORMAT_CHECKER,
    # Skip tests for Python-version-dependent ipv4 behavior, leap seconds,
    # missing format checkers, and unsupported complex email validation.
    skip=lambda test: (
        allowed_leading_zeros(test)
        or leap_second(test)
        or missing_format(jsonschema.Draft7Validator)(test)
        or complex_email_validation(test)
    ),
)
|
||||
|
||||
|
||||
# Draft 2019-09: core suite cases plus supported optional cases.  Format
# cases live in the separate TestDraft201909Format testcase, so no
# format_checker is passed here.
TestDraft201909 = DRAFT201909.to_unittest_testcase(
    DRAFT201909.cases(),
    DRAFT201909.optional_cases_of(name="anchor"),
    DRAFT201909.optional_cases_of(name="bignum"),
    DRAFT201909.optional_cases_of(name="cross-draft"),
    DRAFT201909.optional_cases_of(name="float-overflow"),
    DRAFT201909.optional_cases_of(name="id"),
    DRAFT201909.optional_cases_of(name="no-schema"),
    DRAFT201909.optional_cases_of(name="non-bmp-regex"),
    DRAFT201909.optional_cases_of(name="refOfUnknownKeyword"),
    DRAFT201909.optional_cases_of(name="unknownKeyword"),
    Validator=jsonschema.Draft201909Validator,
    # Vocabulary handling is not finished, so its suite test is skipped.
    skip=skip(
        message="Vocabulary support is still in-progress.",
        subject="vocabulary",
        description=(
            "no validation: invalid number, but it still validates"
        ),
    ),
)
|
||||
|
||||
|
||||
# Draft 2019-09 format tests, run with the draft's format checker attached.
TestDraft201909Format = DRAFT201909.to_unittest_testcase(
    DRAFT201909.format_cases(),
    name="TestDraft201909Format",
    Validator=jsonschema.Draft201909Validator,
    format_checker=jsonschema.Draft201909Validator.FORMAT_CHECKER,
    # Fixed: complex_email_validation previously appeared twice in this
    # or-chain (first and last); the trailing duplicate was dead code.
    skip=lambda test: (
        complex_email_validation(test)
        or allowed_leading_zeros(test)
        or leap_second(test)
        or missing_format(jsonschema.Draft201909Validator)(test)
    ),
)
|
||||
|
||||
|
||||
# Draft 2020-12: core suite cases plus supported optional cases.  Format
# cases live in the separate TestDraft202012Format testcase, so no
# format_checker is passed here.
TestDraft202012 = DRAFT202012.to_unittest_testcase(
    DRAFT202012.cases(),
    # Fixed: this previously loaded DRAFT201909's "anchor" optional cases
    # into the 2020-12 testcase; use the 2020-12 suite's own file.
    DRAFT202012.optional_cases_of(name="anchor"),
    DRAFT202012.optional_cases_of(name="bignum"),
    DRAFT202012.optional_cases_of(name="cross-draft"),
    DRAFT202012.optional_cases_of(name="float-overflow"),
    DRAFT202012.optional_cases_of(name="id"),
    DRAFT202012.optional_cases_of(name="no-schema"),
    DRAFT202012.optional_cases_of(name="non-bmp-regex"),
    DRAFT202012.optional_cases_of(name="refOfUnknownKeyword"),
    DRAFT202012.optional_cases_of(name="unknownKeyword"),
    Validator=jsonschema.Draft202012Validator,
    # Vocabulary handling is not finished, so its suite test is skipped.
    skip=skip(
        message="Vocabulary support is still in-progress.",
        subject="vocabulary",
        description=(
            "no validation: invalid number, but it still validates"
        ),
    ),
)
|
||||
|
||||
|
||||
# Draft 2020-12 format tests, run with the draft's format checker attached.
TestDraft202012Format = DRAFT202012.to_unittest_testcase(
    DRAFT202012.format_cases(),
    name="TestDraft202012Format",
    Validator=jsonschema.Draft202012Validator,
    format_checker=jsonschema.Draft202012Validator.FORMAT_CHECKER,
    # Fixed: complex_email_validation previously appeared twice in this
    # or-chain (first and last); the trailing duplicate was dead code.
    skip=lambda test: (
        complex_email_validation(test)
        or allowed_leading_zeros(test)
        or leap_second(test)
        or missing_format(jsonschema.Draft202012Validator)(test)
    ),
)
|
||||
@@ -0,0 +1,221 @@
|
||||
"""
|
||||
Tests for the `TypeChecker`-based type interface.
|
||||
|
||||
The actual correctness of the type checking is handled in
|
||||
`test_jsonschema_test_suite`; these tests check that TypeChecker
|
||||
functions correctly at a more granular level.
|
||||
"""
|
||||
from collections import namedtuple
|
||||
from unittest import TestCase
|
||||
|
||||
from jsonschema import ValidationError, _keywords
|
||||
from jsonschema._types import TypeChecker
|
||||
from jsonschema.exceptions import UndefinedTypeCheck, UnknownType
|
||||
from jsonschema.validators import Draft202012Validator, extend
|
||||
|
||||
|
||||
def equals_2(checker, instance):
    """A `TypeChecker` predicate accepting only values equal to ``2``."""
    is_two = instance == 2
    return is_two
|
||||
|
||||
|
||||
def is_namedtuple(instance):
    """
    Return whether *instance* looks like a `collections.namedtuple`.

    Detection is duck-typed: a tuple carrying a (non-empty) ``_fields``
    attribute.  Fixed to return a plain ``bool`` — the original returned
    the ``_fields`` tuple itself for namedtuples and ``None`` for plain
    tuples, leaking an implementation detail to callers; truthiness is
    unchanged.
    """
    return bool(isinstance(instance, tuple) and getattr(instance, "_fields", None))
|
||||
|
||||
|
||||
def is_object_or_named_tuple(checker, instance):
    """
    Type check accepting the default "object" type as well as namedtuples.
    """
    return (
        Draft202012Validator.TYPE_CHECKER.is_type(instance, "object")
        or is_namedtuple(instance)
    )
|
||||
|
||||
|
||||
class TestTypeChecker(TestCase):
    """Unit tests for `TypeChecker`'s public API (is_type/redefine/remove)."""

    def test_is_type(self):
        # A registered check ("two") accepts 2 and rejects other values.
        checker = TypeChecker({"two": equals_2})
        self.assertEqual(
            (
                checker.is_type(instance=2, type="two"),
                checker.is_type(instance="bar", type="two"),
            ),
            (True, False),
        )

    def test_is_unknown_type(self):
        # Unregistered type names raise UndefinedTypeCheck, naming the type.
        with self.assertRaises(UndefinedTypeCheck) as e:
            TypeChecker().is_type(4, "foobar")
        self.assertIn(
            "'foobar' is unknown to this type checker",
            str(e.exception),
        )
        # The internal KeyError from the lookup must not leak as context.
        self.assertTrue(
            e.exception.__suppress_context__,
            msg="Expected the internal KeyError to be hidden.",
        )

    def test_checks_can_be_added_at_init(self):
        # Constructing with a checks mapping equals redefine()-ing them.
        checker = TypeChecker({"two": equals_2})
        self.assertEqual(checker, TypeChecker().redefine("two", equals_2))

    def test_redefine_existing_type(self):
        # Redefining an existing name replaces the prior check entirely.
        self.assertEqual(
            TypeChecker().redefine("two", object()).redefine("two", equals_2),
            TypeChecker().redefine("two", equals_2),
        )

    def test_remove(self):
        # Removing the only registered check yields an empty checker.
        self.assertEqual(
            TypeChecker({"two": equals_2}).remove("two"),
            TypeChecker(),
        )

    def test_remove_unknown_type(self):
        # Removing an unregistered type raises, naming the offending type.
        with self.assertRaises(UndefinedTypeCheck) as context:
            TypeChecker().remove("foobar")
        self.assertIn("foobar", str(context.exception))

    def test_redefine_many(self):
        # redefine_many is equivalent to chained redefine calls.
        self.assertEqual(
            TypeChecker().redefine_many({"foo": int, "bar": str}),
            TypeChecker().redefine("foo", int).redefine("bar", str),
        )

    def test_remove_multiple(self):
        # remove() accepts multiple type names in one call.
        self.assertEqual(
            TypeChecker({"foo": int, "bar": str}).remove("foo", "bar"),
            TypeChecker(),
        )

    def test_type_check_can_raise_key_error(self):
        """
        Make sure no one writes:

            try:
                self._type_checkers[type](...)
            except KeyError:

        ignoring the fact that the function itself can raise that.
        """

        error = KeyError("Stuff")

        def raises_keyerror(checker, instance):
            raise error

        with self.assertRaises(KeyError) as context:
            TypeChecker({"foo": raises_keyerror}).is_type(4, "foo")

        # The *original* KeyError object must propagate unmodified.
        self.assertIs(context.exception, error)

    def test_repr(self):
        # The repr lists the registered type names; here 'bar' precedes
        # 'foo' regardless of registration order.
        checker = TypeChecker({"foo": is_namedtuple, "bar": is_namedtuple})
        self.assertEqual(repr(checker), "<TypeChecker types={'bar', 'foo'}>")
|
||||
|
||||
|
||||
class TestCustomTypes(TestCase):
    """End-to-end tests for extending validators with custom type checks."""

    def test_simple_type_can_be_extended(self):
        # Redefine "integer" to also accept strings parseable as ints.
        def int_or_str_int(checker, instance):
            if not isinstance(instance, (int, str)):
                return False
            try:
                int(instance)
            except ValueError:
                return False
            return True

        CustomValidator = extend(
            Draft202012Validator,
            type_checker=Draft202012Validator.TYPE_CHECKER.redefine(
                "integer", int_or_str_int,
            ),
        )
        validator = CustomValidator({"type": "integer"})

        validator.validate(4)
        validator.validate("4")

        # Floats are still rejected by the redefined "integer" type.
        with self.assertRaises(ValidationError):
            validator.validate(4.4)

        # Non-numeric strings fail the int() conversion above.
        with self.assertRaises(ValidationError):
            validator.validate("foo")

    def test_object_can_be_extended(self):
        schema = {"type": "object"}

        Point = namedtuple("Point", ["x", "y"])

        # Broaden "object" to also accept namedtuples.
        type_checker = Draft202012Validator.TYPE_CHECKER.redefine(
            "object", is_object_or_named_tuple,
        )

        CustomValidator = extend(
            Draft202012Validator,
            type_checker=type_checker,
        )
        validator = CustomValidator(schema)

        validator.validate(Point(x=4, y=5))

    def test_object_extensions_require_custom_validators(self):
        # Redefining the type alone is not enough: keyword validators
        # such as "required" still expect a real mapping.
        schema = {"type": "object", "required": ["x"]}

        type_checker = Draft202012Validator.TYPE_CHECKER.redefine(
            "object", is_object_or_named_tuple,
        )

        CustomValidator = extend(
            Draft202012Validator,
            type_checker=type_checker,
        )
        validator = CustomValidator(schema)

        Point = namedtuple("Point", ["x", "y"])
        # Cannot handle required
        with self.assertRaises(ValidationError):
            validator.validate(Point(x=4, y=5))

    def test_object_extensions_can_handle_custom_validators(self):
        schema = {
            "type": "object",
            "required": ["x"],
            "properties": {"x": {"type": "integer"}},
        }

        type_checker = Draft202012Validator.TYPE_CHECKER.redefine(
            "object", is_object_or_named_tuple,
        )

        # Wrap a keyword validator so namedtuples are converted to dicts
        # before the stock implementation runs.
        def coerce_named_tuple(fn):
            def coerced(validator, value, instance, schema):
                if is_namedtuple(instance):
                    instance = instance._asdict()
                return fn(validator, value, instance, schema)
            return coerced

        required = coerce_named_tuple(_keywords.required)
        properties = coerce_named_tuple(_keywords.properties)

        CustomValidator = extend(
            Draft202012Validator,
            type_checker=type_checker,
            validators={"required": required, "properties": properties},
        )

        validator = CustomValidator(schema)

        Point = namedtuple("Point", ["x", "y"])
        # Can now process required and properties
        validator.validate(Point(x=4, y=5))

        with self.assertRaises(ValidationError):
            validator.validate(Point(x="not an integer", y=5))

        # As well as still handle objects.
        validator.validate({"x": 4, "y": 5})

        with self.assertRaises(ValidationError):
            validator.validate({"x": "not an integer", "y": 5})

    def test_unknown_type(self):
        # is_type with a name the checker has never seen raises UnknownType.
        with self.assertRaises(UnknownType) as e:
            Draft202012Validator({}).is_type(12, "some unknown type")
        self.assertIn("'some unknown type'", str(e.exception))
|
||||
@@ -0,0 +1,138 @@
|
||||
from math import nan
|
||||
from unittest import TestCase
|
||||
|
||||
from jsonschema._utils import equal
|
||||
|
||||
|
||||
class TestEqual(TestCase):
    """Sanity checks for `equal` on scalar values."""

    def test_none(self):
        self.assertTrue(equal(None, None))

    def test_nan(self):
        # Unlike ``==``, `equal` treats NaN as equal to itself.
        self.assertTrue(equal(nan, nan))
|
||||
|
||||
|
||||
class TestDictEqual(TestCase):
    """`equal` on dicts: order-insensitive, NaN-tolerant, and recursive."""

    def test_equal_dictionaries(self):
        left = {"a": "b", "c": "d"}
        right = {"c": "d", "a": "b"}
        self.assertTrue(equal(left, right))

    def test_equal_dictionaries_with_nan(self):
        left = {"a": nan, "c": "d"}
        right = {"c": "d", "a": nan}
        self.assertTrue(equal(left, right))

    def test_missing_key(self):
        left = {"a": "b", "c": "d"}
        right = {"c": "d", "x": "b"}
        self.assertFalse(equal(left, right))

    def test_additional_key(self):
        left = {"a": "b", "c": "d"}
        right = {"c": "d", "a": "b", "x": "x"}
        self.assertFalse(equal(left, right))

    def test_missing_value(self):
        left = {"a": "b", "c": "d"}
        right = {"c": "d", "a": "x"}
        self.assertFalse(equal(left, right))

    def test_empty_dictionaries(self):
        self.assertTrue(equal({}, {}))

    def test_one_none(self):
        self.assertFalse(equal(None, {"a": "b", "c": "d"}))

    def test_same_item(self):
        mapping = {"a": "b", "c": "d"}
        self.assertTrue(equal(mapping, mapping))

    def test_nested_equal(self):
        left = {"a": {"a": "b", "c": "d"}, "c": "d"}
        right = {"c": "d", "a": {"a": "b", "c": "d"}}
        self.assertTrue(equal(left, right))

    def test_nested_dict_unequal(self):
        left = {"a": {"a": "b", "c": "d"}, "c": "d"}
        right = {"c": "d", "a": {"a": "b", "c": "x"}}
        self.assertFalse(equal(left, right))

    def test_mixed_nested_equal(self):
        left = {"a": ["a", "b", "c", "d"], "c": "d"}
        right = {"c": "d", "a": ["a", "b", "c", "d"]}
        self.assertTrue(equal(left, right))

    def test_nested_list_unequal(self):
        left = {"a": ["a", "b", "c", "d"], "c": "d"}
        right = {"c": "d", "a": ["b", "c", "d", "a"]}
        self.assertFalse(equal(left, right))
|
||||
|
||||
|
||||
class TestListEqual(TestCase):
    """`equal` on lists: element-wise, order-sensitive, NaN-tolerant."""

    def test_equal_lists(self):
        self.assertTrue(equal(["a", "b", "c"], ["a", "b", "c"]))

    def test_equal_lists_with_nan(self):
        # NaN elements compare equal to themselves under `equal`.
        self.assertTrue(equal(["a", nan, "c"], ["a", nan, "c"]))

    def test_unsorted_lists(self):
        self.assertFalse(equal(["a", "b", "c"], ["b", "b", "a"]))

    def test_first_list_larger(self):
        self.assertFalse(equal(["a", "b", "c"], ["a", "b"]))

    def test_second_list_larger(self):
        self.assertFalse(equal(["a", "b"], ["a", "b", "c"]))

    def test_list_with_none_unequal(self):
        self.assertFalse(equal(["a", "b", None], ["a", "b", "c"]))
        self.assertFalse(equal(["a", "b", None], [None, "b", "c"]))

    def test_list_with_none_equal(self):
        self.assertTrue(equal(["a", None, "c"], ["a", None, "c"]))

    def test_empty_list(self):
        self.assertTrue(equal([], []))

    def test_one_none(self):
        self.assertFalse(equal(None, []))

    def test_same_list(self):
        same = ["a", "b", "c"]
        self.assertTrue(equal(same, same))

    def test_equal_nested_lists(self):
        self.assertTrue(equal(["a", ["b", "c"], "d"], ["a", ["b", "c"], "d"]))

    def test_unequal_nested_lists(self):
        self.assertFalse(equal(["a", ["b", "c"], "d"], ["a", [], "c"]))
|
||||
File diff suppressed because it is too large
Load Diff
1410
.venv/lib/python3.10/site-packages/jsonschema/validators.py
Normal file
1410
.venv/lib/python3.10/site-packages/jsonschema/validators.py
Normal file
File diff suppressed because it is too large
Load Diff
Reference in New Issue
Block a user