main commit

2025-10-16 16:30:25 +09:00
parent 91c7e04474
commit 537e7b363f
1146 changed files with 45926 additions and 77196 deletions


@@ -1,5 +1,5 @@
# dialects/mssql/base.py
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# mssql/base.py
# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -9,6 +9,7 @@
"""
.. dialect:: mssql
:name: Microsoft SQL Server
:full_support: 2017
:normal_support: 2012+
:best_effort: 2005+
@@ -39,12 +40,9 @@ considered to be the identity column - unless it is associated with a
from sqlalchemy import Table, MetaData, Column, Integer
m = MetaData()
t = Table(
"t",
m,
Column("id", Integer, primary_key=True),
Column("x", Integer),
)
t = Table('t', m,
Column('id', Integer, primary_key=True),
Column('x', Integer))
m.create_all(engine)
The above example will generate DDL as:
@@ -62,12 +60,9 @@ specify ``False`` for the :paramref:`_schema.Column.autoincrement` flag,
on the first integer primary key column::
m = MetaData()
t = Table(
"t",
m,
Column("id", Integer, primary_key=True, autoincrement=False),
Column("x", Integer),
)
t = Table('t', m,
Column('id', Integer, primary_key=True, autoincrement=False),
Column('x', Integer))
m.create_all(engine)
To add the ``IDENTITY`` keyword to a non-primary key column, specify
@@ -77,12 +72,9 @@ To add the ``IDENTITY`` keyword to a non-primary key column, specify
is set to ``False`` on any integer primary key column::
m = MetaData()
t = Table(
"t",
m,
Column("id", Integer, primary_key=True, autoincrement=False),
Column("x", Integer, autoincrement=True),
)
t = Table('t', m,
Column('id', Integer, primary_key=True, autoincrement=False),
Column('x', Integer, autoincrement=True))
m.create_all(engine)
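The resulting DDL can be spot-checked without a live engine; a minimal
sketch, assuming only SQLAlchemy and its bundled mssql dialect::

    from sqlalchemy import Column, Integer, MetaData, Table
    from sqlalchemy.dialects import mssql
    from sqlalchemy.schema import CreateTable

    m = MetaData()
    t = Table(
        "t",
        m,
        Column("id", Integer, primary_key=True, autoincrement=False),
        Column("x", Integer, autoincrement=True),
    )
    # IDENTITY should render on "x", not on the primary key "id"
    print(CreateTable(t).compile(dialect=mssql.dialect()))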
.. versionchanged:: 1.4 Added :class:`_schema.Identity` construct
@@ -145,12 +137,14 @@ parameters passed to the :class:`_schema.Identity` object::
from sqlalchemy import Table, Integer, Column, Identity
test = Table(
"test",
metadata,
'test', metadata,
Column(
"id", Integer, primary_key=True, Identity(start=100, increment=10)
'id',
Integer,
primary_key=True,
Identity(start=100, increment=10)
),
Column("name", String(20)),
Column('name', String(20))
)
The CREATE TABLE for the above :class:`_schema.Table` object would be:
@@ -160,7 +154,7 @@ The CREATE TABLE for the above :class:`_schema.Table` object would be:
CREATE TABLE test (
id INTEGER NOT NULL IDENTITY(100,10) PRIMARY KEY,
name VARCHAR(20) NULL,
)
)
.. note::
@@ -193,7 +187,6 @@ type deployed to the SQL Server database can be specified as ``Numeric`` using
Base = declarative_base()
class TestTable(Base):
__tablename__ = "test"
id = Column(
@@ -219,9 +212,8 @@ integer values in Python 3), use :class:`_types.TypeDecorator` as follows::
from sqlalchemy import TypeDecorator
class NumericAsInteger(TypeDecorator):
"normalize floating point return values into ints"
'''normalize floating point return values into ints'''
impl = Numeric(10, 0, asdecimal=False)
cache_ok = True
@@ -231,7 +223,6 @@ integer values in Python 3), use :class:`_types.TypeDecorator` as follows::
value = int(value)
return value
class TestTable(Base):
__tablename__ = "test"
id = Column(
@@ -280,11 +271,11 @@ The process for fetching this value has several variants:
fetched in order to receive the value. Given a table as::
t = Table(
"t",
't',
metadata,
Column("id", Integer, primary_key=True),
Column("x", Integer),
implicit_returning=False,
Column('id', Integer, primary_key=True),
Column('x', Integer),
implicit_returning=False
)
an INSERT will look like:
@@ -310,13 +301,12 @@ statement proceeding, and ``SET IDENTITY_INSERT OFF`` subsequent to the
execution. Given this example::
m = MetaData()
t = Table(
"t", m, Column("id", Integer, primary_key=True), Column("x", Integer)
)
t = Table('t', m, Column('id', Integer, primary_key=True),
Column('x', Integer))
m.create_all(engine)
with engine.begin() as conn:
conn.execute(t.insert(), {"id": 1, "x": 1}, {"id": 2, "x": 2})
conn.execute(t.insert(), {'id': 1, 'x':1}, {'id':2, 'x':2})
The above column will be created with IDENTITY, however the INSERT statement
we emit is specifying explicit values. In the echo output we can see
@@ -352,11 +342,7 @@ The :class:`.Sequence` object creates "real" sequences, i.e.,
>>> from sqlalchemy import Sequence
>>> from sqlalchemy.schema import CreateSequence
>>> from sqlalchemy.dialects import mssql
>>> print(
... CreateSequence(Sequence("my_seq", start=1)).compile(
... dialect=mssql.dialect()
... )
... )
>>> print(CreateSequence(Sequence("my_seq", start=1)).compile(dialect=mssql.dialect()))
{printsql}CREATE SEQUENCE my_seq START WITH 1
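As a hedged illustration (not part of this changeset), such a sequence can
also drive a non-primary-key integer column through ``server_default``::

    from sqlalchemy import Column, Integer, MetaData, Sequence, Table

    m = MetaData()
    seq = Sequence("my_seq", start=1)
    t = Table(
        "t",
        m,
        Column("id", Integer, primary_key=True),
        # the sequence supplies the value server-side on INSERT
        Column("counter", Integer, seq, server_default=seq.next_value()),
    )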
For integer primary key generation, SQL Server's ``IDENTITY`` construct should
@@ -390,12 +376,12 @@ more than one backend without using dialect-specific types.
To build a SQL Server VARCHAR or NVARCHAR with MAX length, use None::
my_table = Table(
"my_table",
metadata,
Column("my_data", VARCHAR(None)),
Column("my_n_data", NVARCHAR(None)),
'my_table', metadata,
Column('my_data', VARCHAR(None)),
Column('my_n_data', NVARCHAR(None))
)
Collation Support
-----------------
@@ -403,13 +389,10 @@ Character collations are supported by the base string types,
specified by the string argument "collation"::
from sqlalchemy import VARCHAR
Column("login", VARCHAR(32, collation="Latin1_General_CI_AS"))
Column('login', VARCHAR(32, collation='Latin1_General_CI_AS'))
When such a column is associated with a :class:`_schema.Table`, the
CREATE TABLE statement for this column will yield:
.. sourcecode:: sql
CREATE TABLE statement for this column will yield::
login VARCHAR(32) COLLATE Latin1_General_CI_AS NULL
@@ -429,9 +412,7 @@ versions when no OFFSET clause is present. A statement such as::
select(some_table).limit(5)
will render similarly to:
.. sourcecode:: sql
will render similarly to::
SELECT TOP 5 col1, col2.. FROM table
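The rendering can be verified locally; a sketch, where ``some_table`` is a
hypothetical two-column table and ``literal_binds`` inlines the TOP value
instead of a bind placeholder::

    from sqlalchemy import Column, Integer, MetaData, Table, select
    from sqlalchemy.dialects import mssql

    some_table = Table(
        "some_table", MetaData(), Column("col1", Integer), Column("col2", Integer)
    )
    stmt = select(some_table).limit(5)
    print(stmt.compile(dialect=mssql.dialect(), compile_kwargs={"literal_binds": True}))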
@@ -441,9 +422,7 @@ LIMIT and OFFSET, or just OFFSET alone, will be rendered using the
select(some_table).order_by(some_table.c.col3).limit(5).offset(10)
will render similarly to:
.. sourcecode:: sql
will render similarly to::
SELECT anon_1.col1, anon_1.col2 FROM (SELECT col1, col2,
ROW_NUMBER() OVER (ORDER BY col3) AS
@@ -496,13 +475,16 @@ each new connection.
To set isolation level using :func:`_sa.create_engine`::
engine = create_engine(
"mssql+pyodbc://scott:tiger@ms_2008", isolation_level="REPEATABLE READ"
"mssql+pyodbc://scott:tiger@ms_2008",
isolation_level="REPEATABLE READ"
)
To set using per-connection execution options::
connection = engine.connect()
connection = connection.execution_options(isolation_level="READ COMMITTED")
connection = connection.execution_options(
isolation_level="READ COMMITTED"
)
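The level actually in effect can then be read back from the connection; a
minimal sketch, assuming the ``engine`` from above::

    with engine.connect() as connection:
        connection = connection.execution_options(
            isolation_level="READ COMMITTED"
        )
        # reports the isolation level active on this connection
        print(connection.get_isolation_level())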
Valid values for ``isolation_level`` include:
@@ -552,6 +534,7 @@ will remain consistent with the state of the transaction::
mssql_engine = create_engine(
"mssql+pyodbc://scott:tiger^5HHH@mssql2017:1433/test?driver=ODBC+Driver+17+for+SQL+Server",
# disable default reset-on-return scheme
pool_reset_on_return=None,
)
@@ -580,17 +563,13 @@ Nullability
-----------
MSSQL has support for three levels of column nullability. The default
nullability allows nulls and is explicit in the CREATE TABLE
construct:
.. sourcecode:: sql
construct::
name VARCHAR(20) NULL
If ``nullable=None`` is specified then no specification is made. In
other words the database's configured default is used. This will
render:
.. sourcecode:: sql
render::
name VARCHAR(20)
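All three levels map onto the :paramref:`_schema.Column.nullable` parameter;
a compact sketch on a hypothetical table::

    from sqlalchemy import Column, MetaData, String, Table

    t = Table(
        "t",
        MetaData(),
        Column("a", String(20), nullable=True),  # renders: a VARCHAR(20) NULL
        Column("b", String(20), nullable=False),  # renders: b VARCHAR(20) NOT NULL
        Column("c", String(20), nullable=None),  # renders: c VARCHAR(20)
    )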
@@ -646,9 +625,8 @@ behavior of this flag is as follows:
* The flag can be set to either ``True`` or ``False`` when the dialect
is created, typically via :func:`_sa.create_engine`::
eng = create_engine(
"mssql+pymssql://user:pass@host/db", deprecate_large_types=True
)
eng = create_engine("mssql+pymssql://user:pass@host/db",
deprecate_large_types=True)
* Complete control over whether the "old" or "new" types are rendered is
available in all SQLAlchemy versions by using the UPPERCASE type objects
@@ -670,10 +648,9 @@ at once using the :paramref:`_schema.Table.schema` argument of
:class:`_schema.Table`::
Table(
"some_table",
metadata,
"some_table", metadata,
Column("q", String(50)),
schema="mydatabase.dbo",
schema="mydatabase.dbo"
)
When performing operations such as table or component reflection, a schema
@@ -685,10 +662,9 @@ components will be quoted separately for case sensitive names and other
special characters. Given an argument as below::
Table(
"some_table",
metadata,
"some_table", metadata,
Column("q", String(50)),
schema="MyDataBase.dbo",
schema="MyDataBase.dbo"
)
The above schema would be rendered as ``[MyDataBase].dbo``, and also in
@@ -701,22 +677,21 @@ Below, the "owner" will be considered as ``MyDataBase.dbo`` and the
"database" will be None::
Table(
"some_table",
metadata,
"some_table", metadata,
Column("q", String(50)),
schema="[MyDataBase.dbo]",
schema="[MyDataBase.dbo]"
)
To individually specify both database and owner name with special characters
or embedded dots, use two sets of brackets::
Table(
"some_table",
metadata,
"some_table", metadata,
Column("q", String(50)),
schema="[MyDataBase.Period].[MyOwner.Dot]",
schema="[MyDataBase.Period].[MyOwner.Dot]"
)
.. versionchanged:: 1.2 the SQL Server dialect now treats brackets as
identifier delimiters splitting the schema into separate database
and owner tokens, to allow dots within either name itself.
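The resulting quoting can be spot-checked without a database; a sketch
assuming the bracketed schema above::

    from sqlalchemy import Column, MetaData, String, Table, select
    from sqlalchemy.dialects import mssql

    t = Table(
        "some_table",
        MetaData(),
        Column("q", String(50)),
        schema="[MyDataBase.Period].[MyOwner.Dot]",
    )
    # the two bracketed tokens are kept as separate database and owner names
    print(select(t).compile(dialect=mssql.dialect()))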
@@ -731,11 +706,10 @@ schema-qualified table would be auto-aliased when used in a
SELECT statement; given a table::
account_table = Table(
"account",
metadata,
Column("id", Integer, primary_key=True),
Column("info", String(100)),
schema="customer_schema",
'account', metadata,
Column('id', Integer, primary_key=True),
Column('info', String(100)),
schema="customer_schema"
)
this legacy mode of rendering would assume that "customer_schema.account"
@@ -778,55 +752,37 @@ which renders the index as ``CREATE CLUSTERED INDEX my_index ON table (x)``.
To generate a clustered primary key use::
Table(
"my_table",
metadata,
Column("x", ...),
Column("y", ...),
PrimaryKeyConstraint("x", "y", mssql_clustered=True),
)
Table('my_table', metadata,
Column('x', ...),
Column('y', ...),
PrimaryKeyConstraint("x", "y", mssql_clustered=True))
which will render the table, for example, as:
which will render the table, for example, as::
.. sourcecode:: sql
CREATE TABLE my_table (
x INTEGER NOT NULL,
y INTEGER NOT NULL,
PRIMARY KEY CLUSTERED (x, y)
)
CREATE TABLE my_table (x INTEGER NOT NULL, y INTEGER NOT NULL,
PRIMARY KEY CLUSTERED (x, y))
Similarly, we can generate a clustered unique constraint using::
Table(
"my_table",
metadata,
Column("x", ...),
Column("y", ...),
PrimaryKeyConstraint("x"),
UniqueConstraint("y", mssql_clustered=True),
)
Table('my_table', metadata,
Column('x', ...),
Column('y', ...),
PrimaryKeyConstraint("x"),
UniqueConstraint("y", mssql_clustered=True),
)
To explicitly request a non-clustered primary key (for example, when
a separate clustered index is desired), use::
Table(
"my_table",
metadata,
Column("x", ...),
Column("y", ...),
PrimaryKeyConstraint("x", "y", mssql_clustered=False),
)
Table('my_table', metadata,
Column('x', ...),
Column('y', ...),
PrimaryKeyConstraint("x", "y", mssql_clustered=False))
which will render the table, for example, as:
which will render the table, for example, as::
.. sourcecode:: sql
CREATE TABLE my_table (
x INTEGER NOT NULL,
y INTEGER NOT NULL,
PRIMARY KEY NONCLUSTERED (x, y)
)
CREATE TABLE my_table (x INTEGER NOT NULL, y INTEGER NOT NULL,
PRIMARY KEY NONCLUSTERED (x, y))
Columnstore Index Support
-------------------------
@@ -864,7 +820,7 @@ INCLUDE
The ``mssql_include`` option renders INCLUDE(colname) for the given string
names::
Index("my_index", table.c.x, mssql_include=["y"])
Index("my_index", table.c.x, mssql_include=['y'])
would render the index as ``CREATE INDEX my_index ON table (x) INCLUDE (y)``
@@ -919,19 +875,18 @@ To disable the usage of OUTPUT INSERTED on a per-table basis,
specify ``implicit_returning=False`` for each :class:`_schema.Table`
which has triggers::
Table(
"mytable",
metadata,
Column("id", Integer, primary_key=True),
Table('mytable', metadata,
Column('id', Integer, primary_key=True),
# ...,
implicit_returning=False,
implicit_returning=False
)
Declarative form::
class MyClass(Base):
# ...
__table_args__ = {"implicit_returning": False}
__table_args__ = {'implicit_returning':False}
.. _mssql_rowcount_versioning:
@@ -965,9 +920,7 @@ isolation mode that locks entire tables, and causes even mildly concurrent
applications to have long held locks and frequent deadlocks.
Enabling snapshot isolation for the database as a whole is recommended
for modern levels of concurrency support. This is accomplished via the
following ALTER DATABASE commands executed at the SQL prompt:
.. sourcecode:: sql
following ALTER DATABASE commands executed at the SQL prompt::
ALTER DATABASE MyDatabase SET ALLOW_SNAPSHOT_ISOLATION ON
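This one-time administrative command can also be emitted through SQLAlchemy;
a sketch, assuming an account with ALTER DATABASE privileges and a
hypothetical DSN, using AUTOCOMMIT so the statement runs outside a
transaction::

    from sqlalchemy import create_engine, text

    admin_engine = create_engine(
        "mssql+pyodbc://scott:tiger@mydsn",  # hypothetical DSN
        isolation_level="AUTOCOMMIT",
    )
    with admin_engine.connect() as conn:
        conn.execute(
            text("ALTER DATABASE MyDatabase SET ALLOW_SNAPSHOT_ISOLATION ON")
        )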
@@ -1473,6 +1426,7 @@ class ROWVERSION(TIMESTAMP):
class NTEXT(sqltypes.UnicodeText):
"""MSSQL NTEXT type, for variable-length unicode text up to 2^30
characters."""
@@ -1597,11 +1551,44 @@ class MSUUid(sqltypes.Uuid):
def process(value):
return f"""'{
value.replace("-", "").replace("'", "''")
}'"""
value.replace("-", "").replace("'", "''")
}'"""
return process
def _sentinel_value_resolver(self, dialect):
"""Return a callable that will receive the uuid object or string
as it is normally passed to the DB in the parameter set, after
bind_processor() is called. Convert this value to match
what it would be as coming back from an INSERT..OUTPUT inserted.
For the UUID type, there are four varieties of settings, so here
we seek to convert to the string or UUID representation that comes
back from the driver.
"""
character_based_uuid = (
not dialect.supports_native_uuid or not self.native_uuid
)
if character_based_uuid:
if self.native_uuid:
# for pyodbc, uuid.uuid() objects are accepted for incoming
# data, as well as strings. but the driver will always return
# uppercase strings in result sets.
def process(value):
return str(value).upper()
else:
def process(value):
return str(value)
return process
else:
# for pymssql, we get uuid.uuid() objects back.
return None
class UNIQUEIDENTIFIER(sqltypes.Uuid[sqltypes._UUID_RETURN]):
__visit_name__ = "UNIQUEIDENTIFIER"
@@ -1609,12 +1596,12 @@ class UNIQUEIDENTIFIER(sqltypes.Uuid[sqltypes._UUID_RETURN]):
@overload
def __init__(
self: UNIQUEIDENTIFIER[_python_UUID], as_uuid: Literal[True] = ...
): ...
):
...
@overload
def __init__(
self: UNIQUEIDENTIFIER[str], as_uuid: Literal[False] = ...
): ...
def __init__(self: UNIQUEIDENTIFIER[str], as_uuid: Literal[False] = ...):
...
def __init__(self, as_uuid: bool = True):
"""Construct a :class:`_mssql.UNIQUEIDENTIFIER` type.
@@ -1865,6 +1852,7 @@ class MSExecutionContext(default.DefaultExecutionContext):
_enable_identity_insert = False
_select_lastrowid = False
_lastrowid = None
_rowcount = None
dialect: MSDialect
@@ -1984,6 +1972,13 @@ class MSExecutionContext(default.DefaultExecutionContext):
def get_lastrowid(self):
return self._lastrowid
@property
def rowcount(self):
if self._rowcount is not None:
return self._rowcount
else:
return self.cursor.rowcount
def handle_dbapi_exception(self, e):
if self._enable_identity_insert:
try:
@@ -2035,10 +2030,6 @@ class MSSQLCompiler(compiler.SQLCompiler):
self.tablealiases = {}
super().__init__(*args, **kwargs)
def _format_frame_clause(self, range_, **kw):
kw["literal_execute"] = True
return super()._format_frame_clause(range_, **kw)
def _with_legacy_schema_aliasing(fn):
def decorate(self, *arg, **kw):
if self.dialect.legacy_schema_aliasing:
@@ -2492,12 +2483,10 @@ class MSSQLCompiler(compiler.SQLCompiler):
type_expression = "ELSE CAST(JSON_VALUE(%s, %s) AS %s)" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
(
"FLOAT"
if isinstance(binary.type, sqltypes.Float)
else "NUMERIC(%s, %s)"
% (binary.type.precision, binary.type.scale)
),
"FLOAT"
if isinstance(binary.type, sqltypes.Float)
else "NUMERIC(%s, %s)"
% (binary.type.precision, binary.type.scale),
)
elif binary.type._type_affinity is sqltypes.Boolean:
# the NULL handling is particularly weird with boolean, so
@@ -2533,6 +2522,7 @@ class MSSQLCompiler(compiler.SQLCompiler):
class MSSQLStrictCompiler(MSSQLCompiler):
"""A subclass of MSSQLCompiler which disables the usage of bind
parameters where not allowed natively by MS-SQL.
@@ -3632,36 +3622,27 @@ where
@reflection.cache
@_db_plus_owner
def get_columns(self, connection, tablename, dbname, owner, schema, **kw):
sys_columns = ischema.sys_columns
sys_types = ischema.sys_types
sys_default_constraints = ischema.sys_default_constraints
computed_cols = ischema.computed_columns
identity_cols = ischema.identity_columns
extended_properties = ischema.extended_properties
# to access sys tables, need an object_id.
# object_id() can normally match to the unquoted name even if it
# has special characters. however it also accepts quoted names,
# which means for the special case that the name itself has
# "quotes" (e.g. brackets for SQL Server) we need to "quote" (e.g.
# bracket) that name anyway. Fixed as part of #12654
is_temp_table = tablename.startswith("#")
if is_temp_table:
owner, tablename = self._get_internal_temp_table_name(
connection, tablename
)
object_id_tokens = [self.identifier_preparer.quote(tablename)]
columns = ischema.mssql_temp_table_columns
else:
columns = ischema.columns
computed_cols = ischema.computed_columns
identity_cols = ischema.identity_columns
if owner:
object_id_tokens.insert(0, self.identifier_preparer.quote(owner))
if is_temp_table:
object_id_tokens.insert(0, "tempdb")
object_id = func.object_id(".".join(object_id_tokens))
whereclause = sys_columns.c.object_id == object_id
whereclause = sql.and_(
columns.c.table_name == tablename,
columns.c.table_schema == owner,
)
full_name = columns.c.table_schema + "." + columns.c.table_name
else:
whereclause = columns.c.table_name == tablename
full_name = columns.c.table_name
if self._supports_nvarchar_max:
computed_definition = computed_cols.c.definition
@@ -3671,112 +3652,92 @@ where
computed_cols.c.definition, NVARCHAR(4000)
)
object_id = func.object_id(full_name)
s = (
sql.select(
sys_columns.c.name,
sys_types.c.name,
sys_columns.c.is_nullable,
sys_columns.c.max_length,
sys_columns.c.precision,
sys_columns.c.scale,
sys_default_constraints.c.definition,
sys_columns.c.collation_name,
columns.c.column_name,
columns.c.data_type,
columns.c.is_nullable,
columns.c.character_maximum_length,
columns.c.numeric_precision,
columns.c.numeric_scale,
columns.c.column_default,
columns.c.collation_name,
computed_definition,
computed_cols.c.is_persisted,
identity_cols.c.is_identity,
identity_cols.c.seed_value,
identity_cols.c.increment_value,
extended_properties.c.value.label("comment"),
)
.select_from(sys_columns)
.join(
sys_types,
onclause=sys_columns.c.user_type_id
== sys_types.c.user_type_id,
)
.outerjoin(
sys_default_constraints,
sql.and_(
sys_default_constraints.c.object_id
== sys_columns.c.default_object_id,
sys_default_constraints.c.parent_column_id
== sys_columns.c.column_id,
),
ischema.extended_properties.c.value.label("comment"),
)
.select_from(columns)
.outerjoin(
computed_cols,
onclause=sql.and_(
computed_cols.c.object_id == sys_columns.c.object_id,
computed_cols.c.column_id == sys_columns.c.column_id,
computed_cols.c.object_id == object_id,
computed_cols.c.name
== columns.c.column_name.collate("DATABASE_DEFAULT"),
),
)
.outerjoin(
identity_cols,
onclause=sql.and_(
identity_cols.c.object_id == sys_columns.c.object_id,
identity_cols.c.column_id == sys_columns.c.column_id,
identity_cols.c.object_id == object_id,
identity_cols.c.name
== columns.c.column_name.collate("DATABASE_DEFAULT"),
),
)
.outerjoin(
extended_properties,
ischema.extended_properties,
onclause=sql.and_(
extended_properties.c["class"] == 1,
extended_properties.c.name == "MS_Description",
sys_columns.c.object_id == extended_properties.c.major_id,
sys_columns.c.column_id == extended_properties.c.minor_id,
ischema.extended_properties.c["class"] == 1,
ischema.extended_properties.c.major_id == object_id,
ischema.extended_properties.c.minor_id
== columns.c.ordinal_position,
ischema.extended_properties.c.name == "MS_Description",
),
)
.where(whereclause)
.order_by(sys_columns.c.column_id)
.order_by(columns.c.ordinal_position)
)
if is_temp_table:
exec_opts = {"schema_translate_map": {"sys": "tempdb.sys"}}
else:
exec_opts = {"schema_translate_map": {}}
c = connection.execution_options(**exec_opts).execute(s)
c = connection.execution_options(future_result=True).execute(s)
cols = []
for row in c.mappings():
name = row[sys_columns.c.name]
type_ = row[sys_types.c.name]
nullable = row[sys_columns.c.is_nullable] == 1
maxlen = row[sys_columns.c.max_length]
numericprec = row[sys_columns.c.precision]
numericscale = row[sys_columns.c.scale]
default = row[sys_default_constraints.c.definition]
collation = row[sys_columns.c.collation_name]
name = row[columns.c.column_name]
type_ = row[columns.c.data_type]
nullable = row[columns.c.is_nullable] == "YES"
charlen = row[columns.c.character_maximum_length]
numericprec = row[columns.c.numeric_precision]
numericscale = row[columns.c.numeric_scale]
default = row[columns.c.column_default]
collation = row[columns.c.collation_name]
definition = row[computed_definition]
is_persisted = row[computed_cols.c.is_persisted]
is_identity = row[identity_cols.c.is_identity]
identity_start = row[identity_cols.c.seed_value]
identity_increment = row[identity_cols.c.increment_value]
comment = row[extended_properties.c.value]
comment = row[ischema.extended_properties.c.value]
coltype = self.ischema_names.get(type_, None)
kwargs = {}
if coltype in (
MSString,
MSChar,
MSNVarchar,
MSNChar,
MSText,
MSNText,
MSBinary,
MSVarBinary,
sqltypes.LargeBinary,
):
kwargs["length"] = maxlen if maxlen != -1 else None
elif coltype in (
MSString,
MSChar,
MSText,
):
kwargs["length"] = maxlen if maxlen != -1 else None
if collation:
kwargs["collation"] = collation
elif coltype in (
MSNVarchar,
MSNChar,
MSNText,
):
kwargs["length"] = maxlen // 2 if maxlen != -1 else None
if charlen == -1:
charlen = None
kwargs["length"] = charlen
if collation:
kwargs["collation"] = collation
@@ -4020,8 +3981,10 @@ index_info AS (
)
# group rows by constraint ID, to handle multi-column FKs
fkeys = util.defaultdict(
lambda: {
fkeys = []
def fkey_rec():
return {
"name": None,
"constrained_columns": [],
"referred_schema": None,
@@ -4029,7 +3992,8 @@ index_info AS (
"referred_columns": [],
"options": {},
}
)
fkeys = util.defaultdict(fkey_rec)
for r in connection.execute(s).all():
(