main commit

2025-10-16 16:30:25 +09:00
parent 91c7e04474
commit 537e7b363f
1146 changed files with 45926 additions and 77196 deletions


@@ -1,5 +1,5 @@
# dialects/mssql/__init__.py
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# mssql/__init__.py
# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under


@@ -1,5 +1,5 @@
# dialects/mssql/aioodbc.py
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# mssql/aioodbc.py
# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -32,12 +32,13 @@ This dialect should normally be used only with the
styles are otherwise equivalent to those documented in the pyodbc section::
from sqlalchemy.ext.asyncio import create_async_engine
engine = create_async_engine(
"mssql+aioodbc://scott:tiger@mssql2017:1433/test?"
"driver=ODBC+Driver+18+for+SQL+Server&TrustServerCertificate=yes"
)
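A minimal end-to-end sketch of driving such an engine (for illustration only; the host, credentials and driver name are assumptions)::

    import asyncio

    from sqlalchemy import text
    from sqlalchemy.ext.asyncio import create_async_engine


    async def main():
        # assumed DSN; adjust host, credentials and driver to your setup
        engine = create_async_engine(
            "mssql+aioodbc://scott:tiger@mssql2017:1433/test?"
            "driver=ODBC+Driver+18+for+SQL+Server&TrustServerCertificate=yes"
        )
        async with engine.connect() as conn:
            result = await conn.execute(text("SELECT 1"))
            print(result.scalar())
        await engine.dispose()


    asyncio.run(main())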
"""
from __future__ import annotations


@@ -1,5 +1,5 @@
# dialects/mssql/base.py
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# mssql/base.py
# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -9,6 +9,7 @@
"""
.. dialect:: mssql
:name: Microsoft SQL Server
:full_support: 2017
:normal_support: 2012+
:best_effort: 2005+
@@ -39,12 +40,9 @@ considered to be the identity column - unless it is associated with a
from sqlalchemy import Table, MetaData, Column, Integer
m = MetaData()
t = Table(
"t",
m,
Column("id", Integer, primary_key=True),
Column("x", Integer),
)
t = Table('t', m,
Column('id', Integer, primary_key=True),
Column('x', Integer))
m.create_all(engine)
The above example will generate DDL as:
@@ -62,12 +60,9 @@ specify ``False`` for the :paramref:`_schema.Column.autoincrement` flag,
on the first integer primary key column::
m = MetaData()
t = Table(
"t",
m,
Column("id", Integer, primary_key=True, autoincrement=False),
Column("x", Integer),
)
t = Table('t', m,
Column('id', Integer, primary_key=True, autoincrement=False),
Column('x', Integer))
m.create_all(engine)
To add the ``IDENTITY`` keyword to a non-primary key column, specify
@@ -77,12 +72,9 @@ To add the ``IDENTITY`` keyword to a non-primary key column, specify
is set to ``False`` on any integer primary key column::
m = MetaData()
t = Table(
"t",
m,
Column("id", Integer, primary_key=True, autoincrement=False),
Column("x", Integer, autoincrement=True),
)
t = Table('t', m,
Column('id', Integer, primary_key=True, autoincrement=False),
Column('x', Integer, autoincrement=True))
m.create_all(engine)
.. versionchanged:: 1.4 Added :class:`_schema.Identity` construct
@@ -145,12 +137,14 @@ parameters passed to the :class:`_schema.Identity` object::
from sqlalchemy import Table, Integer, Column, Identity
test = Table(
"test",
metadata,
'test', metadata,
Column(
"id", Integer, primary_key=True, Identity(start=100, increment=10)
'id',
Integer,
primary_key=True,
Identity(start=100, increment=10)
),
Column("name", String(20)),
Column('name', String(20))
)
The CREATE TABLE for the above :class:`_schema.Table` object would be:
@@ -160,7 +154,7 @@ The CREATE TABLE for the above :class:`_schema.Table` object would be:
CREATE TABLE test (
id INTEGER NOT NULL IDENTITY(100,10) PRIMARY KEY,
name VARCHAR(20) NULL,
)
.. note::
@@ -193,7 +187,6 @@ type deployed to the SQL Server database can be specified as ``Numeric`` using
Base = declarative_base()
class TestTable(Base):
__tablename__ = "test"
id = Column(
@@ -219,9 +212,8 @@ integer values in Python 3), use :class:`_types.TypeDecorator` as follows::
from sqlalchemy import TypeDecorator
class NumericAsInteger(TypeDecorator):
"normalize floating point return values into ints"
'''normalize floating point return values into ints'''
impl = Numeric(10, 0, asdecimal=False)
cache_ok = True
@@ -231,7 +223,6 @@ integer values in Python 3), use :class:`_types.TypeDecorator` as follows::
value = int(value)
return value
class TestTable(Base):
__tablename__ = "test"
id = Column(
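For reference, a self-contained version of the ``TypeDecorator`` recipe this hunk is reformatting (reconstructed for readability; the mapped column layout is an assumption)::

    from sqlalchemy import Column, Numeric, TypeDecorator
    from sqlalchemy.orm import declarative_base

    Base = declarative_base()


    class NumericAsInteger(TypeDecorator):
        "normalize floating point return values into ints"

        impl = Numeric(10, 0, asdecimal=False)
        cache_ok = True

        def process_result_value(self, value, dialect):
            if value is not None:
                # the driver returns Numeric(10, 0, asdecimal=False)
                # values as floats; coerce them back to int
                value = int(value)
            return value


    class TestTable(Base):
        __tablename__ = "test"
        id = Column(NumericAsInteger, primary_key=True, autoincrement=True)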
@@ -280,11 +271,11 @@ The process for fetching this value has several variants:
fetched in order to receive the value. Given a table as::
t = Table(
"t",
't',
metadata,
Column("id", Integer, primary_key=True),
Column("x", Integer),
implicit_returning=False,
Column('id', Integer, primary_key=True),
Column('x', Integer),
implicit_returning=False
)
an INSERT will look like:
@@ -310,13 +301,12 @@ statement proceeding, and ``SET IDENTITY_INSERT OFF`` subsequent to the
execution. Given this example::
m = MetaData()
t = Table(
"t", m, Column("id", Integer, primary_key=True), Column("x", Integer)
)
t = Table('t', m, Column('id', Integer, primary_key=True),
Column('x', Integer))
m.create_all(engine)
with engine.begin() as conn:
conn.execute(t.insert(), {"id": 1, "x": 1}, {"id": 2, "x": 2})
conn.execute(t.insert(), {'id': 1, 'x':1}, {'id':2, 'x':2})
The above column will be created with IDENTITY, however the INSERT statement
we emit is specifying explicit values. In the echo output we can see
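A runnable sketch of that round trip (the URL is an assumption; ``echo=True`` makes the wrapping statements visible in the log)::

    from sqlalchemy import Column, Integer, MetaData, Table, create_engine

    engine = create_engine(
        "mssql+pyodbc://scott:tiger@mssql2017:1433/test"
        "?driver=ODBC+Driver+17+for+SQL+Server",
        echo=True,
    )
    m = MetaData()
    t = Table(
        "t", m, Column("id", Integer, primary_key=True), Column("x", Integer)
    )
    m.create_all(engine)

    with engine.begin() as conn:
        # explicit values for the IDENTITY column cause the dialect to
        # emit SET IDENTITY_INSERT t ON before, and SET IDENTITY_INSERT
        # t OFF after, this INSERT
        conn.execute(t.insert(), [{"id": 1, "x": 1}, {"id": 2, "x": 2}])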
@@ -352,11 +342,7 @@ The :class:`.Sequence` object creates "real" sequences, i.e.,
>>> from sqlalchemy import Sequence
>>> from sqlalchemy.schema import CreateSequence
>>> from sqlalchemy.dialects import mssql
>>> print(
... CreateSequence(Sequence("my_seq", start=1)).compile(
... dialect=mssql.dialect()
... )
... )
>>> print(CreateSequence(Sequence("my_seq", start=1)).compile(dialect=mssql.dialect()))
{printsql}CREATE SEQUENCE my_seq START WITH 1
For integer primary key generation, SQL Server's ``IDENTITY`` construct should
@@ -390,12 +376,12 @@ more than one backend without using dialect-specific types.
To build a SQL Server VARCHAR or NVARCHAR with MAX length, use None::
my_table = Table(
"my_table",
metadata,
Column("my_data", VARCHAR(None)),
Column("my_n_data", NVARCHAR(None)),
'my_table', metadata,
Column('my_data', VARCHAR(None)),
Column('my_n_data', NVARCHAR(None))
)
Collation Support
-----------------
@@ -403,13 +389,10 @@ Character collations are supported by the base string types,
specified by the string argument "collation"::
from sqlalchemy import VARCHAR
Column("login", VARCHAR(32, collation="Latin1_General_CI_AS"))
Column('login', VARCHAR(32, collation='Latin1_General_CI_AS'))
When such a column is associated with a :class:`_schema.Table`, the
CREATE TABLE statement for this column will yield:
.. sourcecode:: sql
CREATE TABLE statement for this column will yield::
login VARCHAR(32) COLLATE Latin1_General_CI_AS NULL
@@ -429,9 +412,7 @@ versions when no OFFSET clause is present. A statement such as::
select(some_table).limit(5)
will render similarly to:
.. sourcecode:: sql
will render similarly to::
SELECT TOP 5 col1, col2.. FROM table
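The rendering is easy to inspect without a live server by compiling against the dialect (a sketch; the table definition here is an assumption)::

    from sqlalchemy import Column, Integer, MetaData, Table, select
    from sqlalchemy.dialects import mssql

    some_table = Table(
        "some_table",
        MetaData(),
        Column("col1", Integer),
        Column("col2", Integer),
    )
    stmt = select(some_table).limit(5)
    print(
        stmt.compile(
            dialect=mssql.dialect(), compile_kwargs={"literal_binds": True}
        )
    )
    # output similar to:
    # SELECT TOP 5 some_table.col1, some_table.col2 FROM some_table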
@@ -441,9 +422,7 @@ LIMIT and OFFSET, or just OFFSET alone, will be rendered using the
select(some_table).order_by(some_table.c.col3).limit(5).offset(10)
will render similarly to:
.. sourcecode:: sql
will render similarly to::
SELECT anon_1.col1, anon_1.col2 FROM (SELECT col1, col2,
ROW_NUMBER() OVER (ORDER BY col3) AS
@@ -496,13 +475,16 @@ each new connection.
To set isolation level using :func:`_sa.create_engine`::
engine = create_engine(
"mssql+pyodbc://scott:tiger@ms_2008", isolation_level="REPEATABLE READ"
"mssql+pyodbc://scott:tiger@ms_2008",
isolation_level="REPEATABLE READ"
)
To set using per-connection execution options::
connection = engine.connect()
connection = connection.execution_options(isolation_level="READ COMMITTED")
connection = connection.execution_options(
isolation_level="READ COMMITTED"
)
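To confirm the level actually in effect, the connection can be interrogated directly (a sketch, assuming the ``engine`` from above)::

    with engine.connect() as conn:
        conn = conn.execution_options(isolation_level="SERIALIZABLE")
        print(conn.get_isolation_level())    # e.g. "SERIALIZABLE"
        print(conn.default_isolation_level)  # the engine-wide default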
Valid values for ``isolation_level`` include:
@@ -552,6 +534,7 @@ will remain consistent with the state of the transaction::
mssql_engine = create_engine(
"mssql+pyodbc://scott:tiger^5HHH@mssql2017:1433/test?driver=ODBC+Driver+17+for+SQL+Server",
# disable default reset-on-return scheme
pool_reset_on_return=None,
)
@@ -580,17 +563,13 @@ Nullability
-----------
MSSQL has support for three levels of column nullability. The default
nullability allows nulls and is explicit in the CREATE TABLE
construct:
.. sourcecode:: sql
construct::
name VARCHAR(20) NULL
If ``nullable=None`` is specified then no specification is made. In
other words the database's configured default is used. This will
render:
.. sourcecode:: sql
render::
name VARCHAR(20)
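The three levels map onto :paramref:`_schema.Column.nullable` as follows (a sketch for illustration)::

    from sqlalchemy import Column, MetaData, String, Table

    t = Table(
        "t",
        MetaData(),
        Column("a", String(20), nullable=True),   # a VARCHAR(20) NULL
        Column("b", String(20), nullable=False),  # b VARCHAR(20) NOT NULL
        Column("c", String(20), nullable=None),   # c VARCHAR(20)
    )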
@@ -646,9 +625,8 @@ behavior of this flag is as follows:
* The flag can be set to either ``True`` or ``False`` when the dialect
is created, typically via :func:`_sa.create_engine`::
eng = create_engine(
"mssql+pymssql://user:pass@host/db", deprecate_large_types=True
)
eng = create_engine("mssql+pymssql://user:pass@host/db",
deprecate_large_types=True)
* Complete control over whether the "old" or "new" types are rendered is
available in all SQLAlchemy versions by using the UPPERCASE type objects
@@ -670,10 +648,9 @@ at once using the :paramref:`_schema.Table.schema` argument of
:class:`_schema.Table`::
Table(
"some_table",
metadata,
"some_table", metadata,
Column("q", String(50)),
schema="mydatabase.dbo",
schema="mydatabase.dbo"
)
When performing operations such as table or component reflection, a schema
@@ -685,10 +662,9 @@ components will be quoted separately for case sensitive names and other
special characters. Given an argument as below::
Table(
"some_table",
metadata,
"some_table", metadata,
Column("q", String(50)),
schema="MyDataBase.dbo",
schema="MyDataBase.dbo"
)
The above schema would be rendered as ``[MyDataBase].dbo``, and also in
@@ -701,22 +677,21 @@ Below, the "owner" will be considered as ``MyDataBase.dbo`` and the
"database" will be None::
Table(
"some_table",
metadata,
"some_table", metadata,
Column("q", String(50)),
schema="[MyDataBase.dbo]",
schema="[MyDataBase.dbo]"
)
To individually specify both database and owner name with special characters
or embedded dots, use two sets of brackets::
Table(
"some_table",
metadata,
"some_table", metadata,
Column("q", String(50)),
schema="[MyDataBase.Period].[MyOwner.Dot]",
schema="[MyDataBase.Period].[MyOwner.Dot]"
)
.. versionchanged:: 1.2 the SQL Server dialect now treats brackets as
identifier delimiters splitting the schema into separate database
and owner tokens, to allow dots within either name itself.
@@ -731,11 +706,10 @@ schema-qualified table would be auto-aliased when used in a
SELECT statement; given a table::
account_table = Table(
"account",
metadata,
Column("id", Integer, primary_key=True),
Column("info", String(100)),
schema="customer_schema",
'account', metadata,
Column('id', Integer, primary_key=True),
Column('info', String(100)),
schema="customer_schema"
)
this legacy mode of rendering would assume that "customer_schema.account"
@@ -778,55 +752,37 @@ which renders the index as ``CREATE CLUSTERED INDEX my_index ON table (x)``.
To generate a clustered primary key use::
Table(
"my_table",
metadata,
Column("x", ...),
Column("y", ...),
PrimaryKeyConstraint("x", "y", mssql_clustered=True),
)
Table('my_table', metadata,
Column('x', ...),
Column('y', ...),
PrimaryKeyConstraint("x", "y", mssql_clustered=True))
which will render the table, for example, as:
which will render the table, for example, as::
.. sourcecode:: sql
CREATE TABLE my_table (
x INTEGER NOT NULL,
y INTEGER NOT NULL,
PRIMARY KEY CLUSTERED (x, y)
)
CREATE TABLE my_table (x INTEGER NOT NULL, y INTEGER NOT NULL,
PRIMARY KEY CLUSTERED (x, y))
Similarly, we can generate a clustered unique constraint using::
Table(
"my_table",
metadata,
Column("x", ...),
Column("y", ...),
PrimaryKeyConstraint("x"),
UniqueConstraint("y", mssql_clustered=True),
)
Table('my_table', metadata,
Column('x', ...),
Column('y', ...),
PrimaryKeyConstraint("x"),
UniqueConstraint("y", mssql_clustered=True),
)
To explicitly request a non-clustered primary key (for example, when
a separate clustered index is desired), use::
Table(
"my_table",
metadata,
Column("x", ...),
Column("y", ...),
PrimaryKeyConstraint("x", "y", mssql_clustered=False),
)
Table('my_table', metadata,
Column('x', ...),
Column('y', ...),
PrimaryKeyConstraint("x", "y", mssql_clustered=False))
which will render the table, for example, as:
which will render the table, for example, as::
.. sourcecode:: sql
CREATE TABLE my_table (
x INTEGER NOT NULL,
y INTEGER NOT NULL,
PRIMARY KEY NONCLUSTERED (x, y)
)
CREATE TABLE my_table (x INTEGER NOT NULL, y INTEGER NOT NULL,
PRIMARY KEY NONCLUSTERED (x, y))
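The same flag applies to :class:`.Index` directly, matching the ``CREATE CLUSTERED INDEX`` rendering described at the top of this hunk (a sketch)::

    from sqlalchemy import Column, Index, Integer, MetaData, Table

    t = Table(
        "my_table", MetaData(), Column("x", Integer), Column("y", Integer)
    )
    Index("my_index", t.c.x, mssql_clustered=True)
    # renders: CREATE CLUSTERED INDEX my_index ON my_table (x)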
Columnstore Index Support
-------------------------
@@ -864,7 +820,7 @@ INCLUDE
The ``mssql_include`` option renders INCLUDE(colname) for the given string
names::
Index("my_index", table.c.x, mssql_include=["y"])
Index("my_index", table.c.x, mssql_include=['y'])
would render the index as ``CREATE INDEX my_index ON table (x) INCLUDE (y)``
@@ -919,19 +875,18 @@ To disable the usage of OUTPUT INSERTED on a per-table basis,
specify ``implicit_returning=False`` for each :class:`_schema.Table`
which has triggers::
Table(
"mytable",
metadata,
Column("id", Integer, primary_key=True),
Table('mytable', metadata,
Column('id', Integer, primary_key=True),
# ...,
implicit_returning=False,
implicit_returning=False
)
Declarative form::
class MyClass(Base):
# ...
__table_args__ = {"implicit_returning": False}
__table_args__ = {'implicit_returning':False}
.. _mssql_rowcount_versioning:
@@ -965,9 +920,7 @@ isolation mode that locks entire tables, and causes even mildly concurrent
applications to have long held locks and frequent deadlocks.
Enabling snapshot isolation for the database as a whole is recommended
for modern levels of concurrency support. This is accomplished via the
following ALTER DATABASE commands executed at the SQL prompt:
.. sourcecode:: sql
following ALTER DATABASE commands executed at the SQL prompt::
ALTER DATABASE MyDatabase SET ALLOW_SNAPSHOT_ISOLATION ON
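The same command can also be issued through SQLAlchemy on an AUTOCOMMIT connection (a sketch; the URL and database name are assumptions)::

    from sqlalchemy import create_engine, text

    engine = create_engine(
        "mssql+pyodbc://scott:tiger@host/MyDatabase"
        "?driver=ODBC+Driver+17+for+SQL+Server"
    )
    with engine.connect().execution_options(
        isolation_level="AUTOCOMMIT"
    ) as conn:
        conn.execute(
            text("ALTER DATABASE MyDatabase SET ALLOW_SNAPSHOT_ISOLATION ON")
        )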
@@ -1473,6 +1426,7 @@ class ROWVERSION(TIMESTAMP):
class NTEXT(sqltypes.UnicodeText):
"""MSSQL NTEXT type, for variable-length unicode text up to 2^30
characters."""
@@ -1597,11 +1551,44 @@ class MSUUid(sqltypes.Uuid):
def process(value):
return f"""'{
value.replace("-", "").replace("'", "''")
}'"""
value.replace("-", "").replace("'", "''")
}'"""
return process
def _sentinel_value_resolver(self, dialect):
"""Return a callable that will receive the uuid object or string
as it is normally passed to the DB in the parameter set, after
bind_processor() is called. Convert this value to match
what it would be as coming back from an INSERT..OUTPUT inserted.
for the UUID type, there are four varieties of settings so here
we seek to convert to the string or UUID representation that comes
back from the driver.
"""
character_based_uuid = (
not dialect.supports_native_uuid or not self.native_uuid
)
if character_based_uuid:
if self.native_uuid:
# for pyodbc, uuid.uuid() objects are accepted for incoming
# data, as well as strings. but the driver will always return
# uppercase strings in result sets.
def process(value):
return str(value).upper()
else:
def process(value):
return str(value)
return process
else:
# for pymssql, we get uuid.uuid() objects back.
return None
class UNIQUEIDENTIFIER(sqltypes.Uuid[sqltypes._UUID_RETURN]):
__visit_name__ = "UNIQUEIDENTIFIER"
@@ -1609,12 +1596,12 @@ class UNIQUEIDENTIFIER(sqltypes.Uuid[sqltypes._UUID_RETURN]):
@overload
def __init__(
self: UNIQUEIDENTIFIER[_python_UUID], as_uuid: Literal[True] = ...
): ...
):
...
@overload
def __init__(
self: UNIQUEIDENTIFIER[str], as_uuid: Literal[False] = ...
): ...
def __init__(self: UNIQUEIDENTIFIER[str], as_uuid: Literal[False] = ...):
...
def __init__(self, as_uuid: bool = True):
"""Construct a :class:`_mssql.UNIQUEIDENTIFIER` type.
@@ -1865,6 +1852,7 @@ class MSExecutionContext(default.DefaultExecutionContext):
_enable_identity_insert = False
_select_lastrowid = False
_lastrowid = None
_rowcount = None
dialect: MSDialect
@@ -1984,6 +1972,13 @@ class MSExecutionContext(default.DefaultExecutionContext):
def get_lastrowid(self):
return self._lastrowid
@property
def rowcount(self):
if self._rowcount is not None:
return self._rowcount
else:
return self.cursor.rowcount
def handle_dbapi_exception(self, e):
if self._enable_identity_insert:
try:
@@ -2035,10 +2030,6 @@ class MSSQLCompiler(compiler.SQLCompiler):
self.tablealiases = {}
super().__init__(*args, **kwargs)
def _format_frame_clause(self, range_, **kw):
kw["literal_execute"] = True
return super()._format_frame_clause(range_, **kw)
def _with_legacy_schema_aliasing(fn):
def decorate(self, *arg, **kw):
if self.dialect.legacy_schema_aliasing:
@@ -2492,12 +2483,10 @@ class MSSQLCompiler(compiler.SQLCompiler):
type_expression = "ELSE CAST(JSON_VALUE(%s, %s) AS %s)" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
(
"FLOAT"
if isinstance(binary.type, sqltypes.Float)
else "NUMERIC(%s, %s)"
% (binary.type.precision, binary.type.scale)
),
"FLOAT"
if isinstance(binary.type, sqltypes.Float)
else "NUMERIC(%s, %s)"
% (binary.type.precision, binary.type.scale),
)
elif binary.type._type_affinity is sqltypes.Boolean:
# the NULL handling is particularly weird with boolean, so
@@ -2533,6 +2522,7 @@ class MSSQLCompiler(compiler.SQLCompiler):
class MSSQLStrictCompiler(MSSQLCompiler):
"""A subclass of MSSQLCompiler which disables the usage of bind
parameters where not allowed natively by MS-SQL.
@@ -3632,36 +3622,27 @@ where
@reflection.cache
@_db_plus_owner
def get_columns(self, connection, tablename, dbname, owner, schema, **kw):
sys_columns = ischema.sys_columns
sys_types = ischema.sys_types
sys_default_constraints = ischema.sys_default_constraints
computed_cols = ischema.computed_columns
identity_cols = ischema.identity_columns
extended_properties = ischema.extended_properties
# to access sys tables, need an object_id.
# object_id() can normally match to the unquoted name even if it
# has special characters. however it also accepts quoted names,
# which means for the special case that the name itself has
# "quotes" (e.g. brackets for SQL Server) we need to "quote" (e.g.
# bracket) that name anyway. Fixed as part of #12654
is_temp_table = tablename.startswith("#")
if is_temp_table:
owner, tablename = self._get_internal_temp_table_name(
connection, tablename
)
object_id_tokens = [self.identifier_preparer.quote(tablename)]
columns = ischema.mssql_temp_table_columns
else:
columns = ischema.columns
computed_cols = ischema.computed_columns
identity_cols = ischema.identity_columns
if owner:
object_id_tokens.insert(0, self.identifier_preparer.quote(owner))
if is_temp_table:
object_id_tokens.insert(0, "tempdb")
object_id = func.object_id(".".join(object_id_tokens))
whereclause = sys_columns.c.object_id == object_id
whereclause = sql.and_(
columns.c.table_name == tablename,
columns.c.table_schema == owner,
)
full_name = columns.c.table_schema + "." + columns.c.table_name
else:
whereclause = columns.c.table_name == tablename
full_name = columns.c.table_name
if self._supports_nvarchar_max:
computed_definition = computed_cols.c.definition
@@ -3671,112 +3652,92 @@ where
computed_cols.c.definition, NVARCHAR(4000)
)
object_id = func.object_id(full_name)
s = (
sql.select(
sys_columns.c.name,
sys_types.c.name,
sys_columns.c.is_nullable,
sys_columns.c.max_length,
sys_columns.c.precision,
sys_columns.c.scale,
sys_default_constraints.c.definition,
sys_columns.c.collation_name,
columns.c.column_name,
columns.c.data_type,
columns.c.is_nullable,
columns.c.character_maximum_length,
columns.c.numeric_precision,
columns.c.numeric_scale,
columns.c.column_default,
columns.c.collation_name,
computed_definition,
computed_cols.c.is_persisted,
identity_cols.c.is_identity,
identity_cols.c.seed_value,
identity_cols.c.increment_value,
extended_properties.c.value.label("comment"),
)
.select_from(sys_columns)
.join(
sys_types,
onclause=sys_columns.c.user_type_id
== sys_types.c.user_type_id,
)
.outerjoin(
sys_default_constraints,
sql.and_(
sys_default_constraints.c.object_id
== sys_columns.c.default_object_id,
sys_default_constraints.c.parent_column_id
== sys_columns.c.column_id,
),
ischema.extended_properties.c.value.label("comment"),
)
.select_from(columns)
.outerjoin(
computed_cols,
onclause=sql.and_(
computed_cols.c.object_id == sys_columns.c.object_id,
computed_cols.c.column_id == sys_columns.c.column_id,
computed_cols.c.object_id == object_id,
computed_cols.c.name
== columns.c.column_name.collate("DATABASE_DEFAULT"),
),
)
.outerjoin(
identity_cols,
onclause=sql.and_(
identity_cols.c.object_id == sys_columns.c.object_id,
identity_cols.c.column_id == sys_columns.c.column_id,
identity_cols.c.object_id == object_id,
identity_cols.c.name
== columns.c.column_name.collate("DATABASE_DEFAULT"),
),
)
.outerjoin(
extended_properties,
ischema.extended_properties,
onclause=sql.and_(
extended_properties.c["class"] == 1,
extended_properties.c.name == "MS_Description",
sys_columns.c.object_id == extended_properties.c.major_id,
sys_columns.c.column_id == extended_properties.c.minor_id,
ischema.extended_properties.c["class"] == 1,
ischema.extended_properties.c.major_id == object_id,
ischema.extended_properties.c.minor_id
== columns.c.ordinal_position,
ischema.extended_properties.c.name == "MS_Description",
),
)
.where(whereclause)
.order_by(sys_columns.c.column_id)
.order_by(columns.c.ordinal_position)
)
if is_temp_table:
exec_opts = {"schema_translate_map": {"sys": "tempdb.sys"}}
else:
exec_opts = {"schema_translate_map": {}}
c = connection.execution_options(**exec_opts).execute(s)
c = connection.execution_options(future_result=True).execute(s)
cols = []
for row in c.mappings():
name = row[sys_columns.c.name]
type_ = row[sys_types.c.name]
nullable = row[sys_columns.c.is_nullable] == 1
maxlen = row[sys_columns.c.max_length]
numericprec = row[sys_columns.c.precision]
numericscale = row[sys_columns.c.scale]
default = row[sys_default_constraints.c.definition]
collation = row[sys_columns.c.collation_name]
name = row[columns.c.column_name]
type_ = row[columns.c.data_type]
nullable = row[columns.c.is_nullable] == "YES"
charlen = row[columns.c.character_maximum_length]
numericprec = row[columns.c.numeric_precision]
numericscale = row[columns.c.numeric_scale]
default = row[columns.c.column_default]
collation = row[columns.c.collation_name]
definition = row[computed_definition]
is_persisted = row[computed_cols.c.is_persisted]
is_identity = row[identity_cols.c.is_identity]
identity_start = row[identity_cols.c.seed_value]
identity_increment = row[identity_cols.c.increment_value]
comment = row[extended_properties.c.value]
comment = row[ischema.extended_properties.c.value]
coltype = self.ischema_names.get(type_, None)
kwargs = {}
if coltype in (
MSString,
MSChar,
MSNVarchar,
MSNChar,
MSText,
MSNText,
MSBinary,
MSVarBinary,
sqltypes.LargeBinary,
):
kwargs["length"] = maxlen if maxlen != -1 else None
elif coltype in (
MSString,
MSChar,
MSText,
):
kwargs["length"] = maxlen if maxlen != -1 else None
if collation:
kwargs["collation"] = collation
elif coltype in (
MSNVarchar,
MSNChar,
MSNText,
):
kwargs["length"] = maxlen // 2 if maxlen != -1 else None
if charlen == -1:
charlen = None
kwargs["length"] = charlen
if collation:
kwargs["collation"] = collation
@@ -4020,8 +3981,10 @@ index_info AS (
)
# group rows by constraint ID, to handle multi-column FKs
fkeys = util.defaultdict(
lambda: {
fkeys = []
def fkey_rec():
return {
"name": None,
"constrained_columns": [],
"referred_schema": None,
@@ -4029,7 +3992,8 @@ index_info AS (
"referred_columns": [],
"options": {},
}
)
fkeys = util.defaultdict(fkey_rec)
for r in connection.execute(s).all():
(


@@ -1,5 +1,5 @@
# dialects/mssql/information_schema.py
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# mssql/information_schema.py
# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -88,41 +88,23 @@ columns = Table(
schema="INFORMATION_SCHEMA",
)
sys_columns = Table(
"columns",
mssql_temp_table_columns = Table(
"COLUMNS",
ischema,
Column("object_id", Integer),
Column("name", CoerceUnicode),
Column("column_id", Integer),
Column("default_object_id", Integer),
Column("user_type_id", Integer),
Column("is_nullable", Integer),
Column("ordinal_position", Integer),
Column("max_length", Integer),
Column("precision", Integer),
Column("scale", Integer),
Column("collation_name", String),
schema="sys",
)
sys_types = Table(
"types",
ischema,
Column("name", CoerceUnicode, key="name"),
Column("system_type_id", Integer, key="system_type_id"),
Column("user_type_id", Integer, key="user_type_id"),
Column("schema_id", Integer, key="schema_id"),
Column("max_length", Integer, key="max_length"),
Column("precision", Integer, key="precision"),
Column("scale", Integer, key="scale"),
Column("collation_name", CoerceUnicode, key="collation_name"),
Column("is_nullable", Boolean, key="is_nullable"),
Column("is_user_defined", Boolean, key="is_user_defined"),
Column("is_assembly_type", Boolean, key="is_assembly_type"),
Column("default_object_id", Integer, key="default_object_id"),
Column("rule_object_id", Integer, key="rule_object_id"),
Column("is_table_type", Boolean, key="is_table_type"),
schema="sys",
Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"),
Column("TABLE_NAME", CoerceUnicode, key="table_name"),
Column("COLUMN_NAME", CoerceUnicode, key="column_name"),
Column("IS_NULLABLE", Integer, key="is_nullable"),
Column("DATA_TYPE", String, key="data_type"),
Column("ORDINAL_POSITION", Integer, key="ordinal_position"),
Column(
"CHARACTER_MAXIMUM_LENGTH", Integer, key="character_maximum_length"
),
Column("NUMERIC_PRECISION", Integer, key="numeric_precision"),
Column("NUMERIC_SCALE", Integer, key="numeric_scale"),
Column("COLUMN_DEFAULT", Integer, key="column_default"),
Column("COLLATION_NAME", String, key="collation_name"),
schema="tempdb.INFORMATION_SCHEMA",
)
constraints = Table(
@@ -135,17 +117,6 @@ constraints = Table(
schema="INFORMATION_SCHEMA",
)
sys_default_constraints = Table(
"default_constraints",
ischema,
Column("object_id", Integer),
Column("name", CoerceUnicode),
Column("schema_id", Integer),
Column("parent_column_id", Integer),
Column("definition", CoerceUnicode),
schema="sys",
)
column_constraints = Table(
"CONSTRAINT_COLUMN_USAGE",
ischema,
@@ -211,7 +182,6 @@ computed_columns = Table(
ischema,
Column("object_id", Integer),
Column("name", CoerceUnicode),
Column("column_id", Integer),
Column("is_computed", Boolean),
Column("is_persisted", Boolean),
Column("definition", CoerceUnicode),
@@ -237,7 +207,6 @@ class NumericSqlVariant(TypeDecorator):
int 1 is returned as "\x01\x00\x00\x00". On python 3 it returns the
correct value as string.
"""
impl = Unicode
cache_ok = True
@@ -250,7 +219,6 @@ identity_columns = Table(
ischema,
Column("object_id", Integer),
Column("name", CoerceUnicode),
Column("column_id", Integer),
Column("is_identity", Boolean),
Column("seed_value", NumericSqlVariant),
Column("increment_value", NumericSqlVariant),


@@ -1,9 +1,3 @@
# dialects/mssql/json.py
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
# mypy: ignore-errors
from ... import types as sqltypes
@@ -54,7 +48,9 @@ class JSON(sqltypes.JSON):
dictionary or list, the :meth:`_types.JSON.Comparator.as_json` accessor
should be used::
stmt = select(data_table.c.data["some key"].as_json()).where(
stmt = select(
data_table.c.data["some key"].as_json()
).where(
data_table.c.data["some key"].as_json() == {"sub": "structure"}
)
@@ -65,7 +61,9 @@ class JSON(sqltypes.JSON):
:meth:`_types.JSON.Comparator.as_integer`,
:meth:`_types.JSON.Comparator.as_float`::
stmt = select(data_table.c.data["some key"].as_string()).where(
stmt = select(
data_table.c.data["some key"].as_string()
).where(
data_table.c.data["some key"].as_string() == "some string"
)


@@ -1,9 +1,3 @@
# dialects/mssql/provision.py
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
# mypy: ignore-errors
from sqlalchemy import inspect
@@ -22,17 +16,10 @@ from ...testing.provision import generate_driver_url
from ...testing.provision import get_temp_table_name
from ...testing.provision import log
from ...testing.provision import normalize_sequence
from ...testing.provision import post_configure_engine
from ...testing.provision import run_reap_dbs
from ...testing.provision import temp_table_keyword_args
@post_configure_engine.for_db("mssql")
def post_configure_engine(url, engine, follower_ident):
if engine.driver == "pyodbc":
engine.dialect.dbapi.pooling = False
@generate_driver_url.for_db("mssql")
def generate_driver_url(url, driver, query_str):
backend = url.get_backend_name()
@@ -42,9 +29,6 @@ def generate_driver_url(url, driver, query_str):
if driver not in ("pyodbc", "aioodbc"):
new_url = new_url.set(query="")
if driver == "aioodbc":
new_url = new_url.update_query_dict({"MARS_Connection": "Yes"})
if query_str:
new_url = new_url.update_query_string(query_str)


@@ -1,5 +1,5 @@
# dialects/mssql/pymssql.py
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# mssql/pymssql.py
# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -103,7 +103,6 @@ class MSDialect_pymssql(MSDialect):
"message 20006", # Write to the server failed
"message 20017", # Unexpected EOF from the server
"message 20047", # DBPROCESS is dead or not enabled
"The server failed to resume the transaction",
):
if msg in str(e):
return True


@@ -1,5 +1,5 @@
# dialects/mssql/pyodbc.py
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# mssql/pyodbc.py
# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -30,9 +30,7 @@ is configured on the client, a basic DSN-based connection looks like::
engine = create_engine("mssql+pyodbc://scott:tiger@some_dsn")
Which above, will pass the following connection string to PyODBC:
.. sourcecode:: text
Which above, will pass the following connection string to PyODBC::
DSN=some_dsn;UID=scott;PWD=tiger
@@ -51,9 +49,7 @@ When using a hostname connection, the driver name must also be specified in the
query parameters of the URL. As these names usually have spaces in them, the
name must be URL encoded which means using plus signs for spaces::
engine = create_engine(
"mssql+pyodbc://scott:tiger@myhost:port/databasename?driver=ODBC+Driver+17+for+SQL+Server"
)
engine = create_engine("mssql+pyodbc://scott:tiger@myhost:port/databasename?driver=ODBC+Driver+17+for+SQL+Server")
The ``driver`` keyword is significant to the pyodbc dialect and must be
specified in lowercase.
@@ -73,7 +69,6 @@ internally::
The equivalent URL can be constructed using :class:`_sa.engine.URL`::
from sqlalchemy.engine import URL
connection_url = URL.create(
"mssql+pyodbc",
username="scott",
@@ -88,6 +83,7 @@ The equivalent URL can be constructed using :class:`_sa.engine.URL`::
},
)
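The hunk elides the middle of this call; a complete version of the pattern reads as follows (reconstructed, with the host, database and driver values as assumptions)::

    from sqlalchemy.engine import URL

    connection_url = URL.create(
        "mssql+pyodbc",
        username="scott",
        password="tiger",  # assumed
        host="myhost",  # assumed
        port=1433,  # assumed
        database="databasename",  # assumed
        query={"driver": "ODBC Driver 17 for SQL Server"},
    )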
Pass through exact Pyodbc string
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -98,11 +94,8 @@ using the parameter ``odbc_connect``. A :class:`_sa.engine.URL` object
can help make this easier::
from sqlalchemy.engine import URL
connection_string = "DRIVER={SQL Server Native Client 10.0};SERVER=dagger;DATABASE=test;UID=user;PWD=password"
connection_url = URL.create(
"mssql+pyodbc", query={"odbc_connect": connection_string}
)
connection_url = URL.create("mssql+pyodbc", query={"odbc_connect": connection_string})
engine = create_engine(connection_url)
@@ -134,8 +127,7 @@ database using Azure credentials::
from sqlalchemy.engine.url import URL
from azure import identity
# Connection option for access tokens, as defined in msodbcsql.h
SQL_COPT_SS_ACCESS_TOKEN = 1256
SQL_COPT_SS_ACCESS_TOKEN = 1256 # Connection option for access tokens, as defined in msodbcsql.h
TOKEN_URL = "https://database.windows.net/" # The token URL for any Azure SQL database
connection_string = "mssql+pyodbc://@my-server.database.windows.net/myDb?driver=ODBC+Driver+17+for+SQL+Server"
@@ -144,19 +136,14 @@ database using Azure credentials::
azure_credentials = identity.DefaultAzureCredential()
@event.listens_for(engine, "do_connect")
def provide_token(dialect, conn_rec, cargs, cparams):
# remove the "Trusted_Connection" parameter that SQLAlchemy adds
cargs[0] = cargs[0].replace(";Trusted_Connection=Yes", "")
# create token credential
raw_token = azure_credentials.get_token(TOKEN_URL).token.encode(
"utf-16-le"
)
token_struct = struct.pack(
f"<I{len(raw_token)}s", len(raw_token), raw_token
)
raw_token = azure_credentials.get_token(TOKEN_URL).token.encode("utf-16-le")
token_struct = struct.pack(f"<I{len(raw_token)}s", len(raw_token), raw_token)
# apply it to keyword arguments
cparams["attrs_before"] = {SQL_COPT_SS_ACCESS_TOKEN: token_struct}
@@ -189,9 +176,7 @@ emit a ``.rollback()`` after an operation had a failure of some kind.
This specific case can be handled by passing ``ignore_no_transaction_on_rollback=True`` to
the SQL Server dialect via the :func:`_sa.create_engine` function as follows::
engine = create_engine(
connection_url, ignore_no_transaction_on_rollback=True
)
engine = create_engine(connection_url, ignore_no_transaction_on_rollback=True)
Using the above parameter, the dialect will catch ``ProgrammingError``
exceptions raised during ``connection.rollback()`` and emit a warning
@@ -251,6 +236,7 @@ behavior and pass long strings as varchar(max)/nvarchar(max) using the
},
)
Pyodbc Pooling / connection close behavior
------------------------------------------
@@ -315,8 +301,7 @@ Server dialect supports this parameter by passing the
engine = create_engine(
"mssql+pyodbc://scott:tiger@mssql2017:1433/test?driver=ODBC+Driver+17+for+SQL+Server",
fast_executemany=True,
)
fast_executemany=True)
.. versionchanged:: 2.0.9 - the ``fast_executemany`` parameter now has its
intended effect of this PyODBC feature taking effect for all INSERT
@@ -384,6 +369,7 @@ from ...engine import cursor as _cursor
class _ms_numeric_pyodbc:
"""Turns Decimals with adjusted() < 0 or > 7 into strings.
The routines here are needed for older pyodbc versions
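For reference, what ``Decimal.adjusted()`` measures (a quick illustration)::

    from decimal import Decimal

    print(Decimal("0.00001234").adjusted())  # -5: < 0, so stringified
    print(Decimal("123456789").adjusted())   # 8: > 7, so stringified
    print(Decimal("1234.5").adjusted())      # 3: in range, left as Decimal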