author     mike bayer <mike_mp@zzzcomputing.com>  2023-05-09 14:39:18 +0000
committer  Gerrit Code Review <gerrit@bbpush.zzzcomputing.com>  2023-05-09 14:39:18 +0000
commit     a7d8063171d87eef0f89e114e919ea8e07023db3 (patch)
tree       e0858fc9c2218aaf71a3dbee1a2540beb432b7b3 /test
parent     a517e14ee4c44488a2a765c588fa9a2a440b0662 (diff)
parent     f45f4a3afc3c260d50773c647eb7b1c270bb8e00 (diff)
download   sqlalchemy-a7d8063171d87eef0f89e114e919ea8e07023db3.tar.gz
Merge "Improve oracle index reflection" into main
Diffstat (limited to 'test')
-rw-r--r--  test/dialect/oracle/test_reflection.py      | 112
-rw-r--r--  test/dialect/postgresql/test_reflection.py  |   3
-rw-r--r--  test/perf/many_table_reflection.py          |  68
-rw-r--r--  test/requirements.py                        |  12
4 files changed, 149 insertions, 46 deletions
diff --git a/test/dialect/oracle/test_reflection.py b/test/dialect/oracle/test_reflection.py
index ac58d3694..6c6f1f21f 100644
--- a/test/dialect/oracle/test_reflection.py
+++ b/test/dialect/oracle/test_reflection.py
@@ -5,7 +5,6 @@ from sqlalchemy import FLOAT
from sqlalchemy import Float
from sqlalchemy import ForeignKey
from sqlalchemy import ForeignKeyConstraint
-from sqlalchemy import func
from sqlalchemy import Identity
from sqlalchemy import Index
from sqlalchemy import inspect
@@ -982,36 +981,109 @@ class RoundTripIndexTest(fixtures.TestBase):
)
def test_reflect_fn_index(self, metadata, connection):
- """test reflection of a functional index.
+ """test reflection of a functional index."""
- it appears this emitted a warning at some point but does not right now.
- the returned data is not exactly correct, but this is what it's
- likely been doing for many years.
+ Table(
+ "sometable",
+ metadata,
+ Column("group", Unicode(255)),
+ Column("col", Unicode(255)),
+ Column("other", Unicode(255), index=True),
+ )
+ metadata.create_all(connection)
+ connection.exec_driver_sql(
+ """create index idx3 on sometable(
+ lower("group"), other, upper(other))"""
+ )
+ connection.exec_driver_sql(
+ """create index idx1 on sometable
+ (("group" || col), col || other desc)"""
+ )
+ connection.exec_driver_sql(
+ """
+ create unique index idx2 on sometable
+ (col desc, lower(other), "group" asc)
+ """
+ )
- """
+ expected = [
+ {
+ "name": "idx1",
+ "column_names": [None, None],
+ "expressions": ['"group"||"COL"', '"COL"||"OTHER"'],
+ "unique": False,
+ "dialect_options": {},
+ "column_sorting": {'"COL"||"OTHER"': ("desc",)},
+ },
+ {
+ "name": "idx2",
+ "column_names": [None, None, "group"],
+ "expressions": ['"COL"', 'LOWER("OTHER")', "group"],
+ "unique": True,
+ "column_sorting": {'"COL"': ("desc",)},
+ "dialect_options": {},
+ },
+ {
+ "name": "idx3",
+ "column_names": [None, "other", None],
+ "expressions": [
+ 'LOWER("group")',
+ "other",
+ 'UPPER("OTHER")',
+ ],
+ "unique": False,
+ "dialect_options": {},
+ },
+ {
+ "name": "ix_sometable_other",
+ "column_names": ["other"],
+ "unique": False,
+ "dialect_options": {},
+ },
+ ]
+
+ eq_(inspect(connection).get_indexes("sometable"), expected)
+ def test_indexes_asc_desc(self, metadata, connection):
s_table = Table(
"sometable",
metadata,
- Column("group", Unicode(255), primary_key=True),
+ Column("a", Unicode(255), primary_key=True),
+ Column("b", Unicode(255)),
+ Column("group", Unicode(255)),
Column("col", Unicode(255)),
)
-
- Index("data_idx", func.upper(s_table.c.col))
+ Index("id1", s_table.c.b.asc())
+ Index("id2", s_table.c.col.desc())
+ Index("id3", s_table.c.b.asc(), s_table.c.group.desc())
metadata.create_all(connection)
- eq_(
- inspect(connection).get_indexes("sometable"),
- [
- {
- "column_names": [],
- "dialect_options": {},
- "name": "data_idx",
- "unique": False,
- }
- ],
- )
+ expected = [
+ {
+ "name": "id1",
+ "column_names": ["b"],
+ "unique": False,
+ "dialect_options": {},
+ },
+ {
+ "name": "id2",
+ "column_names": [None],
+ "expressions": ['"COL"'],
+ "unique": False,
+ "column_sorting": {'"COL"': ("desc",)},
+ "dialect_options": {},
+ },
+ {
+ "name": "id3",
+ "column_names": ["b", None],
+ "expressions": ["b", '"group"'],
+ "unique": False,
+ "column_sorting": {'"group"': ("desc",)},
+ "dialect_options": {},
+ },
+ ]
+ eq_(inspect(connection).get_indexes("sometable"), expected)
def test_basic(self, metadata, connection):
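
The new test_indexes_asc_desc above pins down an Oracle quirk: an
ascending index member reflects as a plain column name, while a
descending one comes back as a quoted expression with a matching
"column_sorting" entry. A sketch of that round trip, assuming an Oracle
engine named "engine":

    from sqlalchemy import Column, Index, MetaData, Table, Unicode, inspect

    metadata = MetaData()
    t = Table(
        "sometable",
        metadata,
        Column("a", Unicode(255), primary_key=True),
        Column("col", Unicode(255)),
    )
    Index("id2", t.c.col.desc())
    metadata.create_all(engine)
    # expected per the test above:
    # [{'name': 'id2', 'column_names': [None], 'expressions': ['"COL"'],
    #   'unique': False, 'column_sorting': {'"COL"': ('desc',)},
    #   'dialect_options': {}}]
    print(inspect(engine).get_indexes("sometable"))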
diff --git a/test/dialect/postgresql/test_reflection.py b/test/dialect/postgresql/test_reflection.py
index 4fb1f8e70..eacb4b149 100644
--- a/test/dialect/postgresql/test_reflection.py
+++ b/test/dialect/postgresql/test_reflection.py
@@ -1166,7 +1166,7 @@ class ReflectionTest(
connection.exec_driver_sql(
"""
create index idx3 on party
- (lower(name::text), other, lower(aname::text))
+ (lower(name::text), other, lower(aname::text) desc)
"""
)
connection.exec_driver_sql(
@@ -1216,6 +1216,7 @@ class ReflectionTest(
"unique": False,
"include_columns": [],
"dialect_options": {"postgresql_include": []},
+ "column_sorting": {"lower(aname::text)": ("desc",)},
},
{
"name": "idx4",
diff --git a/test/perf/many_table_reflection.py b/test/perf/many_table_reflection.py
index 804419f28..8fb654bbe 100644
--- a/test/perf/many_table_reflection.py
+++ b/test/perf/many_table_reflection.py
@@ -27,16 +27,15 @@ def generate_table(meta: sa.MetaData, min_cols, max_cols, dialect_name):
cols = []
for i in range(col_number - (0 if is_mssql else add_identity)):
args = []
- if random.random() < 0.95 or table_num == 0:
+ if random.random() < 0.99 or table_num == 0:
if is_mssql and add_identity and i == 0:
args.append(sa.Integer)
args.append(identity)
else:
args.append(random.choice(types))
else:
- args.append(
- sa.ForeignKey(f"table_{table_num-1}.table_{table_num-1}_col_1")
- )
+ target = random.randint(0, table_num - 1)
+ args.append(sa.ForeignKey(f"table_{target}.table_{target}_col_1"))
cols.append(
sa.Column(
f"table_{table_num}_col_{i+1}",
@@ -45,8 +44,8 @@ def generate_table(meta: sa.MetaData, min_cols, max_cols, dialect_name):
comment=f"primary key of table_{table_num}"
if i == 0
else None,
- index=random.random() > 0.9 and i > 0,
- unique=random.random() > 0.95 and i > 0,
+ index=random.random() > 0.97 and i > 0,
+ unique=random.random() > 0.97 and i > 0,
)
)
if add_identity and not is_mssql:
@@ -131,6 +130,19 @@ def create_tables(engine, meta):
meta.create_all(engine, tables[i : i + 500])
+def _drop_ddl(name, schema_name, dialect_name):
+ if dialect_name.startswith("postgres"):
+ suffix = "CASCADE"
+ elif dialect_name.startswith("oracle"):
+ suffix = "CASCADE CONSTRAINTS PURGE"
+ else:
+ suffix = ""
+ if schema_name:
+ return sa.schema.DDL(f"DROP TABLE {schema_name}.{name} {suffix}")
+ else:
+ return sa.schema.DDL(f"DROP TABLE {name} {suffix}")
+
+
@log
def drop_tables(engine, meta, schema_name, table_names: list):
tables = list(meta.tables.values())[::-1]
@@ -138,10 +150,6 @@ def drop_tables(engine, meta, schema_name, table_names: list):
meta.drop_all(engine, tables[i : i + 500])
remaining = sa.inspect(engine).get_table_names(schema=schema_name)
- suffix = ""
- if engine.dialect.name.startswith("postgres"):
- suffix = "CASCADE"
-
remaining = sorted(
remaining, key=lambda tn: int(tn.partition("_")[2]), reverse=True
)
@@ -151,14 +159,7 @@ def drop_tables(engine, meta, schema_name, table_names: list):
name = engine.dialect.denormalize_name(tn)
else:
name = tn
- if schema_name:
- conn.execute(
- sa.schema.DDL(
- f'DROP TABLE {schema_name}."{name}" {suffix}'
- )
- )
- else:
- conn.execute(sa.schema.DDL(f'DROP TABLE "{name}" {suffix}'))
+ conn.execute(_drop_ddl(name, schema_name, engine.dialect.name))
if i % 500 == 0:
conn.commit()
conn.commit()
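
Centralizing the DROP statement in _drop_ddl keeps the dialect-specific
suffixes in one place: PostgreSQL needs CASCADE and Oracle needs CASCADE
CONSTRAINTS PURGE to drop tables that still participate in foreign keys.
A minimal usage sketch, assuming an engine and one of the generated table
names:

    ddl = _drop_ddl("table_0", None, engine.dialect.name)
    with engine.connect() as conn:
        conn.execute(ddl)
        conn.commit()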
@@ -454,6 +455,9 @@ def main(db, schema_name, table_number, min_cols, max_cols, args):
else:
engine = sa.create_engine(db, echo=args.echo, future=True)
+ if args.drop_all:
+ return drop_all(engine, schema_name)
+
if engine.name == "oracle":
# clear out oracle caches so that we get the real-world time the
# queries would normally take for scripts that aren't run repeatedly
@@ -537,6 +541,25 @@ def timer():
return track_time
+def drop_all(engine, schema_name):
+ with engine.connect() as conn:
+ table_names = engine.dialect.get_table_names(conn, schema=schema_name)
+ print(f"Dropping {len(table_names)} tables")
+ dn = engine.dialect.name
+ i = 0
+ while table_names:
+ name = table_names.pop()
+ try:
+ conn.execute(_drop_ddl(name, schema_name, dn))
+ conn.commit()
+ except Exception:
+ conn.rollback()
+ table_names.insert(0, name)
+ i += 1
+ if i % 25 == 0:
+ print(f"Still running. Tables left {len(table_names)}")
+
+
if __name__ == "__main__":
parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument(
@@ -572,7 +595,9 @@ if __name__ == "__main__":
parser.add_argument(
"--no-drop", help="Do not run drop tables", action="store_true"
)
- parser.add_argument("--reflect", help="Run reflect", action="store_true")
+ parser.add_argument(
+ "--reflect", help="Run metadata reflect", action="store_true"
+ )
parser.add_argument(
"--test",
help="Run these tests. 'all' runs all tests",
@@ -608,6 +633,11 @@ if __name__ == "__main__":
"using single reflections. Mainly for sqlite.",
)
parser.add_argument("--pool-class", help="The pool class to use")
+ parser.add_argument(
+ "--drop-all",
+ action="store_true",
+ help="Drop all tables, do nothing else",
+ )
args = parser.parse_args()
min_cols = args.min_cols
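
With the new flag, a run can clean up leftovers from an earlier
interrupted run without generating or reflecting anything; drop_all
retries tables whose drop fails (e.g. due to dependency order) by pushing
them back onto the queue. A hypothetical invocation (the database-URL
argument itself is not shown in this hunk, so its exact form is an
assumption):

    python test/perf/many_table_reflection.py <db-url> --drop-all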
diff --git a/test/requirements.py b/test/requirements.py
index 68241330d..436f7d3d7 100644
--- a/test/requirements.py
+++ b/test/requirements.py
@@ -96,10 +96,6 @@ class DefaultRequirements(SuiteRequirements):
)
@property
- def reflect_indexes_with_ascdesc(self):
- return fails_if(["oracle"])
-
- @property
def table_ddl_if_exists(self):
"""target platform supports IF NOT EXISTS / IF EXISTS for tables."""
@@ -607,11 +603,15 @@ class DefaultRequirements(SuiteRequirements):
@property
def indexes_with_expressions(self):
- return only_on(["postgresql", "sqlite>=3.9.0"])
+ return only_on(["postgresql", "sqlite>=3.9.0", "oracle"])
@property
def reflect_indexes_with_expressions(self):
- return only_on(["postgresql"])
+ return only_on(["postgresql", "oracle"])
+
+ @property
+ def reflect_indexes_with_ascdesc_as_expression(self):
+ return only_on(["oracle"])
@property
def temp_table_names(self):
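
The removed reflect_indexes_with_ascdesc exclusion (which marked Oracle
as failing) is effectively inverted: Oracle is the only backend whose
asc/desc reflection arrives as expressions, so the new property opts
tests in rather than recording a failure. A sketch of how such a property
typically gates a test in this suite (the decorator spelling is an
assumption based on SQLAlchemy's testing idioms):

    from sqlalchemy import testing
    from sqlalchemy.testing import fixtures

    class SomeReflectionTest(fixtures.TestBase):
        @testing.requires.reflect_indexes_with_ascdesc_as_expression
        def test_desc_as_expression(self, connection):
            ...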