summaryrefslogtreecommitdiff
path: root/lib/sqlalchemy/testing/util.py
diff options
context:
space:
mode:
authorFederico Caselli <cfederico87@gmail.com>2021-10-14 21:45:57 +0200
committerMike Bayer <mike_mp@zzzcomputing.com>2022-06-18 14:57:26 -0400
commitdb08a699489c9b0259579d7ff7fd6bf3496ca3a2 (patch)
tree741feb8714d9f94f0ddfd03af437f94d2d5a505b /lib/sqlalchemy/testing/util.py
parent964c26feecc7607d6d3a66240c3f33f4ae9215d4 (diff)
downloadsqlalchemy-db08a699489c9b0259579d7ff7fd6bf3496ca3a2.tar.gz
rearchitect reflection for batched performance
Rearchitected the schema reflection API to allow some dialects to make use of high-performing batch queries to reflect the schemas of many tables at once, using far fewer queries. The new performance features are targeted first at the PostgreSQL and Oracle backends, and may be applied to any dialect that makes use of SELECT queries against system catalog tables to reflect tables. (This currently omits the MySQL and SQLite dialects, which instead parse the "CREATE TABLE" statement; however, these dialects do not have a pre-existing performance issue with reflection. MS SQL Server is still a TODO.) The new API is backwards compatible with the previous system and should require no changes to third-party dialects to retain compatibility; third-party dialects can also opt into the new system by implementing batched queries for schema reflection. Along with this change is an updated reflection API that is fully :pep:`484` typed and features many new methods and some changes. Fixes: #4379 Change-Id: I897ec09843543aa7012bcdce758792ed3d415d08
Diffstat (limited to 'lib/sqlalchemy/testing/util.py')
-rw-r--r--lib/sqlalchemy/testing/util.py39
1 file changed, 29 insertions, 10 deletions
diff --git a/lib/sqlalchemy/testing/util.py b/lib/sqlalchemy/testing/util.py
index 0070b4d67..6fd42af70 100644
--- a/lib/sqlalchemy/testing/util.py
+++ b/lib/sqlalchemy/testing/util.py
@@ -393,36 +393,55 @@ def drop_all_tables_from_metadata(metadata, engine_or_connection):
go(engine_or_connection)
-def drop_all_tables(engine, inspector, schema=None, include_names=None):
+def drop_all_tables(
+ engine,
+ inspector,
+ schema=None,
+ consider_schemas=(None,),
+ include_names=None,
+):
if include_names is not None:
include_names = set(include_names)
+ if schema is not None:
+ assert consider_schemas == (
+ None,
+ ), "consider_schemas and schema are mutually exclusive"
+ consider_schemas = (schema,)
+
with engine.begin() as conn:
- for tname, fkcs in reversed(
- inspector.get_sorted_table_and_fkc_names(schema=schema)
+ for table_key, fkcs in reversed(
+ inspector.sort_tables_on_foreign_key_dependency(
+ consider_schemas=consider_schemas
+ )
):
- if tname:
- if include_names is not None and tname not in include_names:
+ if table_key:
+ if (
+ include_names is not None
+ and table_key[1] not in include_names
+ ):
continue
conn.execute(
- DropTable(Table(tname, MetaData(), schema=schema))
+ DropTable(
+ Table(table_key[1], MetaData(), schema=table_key[0])
+ )
)
elif fkcs:
if not engine.dialect.supports_alter:
continue
- for tname, fkc in fkcs:
+ for t_key, fkc in fkcs:
if (
include_names is not None
- and tname not in include_names
+ and t_key[1] not in include_names
):
continue
tb = Table(
- tname,
+ t_key[1],
MetaData(),
Column("x", Integer),
Column("y", Integer),
- schema=schema,
+ schema=t_key[0],
)
conn.execute(
DropConstraint(