diff options
Diffstat (limited to 'oslo_db/sqlalchemy')
-rw-r--r--  oslo_db/sqlalchemy/engines.py |  6 +++---
-rw-r--r--  oslo_db/sqlalchemy/utils.py   | 11 ++++++++---
2 files changed, 11 insertions, 6 deletions
diff --git a/oslo_db/sqlalchemy/engines.py b/oslo_db/sqlalchemy/engines.py
index de3719a..32c4def 100644
--- a/oslo_db/sqlalchemy/engines.py
+++ b/oslo_db/sqlalchemy/engines.py
@@ -70,7 +70,7 @@ def _connect_ping_listener(connection, branch):
     try:
         # run a SELECT 1. use a core select() so that
         # any details like that needed by the backend are handled.
-        connection.scalar(select([1]))
+        connection.scalar(select(1))
     except exception.DBConnectionError:
         # catch DBConnectionError, which is raised by the filter
         # system.
@@ -80,7 +80,7 @@ def _connect_ping_listener(connection, branch):
         # run the select again to re-validate the Connection.
         LOG.exception(
             'Database connection was found disconnected; reconnecting')
-        connection.scalar(select([1]))
+        connection.scalar(select(1))
     finally:
         connection.should_close_with_result = save_should_close_with_result
@@ -362,7 +362,7 @@ def _init_events(engine, sqlite_synchronous=True, sqlite_fk=False, **kw):
             # emit our own BEGIN, checking for existing
             # transactional state
             if 'in_transaction' not in conn.info:
-                conn.execute("BEGIN")
+                conn.execute(sqlalchemy.text("BEGIN"))
                 conn.info['in_transaction'] = True

    @sqlalchemy.event.listens_for(engine, "rollback")
diff --git a/oslo_db/sqlalchemy/utils.py b/oslo_db/sqlalchemy/utils.py
index 2329b4b..70db00b 100644
--- a/oslo_db/sqlalchemy/utils.py
+++ b/oslo_db/sqlalchemy/utils.py
@@ -485,8 +485,12 @@ def drop_old_duplicate_entries_from_table(engine, table_name,
     columns_for_select.extend(columns_for_group_by)

     duplicated_rows_select = sqlalchemy.sql.select(
-        columns_for_select, group_by=columns_for_group_by,
-        having=func.count(table.c.id) > 1)
+        *columns_for_select,
+    ).group_by(
+        *columns_for_group_by
+    ).having(
+        func.count(table.c.id) > 1
+    )

     for row in engine.execute(duplicated_rows_select).fetchall():
         # NOTE(boris-42): Do not remove row that has the biggest ID.
@@ -497,7 +501,8 @@ def drop_old_duplicate_entries_from_table(engine, table_name,
             delete_condition &= table.c[name] == row[name]

         rows_to_delete_select = sqlalchemy.sql.select(
-            [table.c.id]).where(delete_condition)
+            table.c.id,
+        ).where(delete_condition)
         for row in engine.execute(rows_to_delete_select).fetchall():
             LOG.info("Deleting duplicated row with id: %(id)s from table: "
                      "%(table)s", dict(id=row[0], table=table_name))