author     Victor Sergeyev <vsergeyev@mirantis.com>  2014-02-25 15:51:49 +0200
committer  Victor Sergeyev <vsergeyev@mirantis.com>  2014-02-26 21:13:57 +0200
commit     9bbea3bf0040eb1138f9e8a8e719039f79c647de (patch)
tree       c1bd8073e1c74d2995093809a10839e71940e0e8 /ironic/openstack
parent     1a57ac47b3fa878d07d7c05bafa6ad20ce5afe9d (diff)
Sync common db code from Oslo

This sync contains commit ce69e7f ("Don't store engine instances in
oslo.db"), which removed the global engine from the oslo.db code. To
compensate, code for working with SQLAlchemy engines and sessions - the
get_engine() and get_session() functions - has been added to Ironic's
ironic.db.sqlalchemy.api module. This sync also removes the database
slave connection and tpool features, because they were removed from the
oslo code.

Latest oslo.db commit: 7959826af1f36a4bbc4c466d0d1e0b9efd468649

Change-Id: I14c3b823a8b975d5cd097778f3514cfb1abe2e10
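To illustrate, the new module-level helpers in ironic.db.sqlalchemy.api
can be built on top of the EngineFacade class added by this sync. A rough
sketch follows; the lazy _FACADE caching shown here is an illustrative
assumption, not a quote of the Ironic patch:

    from oslo.config import cfg

    from ironic.openstack.common.db.sqlalchemy import session as db_session

    CONF = cfg.CONF

    _FACADE = None  # illustrative module-level cache


    def _create_facade_lazily():
        # The engine/sessionmaker pair is now owned by the application
        # instead of living as globals inside the common db code.
        global _FACADE
        if _FACADE is None:
            _FACADE = db_session.EngineFacade(CONF.database.connection)
        return _FACADE


    def get_engine():
        return _create_facade_lazily().get_engine()


    def get_session(**kwargs):
        return _create_facade_lazily().get_session(**kwargs)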
Diffstat (limited to 'ironic/openstack')

 ironic/openstack/common/__init__.py                                |   2
 ironic/openstack/common/context.py                                 |  32
 ironic/openstack/common/db/api.py                                  | 200
 ironic/openstack/common/db/exception.py                            |   8
 ironic/openstack/common/db/options.py                              | 168
 ironic/openstack/common/db/sqlalchemy/migration.py                 |  39
 ironic/openstack/common/db/sqlalchemy/migration_cli/ext_alembic.py |   3
 ironic/openstack/common/db/sqlalchemy/migration_cli/ext_migrate.py |  19
 ironic/openstack/common/db/sqlalchemy/models.py                    |   8
 ironic/openstack/common/db/sqlalchemy/session.py                   | 629
 ironic/openstack/common/db/sqlalchemy/test_base.py                 | 149
 ironic/openstack/common/db/sqlalchemy/test_migrations.py           |  79
 ironic/openstack/common/db/sqlalchemy/utils.py                     | 151
 13 files changed, 996 insertions(+), 491 deletions(-)
diff --git a/ironic/openstack/common/__init__.py b/ironic/openstack/common/__init__.py
index e69de29bb..2a00f3bc4 100644
--- a/ironic/openstack/common/__init__.py
+++ b/ironic/openstack/common/__init__.py
@@ -0,0 +1,2 @@
+import six
+six.add_move(six.MovedModule('mox', 'mox', 'mox3.mox'))
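
This registration lets test code import mox uniformly across Python
versions; a small illustration (the import below is an assumption about
intended usage, not part of the patch):

    import six

    six.add_move(six.MovedModule('mox', 'mox', 'mox3.mox'))

    # 'from six.moves import mox' now yields the mox module on Python 2
    # and mox3.mox on Python 3:
    from six.moves import mox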
diff --git a/ironic/openstack/common/context.py b/ironic/openstack/common/context.py
index 2e46d7024..09019ee38 100644
--- a/ironic/openstack/common/context.py
+++ b/ironic/openstack/common/context.py
@@ -36,12 +36,18 @@ class RequestContext(object):
accesses the system, as well as additional request information.
"""
- def __init__(self, auth_token=None, user=None, tenant=None, is_admin=False,
+ user_idt_format = '{user} {tenant} {domain} {user_domain} {p_domain}'
+
+ def __init__(self, auth_token=None, user=None, tenant=None, domain=None,
+ user_domain=None, project_domain=None, is_admin=False,
read_only=False, show_deleted=False, request_id=None,
instance_uuid=None):
self.auth_token = auth_token
self.user = user
self.tenant = tenant
+ self.domain = domain
+ self.user_domain = user_domain
+ self.project_domain = project_domain
self.is_admin = is_admin
self.read_only = read_only
self.show_deleted = show_deleted
@@ -51,14 +57,25 @@ class RequestContext(object):
self.request_id = request_id
def to_dict(self):
+ user_idt = (
+ self.user_idt_format.format(user=self.user or '-',
+ tenant=self.tenant or '-',
+ domain=self.domain or '-',
+ user_domain=self.user_domain or '-',
+ p_domain=self.project_domain or '-'))
+
return {'user': self.user,
'tenant': self.tenant,
+ 'domain': self.domain,
+ 'user_domain': self.user_domain,
+ 'project_domain': self.project_domain,
'is_admin': self.is_admin,
'read_only': self.read_only,
'show_deleted': self.show_deleted,
'auth_token': self.auth_token,
'request_id': self.request_id,
- 'instance_uuid': self.instance_uuid}
+ 'instance_uuid': self.instance_uuid,
+ 'user_identity': user_idt}
def get_admin_context(show_deleted=False):
@@ -81,3 +98,14 @@ def get_context_from_function_and_args(function, args, kwargs):
return arg
return None
+
+
+def is_user_context(context):
+ """Indicates if the request context is a normal user."""
+ if not context:
+ return False
+ if context.is_admin:
+ return False
+ if not context.user_id or not context.project_id:
+ return False
+ return True
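
The effect of the new fields on to_dict() can be seen in a short,
hypothetical snippet (values are made up):

    from ironic.openstack.common.context import RequestContext

    ctx = RequestContext(user='alice', tenant='demo', domain='default')

    # user_identity packs '{user} {tenant} {domain} {user_domain}
    # {p_domain}', with '-' substituted for unset fields:
    print(ctx.to_dict()['user_identity'])  # -> 'alice demo default - -'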
diff --git a/ironic/openstack/common/db/api.py b/ironic/openstack/common/db/api.py
index 0c98cec64..dbbda0fa4 100644
--- a/ironic/openstack/common/db/api.py
+++ b/ironic/openstack/common/db/api.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright (c) 2013 Rackspace Hosting
# All Rights Reserved.
#
@@ -17,90 +15,148 @@
"""Multiple DB API backend support.
-Supported configuration options:
-
-The following two parameters are in the 'database' group:
-`backend`: DB backend name or full module path to DB backend module.
-`use_tpool`: Enable thread pooling of DB API calls.
-
A DB backend module should implement a method named 'get_backend' which
takes no arguments. The method can return any object that implements DB
API methods.
+"""
-*NOTE*: There are bugs in eventlet when using tpool combined with
-threading locks. The python logging module happens to use such locks. To
-work around this issue, be sure to specify thread=False with
-eventlet.monkey_patch().
+import functools
+import logging
+import threading
+import time
-A bug for eventlet has been filed here:
+from ironic.openstack.common.db import exception
+from ironic.openstack.common.gettextutils import _LE
+from ironic.openstack.common import importutils
-https://bitbucket.org/eventlet/eventlet/issue/137/
-"""
-import functools
-from oslo.config import cfg
+LOG = logging.getLogger(__name__)
+
+
+def safe_for_db_retry(f):
+ """Enable db-retry for decorated function, if config option enabled."""
+ f.__dict__['enable_retry'] = True
+ return f
+
+
+class wrap_db_retry(object):
+ """Retry db.api methods, if DBConnectionError() raised
+
+ Retry decorated db.api methods. If we enabled `use_db_reconnect`
+ in config, this decorator will be applied to all db.api functions,
+ marked with @safe_for_db_retry decorator.
+ Decorator catchs DBConnectionError() and retries function in a
+ loop until it succeeds, or until maximum retries count will be reached.
+ """
+
+ def __init__(self, retry_interval, max_retries, inc_retry_interval,
+ max_retry_interval):
+ super(wrap_db_retry, self).__init__()
+
+ self.retry_interval = retry_interval
+ self.max_retries = max_retries
+ self.inc_retry_interval = inc_retry_interval
+ self.max_retry_interval = max_retry_interval
+
+ def __call__(self, f):
+ @functools.wraps(f)
+ def wrapper(*args, **kwargs):
+ next_interval = self.retry_interval
+ remaining = self.max_retries
+
+ while True:
+ try:
+ return f(*args, **kwargs)
+ except exception.DBConnectionError as e:
+ if remaining == 0:
+ LOG.exception(_LE('DB exceeded retry limit.'))
+ raise exception.DBError(e)
+ if remaining != -1:
+ remaining -= 1
+ LOG.exception(_LE('DB connection error.'))
+ # NOTE(vsergeyev): We are using patched time module, so
+ # this effectively yields the execution
+ # context to another green thread.
+ time.sleep(next_interval)
+ if self.inc_retry_interval:
+ next_interval = min(
+ next_interval * 2,
+ self.max_retry_interval
+ )
+ return wrapper
-from ironic.openstack.common import importutils
-from ironic.openstack.common import lockutils
+class DBAPI(object):
+ def __init__(self, backend_name, backend_mapping=None, lazy=False,
+ **kwargs):
+ """Initialize the chosen DB API backend.
-db_opts = [
- cfg.StrOpt('backend',
- default='sqlalchemy',
- deprecated_name='db_backend',
- deprecated_group='DEFAULT',
- help='The backend to use for db'),
- cfg.BoolOpt('use_tpool',
- default=False,
- deprecated_name='dbapi_use_tpool',
- deprecated_group='DEFAULT',
- help='Enable the experimental use of thread pooling for '
- 'all DB API calls')
-]
+ :param backend_name: name of the backend to load
+ :type backend_name: str
-CONF = cfg.CONF
-CONF.register_opts(db_opts, 'database')
+ :param backend_mapping: backend name -> module/class to load mapping
+ :type backend_mapping: dict
+ :param lazy: load the DB backend lazily on the first DB API method call
+ :type lazy: bool
+
+ Keyword arguments:
+
+ :keyword use_db_reconnect: retry DB transactions on disconnect or not
+ :type use_db_reconnect: bool
+
+ :keyword retry_interval: seconds between transaction retries
+ :type retry_interval: int
+
+ :keyword inc_retry_interval: increase retry interval or not
+ :type inc_retry_interval: bool
+
+ :keyword max_retry_interval: max interval value between retries
+ :type max_retry_interval: int
+
+ :keyword max_retries: max number of retries before an error is raised
+ :type max_retries: int
-class DBAPI(object):
- def __init__(self, backend_mapping=None):
- if backend_mapping is None:
- backend_mapping = {}
- self.__backend = None
- self.__backend_mapping = backend_mapping
-
- @lockutils.synchronized('dbapi_backend', 'ironic-')
- def __get_backend(self):
- """Get the actual backend. May be a module or an instance of
- a class. Doesn't matter to us. We do this synchronized as it's
- possible multiple greenthreads started very quickly trying to do
- DB calls and eventlet can switch threads before self.__backend gets
- assigned.
"""
- if self.__backend:
- # Another thread assigned it
- return self.__backend
- backend_name = CONF.database.backend
- self.__use_tpool = CONF.database.use_tpool
- if self.__use_tpool:
- from eventlet import tpool
- self.__tpool = tpool
- # Import the untranslated name if we don't have a
- # mapping.
- backend_path = self.__backend_mapping.get(backend_name,
- backend_name)
- backend_mod = importutils.import_module(backend_path)
- self.__backend = backend_mod.get_backend()
- return self.__backend
- def __getattr__(self, key):
- backend = self.__backend or self.__get_backend()
- attr = getattr(backend, key)
- if not self.__use_tpool or not hasattr(attr, '__call__'):
- return attr
+ self._backend = None
+ self._backend_name = backend_name
+ self._backend_mapping = backend_mapping or {}
+ self._lock = threading.Lock()
+
+ if not lazy:
+ self._load_backend()
+
+ self.use_db_reconnect = kwargs.get('use_db_reconnect', False)
+ self.retry_interval = kwargs.get('retry_interval', 1)
+ self.inc_retry_interval = kwargs.get('inc_retry_interval', True)
+ self.max_retry_interval = kwargs.get('max_retry_interval', 10)
+ self.max_retries = kwargs.get('max_retries', 20)
+
+ def _load_backend(self):
+ with self._lock:
+ if not self._backend:
+ # Import the untranslated name if we don't have a mapping
+ backend_path = self._backend_mapping.get(self._backend_name,
+ self._backend_name)
+ backend_mod = importutils.import_module(backend_path)
+ self._backend = backend_mod.get_backend()
- def tpool_wrapper(*args, **kwargs):
- return self.__tpool.execute(attr, *args, **kwargs)
+ def __getattr__(self, key):
+ if not self._backend:
+ self._load_backend()
- functools.update_wrapper(tpool_wrapper, attr)
- return tpool_wrapper
+ attr = getattr(self._backend, key)
+ if not hasattr(attr, '__call__'):
+ return attr
+ # NOTE(vsergeyev): If `use_db_reconnect` option is set to True, retry
+ # DB API methods, decorated with @safe_for_db_retry
+ # on disconnect.
+ if self.use_db_reconnect and hasattr(attr, 'enable_retry'):
+ attr = wrap_db_retry(
+ retry_interval=self.retry_interval,
+ max_retries=self.max_retries,
+ inc_retry_interval=self.inc_retry_interval,
+ max_retry_interval=self.max_retry_interval)(attr)
+
+ return attr
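
Putting the pieces together, a hedged sketch of how a consumer could use
the reworked DBAPI with retries enabled (the Connection class and the
self-referencing backend module are illustrative):

    from ironic.openstack.common.db import api as db_api


    class Connection(object):
        """Toy backend; get_backend() may return any object."""

        @db_api.safe_for_db_retry
        def node_get(self, node_id):
            # A real implementation would hit the database; with
            # use_db_reconnect=True this call is retried if it raises
            # DBConnectionError.
            return {'id': node_id}


    def get_backend():
        return Connection()


    IMPL = db_api.DBAPI(__name__,  # backend name == this module
                        lazy=True,
                        use_db_reconnect=True,
                        retry_interval=1,
                        max_retries=5)

    print(IMPL.node_get(42))  # -> {'id': 42}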
diff --git a/ironic/openstack/common/db/exception.py b/ironic/openstack/common/db/exception.py
index ea712b17c..d4b8ff442 100644
--- a/ironic/openstack/common/db/exception.py
+++ b/ironic/openstack/common/db/exception.py
@@ -16,14 +16,16 @@
"""DB related custom exceptions."""
-from ironic.openstack.common.gettextutils import _ # noqa
+import six
+
+from ironic.openstack.common.gettextutils import _
class DBError(Exception):
"""Wraps an implementation specific exception."""
def __init__(self, inner_exception=None):
self.inner_exception = inner_exception
- super(DBError, self).__init__(str(inner_exception))
+ super(DBError, self).__init__(six.text_type(inner_exception))
class DBDuplicateEntry(DBError):
@@ -46,7 +48,7 @@ class DBInvalidUnicodeParameter(Exception):
class DbMigrationError(DBError):
"""Wraps migration specific exception."""
def __init__(self, message=None):
- super(DbMigrationError, self).__init__(str(message))
+ super(DbMigrationError, self).__init__(message)
class DBConnectionError(DBError):
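
The switch from str() to six.text_type() matters when the wrapped
exception carries a non-ASCII message; a small illustration:

    from ironic.openstack.common.db import exception

    try:
        raise ValueError(u'd\xe9j\xe0 vu')  # non-ASCII inner exception
    except ValueError as e:
        wrapped = exception.DBError(e)
        # six.text_type(inner_exception) keeps the message intact where
        # str() could raise UnicodeEncodeError under Python 2:
        assert wrapped.args[0] == u'd\xe9j\xe0 vu'
        assert wrapped.inner_exception is e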
diff --git a/ironic/openstack/common/db/options.py b/ironic/openstack/common/db/options.py
new file mode 100644
index 000000000..6be3a8503
--- /dev/null
+++ b/ironic/openstack/common/db/options.py
@@ -0,0 +1,168 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+from oslo.config import cfg
+
+
+database_opts = [
+ cfg.StrOpt('sqlite_db',
+ deprecated_group='DEFAULT',
+ default='ironic.sqlite',
+ help='The file name to use with SQLite'),
+ cfg.BoolOpt('sqlite_synchronous',
+ deprecated_group='DEFAULT',
+ default=True,
+ help='If True, SQLite uses synchronous mode'),
+ cfg.StrOpt('backend',
+ default='sqlalchemy',
+ deprecated_name='db_backend',
+ deprecated_group='DEFAULT',
+ help='The backend to use for db'),
+ cfg.StrOpt('connection',
+ help='The SQLAlchemy connection string used to connect to the '
+ 'database',
+ secret=True,
+ deprecated_opts=[cfg.DeprecatedOpt('sql_connection',
+ group='DEFAULT'),
+ cfg.DeprecatedOpt('sql_connection',
+ group='DATABASE'),
+ cfg.DeprecatedOpt('connection',
+ group='sql'), ]),
+ cfg.StrOpt('mysql_sql_mode',
+ help='The SQL mode to be used for MySQL sessions '
+ '(default is empty, meaning do not override '
+ 'any server-side SQL mode setting)'),
+ cfg.IntOpt('idle_timeout',
+ default=3600,
+ deprecated_opts=[cfg.DeprecatedOpt('sql_idle_timeout',
+ group='DEFAULT'),
+ cfg.DeprecatedOpt('sql_idle_timeout',
+ group='DATABASE'),
+ cfg.DeprecatedOpt('idle_timeout',
+ group='sql')],
+ help='Timeout before idle sql connections are reaped'),
+ cfg.IntOpt('min_pool_size',
+ default=1,
+ deprecated_opts=[cfg.DeprecatedOpt('sql_min_pool_size',
+ group='DEFAULT'),
+ cfg.DeprecatedOpt('sql_min_pool_size',
+ group='DATABASE')],
+ help='Minimum number of SQL connections to keep open in a '
+ 'pool'),
+ cfg.IntOpt('max_pool_size',
+ default=None,
+ deprecated_opts=[cfg.DeprecatedOpt('sql_max_pool_size',
+ group='DEFAULT'),
+ cfg.DeprecatedOpt('sql_max_pool_size',
+ group='DATABASE')],
+ help='Maximum number of SQL connections to keep open in a '
+ 'pool'),
+ cfg.IntOpt('max_retries',
+ default=10,
+ deprecated_opts=[cfg.DeprecatedOpt('sql_max_retries',
+ group='DEFAULT'),
+ cfg.DeprecatedOpt('sql_max_retries',
+ group='DATABASE')],
+ help='Maximum db connection retries during startup. '
+ '(setting -1 implies an infinite retry count)'),
+ cfg.IntOpt('retry_interval',
+ default=10,
+ deprecated_opts=[cfg.DeprecatedOpt('sql_retry_interval',
+ group='DEFAULT'),
+ cfg.DeprecatedOpt('reconnect_interval',
+ group='DATABASE')],
+ help='Interval between retries of opening a sql connection'),
+ cfg.IntOpt('max_overflow',
+ default=None,
+ deprecated_opts=[cfg.DeprecatedOpt('sql_max_overflow',
+ group='DEFAULT'),
+ cfg.DeprecatedOpt('sqlalchemy_max_overflow',
+ group='DATABASE')],
+ help='If set, use this value for max_overflow with sqlalchemy'),
+ cfg.IntOpt('connection_debug',
+ default=0,
+ deprecated_opts=[cfg.DeprecatedOpt('sql_connection_debug',
+ group='DEFAULT')],
+ help='Verbosity of SQL debugging information. 0=None, '
+ '100=Everything'),
+ cfg.BoolOpt('connection_trace',
+ default=False,
+ deprecated_opts=[cfg.DeprecatedOpt('sql_connection_trace',
+ group='DEFAULT')],
+ help='Add python stack traces to SQL as comment strings'),
+ cfg.IntOpt('pool_timeout',
+ default=None,
+ deprecated_opts=[cfg.DeprecatedOpt('sqlalchemy_pool_timeout',
+ group='DATABASE')],
+ help='If set, use this value for pool_timeout with sqlalchemy'),
+ cfg.BoolOpt('use_db_reconnect',
+ default=False,
+ help='Enable the experimental use of database reconnect '
+ 'on connection lost'),
+ cfg.IntOpt('db_retry_interval',
+ default=1,
+ help='seconds between db connection retries'),
+ cfg.BoolOpt('db_inc_retry_interval',
+ default=True,
+ help='Whether to increase interval between db connection '
+ 'retries, up to db_max_retry_interval'),
+ cfg.IntOpt('db_max_retry_interval',
+ default=10,
+ help='max seconds between db connection retries, if '
+ 'db_inc_retry_interval is enabled'),
+ cfg.IntOpt('db_max_retries',
+ default=20,
+ help='maximum db connection retries before error is raised. '
+ '(setting -1 implies an infinite retry count)'),
+]
+
+CONF = cfg.CONF
+CONF.register_opts(database_opts, 'database')
+
+
+def set_defaults(sql_connection, sqlite_db, max_pool_size=None,
+ max_overflow=None, pool_timeout=None):
+ """Set defaults for configuration variables."""
+ cfg.set_defaults(database_opts,
+ connection=sql_connection,
+ sqlite_db=sqlite_db)
+ # Update the QueuePool defaults
+ if max_pool_size is not None:
+ cfg.set_defaults(database_opts,
+ max_pool_size=max_pool_size)
+ if max_overflow is not None:
+ cfg.set_defaults(database_opts,
+ max_overflow=max_overflow)
+ if pool_timeout is not None:
+ cfg.set_defaults(database_opts,
+ pool_timeout=pool_timeout)
+
+
+def list_opts():
+ """Returns a list of oslo.config options available in the library.
+
+ The returned list includes all oslo.config options which may be registered
+ at runtime by the library.
+
+ Each element of the list is a tuple. The first element is the name of the
+ group under which the list of elements in the second element will be
+ registered. A group name of None corresponds to the [DEFAULT] group in
+ config files.
+
+ The purpose of this is to allow tools like the Oslo sample config file
+ generator to discover the options exposed to users by this library.
+
+ :returns: a list of (group_name, opts) tuples
+ """
+ return [('database', copy.deepcopy(database_opts))]
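
Usage is unchanged apart from the new module path; a hedged example of
consuming these helpers (the connection string is illustrative):

    from oslo.config import cfg

    from ironic.openstack.common.db import options

    # Override the library defaults before config files are parsed:
    options.set_defaults(
        sql_connection='sqlite:////var/lib/ironic/ironic.sqlite',
        sqlite_db='ironic.sqlite',
        max_pool_size=10)

    cfg.CONF([], project='ironic')
    print(cfg.CONF.database.connection)

    # list_opts() is what the sample-config generator consumes:
    for group, opts in options.list_opts():
        print(group, sorted(opt.name for opt in opts))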
diff --git a/ironic/openstack/common/db/sqlalchemy/migration.py b/ironic/openstack/common/db/sqlalchemy/migration.py
index 71d538204..214484bdd 100644
--- a/ironic/openstack/common/db/sqlalchemy/migration.py
+++ b/ironic/openstack/common/db/sqlalchemy/migration.py
@@ -51,13 +51,9 @@ import sqlalchemy
from sqlalchemy.schema import UniqueConstraint
from ironic.openstack.common.db import exception
-from ironic.openstack.common.db.sqlalchemy import session as db_session
from ironic.openstack.common.gettextutils import _
-get_engine = db_session.get_engine
-
-
def _get_unique_constraints(self, table):
"""Retrieve information about existing unique constraints of the table
@@ -172,11 +168,12 @@ def patch_migrate():
sqlite.SQLiteConstraintGenerator)
-def db_sync(abs_path, version=None, init_version=0):
+def db_sync(engine, abs_path, version=None, init_version=0):
"""Upgrade or downgrade a database.
Function runs the upgrade() or downgrade() functions in change scripts.
+ :param engine: SQLAlchemy engine instance for a given database
:param abs_path: Absolute path to migrate repository.
:param version: Database will upgrade/downgrade until this version.
If None - database will update to the latest
@@ -190,18 +187,23 @@ def db_sync(abs_path, version=None, init_version=0):
raise exception.DbMigrationError(
message=_("version should be an integer"))
- current_version = db_version(abs_path, init_version)
+ current_version = db_version(engine, abs_path, init_version)
repository = _find_migrate_repo(abs_path)
- _db_schema_sanity_check()
+ _db_schema_sanity_check(engine)
if version is None or version > current_version:
- return versioning_api.upgrade(get_engine(), repository, version)
+ return versioning_api.upgrade(engine, repository, version)
else:
- return versioning_api.downgrade(get_engine(), repository,
+ return versioning_api.downgrade(engine, repository,
version)
-def _db_schema_sanity_check():
- engine = get_engine()
+def _db_schema_sanity_check(engine):
+ """Ensure all database tables were created with required parameters.
+
+ :param engine: SQLAlchemy engine instance for a given database
+
+ """
+
if engine.name == 'mysql':
onlyutf8_sql = ('SELECT TABLE_NAME,TABLE_COLLATION '
'from information_schema.TABLES '
@@ -216,23 +218,23 @@ def _db_schema_sanity_check():
) % ','.join(table_names))
-def db_version(abs_path, init_version):
+def db_version(engine, abs_path, init_version):
"""Show the current version of the repository.
+ :param engine: SQLAlchemy engine instance for a given database
:param abs_path: Absolute path to migrate repository
:param version: Initial database version
"""
repository = _find_migrate_repo(abs_path)
try:
- return versioning_api.db_version(get_engine(), repository)
+ return versioning_api.db_version(engine, repository)
except versioning_exceptions.DatabaseNotControlledError:
meta = sqlalchemy.MetaData()
- engine = get_engine()
meta.reflect(bind=engine)
tables = meta.tables
if len(tables) == 0 or 'alembic_version' in tables:
- db_version_control(abs_path, init_version)
- return versioning_api.db_version(get_engine(), repository)
+ db_version_control(engine, abs_path, version=init_version)
+ return versioning_api.db_version(engine, repository)
else:
raise exception.DbMigrationError(
message=_(
@@ -241,17 +243,18 @@ def db_version(abs_path, init_version):
"manually."))
-def db_version_control(abs_path, version=None):
+def db_version_control(engine, abs_path, version=None):
"""Mark a database as under this repository's version control.
Once a database is under version control, schema changes should
only be done via change scripts in this repository.
+ :param engine: SQLAlchemy engine instance for a given database
:param abs_path: Absolute path to migrate repository
:param version: Initial database version
"""
repository = _find_migrate_repo(abs_path)
- versioning_api.version_control(get_engine(), repository, version)
+ versioning_api.version_control(engine, repository, version)
return version
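
Callers now construct the engine themselves and thread it through;
roughly (the repository path is a placeholder):

    from ironic.openstack.common.db.sqlalchemy import migration
    from ironic.openstack.common.db.sqlalchemy import session as db_session

    engine = db_session.create_engine('sqlite:///ironic.sqlite')
    repo = '/path/to/migrate_repo'  # placeholder path

    # The explicit engine argument replaces the old get_engine() global:
    migration.db_sync(engine, repo)               # upgrade to latest
    print(migration.db_version(engine, repo, 0))  # current version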
diff --git a/ironic/openstack/common/db/sqlalchemy/migration_cli/ext_alembic.py b/ironic/openstack/common/db/sqlalchemy/migration_cli/ext_alembic.py
index 1c3f55403..d3f2c18b4 100644
--- a/ironic/openstack/common/db/sqlalchemy/migration_cli/ext_alembic.py
+++ b/ironic/openstack/common/db/sqlalchemy/migration_cli/ext_alembic.py
@@ -40,6 +40,7 @@ class AlembicExtension(ext_base.MigrationExtensionBase):
repo_path = migration_config.get('alembic_repo_path')
if repo_path:
self.config.set_main_option('script_location', repo_path)
+ self.db_url = migration_config['db_url']
def upgrade(self, version):
return alembic.command.upgrade(self.config, version or 'head')
@@ -50,7 +51,7 @@ class AlembicExtension(ext_base.MigrationExtensionBase):
return alembic.command.downgrade(self.config, version)
def version(self):
- engine = db_session.get_engine()
+ engine = db_session.create_engine(self.db_url)
with engine.connect() as conn:
context = alembic_migration.MigrationContext.configure(conn)
return context.get_current_revision()
diff --git a/ironic/openstack/common/db/sqlalchemy/migration_cli/ext_migrate.py b/ironic/openstack/common/db/sqlalchemy/migration_cli/ext_migrate.py
index 22e91b2a3..95bace684 100644
--- a/ironic/openstack/common/db/sqlalchemy/migration_cli/ext_migrate.py
+++ b/ironic/openstack/common/db/sqlalchemy/migration_cli/ext_migrate.py
@@ -10,12 +10,13 @@
# License for the specific language governing permissions and limitations
# under the License.
+import logging
import os
from ironic.openstack.common.db.sqlalchemy import migration
from ironic.openstack.common.db.sqlalchemy.migration_cli import ext_base
-from ironic.openstack.common.gettextutils import _ # noqa
-from ironic.openstack.common import log as logging
+from ironic.openstack.common.db.sqlalchemy import session as db_session
+from ironic.openstack.common.gettextutils import _LE
LOG = logging.getLogger(__name__)
@@ -33,6 +34,8 @@ class MigrateExtension(ext_base.MigrationExtensionBase):
def __init__(self, migration_config):
self.repository = migration_config.get('migration_repo_path', '')
self.init_version = migration_config.get('init_version', 0)
+ self.db_url = migration_config['db_url']
+ self.engine = db_session.create_engine(self.db_url)
@property
def enabled(self):
@@ -41,7 +44,7 @@ class MigrateExtension(ext_base.MigrationExtensionBase):
def upgrade(self, version):
version = None if version == 'head' else version
return migration.db_sync(
- self.repository, version,
+ self.engine, self.repository, version,
init_version=self.init_version)
def downgrade(self, version):
@@ -51,16 +54,16 @@ class MigrateExtension(ext_base.MigrationExtensionBase):
version = self.init_version
version = int(version)
return migration.db_sync(
- self.repository, version,
+ self.engine, self.repository, version,
init_version=self.init_version)
except ValueError:
LOG.error(
- _('Migration number for migrate plugin must be valid '
- 'integer or empty, if you want to downgrade '
- 'to initial state')
+ _LE('Migration number for migrate plugin must be valid '
+ 'integer or empty, if you want to downgrade '
+ 'to initial state')
)
raise
def version(self):
return migration.db_version(
- self.repository, init_version=self.init_version)
+ self.engine, self.repository, init_version=self.init_version)
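
With these changes, both migration CLI extensions derive their engine
from a db_url key that callers must now include in migration_config; an
illustrative configuration dict:

    # Values are placeholders; 'db_url' is the new required key that
    # each extension uses to build its own engine via
    # db_session.create_engine() instead of a process-wide one.
    migration_config = {
        'alembic_repo_path': '/path/to/alembic',         # AlembicExtension
        'migration_repo_path': '/path/to/migrate_repo',  # MigrateExtension
        'init_version': 0,
        'db_url': 'mysql://ironic:secret@localhost/ironic',
    }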
diff --git a/ironic/openstack/common/db/sqlalchemy/models.py b/ironic/openstack/common/db/sqlalchemy/models.py
index d8172dbd3..fcde86a8a 100644
--- a/ironic/openstack/common/db/sqlalchemy/models.py
+++ b/ironic/openstack/common/db/sqlalchemy/models.py
@@ -26,7 +26,6 @@ from sqlalchemy import Column, Integer
from sqlalchemy import DateTime
from sqlalchemy.orm import object_mapper
-from ironic.openstack.common.db.sqlalchemy import session as sa
from ironic.openstack.common import timeutils
@@ -34,10 +33,9 @@ class ModelBase(object):
"""Base class for models."""
__table_initialized__ = False
- def save(self, session=None):
+ def save(self, session):
"""Save this object."""
- if not session:
- session = sa.get_session()
+
# NOTE(boris-42): This part of code should be look like:
# session.add(self)
# session.flush()
@@ -110,7 +108,7 @@ class SoftDeleteMixin(object):
deleted_at = Column(DateTime)
deleted = Column(Integer, default=0)
- def soft_delete(self, session=None):
+ def soft_delete(self, session):
"""Mark this object as deleted."""
self.deleted = self.id
self.deleted_at = timeutils.utcnow()
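
Since save() and soft_delete() no longer fall back to a global session,
callers supply one explicitly; a minimal self-contained sketch (the Node
model and in-memory SQLite engine are illustrative):

    from sqlalchemy import Column, Integer, String
    from sqlalchemy.ext.declarative import declarative_base

    from ironic.openstack.common.db.sqlalchemy import models
    from ironic.openstack.common.db.sqlalchemy import session as db_session


    class IronicBase(models.SoftDeleteMixin, models.ModelBase):
        pass

    Base = declarative_base(cls=IronicBase)


    class Node(Base):
        __tablename__ = 'nodes'
        id = Column(Integer, primary_key=True)
        name = Column(String(63))


    facade = db_session.EngineFacade('sqlite://')
    Base.metadata.create_all(facade.get_engine())

    session = facade.get_session()
    with session.begin():
        node = Node(name='node-1')
        node.save(session)         # an explicit session is now required
    with session.begin():
        node.soft_delete(session)  # likewise for soft deletes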
diff --git a/ironic/openstack/common/db/sqlalchemy/session.py b/ironic/openstack/common/db/sqlalchemy/session.py
index c37cf0b44..87c58972b 100644
--- a/ironic/openstack/common/db/sqlalchemy/session.py
+++ b/ironic/openstack/common/db/sqlalchemy/session.py
@@ -16,33 +16,24 @@
"""Session Handling for SQLAlchemy backend.
-Initializing:
-
-* Call set_defaults with the minimal of the following kwargs:
- sql_connection, sqlite_db
-
- Example::
-
- session.set_defaults(
- sql_connection="sqlite:///var/lib/ironic/sqlite.db",
- sqlite_db="/var/lib/ironic/sqlite.db")
-
Recommended ways to use sessions within this framework:
-* Don't use them explicitly; this is like running with AUTOCOMMIT=1.
- model_query() will implicitly use a session when called without one
+* Don't use them explicitly; this is like running with ``AUTOCOMMIT=1``.
+ `model_query()` will implicitly use a session when called without one
supplied. This is the ideal situation because it will allow queries
to be automatically retried if the database connection is interrupted.
- Note: Automatic retry will be enabled in a future patch.
+ .. note:: Automatic retry will be enabled in a future patch.
It is generally fine to issue several queries in a row like this. Even though
they may be run in separate transactions and/or separate sessions, each one
will see the data from the prior calls. If needed, undo- or rollback-like
functionality should be handled at a logical level. For an example, look at
- the code around quotas and reservation_rollback().
+ the code around quotas and `reservation_rollback()`.
- Examples::
+ Examples:
+
+ .. code:: python
def get_foo(context, foo):
return (model_query(context, models.Foo).
@@ -61,28 +52,29 @@ Recommended ways to use sessions within this framework:
return foo_ref
-* Within the scope of a single method, keeping all the reads and writes within
- the context managed by a single session. In this way, the session's __exit__
- handler will take care of calling flush() and commit() for you.
- If using this approach, you should not explicitly call flush() or commit().
- Any error within the context of the session will cause the session to emit
- a ROLLBACK. Database Errors like IntegrityError will be raised in
- session's __exit__ handler, and any try/except within the context managed
- by session will not be triggered. And catching other non-database errors in
- the session will not trigger the ROLLBACK, so exception handlers should
- always be outside the session, unless the developer wants to do a partial
- commit on purpose. If the connection is dropped before this is possible,
- the database will implicitly roll back the transaction.
+* Within the scope of a single method, keep all the reads and writes within
+ the context managed by a single session. In this way, the session's
+ `__exit__` handler will take care of calling `flush()` and `commit()` for
+ you. If using this approach, you should not explicitly call `flush()` or
+ `commit()`. Any error within the context of the session will cause the
+ session to emit a `ROLLBACK`. Database errors like `IntegrityError` will be
+ raised in `session`'s `__exit__` handler, and any try/except within the
+ context managed by `session` will not be triggered. And catching other
+ non-database errors in the session will not trigger the ROLLBACK, so
+ exception handlers should always be outside the session, unless the
+ developer wants to do a partial commit on purpose. If the connection is
+ dropped before this is possible, the database will implicitly roll back the
+ transaction.
- Note: statements in the session scope will not be automatically retried.
+ .. note:: Statements in the session scope will not be automatically retried.
If you create models within the session, they need to be added, but you
- do not need to call model.save()
+ do not need to call `model.save()`:
- ::
+ .. code:: python
def create_many_foo(context, foos):
- session = get_session()
+ session = sessionmaker()
with session.begin():
for foo in foos:
foo_ref = models.Foo()
@@ -90,7 +82,7 @@ Recommended ways to use sessions within this framework:
session.add(foo_ref)
def update_bar(context, foo_id, newbar):
- session = get_session()
+ session = sessionmaker()
with session.begin():
foo_ref = (model_query(context, models.Foo, session).
filter_by(id=foo_id).
@@ -99,11 +91,16 @@ Recommended ways to use sessions within this framework:
filter_by(id=foo_ref['bar_id']).
update({'bar': newbar}))
- Note: update_bar is a trivially simple example of using "with session.begin".
- Whereas create_many_foo is a good example of when a transaction is needed,
- it is always best to use as few queries as possible. The two queries in
- update_bar can be better expressed using a single query which avoids
- the need for an explicit transaction. It can be expressed like so::
+ .. note:: `update_bar` is a trivially simple example of using
+ ``with session.begin``. Whereas `create_many_foo` is a good example of
+ when a transaction is needed, it is always best to use as few queries as
+ possible.
+
+ The two queries in `update_bar` can be better expressed using a single query
+ which avoids the need for an explicit transaction. It can be expressed like
+ so:
+
+ .. code:: python
def update_bar(context, foo_id, newbar):
subq = (model_query(context, models.Foo.id).
@@ -114,21 +111,25 @@ Recommended ways to use sessions within this framework:
filter_by(id=subq.as_scalar()).
update({'bar': newbar}))
- For reference, this emits approximately the following SQL statement::
+ For reference, this emits approximately the following SQL statement:
+
+ .. code:: sql
UPDATE bar SET bar = ${newbar}
WHERE id=(SELECT bar_id FROM foo WHERE id = ${foo_id} LIMIT 1);
- Note: create_duplicate_foo is a trivially simple example of catching an
- exception while using "with session.begin". Here create two duplicate
- instances with same primary key, must catch the exception out of context
- managed by a single session:
+ .. note:: `create_duplicate_foo` is a trivially simple example of catching an
+ exception while using ``with session.begin``. Here we create two duplicate
+ instances with the same primary key; the exception must be caught outside
+ the context managed by a single session:
+
+ .. code:: python
def create_duplicate_foo(context):
foo1 = models.Foo()
foo2 = models.Foo()
foo1.id = foo2.id = 1
- session = get_session()
+ session = sessionmaker()
try:
with session.begin():
session.add(foo1)
@@ -138,7 +139,7 @@ Recommended ways to use sessions within this framework:
* Passing an active session between methods. Sessions should only be passed
to private methods. The private method must use a subtransaction; otherwise
- SQLAlchemy will throw an error when you call session.begin() on an existing
+ SQLAlchemy will throw an error when you call `session.begin()` on an existing
transaction. Public methods should not accept a session parameter and should
not be involved in sessions within the caller's scope.
@@ -151,10 +152,10 @@ Recommended ways to use sessions within this framework:
becomes less clear in this situation. When this is needed for code clarity,
it should be clearly documented.
- ::
+ .. code:: python
def myfunc(foo):
- session = get_session()
+ session = sessionmaker()
with session.begin():
# do some database things
bar = _private_func(foo, session)
@@ -162,7 +163,7 @@ Recommended ways to use sessions within this framework:
def _private_func(foo, session=None):
if not session:
- session = get_session()
+ session = sessionmaker()
with session.begin(subtransaction=True):
# do some other database things
return bar
@@ -172,13 +173,13 @@ There are some things which it is best to avoid:
* Don't keep a transaction open any longer than necessary.
- This means that your "with session.begin()" block should be as short
+ This means that your ``with session.begin()`` block should be as short
as possible, while still containing all the related calls for that
transaction.
-* Avoid "with_lockmode('UPDATE')" when possible.
+* Avoid ``with_lockmode('UPDATE')`` when possible.
- In MySQL/InnoDB, when a "SELECT ... FOR UPDATE" query does not match
+ In MySQL/InnoDB, when a ``SELECT ... FOR UPDATE`` query does not match
any rows, it will take a gap-lock. This is a form of write-lock on the
"gap" where no rows exist, and prevents any other writes to that space.
This can effectively prevent any INSERT into a table by locking the gap
@@ -189,15 +190,18 @@ There are some things which it is best to avoid:
number of rows matching a query, and if only one row is returned,
then issue the SELECT FOR UPDATE.
- The better long-term solution is to use INSERT .. ON DUPLICATE KEY UPDATE.
+ The better long-term solution is to use
+ ``INSERT .. ON DUPLICATE KEY UPDATE``.
However, this can not be done until the "deleted" columns are removed and
proper UNIQUE constraints are added to the tables.
Enabling soft deletes:
-* To use/enable soft-deletes, the SoftDeleteMixin must be added
- to your model class. For example::
+* To use/enable soft-deletes, the `SoftDeleteMixin` must be added
+ to your model class. For example:
+
+ .. code:: python
class NovaBase(models.SoftDeleteMixin, models.ModelBase):
pass
@@ -205,15 +209,16 @@ Enabling soft deletes:
Efficient use of soft deletes:
-* There are two possible ways to mark a record as deleted::
+* There are two possible ways to mark a record as deleted:
+ `model.soft_delete()` and `query.soft_delete()`.
- model.soft_delete() and query.soft_delete().
+ The `model.soft_delete()` method works with a single already-fetched entry.
+ `query.soft_delete()` makes only one db request for all entries that
+ correspond to the query.
- model.soft_delete() method works with single already fetched entry.
- query.soft_delete() makes only one db request for all entries that correspond
- to query.
+* In almost all cases you should use `query.soft_delete()`. Some examples:
-* In almost all cases you should use query.soft_delete(). Some examples::
+ .. code:: python
def soft_delete_bar():
count = model_query(BarModel).find(some_condition).soft_delete()
@@ -222,7 +227,7 @@ Efficient use of soft deletes:
def complex_soft_delete_with_synchronization_bar(session=None):
if session is None:
- session = get_session()
+ session = sessionmaker()
with session.begin(subtransactions=True):
count = (model_query(BarModel).
find(some_condition).
@@ -232,24 +237,26 @@ Efficient use of soft deletes:
if count == 0:
raise Exception("0 entries were soft deleted")
-* There is only one situation where model.soft_delete() is appropriate: when
+* There is only one situation where `model.soft_delete()` is appropriate: when
you fetch a single record, work with it, and mark it as deleted in the same
transaction.
- ::
+ .. code:: python
def soft_delete_bar_model():
- session = get_session()
+ session = sessionmaker()
with session.begin():
bar_ref = model_query(BarModel).find(some_condition).first()
# Work with bar_ref
bar_ref.soft_delete(session=session)
However, if you need to work with all entries that correspond to query and
- then soft delete them you should use query.soft_delete() method::
+ then soft delete them you should use the `query.soft_delete()` method:
+
+ .. code:: python
def soft_delete_multi_models():
- session = get_session()
+ session = sessionmaker()
with session.begin():
query = (model_query(BarModel, session=session).
find(some_condition))
@@ -260,22 +267,22 @@ Efficient use of soft deletes:
# session and these entries are not used after this.
When working with many rows, it is very important to use query.soft_delete,
- which issues a single query. Using model.soft_delete(), as in the following
+ which issues a single query. Using `model.soft_delete()`, as in the following
example, is very inefficient.
- ::
+ .. code:: python
for bar_ref in bar_refs:
bar_ref.soft_delete(session=session)
# This will produce count(bar_refs) db requests.
+
"""
import functools
-import os.path
+import logging
import re
import time
-from oslo.config import cfg
import six
from sqlalchemy import exc as sqla_exc
from sqlalchemy.interfaces import PoolListener
@@ -284,151 +291,12 @@ from sqlalchemy.pool import NullPool, StaticPool
from sqlalchemy.sql.expression import literal_column
from ironic.openstack.common.db import exception
-from ironic.openstack.common.gettextutils import _
-from ironic.openstack.common import log as logging
+from ironic.openstack.common.gettextutils import _LE, _LW, _LI
from ironic.openstack.common import timeutils
-sqlite_db_opts = [
- cfg.StrOpt('sqlite_db',
- default='ironic.sqlite',
- help='The file name to use with SQLite'),
- cfg.BoolOpt('sqlite_synchronous',
- default=True,
- help='If True, SQLite uses synchronous mode'),
-]
-
-database_opts = [
- cfg.StrOpt('connection',
- default='sqlite:///' +
- os.path.abspath(os.path.join(os.path.dirname(__file__),
- '../', '$sqlite_db')),
- help='The SQLAlchemy connection string used to connect to the '
- 'database',
- secret=True,
- deprecated_opts=[cfg.DeprecatedOpt('sql_connection',
- group='DEFAULT'),
- cfg.DeprecatedOpt('sql_connection',
- group='DATABASE'),
- cfg.DeprecatedOpt('connection',
- group='sql'), ]),
- cfg.StrOpt('slave_connection',
- default='',
- secret=True,
- help='The SQLAlchemy connection string used to connect to the '
- 'slave database'),
- cfg.IntOpt('idle_timeout',
- default=3600,
- deprecated_opts=[cfg.DeprecatedOpt('sql_idle_timeout',
- group='DEFAULT'),
- cfg.DeprecatedOpt('sql_idle_timeout',
- group='DATABASE'),
- cfg.DeprecatedOpt('idle_timeout',
- group='sql')],
- help='Timeout before idle sql connections are reaped'),
- cfg.IntOpt('min_pool_size',
- default=1,
- deprecated_opts=[cfg.DeprecatedOpt('sql_min_pool_size',
- group='DEFAULT'),
- cfg.DeprecatedOpt('sql_min_pool_size',
- group='DATABASE')],
- help='Minimum number of SQL connections to keep open in a '
- 'pool'),
- cfg.IntOpt('max_pool_size',
- default=None,
- deprecated_opts=[cfg.DeprecatedOpt('sql_max_pool_size',
- group='DEFAULT'),
- cfg.DeprecatedOpt('sql_max_pool_size',
- group='DATABASE')],
- help='Maximum number of SQL connections to keep open in a '
- 'pool'),
- cfg.IntOpt('max_retries',
- default=10,
- deprecated_opts=[cfg.DeprecatedOpt('sql_max_retries',
- group='DEFAULT'),
- cfg.DeprecatedOpt('sql_max_retries',
- group='DATABASE')],
- help='Maximum db connection retries during startup. '
- '(setting -1 implies an infinite retry count)'),
- cfg.IntOpt('retry_interval',
- default=10,
- deprecated_opts=[cfg.DeprecatedOpt('sql_retry_interval',
- group='DEFAULT'),
- cfg.DeprecatedOpt('reconnect_interval',
- group='DATABASE')],
- help='Interval between retries of opening a sql connection'),
- cfg.IntOpt('max_overflow',
- default=None,
- deprecated_opts=[cfg.DeprecatedOpt('sql_max_overflow',
- group='DEFAULT'),
- cfg.DeprecatedOpt('sqlalchemy_max_overflow',
- group='DATABASE')],
- help='If set, use this value for max_overflow with sqlalchemy'),
- cfg.IntOpt('connection_debug',
- default=0,
- deprecated_opts=[cfg.DeprecatedOpt('sql_connection_debug',
- group='DEFAULT')],
- help='Verbosity of SQL debugging information. 0=None, '
- '100=Everything'),
- cfg.BoolOpt('connection_trace',
- default=False,
- deprecated_opts=[cfg.DeprecatedOpt('sql_connection_trace',
- group='DEFAULT')],
- help='Add python stack traces to SQL as comment strings'),
- cfg.IntOpt('pool_timeout',
- default=None,
- deprecated_opts=[cfg.DeprecatedOpt('sqlalchemy_pool_timeout',
- group='DATABASE')],
- help='If set, use this value for pool_timeout with sqlalchemy'),
-]
-
-CONF = cfg.CONF
-CONF.register_opts(sqlite_db_opts)
-CONF.register_opts(database_opts, 'database')
LOG = logging.getLogger(__name__)
-_ENGINE = None
-_MAKER = None
-_SLAVE_ENGINE = None
-_SLAVE_MAKER = None
-
-
-def set_defaults(sql_connection, sqlite_db, max_pool_size=None,
- max_overflow=None, pool_timeout=None):
- """Set defaults for configuration variables."""
- cfg.set_defaults(database_opts,
- connection=sql_connection)
- cfg.set_defaults(sqlite_db_opts,
- sqlite_db=sqlite_db)
- # Update the QueuePool defaults
- if max_pool_size is not None:
- cfg.set_defaults(database_opts,
- max_pool_size=max_pool_size)
- if max_overflow is not None:
- cfg.set_defaults(database_opts,
- max_overflow=max_overflow)
- if pool_timeout is not None:
- cfg.set_defaults(database_opts,
- pool_timeout=pool_timeout)
-
-
-def cleanup():
- global _ENGINE, _MAKER
- global _SLAVE_ENGINE, _SLAVE_MAKER
-
- if _MAKER:
- _MAKER.close_all()
- _MAKER = None
- if _ENGINE:
- _ENGINE.dispose()
- _ENGINE = None
- if _SLAVE_MAKER:
- _SLAVE_MAKER.close_all()
- _SLAVE_MAKER = None
- if _SLAVE_ENGINE:
- _SLAVE_ENGINE.dispose()
- _SLAVE_ENGINE = None
-
class SqliteForeignKeysListener(PoolListener):
"""Ensures that the foreign key constraints are enforced in SQLite.
@@ -441,30 +309,6 @@ class SqliteForeignKeysListener(PoolListener):
dbapi_con.execute('pragma foreign_keys=ON')
-def get_session(autocommit=True, expire_on_commit=False, sqlite_fk=False,
- slave_session=False, mysql_traditional_mode=False):
- """Return a SQLAlchemy session."""
- global _MAKER
- global _SLAVE_MAKER
- maker = _MAKER
-
- if slave_session:
- maker = _SLAVE_MAKER
-
- if maker is None:
- engine = get_engine(sqlite_fk=sqlite_fk, slave_engine=slave_session,
- mysql_traditional_mode=mysql_traditional_mode)
- maker = get_maker(engine, autocommit, expire_on_commit)
-
- if slave_session:
- _SLAVE_MAKER = maker
- else:
- _MAKER = maker
-
- session = maker()
- return session
-
-
# note(boris-42): In current versions of DB backends unique constraint
# violation messages follow the structure:
#
@@ -473,9 +317,9 @@ def get_session(autocommit=True, expire_on_commit=False, sqlite_fk=False,
# N columns - (IntegrityError) column c1, c2, ..., N are not unique
#
# sqlite since 3.7.16:
-# 1 column - (IntegrityError) UNIQUE constraint failed: k1
+# 1 column - (IntegrityError) UNIQUE constraint failed: tbl.k1
#
-# N columns - (IntegrityError) UNIQUE constraint failed: k1, k2
+# N columns - (IntegrityError) UNIQUE constraint failed: tbl.k1, tbl.k2
#
# postgres:
# 1 column - (IntegrityError) duplicate key value violates unique
@@ -488,11 +332,20 @@ def get_session(autocommit=True, expire_on_commit=False, sqlite_fk=False,
# 'c1'")
# N columns - (IntegrityError) (1062, "Duplicate entry 'values joined
# with -' for key 'name_of_our_constraint'")
+#
+# ibm_db_sa:
+# N columns - (IntegrityError) SQL0803N One or more values in the INSERT
+# statement, UPDATE statement, or foreign key update caused by a
+# DELETE statement are not valid because the primary key, unique
+# constraint or unique index identified by "2" constrains table
+# "NOVA.KEY_PAIRS" from having duplicate values for the index
+# key.
_DUP_KEY_RE_DB = {
"sqlite": (re.compile(r"^.*columns?([^)]+)(is|are)\s+not\s+unique$"),
re.compile(r"^.*UNIQUE\s+constraint\s+failed:\s+(.+)$")),
"postgresql": (re.compile(r"^.*duplicate\s+key.*\"([^\"]+)\"\s*\n.*$"),),
- "mysql": (re.compile(r"^.*\(1062,.*'([^\']+)'\"\)$"),)
+ "mysql": (re.compile(r"^.*\(1062,.*'([^\']+)'\"\)$"),),
+ "ibm_db_sa": (re.compile(r"^.*SQL0803N.*$"),),
}
@@ -514,7 +367,7 @@ def _raise_if_duplicate_entry_error(integrity_error, engine_name):
return [columns]
return columns[len(uniqbase):].split("0")[1:]
- if engine_name not in ["mysql", "sqlite", "postgresql"]:
+ if engine_name not in ["ibm_db_sa", "mysql", "sqlite", "postgresql"]:
return
# FIXME(johannes): The usage of the .message attribute has been
@@ -529,10 +382,15 @@ def _raise_if_duplicate_entry_error(integrity_error, engine_name):
else:
return
- columns = match.group(1)
+ # NOTE(mriedem): The ibm_db_sa integrity error message doesn't provide the
+ # columns so we have to omit that from the DBDuplicateEntry error.
+ columns = ''
+
+ if engine_name != 'ibm_db_sa':
+ columns = match.group(1)
if engine_name == "sqlite":
- columns = columns.strip().split(", ")
+ columns = [c.split('.')[-1] for c in columns.strip().split(", ")]
else:
columns = get_columns_from_uniq_cons_or_name(columns)
raise exception.DBDuplicateEntry(columns, integrity_error)
@@ -570,57 +428,39 @@ def _raise_if_deadlock_error(operational_error, engine_name):
def _wrap_db_error(f):
+ # TODO(rpodolyaka): in a subsequent commit make this a class decorator to
+ # ensure it can only be applied to instances of Session subclasses (as we
+ # use the Session instance's bind attribute below)
+
@functools.wraps(f)
- def _wrap(*args, **kwargs):
+ def _wrap(self, *args, **kwargs):
try:
- return f(*args, **kwargs)
+ return f(self, *args, **kwargs)
except UnicodeEncodeError:
raise exception.DBInvalidUnicodeParameter()
- # note(boris-42): We should catch unique constraint violation and
- # wrap it by our own DBDuplicateEntry exception. Unique constraint
- # violation is wrapped by IntegrityError.
except sqla_exc.OperationalError as e:
- _raise_if_deadlock_error(e, get_engine().name)
+ _raise_if_db_connection_lost(e, self.bind)
+ _raise_if_deadlock_error(e, self.bind.dialect.name)
# NOTE(comstud): A lot of code is checking for OperationalError
# so let's not wrap it for now.
raise
+ # note(boris-42): We should catch unique constraint violation and
+ # wrap it by our own DBDuplicateEntry exception. Unique constraint
+ # violation is wrapped by IntegrityError.
except sqla_exc.IntegrityError as e:
# note(boris-42): SqlAlchemy doesn't unify errors from different
# DBs so we must do this. Also in some tables (for example
# instance_types) there are more than one unique constraint. This
# means we should get names of columns, which values violate
# unique constraint, from error message.
- _raise_if_duplicate_entry_error(e, get_engine().name)
+ _raise_if_duplicate_entry_error(e, self.bind.dialect.name)
raise exception.DBError(e)
except Exception as e:
- LOG.exception(_('DB exception wrapped.'))
+ LOG.exception(_LE('DB exception wrapped.'))
raise exception.DBError(e)
return _wrap
-def get_engine(sqlite_fk=False, slave_engine=False,
- mysql_traditional_mode=False):
- """Return a SQLAlchemy engine."""
- global _ENGINE
- global _SLAVE_ENGINE
- engine = _ENGINE
- db_uri = CONF.database.connection
-
- if slave_engine:
- engine = _SLAVE_ENGINE
- db_uri = CONF.database.slave_connection
-
- if engine is None:
- engine = create_engine(db_uri, sqlite_fk=sqlite_fk,
- mysql_traditional_mode=mysql_traditional_mode)
- if slave_engine:
- _SLAVE_ENGINE = engine
- else:
- _ENGINE = engine
-
- return engine
-
-
def _synchronous_switch_listener(dbapi_conn, connection_rec):
"""Switch sqlite connections to non-synchronous mode."""
dbapi_conn.execute("PRAGMA synchronous = OFF")
@@ -662,7 +502,7 @@ def _ping_listener(engine, dbapi_conn, connection_rec, connection_proxy):
cursor.execute(ping_sql)
except Exception as ex:
if engine.dialect.is_disconnect(ex, dbapi_conn, cursor):
- msg = _('Database server has gone away: %s') % ex
+ msg = _LW('Database server has gone away: %s') % ex
LOG.warning(msg)
raise sqla_exc.DisconnectionError(msg)
else:
@@ -677,7 +517,44 @@ def _set_mode_traditional(dbapi_con, connection_rec, connection_proxy):
than a declared field just with warning. That is fraught with data
corruption.
"""
- dbapi_con.cursor().execute("SET SESSION sql_mode = TRADITIONAL;")
+ _set_session_sql_mode(dbapi_con, connection_rec,
+ connection_proxy, 'TRADITIONAL')
+
+
+def _set_session_sql_mode(dbapi_con, connection_rec,
+ connection_proxy, sql_mode=None):
+ """Set the sql_mode session variable.
+
+ MySQL supports several server modes. The default is None, but sessions
+ may choose to enable server modes like TRADITIONAL, ANSI,
+ several STRICT_* modes and others.
+
+ Note: passing in '' (empty string) for sql_mode clears
+ the SQL mode for the session, overriding a potentially set
+ server default. Passing in None (the default) makes this
+ a no-op, meaning if a server-side SQL mode is set, it still applies.
+ """
+ cursor = dbapi_con.cursor()
+ if sql_mode is not None:
+ cursor.execute("SET SESSION sql_mode = %s", [sql_mode])
+
+ # Check against the real effective SQL mode. Even when unset by
+ # our own config, the server may still be operating in a specific
+ # SQL mode as set by the server configuration
+ cursor.execute("SHOW VARIABLES LIKE 'sql_mode'")
+ row = cursor.fetchone()
+ if row is None:
+ LOG.warning(_LW('Unable to detect effective SQL mode'))
+ return
+ realmode = row[1]
+ LOG.info(_LI('MySQL server mode set to %s') % realmode)
+ # 'TRADITIONAL' mode enables several other modes, so
+ # we need a substring match here
+ if not ('TRADITIONAL' in realmode.upper() or
+ 'STRICT_ALL_TABLES' in realmode.upper()):
+ LOG.warning(_LW("MySQL SQL mode is '%s', "
+ "consider enabling TRADITIONAL or STRICT_ALL_TABLES")
+ % realmode)
def _is_db_connection_error(args):
@@ -692,66 +569,79 @@ def _is_db_connection_error(args):
return False
-def create_engine(sql_connection, sqlite_fk=False,
- mysql_traditional_mode=False):
+def _raise_if_db_connection_lost(error, engine):
+ # NOTE(vsergeyev): Function is_disconnect(e, connection, cursor)
+ # requires connection and cursor in incoming parameters,
+ # but we have no possibility to create connection if DB
+ # is not available, so in such case reconnect fails.
+ # But is_disconnect() ignores these parameters, so it
+ # makes sense to pass to function None as placeholder
+ # instead of connection and cursor.
+ if engine.dialect.is_disconnect(error, None, None):
+ raise exception.DBConnectionError(error)
+
+
+def create_engine(sql_connection, sqlite_fk=False, mysql_sql_mode=None,
+ mysql_traditional_mode=False, idle_timeout=3600,
+ connection_debug=0, max_pool_size=None, max_overflow=None,
+ pool_timeout=None, sqlite_synchronous=True,
+ connection_trace=False, max_retries=10, retry_interval=10):
"""Return a new SQLAlchemy engine."""
- # NOTE(geekinutah): At this point we could be connecting to the normal
- # db handle or the slave db handle. Things like
- # _wrap_db_error aren't going to work well if their
- # backends don't match. Let's check.
- _assert_matching_drivers()
+
connection_dict = sqlalchemy.engine.url.make_url(sql_connection)
engine_args = {
- "pool_recycle": CONF.database.idle_timeout,
- "echo": False,
+ "pool_recycle": idle_timeout,
'convert_unicode': True,
}
- # Map our SQL debug level to SQLAlchemy's options
- if CONF.database.connection_debug >= 100:
- engine_args['echo'] = 'debug'
- elif CONF.database.connection_debug >= 50:
- engine_args['echo'] = True
+ logger = logging.getLogger('sqlalchemy.engine')
+
+ # Map SQL debug level to Python log level
+ if connection_debug >= 100:
+ logger.setLevel(logging.DEBUG)
+ elif connection_debug >= 50:
+ logger.setLevel(logging.INFO)
+ else:
+ logger.setLevel(logging.WARNING)
if "sqlite" in connection_dict.drivername:
if sqlite_fk:
engine_args["listeners"] = [SqliteForeignKeysListener()]
engine_args["poolclass"] = NullPool
- if CONF.database.connection == "sqlite://":
+ if sql_connection == "sqlite://":
engine_args["poolclass"] = StaticPool
engine_args["connect_args"] = {'check_same_thread': False}
else:
- if CONF.database.max_pool_size is not None:
- engine_args['pool_size'] = CONF.database.max_pool_size
- if CONF.database.max_overflow is not None:
- engine_args['max_overflow'] = CONF.database.max_overflow
- if CONF.database.pool_timeout is not None:
- engine_args['pool_timeout'] = CONF.database.pool_timeout
+ if max_pool_size is not None:
+ engine_args['pool_size'] = max_pool_size
+ if max_overflow is not None:
+ engine_args['max_overflow'] = max_overflow
+ if pool_timeout is not None:
+ engine_args['pool_timeout'] = pool_timeout
engine = sqlalchemy.create_engine(sql_connection, **engine_args)
sqlalchemy.event.listen(engine, 'checkin', _thread_yield)
if engine.name in ['mysql', 'ibm_db_sa']:
- callback = functools.partial(_ping_listener, engine)
- sqlalchemy.event.listen(engine, 'checkout', callback)
- if mysql_traditional_mode:
- sqlalchemy.event.listen(engine, 'checkout', _set_mode_traditional)
- else:
- LOG.warning(_("This application has not enabled MySQL traditional"
- " mode, which means silent data corruption may"
- " occur. Please encourage the application"
- " developers to enable this mode."))
+ ping_callback = functools.partial(_ping_listener, engine)
+ sqlalchemy.event.listen(engine, 'checkout', ping_callback)
+ if engine.name == 'mysql':
+ if mysql_traditional_mode:
+ mysql_sql_mode = 'TRADITIONAL'
+ if mysql_sql_mode:
+ mode_callback = functools.partial(_set_session_sql_mode,
+ sql_mode=mysql_sql_mode)
+ sqlalchemy.event.listen(engine, 'checkout', mode_callback)
elif 'sqlite' in connection_dict.drivername:
- if not CONF.sqlite_synchronous:
+ if not sqlite_synchronous:
sqlalchemy.event.listen(engine, 'connect',
_synchronous_switch_listener)
sqlalchemy.event.listen(engine, 'connect', _add_regexp_listener)
- if (CONF.database.connection_trace and
- engine.dialect.dbapi.__name__ == 'MySQLdb'):
+ if connection_trace and engine.dialect.dbapi.__name__ == 'MySQLdb':
_patch_mysqldb_with_stacktrace_comments()
try:
@@ -760,15 +650,15 @@ def create_engine(sql_connection, sqlite_fk=False,
if not _is_db_connection_error(e.args[0]):
raise
- remaining = CONF.database.max_retries
+ remaining = max_retries
if remaining == -1:
remaining = 'infinite'
while True:
- msg = _('SQL connection failed. %s attempts left.')
+ msg = _LW('SQL connection failed. %s attempts left.')
LOG.warning(msg % remaining)
if remaining != 'infinite':
remaining -= 1
- time.sleep(CONF.database.retry_interval)
+ time.sleep(retry_interval)
try:
engine.connect()
break
@@ -855,13 +745,116 @@ def _patch_mysqldb_with_stacktrace_comments():
setattr(MySQLdb.cursors.BaseCursor, '_do_query', _do_query)
-def _assert_matching_drivers():
- """Make sure slave handle and normal handle have the same driver."""
- # NOTE(geekinutah): There's no use case for writing to one backend and
- # reading from another. Who knows what the future holds?
- if CONF.database.slave_connection == '':
- return
+class EngineFacade(object):
+ """A helper class for removing of global engine instances from ironic.db.
+
+ As a library, ironic.db can't decide where to store/when to create engine
+ and sessionmaker instances, so this must be left for a target application.
+
+ On the other hand, in order to simplify the adoption of ironic.db changes,
+ we'll provide a helper class, which creates engine and sessionmaker
+ on its instantiation and provides get_engine()/get_session() methods
+ that are compatible with corresponding utility functions that currently
+ exist in target projects, e.g. in Nova.
+
+ engine/sessionmaker instances will still be global (and they are meant to
+ be global), but they will be stored in the app context, rather than in the
+ ironic.db context.
+
+ Note: using this helper is completely optional and you are encouraged to
+ integrate engine/sessionmaker instances into your apps any way you like
+ (e.g. one might want to bind a session to a request context). Two important
+ things to remember:
+ 1. An Engine instance is effectively a pool of DB connections, so it's
+ meant to be shared (and it's thread-safe).
+ 2. A Session instance is not meant to be shared and represents a DB
+ transactional context (i.e. it's not thread-safe). sessionmaker is
+ a factory of sessions.
+
+ """
- normal = sqlalchemy.engine.url.make_url(CONF.database.connection)
- slave = sqlalchemy.engine.url.make_url(CONF.database.slave_connection)
- assert normal.drivername == slave.drivername
+ def __init__(self, sql_connection,
+ sqlite_fk=False, mysql_sql_mode=None,
+ autocommit=True, expire_on_commit=False, **kwargs):
+ """Initialize engine and sessionmaker instances.
+
+ :param sqlite_fk: enable foreign keys in SQLite
+ :type sqlite_fk: bool
+
+ :param mysql_sql_mode: set SQL mode in MySQL
+ :type mysql_sql_mode: string
+
+ :param autocommit: use autocommit mode for created Session instances
+ :type autocommit: bool
+
+ :param expire_on_commit: expire session objects on commit
+ :type expire_on_commit: bool
+
+ Keyword arguments:
+
+ :keyword idle_timeout: timeout before idle sql connections are reaped
+ (defaults to 3600)
+ :keyword connection_debug: verbosity of SQL debugging information.
+ 0=None, 100=Everything (defaults to 0)
+ :keyword max_pool_size: maximum number of SQL connections to keep open
+ in a pool (defaults to SQLAlchemy settings)
+ :keyword max_overflow: if set, use this value for max_overflow with
+ sqlalchemy (defaults to SQLAlchemy settings)
+ :keyword pool_timeout: if set, use this value for pool_timeout with
+ sqlalchemy (defaults to SQLAlchemy settings)
+ :keyword sqlite_synchronous: if True, SQLite uses synchronous mode
+ (defaults to True)
+ :keyword connection_trace: add python stack traces to SQL as comment
+ strings (defaults to False)
+ :keyword max_retries: maximum db connection retries during startup.
+ (setting -1 implies an infinite retry count)
+ (defaults to 10)
+ :keyword retry_interval: interval between retries of opening a sql
+ connection (defaults to 10)
+
+ """
+
+ super(EngineFacade, self).__init__()
+
+ self._engine = create_engine(
+ sql_connection=sql_connection,
+ sqlite_fk=sqlite_fk,
+ mysql_sql_mode=mysql_sql_mode,
+ idle_timeout=kwargs.get('idle_timeout', 3600),
+ connection_debug=kwargs.get('connection_debug', 0),
+ max_pool_size=kwargs.get('max_pool_size'),
+ max_overflow=kwargs.get('max_overflow'),
+ pool_timeout=kwargs.get('pool_timeout'),
+ sqlite_synchronous=kwargs.get('sqlite_synchronous', True),
+ connection_trace=kwargs.get('connection_trace', False),
+ max_retries=kwargs.get('max_retries', 10),
+ retry_interval=kwargs.get('retry_interval', 10))
+ self._session_maker = get_maker(
+ engine=self._engine,
+ autocommit=autocommit,
+ expire_on_commit=expire_on_commit)
+
+ def get_engine(self):
+ """Get the engine instance (note, that it's shared)."""
+
+ return self._engine
+
+ def get_session(self, **kwargs):
+ """Get a Session instance.
+
+ If passed, keyword argument values override the ones used when the
+ sessionmaker instance was created.
+
+ :keyword autocommit: use autocommit mode for created Session instances
+ :type autocommit: bool
+
+ :keyword expire_on_commit: expire session objects on commit
+ :type expire_on_commit: bool
+
+ """
+
+ # Iterate over a copy of the keys: we delete from kwargs while
+ # looping, which is not allowed on a live dict.
+ for arg in list(kwargs):
+ if arg not in ('autocommit', 'expire_on_commit'):
+ del kwargs[arg]
+
+ return self._session_maker(**kwargs)
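
For context, a minimal usage sketch of the new facade (the sqlite URI and
variable names are illustrative, not part of this patch): an application
would typically create one EngineFacade at startup and hand out sessions
per unit of work.

    from ironic.openstack.common.db.sqlalchemy import session as db_session

    # Created once, at application startup. The engine wrapped by the
    # facade is a shared, thread-safe connection pool.
    facade = db_session.EngineFacade('sqlite://')

    engine = facade.get_engine()    # the same shared Engine every time
    session = facade.get_session()  # a fresh Session per transaction

    with session.begin():
        pass  # issue queries here; a Session must not be shared by threads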
diff --git a/ironic/openstack/common/db/sqlalchemy/test_base.py b/ironic/openstack/common/db/sqlalchemy/test_base.py
new file mode 100644
index 000000000..12386f360
--- /dev/null
+++ b/ironic/openstack/common/db/sqlalchemy/test_base.py
@@ -0,0 +1,149 @@
+# Copyright (c) 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import abc
+import functools
+import os
+
+import fixtures
+import six
+
+from ironic.openstack.common.db.sqlalchemy import session
+from ironic.openstack.common.db.sqlalchemy import utils
+from ironic.openstack.common import test
+
+
+class DbFixture(fixtures.Fixture):
+ """Basic database fixture.
+
+ Allows tests to run on various db backends, such as SQLite, MySQL and
+ PostgreSQL. The sqlite backend is used by default. To override it, set
+ the env variable OS_TEST_DBAPI_CONNECTION to a connection URI with
+ database admin credentials for the specific backend.
+ """
+
+ def _get_uri(self):
+ return os.getenv('OS_TEST_DBAPI_CONNECTION', 'sqlite://')
+
+ def __init__(self, test):
+ super(DbFixture, self).__init__()
+
+ self.test = test
+
+ def setUp(self):
+ super(DbFixture, self).setUp()
+
+ self.test.engine = session.create_engine(self._get_uri())
+ self.test.sessionmaker = session.get_maker(self.test.engine)
+ self.addCleanup(self.test.engine.dispose)
+
+
+class DbTestCase(test.BaseTestCase):
+ """Base class for testing of DB code.
+
+ Uses `DbFixture`. Intended to be the main database test case, running
+ all tests on a given backend with a user-defined URI. Backend-specific
+ tests should be decorated with the `backend_specific` decorator.
+ """
+
+ FIXTURE = DbFixture
+
+ def setUp(self):
+ super(DbTestCase, self).setUp()
+ self.useFixture(self.FIXTURE(self))
+
+
+ALLOWED_DIALECTS = ['sqlite', 'mysql', 'postgresql']
+
+
+def backend_specific(*dialects):
+ """Decorator to skip backend specific tests on inappropriate engines.
+
+ ::dialects: list of dialects names under which the test will be launched.
+ """
+ def wrap(f):
+ @functools.wraps(f)
+ def ins_wrap(self):
+ if not set(dialects).issubset(ALLOWED_DIALECTS):
+ raise ValueError(
+ "Please use allowed dialects: %s" % ALLOWED_DIALECTS)
+ if self.engine.name not in dialects:
+ msg = ('The test "%s" can be run '
+ 'only on %s. Current engine is %s.')
+ args = (f.__name__, ' '.join(dialects), self.engine.name)
+ self.skip(msg % args)
+ else:
+ return f(self)
+ return ins_wrap
+ return wrap
+
+
+@six.add_metaclass(abc.ABCMeta)
+class OpportunisticFixture(DbFixture):
+ """Base fixture to use default CI databases.
+
+ The databases exist in the OpenStack CI infrastructure. For correct
+ functioning in a local environment, the databases must be created
+ manually.
+ """
+
+ DRIVER = abc.abstractproperty(lambda: None)
+ DBNAME = PASSWORD = USERNAME = 'openstack_citest'
+
+ def _get_uri(self):
+ return utils.get_connect_string(backend=self.DRIVER,
+ user=self.USERNAME,
+ passwd=self.PASSWORD,
+ database=self.DBNAME)
+
+
+@six.add_metaclass(abc.ABCMeta)
+class OpportunisticTestCase(DbTestCase):
+ """Base test case to use default CI databases.
+
+ Subclasses of this test case run only when the openstack_citest
+ database is available; otherwise the tests are skipped.
+ """
+
+ FIXTURE = abc.abstractproperty(lambda: None)
+
+ def setUp(self):
+ credentials = {
+ 'backend': self.FIXTURE.DRIVER,
+ 'user': self.FIXTURE.USERNAME,
+ 'passwd': self.FIXTURE.PASSWORD,
+ 'database': self.FIXTURE.DBNAME}
+
+ if self.FIXTURE.DRIVER and not utils.is_backend_avail(**credentials):
+ msg = '%s backend is not available.' % self.FIXTURE.DRIVER
+ return self.skip(msg)
+
+ super(OpportunisticTestCase, self).setUp()
+
+
+class MySQLOpportunisticFixture(OpportunisticFixture):
+ DRIVER = 'mysql'
+
+
+class PostgreSQLOpportunisticFixture(OpportunisticFixture):
+ DRIVER = 'postgresql'
+
+
+class MySQLOpportunisticTestCase(OpportunisticTestCase):
+ FIXTURE = MySQLOpportunisticFixture
+
+
+class PostgreSQLOpportunisticTestCase(OpportunisticTestCase):
+ FIXTURE = PostgreSQLOpportunisticFixture
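
A hypothetical test module built on these base classes might look like the
following (class and test names are assumptions, for illustration only):

    from ironic.openstack.common.db.sqlalchemy import test_base


    class DbApiTestCase(test_base.DbTestCase):
        # Runs against sqlite by default, or against whatever backend
        # OS_TEST_DBAPI_CONNECTION points at.
        def test_engine_connects(self):
            conn = self.engine.connect()
            self.addCleanup(conn.close)

        @test_base.backend_specific('mysql', 'postgresql')
        def test_server_side_feature(self):
            # Skipped automatically unless the current engine is MySQL
            # or PostgreSQL.
            pass


    class MySQLDbApiTestCase(test_base.MySQLOpportunisticTestCase):
        # Runs only when the openstack_citest MySQL database is
        # reachable; otherwise the whole case is skipped in setUp().
        def test_engine_name(self):
            self.assertEqual('mysql', self.engine.name)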
diff --git a/ironic/openstack/common/db/sqlalchemy/test_migrations.py b/ironic/openstack/common/db/sqlalchemy/test_migrations.py
index 2e1ba2751..6e595d7e9 100644
--- a/ironic/openstack/common/db/sqlalchemy/test_migrations.py
+++ b/ironic/openstack/common/db/sqlalchemy/test_migrations.py
@@ -15,83 +15,43 @@
# under the License.
import functools
+import logging
import os
import subprocess
import lockfile
from six import moves
+from six.moves.urllib import parse
import sqlalchemy
import sqlalchemy.exc
-from ironic.openstack.common.gettextutils import _
-from ironic.openstack.common import log as logging
-from ironic.openstack.common.py3kcompat import urlutils
+from ironic.openstack.common.db.sqlalchemy import utils
+from ironic.openstack.common.gettextutils import _LE
from ironic.openstack.common import test
LOG = logging.getLogger(__name__)
-def _get_connect_string(backend, user, passwd, database):
- """Get database connection
-
- Try to get a connection with a very specific set of values, if we get
- these then we'll run the tests, otherwise they are skipped
- """
- if backend == "postgres":
- backend = "postgresql+psycopg2"
- elif backend == "mysql":
- backend = "mysql+mysqldb"
- else:
- raise Exception("Unrecognized backend: '%s'" % backend)
-
- return ("%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s"
- % {'backend': backend, 'user': user, 'passwd': passwd,
- 'database': database})
-
-
-def _is_backend_avail(backend, user, passwd, database):
- try:
- connect_uri = _get_connect_string(backend, user, passwd, database)
- engine = sqlalchemy.create_engine(connect_uri)
- connection = engine.connect()
- except Exception:
- # intentionally catch all to handle exceptions even if we don't
- # have any backend code loaded.
- return False
- else:
- connection.close()
- engine.dispose()
- return True
-
-
def _have_mysql(user, passwd, database):
present = os.environ.get('TEST_MYSQL_PRESENT')
if present is None:
- return _is_backend_avail('mysql', user, passwd, database)
+ return utils.is_backend_avail(backend='mysql',
+ user=user,
+ passwd=passwd,
+ database=database)
return present.lower() in ('', 'true')
def _have_postgresql(user, passwd, database):
present = os.environ.get('TEST_POSTGRESQL_PRESENT')
if present is None:
- return _is_backend_avail('postgres', user, passwd, database)
+ return utils.is_backend_avail(backend='postgres',
+ user=user,
+ passwd=passwd,
+ database=database)
return present.lower() in ('', 'true')
-def get_db_connection_info(conn_pieces):
- database = conn_pieces.path.strip('/')
- loc_pieces = conn_pieces.netloc.split('@')
- host = loc_pieces[1]
-
- auth_pieces = loc_pieces[0].split(':')
- user = auth_pieces[0]
- password = ""
- if len(auth_pieces) > 1:
- password = auth_pieces[1].strip()
-
- return (user, password, database, host)
-
-
def _set_db_lock(lock_path=None, lock_prefix=None):
def decorator(f):
@functools.wraps(f)
@@ -100,10 +60,10 @@ def _set_db_lock(lock_path=None, lock_prefix=None):
path = lock_path or os.environ.get("IRONIC_LOCK_PATH")
lock = lockfile.FileLock(os.path.join(path, lock_prefix))
with lock:
- LOG.debug(_('Got lock "%s"') % f.__name__)
+ LOG.debug('Got lock "%s"', f.__name__)
return f(*args, **kwargs)
finally:
- LOG.debug(_('Lock released "%s"') % f.__name__)
+ LOG.debug('Lock released "%s"', f.__name__)
return wrapper
return decorator
@@ -166,7 +126,10 @@ class BaseMigrationTestCase(test.BaseTestCase):
"Failed to run: %s\n%s" % (cmd, output))
def _reset_pg(self, conn_pieces):
- (user, password, database, host) = get_db_connection_info(conn_pieces)
+ (user,
+ password,
+ database,
+ host) = utils.get_db_connection_info(conn_pieces)
os.environ['PGPASSWORD'] = password
os.environ['PGUSER'] = user
# note(boris-42): We must create and drop database, we can't
@@ -190,7 +153,7 @@ class BaseMigrationTestCase(test.BaseTestCase):
def _reset_databases(self):
for key, engine in self.engines.items():
conn_string = self.test_databases[key]
- conn_pieces = urlutils.urlparse(conn_string)
+ conn_pieces = parse.urlparse(conn_string)
engine.dispose()
if conn_string.startswith('sqlite'):
# We can just delete the SQLite database, which is
@@ -205,7 +168,7 @@ class BaseMigrationTestCase(test.BaseTestCase):
# the MYSQL database, which is easier and less error-prone
# than using SQLAlchemy to do this via MetaData...trust me.
(user, password, database, host) = \
- get_db_connection_info(conn_pieces)
+ utils.get_db_connection_info(conn_pieces)
sql = ("drop database if exists %(db)s; "
"create database %(db)s;") % {'db': database}
cmd = ("mysql -u \"%(user)s\" -p\"%(password)s\" -h %(host)s "
@@ -301,6 +264,6 @@ class WalkVersionsMixin(object):
if check:
check(engine, data)
except Exception:
- LOG.error("Failed to migrate to version %s on engine %s" %
+ LOG.error(_LE("Failed to migrate to version %s on engine %s") %
(version, engine))
raise
diff --git a/ironic/openstack/common/db/sqlalchemy/utils.py b/ironic/openstack/common/db/sqlalchemy/utils.py
index 7ed4a6dd2..7a48038d4 100644
--- a/ironic/openstack/common/db/sqlalchemy/utils.py
+++ b/ironic/openstack/common/db/sqlalchemy/utils.py
@@ -16,6 +16,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+import logging
import re
from migrate.changeset import UniqueConstraint
@@ -29,6 +30,7 @@ from sqlalchemy import func
from sqlalchemy import Index
from sqlalchemy import Integer
from sqlalchemy import MetaData
+from sqlalchemy import or_
from sqlalchemy.sql.expression import literal_column
from sqlalchemy.sql.expression import UpdateBase
from sqlalchemy.sql import select
@@ -36,9 +38,9 @@ from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy.types import NullType
-from ironic.openstack.common.gettextutils import _
-
-from ironic.openstack.common import log as logging
+from ironic.openstack.common import context as request_context
+from ironic.openstack.common.db.sqlalchemy import models
+from ironic.openstack.common.gettextutils import _, _LI, _LW
from ironic.openstack.common import timeutils
@@ -94,7 +96,7 @@ def paginate_query(query, model, limit, sort_keys, marker=None,
if 'id' not in sort_keys:
# TODO(justinsb): If this ever gives a false-positive, check
# the actual primary key, rather than assuming its id
- LOG.warning(_('Id not in sort_keys; is sort_keys unique?'))
+ LOG.warning(_LW('Id not in sort_keys; is sort_keys unique?'))
assert(not (sort_dir and sort_dirs))
@@ -157,6 +159,94 @@ def paginate_query(query, model, limit, sort_keys, marker=None,
return query
+def _read_deleted_filter(query, db_model, read_deleted):
+ if 'deleted' not in db_model.__table__.columns:
+ raise ValueError(_("There is no `deleted` column in `%s` table. "
+ "Project doesn't use soft-deleted feature.")
+ % db_model.__name__)
+
+ default_deleted_value = db_model.__table__.c.deleted.default.arg
+ if read_deleted == 'no':
+ query = query.filter(db_model.deleted == default_deleted_value)
+ elif read_deleted == 'yes':
+ pass # omit the filter to include deleted and active
+ elif read_deleted == 'only':
+ query = query.filter(db_model.deleted != default_deleted_value)
+ else:
+ raise ValueError(_("Unrecognized read_deleted value '%s'")
+ % read_deleted)
+ return query
+
+
+def _project_filter(query, db_model, context, project_only):
+ if project_only and 'project_id' not in db_model.__table__.columns:
+ raise ValueError(_("There is no `project_id` column in `%s` table.")
+ % db_model.__name__)
+
+ if request_context.is_user_context(context) and project_only:
+ if project_only == 'allow_none':
+ is_none = None
+ query = query.filter(or_(db_model.project_id == context.project_id,
+ db_model.project_id == is_none))
+ else:
+ query = query.filter(db_model.project_id == context.project_id)
+
+ return query
+
+
+def model_query(context, model, session, args=None, project_only=False,
+ read_deleted=None):
+ """Query helper that accounts for context's `read_deleted` field.
+
+ :param context: context to query under
+
+ :param model: Model to query. Must be a subclass of ModelBase.
+ :type model: models.ModelBase
+
+ :param session: The session to use.
+ :type session: sqlalchemy.orm.session.Session
+
+ :param args: Arguments to query. If None, the model itself is used.
+ :type args: tuple
+
+ :param project_only: If present and context is user-type, then restrict
+ query to match the context's project_id. If set to
+ 'allow_none', restriction includes project_id = None.
+ :type project_only: bool
+
+ :param read_deleted: If present, overrides context's read_deleted field.
+ :type read_deleted: string, one of 'no', 'yes' or 'only'
+
+ Usage:
+ result = (utils.model_query(context, models.Instance, session=session)
+ .filter_by(uuid=instance_uuid)
+ .all())
+
+ query = utils.model_query(
+ context, Node,
+ session=session,
+ args=(func.count(Node.id), func.sum(Node.ram))
+ ).filter_by(project_id=project_id)
+ """
+
+ if not read_deleted:
+ if hasattr(context, 'read_deleted'):
+ # NOTE(viktors): some projects use `read_deleted` attribute in
+ # their contexts instead of `show_deleted`.
+ read_deleted = context.read_deleted
+ else:
+ read_deleted = context.show_deleted
+
+ if not issubclass(model, models.ModelBase):
+ raise TypeError(_("model should be a subclass of ModelBase"))
+
+ query = session.query(model) if not args else session.query(*args)
+ query = _read_deleted_filter(query, model, read_deleted)
+ query = _project_filter(query, model, context, project_only)
+
+ return query
+
+
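
The docstring examples above show plain queries; as a supplementary sketch,
the soft-delete and project filters combine as below (`Node`, `context` and
`session` are assumed to exist: a soft-deletable model with a project_id
column, a user RequestContext and an open Session):

    from ironic.openstack.common.db.sqlalchemy import utils

    # read_deleted='only' keeps rows where deleted != the column default;
    # project_only=True additionally filters on context.project_id for
    # regular (non-admin) user contexts.
    deleted_nodes = (utils.model_query(context, Node,
                                       session=session,
                                       read_deleted='only',
                                       project_only=True)
                     .all())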
def get_table(engine, name):
"""Returns an sqlalchemy table dynamically from db.
@@ -277,8 +367,8 @@ def drop_old_duplicate_entries_from_table(migrate_engine, table_name,
rows_to_delete_select = select([table.c.id]).where(delete_condition)
for row in migrate_engine.execute(rows_to_delete_select).fetchall():
- LOG.info(_("Deleting duplicated row with id: %(id)s from table: "
- "%(table)s") % dict(id=row[0], table=table_name))
+ LOG.info(_LI("Deleting duplicated row with id: %(id)s from table: "
+ "%(table)s") % dict(id=row[0], table=table_name))
if use_soft_delete:
delete_statement = table.update().\
@@ -497,3 +587,52 @@ def _change_deleted_column_type_to_id_type_sqlite(migrate_engine, table_name,
where(new_table.c.deleted == deleted).\
values(deleted=default_deleted_value).\
execute()
+
+
+def get_connect_string(backend, database, user=None, passwd=None):
+ """Get database connection
+
+ Try to get a connection with a very specific set of values, if we get
+ these then we'll run the tests, otherwise they are skipped
+ """
+ args = {'backend': backend,
+ 'user': user,
+ 'passwd': passwd,
+ 'database': database}
+ if backend == 'sqlite':
+ template = '%(backend)s:///%(database)s'
+ else:
+ template = "%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s"
+ return template % args
+
+
+def is_backend_avail(backend, database, user=None, passwd=None):
+ try:
+ connect_uri = get_connect_string(backend=backend,
+ database=database,
+ user=user,
+ passwd=passwd)
+ engine = sqlalchemy.create_engine(connect_uri)
+ connection = engine.connect()
+ except Exception:
+ # intentionally catch all to handle exceptions even if we don't
+ # have any backend code loaded.
+ return False
+ else:
+ connection.close()
+ engine.dispose()
+ return True
+
+
+def get_db_connection_info(conn_pieces):
+ database = conn_pieces.path.strip('/')
+ loc_pieces = conn_pieces.netloc.split('@')
+ host = loc_pieces[1]
+
+ auth_pieces = loc_pieces[0].split(':')
+ user = auth_pieces[0]
+ password = ""
+ if len(auth_pieces) > 1:
+ password = auth_pieces[1].strip()
+
+ return (user, password, database, host)
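
These three helpers are designed to round-trip; a sketch using the CI
defaults from test_base.py above (treat the values as illustrative):

    from six.moves.urllib import parse

    from ironic.openstack.common.db.sqlalchemy import utils

    uri = utils.get_connect_string(backend='postgresql',
                                   database='openstack_citest',
                                   user='openstack_citest',
                                   passwd='openstack_citest')
    # -> 'postgresql://openstack_citest:openstack_citest@localhost/openstack_citest'

    # get_db_connection_info() parses such a URI back into its parts.
    pieces = parse.urlparse(uri)
    user, password, database, host = utils.get_db_connection_info(pieces)
    assert host == 'localhost'

    # is_backend_avail() attempts a real connection with the same URI and
    # reports True/False instead of raising.
    if utils.is_backend_avail(backend='postgresql',
                              database='openstack_citest',
                              user='openstack_citest',
                              passwd='openstack_citest'):
        pass  # safe to run postgresql-specific tests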