summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--.zuul.yaml8
-rw-r--r--api-ref/source/v3/roles.inc2
-rw-r--r--api-ref/source/v3/unified_limits.inc4
-rw-r--r--bindep.txt4
-rw-r--r--doc/source/admin/auth-totp.rst4
-rw-r--r--doc/source/admin/credential-encryption.rst41
-rw-r--r--doc/source/admin/service-api-protection.rst2
-rw-r--r--doc/source/admin/upgrading.rst52
-rw-r--r--doc/source/contributor/database-migrations.rst61
-rw-r--r--doc/source/contributor/programming-exercises.rst4
-rw-r--r--doc/source/contributor/services.rst2
-rw-r--r--doc/source/contributor/testing-keystone.rst29
-rw-r--r--doc/source/install/index-obs.rst8
-rw-r--r--doc/source/install/index-rdo.rst8
-rw-r--r--doc/source/install/index-ubuntu.rst8
-rw-r--r--doc/source/user/application_credentials.rst43
-rw-r--r--keystone/cmd/cli.py56
-rw-r--r--keystone/common/sql/migrations/env.py21
-rw-r--r--keystone/common/sql/migrations/versions/27e647c0fad4_initial_version.py2
-rw-r--r--keystone/common/sql/upgrades.py375
-rw-r--r--keystone/locale/de/LC_MESSAGES/keystone.po5
-rw-r--r--keystone/locale/en_GB/LC_MESSAGES/keystone.po27
-rw-r--r--keystone/locale/es/LC_MESSAGES/keystone.po5
-rw-r--r--keystone/locale/fr/LC_MESSAGES/keystone.po5
-rw-r--r--keystone/locale/it/LC_MESSAGES/keystone.po5
-rw-r--r--keystone/locale/ja/LC_MESSAGES/keystone.po5
-rw-r--r--keystone/locale/ko_KR/LC_MESSAGES/keystone.po5
-rw-r--r--keystone/locale/pt_BR/LC_MESSAGES/keystone.po5
-rw-r--r--keystone/locale/ru/LC_MESSAGES/keystone.po5
-rw-r--r--keystone/locale/zh_CN/LC_MESSAGES/keystone.po21
-rw-r--r--keystone/locale/zh_TW/LC_MESSAGES/keystone.po5
-rw-r--r--keystone/server/flask/application.py4
-rw-r--r--keystone/tests/unit/base_classes.py9
-rw-r--r--keystone/tests/unit/common/sql/__init__.py0
-rw-r--r--keystone/tests/unit/common/sql/test_upgrades.py546
-rw-r--r--keystone/tests/unit/ksfixtures/warnings.py17
-rw-r--r--keystone/tests/unit/test_cli.py20
-rw-r--r--keystone/tests/unit/test_sql_banned_operations.py512
-rw-r--r--keystone/tests/unit/test_sql_upgrade.py356
-rw-r--r--keystone/tests/unit/token/test_fernet_provider.py56
-rw-r--r--keystone/token/token_formatters.py9
-rw-r--r--lower-constraints.txt68
-rw-r--r--releasenotes/notes/bug-1926483-a77ab887e0e7f5c9.yaml7
-rw-r--r--releasenotes/notes/drop-python-3-6-and-3-7-dc90b86cedced92b.yaml5
-rw-r--r--releasenotes/notes/switch-to-alembic-1fa5248f0ce824ae.yaml23
-rw-r--r--releasenotes/source/index.rst1
-rw-r--r--releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po98
-rw-r--r--releasenotes/source/yoga.rst6
-rw-r--r--requirements.txt4
-rw-r--r--setup.cfg6
-rw-r--r--tox.ini19
51 files changed, 1228 insertions, 1365 deletions
diff --git a/.zuul.yaml b/.zuul.yaml
index 05db30ae8..ef9782f4c 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -35,10 +35,12 @@
- job:
name: keystone-dsvm-py3-functional-fips
parent: keystone-dsvm-py3-functional
- nodeset: devstack-single-node-centos-8-stream
+ nodeset: devstack-single-node-centos-9-stream
description: |
- Functional testing for a FIPS enabled Centos 8 system
+ Functional testing for a FIPS enabled Centos 9 system
pre-run: playbooks/enable-fips.yaml
+ vars:
+ nslookup_target: 'opendev.org'
- job:
name: keystone-dsvm-functional-federation-opensuse15
@@ -203,7 +205,7 @@
- project:
templates:
- openstack-cover-jobs
- - openstack-python3-xena-jobs
+ - openstack-python3-zed-jobs
- publish-openstack-docs-pti
- periodic-stable-jobs
- check-requirements
diff --git a/api-ref/source/v3/roles.inc b/api-ref/source/v3/roles.inc
index 3073e241d..80092ec82 100644
--- a/api-ref/source/v3/roles.inc
+++ b/api-ref/source/v3/roles.inc
@@ -1002,7 +1002,7 @@ Status Codes
.. rest_status_code:: success status.yaml
- - 201
+ - 204
.. rest_status_code:: error status.yaml
diff --git a/api-ref/source/v3/unified_limits.inc b/api-ref/source/v3/unified_limits.inc
index ce32a0f1c..bdb1d1959 100644
--- a/api-ref/source/v3/unified_limits.inc
+++ b/api-ref/source/v3/unified_limits.inc
@@ -614,8 +614,8 @@ Example
:language: javascript
-Delete Registered Limit
-=======================
+Delete Limit
+============
.. rest_method:: DELETE /v3/limits/{limit_id}
diff --git a/bindep.txt b/bindep.txt
index 9ed75e0e0..efa6c067b 100644
--- a/bindep.txt
+++ b/bindep.txt
@@ -29,9 +29,7 @@ openldap-devel [platform:redhat]
openssl-devel [platform:rpm]
postgresql-devel [platform:rpm]
postgresql-server [platform:rpm]
-python2-devel [platform:rpm]
-python3-devel [platform:fedora]
-python34-devel [platform:centos]
+python3-devel [platform:rpm]
libmariadb-devel [platform:suse]
openldap2-devel [platform:suse]
diff --git a/doc/source/admin/auth-totp.rst b/doc/source/admin/auth-totp.rst
index 3c331be96..c77ca64a0 100644
--- a/doc/source/admin/auth-totp.rst
+++ b/doc/source/admin/auth-totp.rst
@@ -40,8 +40,8 @@ secret:
.. code-block:: python
import base64
- message = '1234567890123456'
- print base64.b32encode(message).rstrip('=')
+ message = b'1234567890123456'
+ print(base64.b32encode(message).rstrip(b'='))
Example output::
diff --git a/doc/source/admin/credential-encryption.rst b/doc/source/admin/credential-encryption.rst
index 7a721ff70..d54209be3 100644
--- a/doc/source/admin/credential-encryption.rst
+++ b/doc/source/admin/credential-encryption.rst
@@ -60,47 +60,6 @@ the response. Neither the cipher text, nor the hash of the key used to encrypt
the ``blob`` are exposed through the API. Furthermore, the key is only used
internally to keystone.
-Encrypting existing credentials
--------------------------------
-
-When upgrading a Mitaka deployment to Newton, three database migrations will
-ensure all credentials are encrypted. The process is as follows:
-
-1. An additive schema change is made to create the new ``encrypted_blob`` and
- ``key_hash`` columns in the existing ``credential`` table using
- ``keystone-manage db_sync --expand``.
-2. A data migration will loop through all existing credentials, encrypt each
- ``blob`` and store the result in the new ``encrypted_blob`` column. The hash
- of the key used is also written to the ``key_hash`` column for that specific
- credential. This step is done using ``keystone-manage db_sync --migrate``.
-3. A contractive schema will remove the ``blob`` column that held the plain
- text representations of the credential using ``keystone-manage db_sync
- --contract``. This should only be done after all nodes in the deployment are
- running Newton. If any Mitaka nodes are running after the database is
- contracted, they won't be able to read credentials since they are looking
- for the ``blob`` column that no longer exists.
-
-.. NOTE::
-
- You may also use ``keystone-manage db_sync --check`` in order to check the
- current status of your rolling upgrades.
-
-If performing a rolling upgrade, please note that a limited service outage will
-take affect during this migration. When the migration is in place, credentials
-will become read-only until the database is contracted. After the contract
-phase is complete, credentials will be writeable to the backend. A
-``[credential] key_repository`` location must be specified through
-configuration and bootstrapped with keys using ``keystone-manage
-credential_setup`` prior to migrating any existing credentials. If a new key
-repository isn't setup using ``keystone-manage credential_setup`` keystone will
-assume a null key to encrypt and decrypt credentials until a proper key
-repository is present. The null key is a key consisting of all null bytes and
-its only purpose is to ease the upgrade process from Mitaka to Newton. It is
-highly recommended that the null key isn't used. It is no more secure than
-storing credentials in plain text. If the null key is used, you should migrate
-to a proper key repository using ``keystone-manage credential_setup`` and
-``keystone-manage credential_migrate``.
-
Encryption key management
-------------------------
diff --git a/doc/source/admin/service-api-protection.rst b/doc/source/admin/service-api-protection.rst
index 47886aeb0..249944354 100644
--- a/doc/source/admin/service-api-protection.rst
+++ b/doc/source/admin/service-api-protection.rst
@@ -31,7 +31,7 @@ custom policies.
Roles Definitions
-----------------
-The default roles provided by keystone, via ``keystone-manage boostrap``, are
+The default roles provided by keystone, via ``keystone-manage bootstrap``, are
related through role implications. The ``admin`` role implies the ``member``
role, and the ``member`` role implies the ``reader`` role. These implications
mean users with the ``admin`` role automatically have the ``member`` and
diff --git a/doc/source/admin/upgrading.rst b/doc/source/admin/upgrading.rst
index 709d98dac..e20071436 100644
--- a/doc/source/admin/upgrading.rst
+++ b/doc/source/admin/upgrading.rst
@@ -155,7 +155,7 @@ downtime if it is required.
Upgrading without downtime
--------------------------
-.. NOTE:
+.. versionadded:: 10.0.0 (Newton)
Upgrading without downtime is only supported in deployments upgrading
*from* Newton or a newer release.
@@ -166,6 +166,12 @@ Upgrading without downtime
``keystone-manage db_sync``), as it runs legacy (downtime-incurring)
migrations prior to running schema expansions.
+.. versionchanged:: 21.0.0 (Yoga)
+
+ The migration tooling was changed from *SQLAlchemy-Migrate* to *Alembic*.
+ As part of this change, the data migration phase of the database upgrades
+ was dropped.
+
This is a high-level description of our upgrade strategy built around
additional options in ``keystone-manage db_sync``. Although it is much more
complex than the upgrade process described above, it assumes that you are not
@@ -187,11 +193,11 @@ authenticate requests normally.
#. Update your configuration files on the first node (``/etc/keystone/``) with
those corresponding to the latest release.
-#. (*New in Newton*) Run ``keystone-manage doctor`` on the first node to
+#. Run ``keystone-manage doctor`` on the first node to
diagnose symptoms of common deployment issues and receive instructions for
resolving them.
-#. (*New in Newton*) Run ``keystone-manage db_sync --expand`` on the first node
+#. Run ``keystone-manage db_sync --expand`` on the first node
to expand the database schema to a superset of what both the previous and
next release can utilize, and create triggers to facilitate the live
migration process.
@@ -210,14 +216,12 @@ authenticate requests normally.
triggers will live migrate the data to the new schema so it can be read by
the next release.
-#. (*New in Newton*) Run ``keystone-manage db_sync --migrate`` on the first
- node to forcefully perform data migrations. This process will migrate all
- data from the old schema to the new schema while the previous release
- continues to operate normally.
+ .. note::
- When this process completes, all data will be available in both the new
- schema and the old schema, so both the previous release and the next release
- will be capable of operating normally.
+ Prior to Yoga, data migrations were treated separatly and required the
+ use of the ``keystone-manage db_sync --migrate`` command after applying
+ the expand migrations. This is no longer necessary and the
+ ``keystone-manage db_sync --migrate`` command is now a no-op.
#. Update your configuration files (``/etc/keystone/``) on all nodes (except
the first node, which you've already done) with those corresponding to the
@@ -230,20 +234,27 @@ authenticate requests normally.
As the next release begins writing to the new schema, database triggers will
also migrate the data to the old schema, keeping both data schemas in sync.
-#. (*New in Newton*) Run ``keystone-manage db_sync --contract`` to remove the
- old schema and all data migration triggers.
+#. Run ``keystone-manage db_sync --contract`` to remove the old schema and all
+ data migration triggers.
When this process completes, the database will no longer be able to support
the previous release.
-Using db_sync check
-~~~~~~~~~~~~~~~~~~~
+Using ``db_sync check``
+~~~~~~~~~~~~~~~~~~~~~~~
+
+.. versionadded:: 12.0.0 (Pike)
-(*New in Pike*) In order to check the current state of your rolling upgrades,
-you may run the command ``keystone-manage db_sync --check``. This will inform
-you of any outstanding actions you have left to take as well as any possible
-upgrades you can make from your current version. Here are a list of possible
-return codes.
+.. versionchanged:: 21.0.0 (Yoga)
+
+ Previously this command would return ``3`` if data migrations were
+ required. Data migrations are now part of the expand schema migrations,
+ therefore this step is no longer necessary.
+
+In order to check the current state of your rolling upgrades, you may run the
+command ``keystone-manage db_sync --check``. This will inform you of any
+outstanding actions you have left to take as well as any possible upgrades you
+can make from your current version. Here are a list of possible return codes.
* A return code of ``0`` means you are currently up to date with the latest
migration script version and all ``db_sync`` commands are complete.
@@ -256,8 +267,5 @@ return codes.
or the database is already under control. Your first step is to run
``keystone-manage db_sync --expand``.
-* A return code of ``3`` means that the expansion stage is complete, and the
- next step is to run ``keystone-manage db_sync --migrate``.
-
* A return code of ``4`` means that the expansion and data migration stages are
complete, and the next step is to run ``keystone-manage db_sync --contract``.
diff --git a/doc/source/contributor/database-migrations.rst b/doc/source/contributor/database-migrations.rst
index 3827ea8e6..59afb2d0c 100644
--- a/doc/source/contributor/database-migrations.rst
+++ b/doc/source/contributor/database-migrations.rst
@@ -17,52 +17,45 @@
Database Migrations
===================
-.. note::
+.. versionchanged:: 21.0.0 (Yoga)
- The framework being used is currently being migrated from
- SQLAlchemy-Migrate to Alembic, meaning this information will change in the
- near-term.
+ The database migration framework was changed from SQLAlchemy-Migrate to
+ Alembic in the Yoga release. Previously there were three SQLAlchemy-Migrate
+ repos, corresponding to different type of migration operation: the *expand*
+ repo, the *data migration* repo, and the *contract* repo. There are now
+ only two Alembic branches, the *expand* branch and the *contract* branch,
+ and data migration operations have been folded into the former
Starting with Newton, keystone supports upgrading both with and without
-downtime. In order to support this, there are three separate migration
-repositories (all under ``keystone/common/sql/legacy_migrations``) that match
-the three phases of an upgrade (schema expansion, data migration, and schema
-contraction):
+downtime. In order to support this, there are two separate branches (all under
+``keystone/common/sql/migrations``): the *expand* and the *contract* branch.
-``expand_repo``
+*expand*
For additive schema modifications and triggers to ensure data is kept in
sync between the old and new schema until the point when there are no
keystone instances running old code.
-``data_migration_repo``
- To ensure new tables/columns are fully populated with data from the old
- schema.
+ May also contain data migrations to ensure new tables/columns are fully
+ populated with data from the old schema.
-``contract_repo``
+*contract*
Run after all old code versions have been upgraded to running the new code,
so remove any old schema columns/tables that are not used by the new
version of the code. Drop any triggers added in the expand phase.
-All migrations are required to have a migration script in each of these repos,
-each with the same version number (which is indicated by the first three digits
-of the name of the script, e.g. ``003_add_X_table.py``). If there is no work to
-do in a specific phase, then include a no-op migration to simply ``pass`` (in
-fact the ``001`` migration in each of these repositories is a no-op migration,
-so that can be used as a template).
+A migration script must belong to one branch. If a migration has both additive
+and destruction operations, it must be split into two migrations scripts, one
+in each branch.
In order to support rolling upgrades, where two releases of keystone briefly
operate side-by-side using the same database without downtime, each phase of
the migration must adhere to following constraints:
-These triggers should be removed in the contract phase. There are further
-restrictions as to what can and cannot be included in migration scripts in each
-phase:
-
Expand phase:
- Only additive schema changes are allowed, such as new columns, tables,
- indices, and triggers.
+ Only additive schema changes, such as new columns, tables, indices, and
+ triggers, and data insertion are allowed.
- Data insertion, modification, and removal is not allowed.
+ Data modification or removal is not allowed.
Triggers must be created to keep data in sync between the previous release
and the next release. Data written by the previous release must be readable
@@ -72,20 +65,14 @@ Expand phase:
In cases it is not possible for triggers to maintain data integrity across
multiple schemas, writing data should be forbidden using triggers.
-Data Migration phase:
- Data is allowed to be inserted, updated, and deleted.
-
- No schema changes are allowed.
-
Contract phase:
- Only destructive schema changes are allowed, such as dropping or altering
- columns, tables, indices, and triggers.
-
- Data insertion, modification, and removal is not allowed.
+ Only destructive schema changes, such as dropping or altering
+ columns, tables, indices, and triggers, or data modification or removal are
+ allowed.
Triggers created during the expand phase must be dropped.
For more information on writing individual migration scripts refer to
-`SQLAlchemy-migrate`_.
+`Alembic`_.
-.. _SQLAlchemy-migrate: https://opendev.org/openstack/sqlalchemy-migrate
+.. _Alembic: https://alembic.sqlalchemy.org/
diff --git a/doc/source/contributor/programming-exercises.rst b/doc/source/contributor/programming-exercises.rst
index b51725d08..77a91bc74 100644
--- a/doc/source/contributor/programming-exercises.rst
+++ b/doc/source/contributor/programming-exercises.rst
@@ -53,9 +53,7 @@ Refer to the :doc:`API Change tutorial <api_change_tutorial>`. In short, you wil
steps:
#. Create a SQL migration to add the parameter to the database table
- (:py:mod:`keystone.common.sql.legacy_migration.expand_repo.versions`,
- :py:mod:`keystone.common.sql.legacy_migration.data_migration_repo.versions`,
- :py:mod:`keystone.common.sql.legacy_migration.contract_repo.versions`)
+ (:py:mod:`keystone.common.sql.migrations.versions`)
#. Add a SQL migration unit test (`keystone/tests/unit/test_sql_upgrade.py`)
diff --git a/doc/source/contributor/services.rst b/doc/source/contributor/services.rst
index bdca28b15..c1c397e30 100644
--- a/doc/source/contributor/services.rst
+++ b/doc/source/contributor/services.rst
@@ -99,7 +99,7 @@ The "default" domain
The v2.0 API has been removed as of the Queens release. While this section
references the v2.0 API, it is purely for historical reasons that clarify
- the existance of the *default* domain.
+ the existence of the *default* domain.
Domains were introduced as a v3-only feature. As a result, the v2.0 API didn't
understand the concept of domains. To allow for both versions of the Identity
diff --git a/doc/source/contributor/testing-keystone.rst b/doc/source/contributor/testing-keystone.rst
index 72575fbcb..721562a3a 100644
--- a/doc/source/contributor/testing-keystone.rst
+++ b/doc/source/contributor/testing-keystone.rst
@@ -138,32 +138,9 @@ Identity module.
Testing Schema Migrations
-------------------------
-.. note::
-
- The framework being used is currently being migrated from
- SQLAlchemy-Migrate to Alembic, meaning this information will change in the
- near-term.
-
-The application of schema migrations can be tested using SQLAlchemy Migrate's
-built-in test runner, one migration at a time.
-
-.. WARNING::
-
- This may leave your database in an inconsistent state; attempt this in
- non-production environments only!
-
-This is useful for testing the *next* migration in sequence in a database under
-version control:
-
-.. code-block:: bash
-
- $ python keystone/common/sql/legacy_migrations/expand_repo/manage.py test \
- --url=sqlite:///test.db \
- --repository=keystone/common/sql/legacy_migrations/expand_repo/
-
-This command references to a SQLite database (test.db) to be used. Depending on
-the migration, this command alone does not make assertions as to the integrity
-of your data during migration.
+Tests for database migrations can be found in
+``keystone/tests/unit/test_sql_upgrade.py`` and
+``keystone/tests/unit/test_sql_banned_operations.py``.
LDAP Tests
----------
diff --git a/doc/source/install/index-obs.rst b/doc/source/install/index-obs.rst
index c67974d74..46129285a 100644
--- a/doc/source/install/index-obs.rst
+++ b/doc/source/install/index-obs.rst
@@ -12,14 +12,6 @@ both SP1 and SP2 - through the Open Build Service Cloud repository.
Explanations of configuration options and sample configuration files
are included.
-.. note::
- The Training Labs scripts provide an automated way of deploying the
- cluster described in this Installation Guide into VirtualBox or KVM
- VMs. You will need a desktop computer or a laptop with at least 8
- GB memory and 20 GB free storage running Linux, MacOS, or Windows.
- Please see the
- `OpenStack Training Labs <https://docs.openstack.org/training_labs/>`_.
-
.. warning::
This guide is a work-in-progress and is subject to updates frequently.
diff --git a/doc/source/install/index-rdo.rst b/doc/source/install/index-rdo.rst
index 6e0e3984f..dc48e890f 100644
--- a/doc/source/install/index-rdo.rst
+++ b/doc/source/install/index-rdo.rst
@@ -12,14 +12,6 @@ the RDO repository.
Explanations of configuration options and sample configuration files
are included.
-.. note::
- The Training Labs scripts provide an automated way of deploying the
- cluster described in this Installation Guide into VirtualBox or KVM
- VMs. You will need a desktop computer or a laptop with at least 8
- GB memory and 20 GB free storage running Linux, MacOS, or Windows.
- Please see the
- `OpenStack Training Labs <https://docs.openstack.org/training_labs/>`_.
-
.. warning::
This guide is a work-in-progress and is subject to updates frequently.
diff --git a/doc/source/install/index-ubuntu.rst b/doc/source/install/index-ubuntu.rst
index b3e5cb064..d1c7fe138 100644
--- a/doc/source/install/index-ubuntu.rst
+++ b/doc/source/install/index-ubuntu.rst
@@ -12,14 +12,6 @@ Ubuntu 16.04 (LTS).
Explanations of configuration options and sample configuration files
are included.
-.. note::
- The Training Labs scripts provide an automated way of deploying the
- cluster described in this Installation Guide into VirtualBox or KVM
- VMs. You will need a desktop computer or a laptop with at least 8
- GB memory and 20 GB free storage running Linux, MacOS, or Windows.
- Please see the
- `OpenStack Training Labs <https://docs.openstack.org/training_labs/>`_.
-
.. warning::
This guide is a work-in-progress and is subject to updates frequently.
diff --git a/doc/source/user/application_credentials.rst b/doc/source/user/application_credentials.rst
index eff86f7b3..5455a04e7 100644
--- a/doc/source/user/application_credentials.rst
+++ b/doc/source/user/application_credentials.rst
@@ -174,8 +174,47 @@ Access Rules
============
In addition to delegating a subset of roles to an application credential, you
-may also delegate more fine-grained access control by using access rules. For
-example, to create an application credential that is constricted to creating
+may also delegate more fine-grained access control by using access rules.
+
+.. note::
+
+ Application credentials with access rules require additional configuration
+ of each service that will use it. See below for details.
+
+If application credentials with access rules are required, an OpenStack
+service using keystonemiddleware to authenticate with keystone, needs to
+define ``service_type`` in its configuration file. Following is an example for the
+cinder V3 service:
+
+.. code-block:: ini
+
+ [keystone_authtoken]
+ service_type = volumev3
+
+For other OpenStack sevices, their types can be obtained using the OpenStack
+client. For example:
+
+.. code-block:: console
+
+ $ openstack service list -c Name -c Type
+ +-----------+-----------+
+ | Name | Type |
+ +-----------+-----------+
+ | glance | image |
+ | cinderv3 | volumev3 |
+ | cinderv2 | volumev2 |
+ | keystone | identity |
+ | nova | compute |
+ | neutron | network |
+ | placement | placement |
+ +-----------+-----------+
+
+.. note::
+
+ Updates to the configuration files of a service require restart of the appropriate
+ services for the changes to take effect.
+
+In order to create an example application credential that is constricted to creating
servers in nova, the user can add the following access rules:
.. code-block:: console
diff --git a/keystone/cmd/cli.py b/keystone/cmd/cli.py
index 1e866d76a..ad65b2622 100644
--- a/keystone/cmd/cli.py
+++ b/keystone/cmd/cli.py
@@ -281,61 +281,53 @@ class DbSync(BaseApp):
except db_exception.DBMigrationError:
LOG.info(
'Your database is not currently under version '
- 'control or the database is already controlled. Your '
- 'first step is to run `keystone-manage db_sync --expand`.'
+ 'control or the database is already controlled. '
+ 'Your first step is to run `keystone-manage db_sync --expand`.'
)
return 2
- try:
- migrate_version = upgrades.get_db_version(
- branch='data_migration')
- except db_exception.DBMigrationError:
- migrate_version = 0
+ if isinstance(expand_version, int):
+ # we're still using sqlalchemy-migrate
+ LOG.info(
+ 'Your database is currently using legacy version control. '
+ 'Your first step is to run `keystone-manage db_sync --expand`.'
+ )
+ return 2
try:
contract_version = upgrades.get_db_version(branch='contract')
except db_exception.DBMigrationError:
- contract_version = 0
+ contract_version = None
- migration_script_version = upgrades.LATEST_VERSION
+ heads = upgrades.get_current_heads()
if (
- contract_version > migrate_version or
- migrate_version > expand_version
+ upgrades.EXPAND_BRANCH not in heads or
+ heads[upgrades.EXPAND_BRANCH] != expand_version
):
- LOG.info('Your database is out of sync. For more information '
- 'refer to https://docs.openstack.org/keystone/'
- 'latest/admin/identity-upgrading.html')
- status = 1
- elif migration_script_version > expand_version:
LOG.info('Your database is not up to date. Your first step is '
'to run `keystone-manage db_sync --expand`.')
status = 2
- elif expand_version > migrate_version:
- LOG.info('Expand version is ahead of migrate. Your next step '
- 'is to run `keystone-manage db_sync --migrate`.')
- status = 3
- elif migrate_version > contract_version:
- LOG.info('Migrate version is ahead of contract. Your next '
- 'step is to run `keystone-manage db_sync --contract`.')
- status = 4
elif (
- migration_script_version == expand_version == migrate_version ==
- contract_version
+ upgrades.CONTRACT_BRANCH not in heads or
+ heads[upgrades.CONTRACT_BRANCH] != contract_version
):
+ LOG.info('Expand version is ahead of contract. Your next '
+ 'step is to run `keystone-manage db_sync --contract`.')
+ status = 4
+ else:
LOG.info('All db_sync commands are upgraded to the same '
'version and up-to-date.')
+
LOG.info(
- 'The latest installed migration script version is: %(script)d.\n'
'Current repository versions:\n'
- 'Expand: %(expand)d\n'
- 'Migrate: %(migrate)d\n'
- 'Contract: %(contract)d',
+ 'Expand: %(expand)s (head: %(expand_head)s)\n'
+ 'Contract: %(contract)s (head: %(contract_head)s)',
{
- 'script': migration_script_version,
'expand': expand_version,
- 'migrate': migrate_version,
+ 'expand_head': heads.get(upgrades.EXPAND_BRANCH),
'contract': contract_version,
+ 'contract_head': heads.get(upgrades.CONTRACT_BRANCH),
},
)
return status
diff --git a/keystone/common/sql/migrations/env.py b/keystone/common/sql/migrations/env.py
index 2d116f1bd..f5547a4e4 100644
--- a/keystone/common/sql/migrations/env.py
+++ b/keystone/common/sql/migrations/env.py
@@ -59,15 +59,24 @@ def run_migrations_online():
In this scenario we need to create an Engine and associate a connection
with the context.
"""
- connectable = engine_from_config(
- config.get_section(config.config_ini_section),
- prefix="sqlalchemy.",
- poolclass=pool.NullPool,
- )
+ connectable = config.attributes.get('connection', None)
+
+ if connectable is None:
+ # only create Engine if we don't have a Connection from the outside
+ connectable = engine_from_config(
+ config.get_section(config.config_ini_section),
+ prefix="sqlalchemy.",
+ poolclass=pool.NullPool,
+ )
+
+ # when connectable is already a Connection object, calling connect() gives
+ # us a *branched connection*.
with connectable.connect() as connection:
context.configure(
- connection=connection, target_metadata=target_metadata
+ connection=connection,
+ target_metadata=target_metadata,
+ render_as_batch=True,
)
with context.begin_transaction():
diff --git a/keystone/common/sql/migrations/versions/27e647c0fad4_initial_version.py b/keystone/common/sql/migrations/versions/27e647c0fad4_initial_version.py
index c57cdf13d..eec97c573 100644
--- a/keystone/common/sql/migrations/versions/27e647c0fad4_initial_version.py
+++ b/keystone/common/sql/migrations/versions/27e647c0fad4_initial_version.py
@@ -1100,7 +1100,7 @@ def upgrade():
bind = op.get_bind()
meta = sql.MetaData()
- project = sql.Table('project', meta, autoload_with=bind.engine)
+ project = sql.Table('project', meta, autoload_with=bind)
root_domain_project = _generate_root_domain_project()
op.execute(project.insert().values(**root_domain_project))
diff --git a/keystone/common/sql/upgrades.py b/keystone/common/sql/upgrades.py
index f463771f2..41a094819 100644
--- a/keystone/common/sql/upgrades.py
+++ b/keystone/common/sql/upgrades.py
@@ -16,24 +16,47 @@
import os
+from alembic import command as alembic_api
+from alembic import config as alembic_config
+from alembic import migration as alembic_migration
+from alembic import script as alembic_script
from migrate import exceptions as migrate_exceptions
from migrate.versioning import api as migrate_api
from migrate.versioning import repository as migrate_repository
from oslo_db import exception as db_exception
-import sqlalchemy as sa
+from oslo_log import log as logging
from keystone.common import sql
-from keystone import exception
-from keystone.i18n import _
+import keystone.conf
+
+CONF = keystone.conf.CONF
+LOG = logging.getLogger(__name__)
+
+ALEMBIC_INIT_VERSION = '27e647c0fad4'
+MIGRATE_INIT_VERSION = 72
-INITIAL_VERSION = 72
-LATEST_VERSION = 79
EXPAND_BRANCH = 'expand'
DATA_MIGRATION_BRANCH = 'data_migration'
CONTRACT_BRANCH = 'contract'
+RELEASES = (
+ 'yoga',
+)
+MIGRATION_BRANCHES = (EXPAND_BRANCH, CONTRACT_BRANCH)
+VERSIONS_PATH = os.path.join(
+ os.path.dirname(sql.__file__),
+ 'migrations',
+ 'versions',
+)
+
-def _get_migrate_repo_path(branch):
+def _find_migrate_repo(branch):
+ """Get the project's change script repository
+
+ :param branch: Name of the repository "branch" to be used; this will be
+ transformed to repository path.
+ :returns: An instance of ``migrate.versioning.repository.Repository``
+ """
abs_path = os.path.abspath(
os.path.join(
os.path.dirname(sql.__file__),
@@ -41,203 +64,273 @@ def _get_migrate_repo_path(branch):
f'{branch}_repo',
)
)
+ if not os.path.exists(abs_path):
+ raise db_exception.DBMigrationError("Path %s not found" % abs_path)
+ return migrate_repository.Repository(abs_path)
- if not os.path.isdir(abs_path):
- raise exception.MigrationNotProvided(sql.__name__, abs_path)
- return abs_path
+def _find_alembic_conf():
+ """Get the project's alembic configuration
+ :returns: An instance of ``alembic.config.Config``
+ """
+ path = os.path.join(
+ os.path.abspath(os.path.dirname(__file__)), 'alembic.ini',
+ )
-def _find_migrate_repo(abs_path):
- """Get the project's change script repository
+ config = alembic_config.Config(os.path.abspath(path))
- :param abs_path: Absolute path to migrate repository
- """
- if not os.path.exists(abs_path):
- raise db_exception.DBMigrationError("Path %s not found" % abs_path)
- return migrate_repository.Repository(abs_path)
+ config.set_main_option('sqlalchemy.url', CONF.database.connection)
+ # we don't want to use the logger configuration from the file, which is
+ # only really intended for the CLI
+ # https://stackoverflow.com/a/42691781/613428
+ config.attributes['configure_logger'] = False
-def _migrate_db_version_control(engine, abs_path, version=None):
- """Mark a database as under this repository's version control.
+ # we want to scan all the versioned subdirectories
+ version_paths = [VERSIONS_PATH]
+ for release in RELEASES:
+ for branch in MIGRATION_BRANCHES:
+ version_path = os.path.join(VERSIONS_PATH, release, branch)
+ version_paths.append(version_path)
+ config.set_main_option('version_locations', ' '.join(version_paths))
- Once a database is under version control, schema changes should
- only be done via change scripts in this repository.
+ return config
- :param engine: SQLAlchemy engine instance for a given database
- :param abs_path: Absolute path to migrate repository
- :param version: Initial database version
- """
- repository = _find_migrate_repo(abs_path)
- try:
- migrate_api.version_control(engine, repository, version)
- except migrate_exceptions.InvalidVersionError as ex:
- raise db_exception.DBMigrationError("Invalid version : %s" % ex)
- except migrate_exceptions.DatabaseAlreadyControlledError:
- raise db_exception.DBMigrationError("Database is already controlled.")
+def _get_current_heads(engine, config):
+ script = alembic_script.ScriptDirectory.from_config(config)
- return version
+ with engine.connect() as conn:
+ context = alembic_migration.MigrationContext.configure(conn)
+ heads = context.get_current_heads()
+ heads_map = {}
-def _migrate_db_version(engine, abs_path, init_version):
- """Show the current version of the repository.
+ for head in heads:
+ if CONTRACT_BRANCH in script.get_revision(head).branch_labels:
+ heads_map[CONTRACT_BRANCH] = head
+ else:
+ heads_map[EXPAND_BRANCH] = head
- :param engine: SQLAlchemy engine instance for a given database
- :param abs_path: Absolute path to migrate repository
- :param init_version: Initial database version
- """
- repository = _find_migrate_repo(abs_path)
- try:
- return migrate_api.db_version(engine, repository)
- except migrate_exceptions.DatabaseNotControlledError:
- pass
+ return heads_map
- meta = sa.MetaData()
- meta.reflect(bind=engine)
- tables = meta.tables
- if (
- len(tables) == 0 or
- 'alembic_version' in tables or
- 'migrate_version' in tables
- ):
- _migrate_db_version_control(engine, abs_path, version=init_version)
- return migrate_api.db_version(engine, repository)
- msg = _(
- "The database is not under version control, but has tables. "
- "Please stamp the current version of the schema manually."
- )
- raise db_exception.DBMigrationError(msg)
+def get_current_heads():
+ """Get the current head of each of the expand and contract branches."""
+ config = _find_alembic_conf()
+ with sql.session_for_read() as session:
+ engine = session.get_bind()
-def _migrate_db_sync(engine, abs_path, version=None, init_version=0):
- """Upgrade or downgrade a database.
+ # discard the URL encoded in alembic.ini in favour of the URL
+ # configured for the engine by the database fixtures, casting from
+ # 'sqlalchemy.engine.url.URL' to str in the process. This returns a
+ # RFC-1738 quoted URL, which means that a password like "foo@" will be
+ # turned into "foo%40". This in turn causes a problem for
+ # set_main_option() because that uses ConfigParser.set, which (by
+ # design) uses *python* interpolation to write the string out ... where
+ # "%" is the special python interpolation character! Avoid this
+ # mismatch by quoting all %'s for the set below.
+ engine_url = str(engine.url).replace('%', '%%')
+ config.set_main_option('sqlalchemy.url', str(engine_url))
- Function runs the upgrade() or downgrade() functions in change scripts.
+ heads = _get_current_heads(engine, config)
- :param engine: SQLAlchemy engine instance for a given database
- :param abs_path: Absolute path to migrate repository.
- :param version: Database will upgrade/downgrade until this version.
- If None - database will update to the latest available version.
- :param init_version: Initial database version
- """
+ return heads
- if version is not None:
- try:
- version = int(version)
- except ValueError:
- msg = _("version should be an integer")
- raise db_exception.DBMigrationError(msg)
- current_version = _migrate_db_version(engine, abs_path, init_version)
- repository = _find_migrate_repo(abs_path)
+def _is_database_under_migrate_control(engine):
+ # if any of the repos is present, they're all present (in theory, at least)
+ repository = _find_migrate_repo('expand')
+ try:
+ migrate_api.db_version(engine, repository)
+ return True
+ except migrate_exceptions.DatabaseNotControlledError:
+ return False
- if version is None or version > current_version:
- try:
- return migrate_api.upgrade(engine, repository, version)
- except Exception as ex:
- raise db_exception.DBMigrationError(ex)
- else:
- return migrate_api.downgrade(engine, repository, version)
+def _is_database_under_alembic_control(engine):
+ with engine.connect() as conn:
+ context = alembic_migration.MigrationContext.configure(conn)
+ return bool(context.get_current_heads())
-def get_db_version(branch=EXPAND_BRANCH):
- abs_path = _get_migrate_repo_path(branch)
- with sql.session_for_read() as session:
- return _migrate_db_version(
- session.get_bind(),
- abs_path,
- INITIAL_VERSION,
- )
+def _init_alembic_on_legacy_database(engine, config):
+ """Init alembic in an existing environment with sqlalchemy-migrate."""
+ LOG.info(
+ 'The database is still under sqlalchemy-migrate control; '
+ 'applying any remaining sqlalchemy-migrate-based migrations '
+ 'and fake applying the initial alembic migration'
+ )
-def _db_sync(branch):
- abs_path = _get_migrate_repo_path(branch)
- with sql.session_for_write() as session:
- engine = session.get_bind()
- _migrate_db_sync(
- engine=engine,
- abs_path=abs_path,
- init_version=INITIAL_VERSION,
- )
+ # bring all repos up to date; note that we're relying on the fact that
+ # there aren't any "real" contract migrations left (since the great squash
+ # of migrations in yoga) so we're really only applying the expand side of
+ # '079_expand_update_local_id_limit' and the rest are for completeness'
+ # sake
+ for branch in (EXPAND_BRANCH, DATA_MIGRATION_BRANCH, CONTRACT_BRANCH):
+ repository = _find_migrate_repo(branch or 'expand')
+ migrate_api.upgrade(engine, repository)
+
+ # re-use the connection rather than creating a new one
+ with engine.begin() as connection:
+ config.attributes['connection'] = connection
+ alembic_api.stamp(config, ALEMBIC_INIT_VERSION)
+
+
+def _upgrade_alembic(engine, config, branch):
+ revision = 'heads'
+ if branch:
+ revision = f'{branch}@head'
+
+ # re-use the connection rather than creating a new one
+ with engine.begin() as connection:
+ config.attributes['connection'] = connection
+ alembic_api.upgrade(config, revision)
+
+
+def get_db_version(branch=EXPAND_BRANCH, *, engine=None):
+ config = _find_alembic_conf()
+
+ if engine is None:
+ with sql.session_for_read() as session:
+ engine = session.get_bind()
+
+ # discard the URL encoded in alembic.ini in favour of the URL
+ # configured for the engine by the database fixtures, casting from
+ # 'sqlalchemy.engine.url.URL' to str in the process. This returns a
+ # RFC-1738 quoted URL, which means that a password like "foo@" will be
+ # turned into "foo%40". This in turn causes a problem for
+ # set_main_option() because that uses ConfigParser.set, which (by
+ # design) uses *python* interpolation to write the string out ... where
+ # "%" is the special python interpolation character! Avoid this
+ # mismatch by quoting all %'s for the set below.
+ engine_url = str(engine.url).replace('%', '%%')
+ config.set_main_option('sqlalchemy.url', str(engine_url))
+
+ migrate_version = None
+ if _is_database_under_migrate_control(engine):
+ repository = _find_migrate_repo(branch)
+ migrate_version = migrate_api.db_version(engine, repository)
+
+ alembic_version = None
+ if _is_database_under_alembic_control(engine):
+ # we use '.get' since the particular branch might not have been created
+ alembic_version = _get_current_heads(engine, config).get(branch)
+
+ return alembic_version or migrate_version
+
+
+def _db_sync(branch=None, *, engine=None):
+ config = _find_alembic_conf()
+
+ if engine is None:
+ with sql.session_for_write() as session:
+ engine = session.get_bind()
+
+ # discard the URL encoded in alembic.ini in favour of the URL
+ # configured for the engine by the database fixtures, casting from
+ # 'sqlalchemy.engine.url.URL' to str in the process. This returns a
+ # RFC-1738 quoted URL, which means that a password like "foo@" will be
+ # turned into "foo%40". This in turn causes a problem for
+ # set_main_option() because that uses ConfigParser.set, which (by
+ # design) uses *python* interpolation to write the string out ... where
+ # "%" is the special python interpolation character! Avoid this
+ # mismatch by quoting all %'s for the set below.
+ engine_url = str(engine.url).replace('%', '%%')
+ config.set_main_option('sqlalchemy.url', str(engine_url))
+
+ # if we're in a deployment where sqlalchemy-migrate is already present,
+ # then apply all the updates for that and fake apply the initial
+ # alembic migration; if we're not, then 'upgrade' will take care of
+ # everything. This should be a one-time operation
+ if (
+ not _is_database_under_alembic_control(engine) and
+ _is_database_under_migrate_control(engine)
+ ):
+ _init_alembic_on_legacy_database(engine, config)
+
+ _upgrade_alembic(engine, config, branch)
-def _validate_upgrade_order(branch, target_repo_version=None):
- """Validate the state of the migration repositories.
+def _validate_upgrade_order(branch, *, engine=None):
+ """Validate the upgrade order of the migration branches.
This is run before allowing the db_sync command to execute. Ensure the
- upgrade step and version specified by the operator remains consistent with
- the upgrade process. I.e. expand's version is greater or equal to
- migrate's, migrate's version is greater or equal to contract's.
-
- :param branch: The name of the repository that the user is trying to
- upgrade.
- :param target_repo_version: The version to upgrade the repo. Otherwise, the
- version will be upgraded to the latest version
- available.
- """
- # Initialize a dict to have each key assigned a repo with their value being
- # the repo that comes before.
- db_sync_order = {
- DATA_MIGRATION_BRANCH: EXPAND_BRANCH,
- CONTRACT_BRANCH: DATA_MIGRATION_BRANCH,
- }
+ expand steps have been run before the contract steps.
+ :param branch: The name of the branch that the user is trying to
+ upgrade.
+ """
if branch == EXPAND_BRANCH:
return
- # find the latest version that the current command will upgrade to if there
- # wasn't a version specified for upgrade.
- if not target_repo_version:
- abs_path = _get_migrate_repo_path(branch)
- repo = _find_migrate_repo(abs_path)
- target_repo_version = int(repo.latest)
+ if branch == DATA_MIGRATION_BRANCH:
+ # this is a no-op in alembic land
+ return
- # get current version of the command that runs before the current command.
- dependency_repo_version = get_db_version(branch=db_sync_order[branch])
+ config = _find_alembic_conf()
- if dependency_repo_version < target_repo_version:
+ if engine is None:
+ with sql.session_for_read() as session:
+ engine = session.get_bind()
+
+ script = alembic_script.ScriptDirectory.from_config(config)
+ expand_head = None
+ for head in script.get_heads():
+ if EXPAND_BRANCH in script.get_revision(head).branch_labels:
+ expand_head = head
+ break
+
+ with engine.connect() as conn:
+ context = alembic_migration.MigrationContext.configure(conn)
+ current_heads = context.get_current_heads()
+
+ if expand_head not in current_heads:
raise db_exception.DBMigrationError(
- 'You are attempting to upgrade %s ahead of %s. Please refer to '
+ 'You are attempting to upgrade contract ahead of expand. '
+ 'Please refer to '
'https://docs.openstack.org/keystone/latest/admin/'
'identity-upgrading.html '
- 'to see the proper steps for rolling upgrades.' % (
- branch, db_sync_order[branch]))
+ 'to see the proper steps for rolling upgrades.'
+ )
-def expand_schema():
+def expand_schema(engine=None):
"""Expand the database schema ahead of data migration.
This is run manually by the keystone-manage command before the first
keystone node is migrated to the latest release.
"""
- _validate_upgrade_order(EXPAND_BRANCH)
- _db_sync(branch=EXPAND_BRANCH)
+ _validate_upgrade_order(EXPAND_BRANCH, engine=engine)
+ _db_sync(EXPAND_BRANCH, engine=engine)
-def migrate_data():
+def migrate_data(engine=None):
"""Migrate data to match the new schema.
This is run manually by the keystone-manage command once the keystone
schema has been expanded for the new release.
"""
- _validate_upgrade_order(DATA_MIGRATION_BRANCH)
- _db_sync(branch=DATA_MIGRATION_BRANCH)
+ print(
+ 'Data migrations are no longer supported with alembic. '
+ 'This is now a no-op.'
+ )
-def contract_schema():
+def contract_schema(engine=None):
"""Contract the database.
This is run manually by the keystone-manage command once the keystone
nodes have been upgraded to the latest release and will remove any old
tables/columns that are no longer required.
"""
- _validate_upgrade_order(CONTRACT_BRANCH)
- _db_sync(branch=CONTRACT_BRANCH)
+ _validate_upgrade_order(CONTRACT_BRANCH, engine=engine)
+ _db_sync(CONTRACT_BRANCH, engine=engine)
-def offline_sync_database_to_version(version=None):
+def offline_sync_database_to_version(version=None, *, engine=None):
"""Perform and off-line sync of the database.
Migrate the database up to the latest version, doing the equivalent of
@@ -252,6 +345,4 @@ def offline_sync_database_to_version(version=None):
if version:
raise Exception('Specifying a version is no longer supported')
- expand_schema()
- migrate_data()
- contract_schema()
+ _db_sync(engine=engine)
diff --git a/keystone/locale/de/LC_MESSAGES/keystone.po b/keystone/locale/de/LC_MESSAGES/keystone.po
index 8c3b16303..a126f83dc 100644
--- a/keystone/locale/de/LC_MESSAGES/keystone.po
+++ b/keystone/locale/de/LC_MESSAGES/keystone.po
@@ -12,7 +12,7 @@ msgid ""
msgstr ""
"Project-Id-Version: keystone VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2020-04-25 10:31+0000\n"
+"POT-Creation-Date: 2022-05-20 04:41+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -806,9 +806,6 @@ msgstr ""
"Region %(region_id)s kann nicht gelöscht werden, da sie oder ihr "
"untergeordnete Regionen über zugeordnete Endpunkte verfügen. "
-msgid "Unable to downgrade schema"
-msgstr "Das Schema konnte nicht herabgestuft werden."
-
#, python-format
msgid "Unable to locate domain config directory: %s"
msgstr "Domänenkonfigurationsverzeichnis wurde nicht gefunden: %s"
diff --git a/keystone/locale/en_GB/LC_MESSAGES/keystone.po b/keystone/locale/en_GB/LC_MESSAGES/keystone.po
index 191ed5596..aa775b0d7 100644
--- a/keystone/locale/en_GB/LC_MESSAGES/keystone.po
+++ b/keystone/locale/en_GB/LC_MESSAGES/keystone.po
@@ -8,15 +8,16 @@
# Andi Chandler <andi@gowling.com>, 2018. #zanata
# Andi Chandler <andi@gowling.com>, 2019. #zanata
# Andi Chandler <andi@gowling.com>, 2020. #zanata
+# Andi Chandler <andi@gowling.com>, 2022. #zanata
msgid ""
msgstr ""
"Project-Id-Version: keystone VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2021-01-08 19:57+0000\n"
+"POT-Creation-Date: 2022-05-20 04:41+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"PO-Revision-Date: 2020-10-28 02:12+0000\n"
+"PO-Revision-Date: 2022-05-25 08:57+0000\n"
"Last-Translator: Andi Chandler <andi@gowling.com>\n"
"Language: en_GB\n"
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
@@ -134,15 +135,6 @@ msgstr "Access token not found"
msgid "Additional authentications steps required."
msgstr "Additional authentications steps required."
-msgid ""
-"All extensions have been moved into keystone core and as such its migrations "
-"are maintained by the main keystone database control. Use the command: "
-"keystone-manage db_sync"
-msgstr ""
-"All extensions have been moved into Keystone core and as such its migrations "
-"are maintained by the main Keystone database control. Use the command: "
-"keystone-manage db_sync"
-
msgid "An unexpected error occurred when retrieving domain configs"
msgstr "An unexpected error occurred when retrieving domain configs"
@@ -1205,6 +1197,13 @@ msgstr "The action you have requested has not been implemented."
msgid "The authenticated user should match the trustor"
msgstr "The authenticated user should match the trustor"
+msgid ""
+"The database is not under version control, but has tables. Please stamp the "
+"current version of the schema manually."
+msgstr ""
+"The database is not under version control but has tables. Please stamp the "
+"current version of the schema manually."
+
#, python-format
msgid ""
"The given operator %(_op)s is not valid. It must be one of the following: "
@@ -1406,9 +1405,6 @@ msgstr ""
"Unable to delete region %(region_id)s because it or its child regions have "
"associated endpoints."
-msgid "Unable to downgrade schema"
-msgstr "Unable to downgrade schema"
-
#, python-format
msgid "Unable to establish a connection to LDAP Server (%(url)s)."
msgstr "Unable to establish a connection to LDAP Server (%(url)s)."
@@ -1760,3 +1756,6 @@ msgstr "tls_cacertdir %s not found or is not a directory"
#, python-format
msgid "tls_cacertfile %s not found or is not a file"
msgstr "tls_cacertfile %s not found or is not a file"
+
+msgid "version should be an integer"
+msgstr "version should be an integer"
diff --git a/keystone/locale/es/LC_MESSAGES/keystone.po b/keystone/locale/es/LC_MESSAGES/keystone.po
index 6bce54265..d585f728a 100644
--- a/keystone/locale/es/LC_MESSAGES/keystone.po
+++ b/keystone/locale/es/LC_MESSAGES/keystone.po
@@ -15,7 +15,7 @@ msgid ""
msgstr ""
"Project-Id-Version: keystone VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2019-09-30 20:17+0000\n"
+"POT-Creation-Date: 2022-05-20 04:41+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -748,9 +748,6 @@ msgstr ""
"No se puede suprimir la región %(region_id)s porque sus regiones secundarias "
"tienen puntos finales asociados."
-msgid "Unable to downgrade schema"
-msgstr "No se ha podido degradar el esquema"
-
#, python-format
msgid "Unable to locate domain config directory: %s"
msgstr "No se ha podido localizar el directorio config de dominio: %s"
diff --git a/keystone/locale/fr/LC_MESSAGES/keystone.po b/keystone/locale/fr/LC_MESSAGES/keystone.po
index 66540fd97..6d69341b5 100644
--- a/keystone/locale/fr/LC_MESSAGES/keystone.po
+++ b/keystone/locale/fr/LC_MESSAGES/keystone.po
@@ -14,7 +14,7 @@ msgid ""
msgstr ""
"Project-Id-Version: keystone VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2019-09-30 20:17+0000\n"
+"POT-Creation-Date: 2022-05-20 04:41+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -741,9 +741,6 @@ msgstr ""
"Impossible de supprimer la région %(region_id)s car la région ou ses régions "
"enfant ont des noeuds finals associés."
-msgid "Unable to downgrade schema"
-msgstr "Impossible de rétrograder le schéma"
-
#, python-format
msgid "Unable to locate domain config directory: %s"
msgstr "Impossible de localiser le répertoire de configuration domaine: %s"
diff --git a/keystone/locale/it/LC_MESSAGES/keystone.po b/keystone/locale/it/LC_MESSAGES/keystone.po
index 2bc580c20..c9384b0b8 100644
--- a/keystone/locale/it/LC_MESSAGES/keystone.po
+++ b/keystone/locale/it/LC_MESSAGES/keystone.po
@@ -8,7 +8,7 @@ msgid ""
msgstr ""
"Project-Id-Version: keystone VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2019-09-30 20:17+0000\n"
+"POT-Creation-Date: 2022-05-20 04:41+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -740,9 +740,6 @@ msgstr ""
"Impossibile eliminare la regione %(region_id)s perché la regione o le "
"relative regioni child hanno degli endpoint associati."
-msgid "Unable to downgrade schema"
-msgstr "Impossibile eseguire il downgrade dello schema"
-
#, python-format
msgid "Unable to locate domain config directory: %s"
msgstr "Impossibile individuare la directory config del dominio: %s"
diff --git a/keystone/locale/ja/LC_MESSAGES/keystone.po b/keystone/locale/ja/LC_MESSAGES/keystone.po
index 433c673b8..e62f4f492 100644
--- a/keystone/locale/ja/LC_MESSAGES/keystone.po
+++ b/keystone/locale/ja/LC_MESSAGES/keystone.po
@@ -9,7 +9,7 @@ msgid ""
msgstr ""
"Project-Id-Version: keystone VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2019-09-30 20:17+0000\n"
+"POT-Creation-Date: 2022-05-20 04:41+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -729,9 +729,6 @@ msgstr ""
"リージョン %(region_id)s またはその子リージョンがエンドポイントに関連付けられ"
"ているため、このリージョンを削除できません。"
-msgid "Unable to downgrade schema"
-msgstr "スキーマをダウングレードすることができません"
-
#, python-format
msgid "Unable to locate domain config directory: %s"
msgstr "ドメイン設定ディレクトリーが見つかりません: %s"
diff --git a/keystone/locale/ko_KR/LC_MESSAGES/keystone.po b/keystone/locale/ko_KR/LC_MESSAGES/keystone.po
index 102b67fa6..8c278558c 100644
--- a/keystone/locale/ko_KR/LC_MESSAGES/keystone.po
+++ b/keystone/locale/ko_KR/LC_MESSAGES/keystone.po
@@ -11,7 +11,7 @@ msgid ""
msgstr ""
"Project-Id-Version: keystone VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2019-09-30 20:17+0000\n"
+"POT-Creation-Date: 2022-05-20 04:41+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -712,9 +712,6 @@ msgstr ""
"리젼 %(region_id)s 또는 하위 리젼에 연관된 엔드포인트가 있어 삭제할 수 없습니"
"다."
-msgid "Unable to downgrade schema"
-msgstr "스키마를 다운그레이드할 수 없음"
-
#, python-format
msgid "Unable to locate domain config directory: %s"
msgstr "%s: 도메인 설정 디렉토리를 찾을 수 없습니다."
diff --git a/keystone/locale/pt_BR/LC_MESSAGES/keystone.po b/keystone/locale/pt_BR/LC_MESSAGES/keystone.po
index 853478f93..7516816b7 100644
--- a/keystone/locale/pt_BR/LC_MESSAGES/keystone.po
+++ b/keystone/locale/pt_BR/LC_MESSAGES/keystone.po
@@ -13,7 +13,7 @@ msgid ""
msgstr ""
"Project-Id-Version: keystone VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2019-09-30 20:17+0000\n"
+"POT-Creation-Date: 2022-05-20 04:41+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -728,9 +728,6 @@ msgstr ""
"Não foi possível excluir a região %(region_id)s, uma vez que ela ou suas "
"regiões filhas possuem terminais associados."
-msgid "Unable to downgrade schema"
-msgstr "Não é possível fazer downgrade do esquema"
-
#, python-format
msgid "Unable to locate domain config directory: %s"
msgstr "Não é possível localizar diretório de configuração de domínio: %s"
diff --git a/keystone/locale/ru/LC_MESSAGES/keystone.po b/keystone/locale/ru/LC_MESSAGES/keystone.po
index 542b138f2..56e50d9c0 100644
--- a/keystone/locale/ru/LC_MESSAGES/keystone.po
+++ b/keystone/locale/ru/LC_MESSAGES/keystone.po
@@ -11,7 +11,7 @@ msgid ""
msgstr ""
"Project-Id-Version: keystone VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2019-09-30 20:17+0000\n"
+"POT-Creation-Date: 2022-05-20 04:41+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -723,9 +723,6 @@ msgstr ""
"Не удалось удалить регион %(region_id)s: регион или его дочерние регионы "
"имеют связанные конечные точки."
-msgid "Unable to downgrade schema"
-msgstr "Не удается понизить версию схемы"
-
#, python-format
msgid "Unable to locate domain config directory: %s"
msgstr "Не удалось найти каталог конфигурации домена: %s"
diff --git a/keystone/locale/zh_CN/LC_MESSAGES/keystone.po b/keystone/locale/zh_CN/LC_MESSAGES/keystone.po
index 27b9c6f4e..cb194dc71 100644
--- a/keystone/locale/zh_CN/LC_MESSAGES/keystone.po
+++ b/keystone/locale/zh_CN/LC_MESSAGES/keystone.po
@@ -11,16 +11,18 @@
# 颜海峰 <yanheven@gmail.com>, 2014
# Andreas Jaeger <jaegerandi@gmail.com>, 2016. #zanata
# Eric Lei <1165970798@qq.com>, 2016. #zanata
+# Research and Development Center UnitedStack <dev@unitedstack.com>, 2022. #zanata
msgid ""
msgstr ""
"Project-Id-Version: keystone VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2019-09-30 20:17+0000\n"
+"POT-Creation-Date: 2022-07-01 18:11+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"PO-Revision-Date: 2016-09-28 03:23+0000\n"
-"Last-Translator: Eric Lei <1165970798@qq.com>\n"
+"PO-Revision-Date: 2022-06-14 12:29+0000\n"
+"Last-Translator: Research and Development Center UnitedStack "
+"<dev@unitedstack.com>\n"
"Language: zh_CN\n"
"Plural-Forms: nplurals=1; plural=0;\n"
"Generated-By: Babel 2.0\n"
@@ -654,9 +656,6 @@ msgid ""
"associated endpoints."
msgstr "无法删除区域 %(region_id)s,因为它或它的子区域具有关联的端点。"
-msgid "Unable to downgrade schema"
-msgstr "无法对模式进行降级"
-
#, python-format
msgid "Unable to locate domain config directory: %s"
msgstr "找不到指定的域配置目录:%s"
@@ -732,6 +731,14 @@ msgstr "用户类型 %s 不受支持"
msgid "You are not authorized to perform the requested action."
msgstr "您没有授权完成所请求的操作。"
+msgid ""
+"You cannot change your password at this time due to password policy "
+"disallowing password changes. Please contact your administrator to reset "
+"your password."
+msgstr ""
+"因为密码策略被设置为禁止修改密码,目前您不能更改密码。请联系管理员重置您的密"
+"码。"
+
#, python-format
msgid ""
"You cannot change your password at this time due to the minimum password "
@@ -740,7 +747,7 @@ msgid ""
"contact your administrator to reset your password."
msgstr ""
"没有达到密码最小使用时长,目前您不能更改密码。一旦您修改了密码,在下次可被修"
-"改前该密码必须使用%(min_age_days)d天.请在%(days_left)d天后重试,或者联系管理"
+"改前该密码必须使用%(min_age_days)d天。请在%(days_left)d天后重试,或者联系管理"
"员重置您的密码。"
msgid ""
diff --git a/keystone/locale/zh_TW/LC_MESSAGES/keystone.po b/keystone/locale/zh_TW/LC_MESSAGES/keystone.po
index 4d0399c9d..4529c4bc0 100644
--- a/keystone/locale/zh_TW/LC_MESSAGES/keystone.po
+++ b/keystone/locale/zh_TW/LC_MESSAGES/keystone.po
@@ -8,7 +8,7 @@ msgid ""
msgstr ""
"Project-Id-Version: keystone VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2019-09-30 20:17+0000\n"
+"POT-Creation-Date: 2022-05-20 04:41+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -638,9 +638,6 @@ msgid ""
"associated endpoints."
msgstr "無法刪除區域 %(region_id)s,因為此區域或其子區域具有相關聯的端點。"
-msgid "Unable to downgrade schema"
-msgstr "無法將綱目降級"
-
#, python-format
msgid "Unable to locate domain config directory: %s"
msgstr "找不到網域配置目錄:%s"
diff --git a/keystone/server/flask/application.py b/keystone/server/flask/application.py
index bb572fde6..5c4c0b065 100644
--- a/keystone/server/flask/application.py
+++ b/keystone/server/flask/application.py
@@ -83,10 +83,8 @@ def _handle_keystone_exception(error):
LOG.warning(
"Authorization failed. %(exception)s from %(remote_addr)s",
{'exception': error, 'remote_addr': flask.request.remote_addr})
- elif isinstance(error, exception.UnexpectedError):
- LOG.exception(str(error))
else:
- LOG.warning(str(error))
+ LOG.exception(str(error))
# Render the exception to something user "friendly"
error_message = error.args[0]
diff --git a/keystone/tests/unit/base_classes.py b/keystone/tests/unit/base_classes.py
index 95bf7fa02..9bf3b50eb 100644
--- a/keystone/tests/unit/base_classes.py
+++ b/keystone/tests/unit/base_classes.py
@@ -31,7 +31,7 @@ class TestCaseWithBootstrap(core.BaseTestCase):
Re-implementation of TestCase that doesn't load a bunch of fixtures by
hand and instead uses the bootstrap process. This makes it so that our base
tests have the same things available to us as operators after they run
- boostrap. It also makes our tests DRY and pushes setup required for
+ bootstrap. It also makes our tests DRY and pushes setup required for
specific tests into the actual test class, instead of pushing it into a
generic structure that gets loaded for every test.
@@ -46,7 +46,7 @@ class TestCaseWithBootstrap(core.BaseTestCase):
ksfixtures.KeyRepository(
self.config_fixture,
'fernet_tokens',
- CONF.fernet_tokens.max_active_keys
+ CONF.fernet_tokens.max_active_keys,
)
)
@@ -54,7 +54,7 @@ class TestCaseWithBootstrap(core.BaseTestCase):
ksfixtures.KeyRepository(
self.config_fixture,
'fernet_receipts',
- CONF.fernet_receipts.max_active_keys
+ CONF.fernet_receipts.max_active_keys,
)
)
@@ -72,7 +72,8 @@ class TestCaseWithBootstrap(core.BaseTestCase):
try:
PROVIDERS.resource_api.create_domain(
default_fixtures.ROOT_DOMAIN['id'],
- default_fixtures.ROOT_DOMAIN)
+ default_fixtures.ROOT_DOMAIN,
+ )
except exception.Conflict:
pass
diff --git a/keystone/tests/unit/common/sql/__init__.py b/keystone/tests/unit/common/sql/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/keystone/tests/unit/common/sql/__init__.py
diff --git a/keystone/tests/unit/common/sql/test_upgrades.py b/keystone/tests/unit/common/sql/test_upgrades.py
index c6c4a2e56..bb53cbd23 100644
--- a/keystone/tests/unit/common/sql/test_upgrades.py
+++ b/keystone/tests/unit/common/sql/test_upgrades.py
@@ -10,243 +10,331 @@
# License for the specific language governing permissions and limitations
# under the License.
-import os
-import tempfile
-from unittest import mock
+"""Tests for database migrations for the database.
-from migrate import exceptions as migrate_exception
+These are "opportunistic" tests which allow testing against all three databases
+(sqlite in memory, mysql, pg) in a properly configured unit test environment.
+
+For the opportunistic testing you need to set up DBs named 'openstack_citest'
+with user 'openstack_citest' and password 'openstack_citest' on localhost. The
+test will then use that DB and username/password combo to run the tests.
+"""
+
+import fixtures
from migrate.versioning import api as migrate_api
-from migrate.versioning import repository as migrate_repository
-from oslo_db import exception as db_exception
+from oslo_db import options as db_options
from oslo_db.sqlalchemy import enginefacade
-from oslo_db.sqlalchemy import test_fixtures as db_fixtures
-from oslotest import base as test_base
-import sqlalchemy
+from oslo_db.sqlalchemy import test_fixtures
+from oslo_db.sqlalchemy import test_migrations
+from oslo_log.fixture import logging_error as log_fixture
+from oslo_log import log as logging
+from oslotest import base
+from keystone.common import sql
from keystone.common.sql import upgrades
-from keystone.common import utils
+import keystone.conf
+from keystone.tests.unit import ksfixtures
+
+# We need to import all of these so the tables are registered. It would be
+# easier if these were all in a central location :(
+import keystone.application_credential.backends.sql # noqa: F401
+import keystone.assignment.backends.sql # noqa: F401
+import keystone.assignment.role_backends.sql_model # noqa: F401
+import keystone.catalog.backends.sql # noqa: F401
+import keystone.credential.backends.sql # noqa: F401
+import keystone.endpoint_policy.backends.sql # noqa: F401
+import keystone.federation.backends.sql # noqa: F401
+import keystone.identity.backends.sql_model # noqa: F401
+import keystone.identity.mapping_backends.sql # noqa: F401
+import keystone.limit.backends.sql # noqa: F401
+import keystone.oauth1.backends.sql # noqa: F401
+import keystone.policy.backends.sql # noqa: F401
+import keystone.resource.backends.sql_model # noqa: F401
+import keystone.resource.config_backends.sql # noqa: F401
+import keystone.revoke.backends.sql # noqa: F401
+import keystone.trust.backends.sql # noqa: F401
+
+CONF = keystone.conf.CONF
+LOG = logging.getLogger(__name__)
+
+
+class KeystoneModelsMigrationsSync(test_migrations.ModelsMigrationsSync):
+ """Test sqlalchemy-migrate migrations."""
+
+ # Migrations can take a long time, particularly on underpowered CI nodes.
+ # Give them some breathing room.
+ TIMEOUT_SCALING_FACTOR = 4
+ def setUp(self):
+ # Ensure BaseTestCase's ConfigureLogging fixture is disabled since
+ # we're using our own (StandardLogging).
+ with fixtures.EnvironmentVariable('OS_LOG_CAPTURE', '0'):
+ super().setUp()
-class TestMigrationCommon(
- db_fixtures.OpportunisticDBTestMixin, test_base.BaseTestCase,
+ self.useFixture(log_fixture.get_logging_handle_error_fixture())
+ self.useFixture(ksfixtures.WarningsFixture())
+ self.useFixture(ksfixtures.StandardLogging())
+
+ self.engine = enginefacade.writer.get_engine()
+
+ # Configure our connection string in CONF and enable SQLite fkeys
+ db_options.set_defaults(CONF, connection=self.engine.url)
+
+ # TODO(stephenfin): Do we need this? I suspect not since we're using
+ # enginefacade.write.get_engine() directly above
+ # Override keystone's context manager to be oslo.db's global context
+ # manager.
+ sql.core._TESTING_USE_GLOBAL_CONTEXT_MANAGER = True
+ self.addCleanup(setattr,
+ sql.core, '_TESTING_USE_GLOBAL_CONTEXT_MANAGER', False)
+ self.addCleanup(sql.cleanup)
+
+ def db_sync(self, engine):
+ upgrades.offline_sync_database_to_version(engine=engine)
+
+ def get_engine(self):
+ return self.engine
+
+ def get_metadata(self):
+ return sql.ModelBase.metadata
+
+ def include_object(self, object_, name, type_, reflected, compare_to):
+ if type_ == 'table':
+ # migrate_version is a sqlalchemy-migrate control table and
+ # isn't included in the models
+ if name == 'migrate_version':
+ return False
+
+ # This is created in tests and isn't a "real" table
+ if name == 'test_table':
+ return False
+
+ # FIXME(stephenfin): This was dropped in commit 93aff6e42 but the
+ # migrations were never adjusted
+ if name == 'token':
+ return False
+
+ return True
+
+ def filter_metadata_diff(self, diff):
+ """Filter changes before assert in test_models_sync().
+
+ :param diff: a list of differences (see `compare_metadata()` docs for
+ details on format)
+ :returns: a list of differences
+ """
+ new_diff = []
+ for element in diff:
+ # The modify_foo elements are lists; everything else is a tuple
+ if isinstance(element, list):
+ if element[0][0] == 'modify_nullable':
+ if (element[0][2], element[0][3]) in (
+ ('credential', 'encrypted_blob'),
+ ('credential', 'key_hash'),
+ ('federated_user', 'user_id'),
+ ('federated_user', 'idp_id'),
+ ('local_user', 'user_id'),
+ ('nonlocal_user', 'user_id'),
+ ('password', 'local_user_id'),
+ ):
+ continue # skip
+
+ if element[0][0] == 'modify_default':
+ if (element[0][2], element[0][3]) in (
+ ('password', 'created_at_int'),
+ ('password', 'self_service'),
+ ('project', 'is_domain'),
+ ('service_provider', 'relay_state_prefix'),
+ ):
+ continue # skip
+ else:
+ if element[0] == 'add_constraint':
+ if (
+ element[1].table.name,
+ [x.name for x in element[1].columns],
+ ) in (
+ ('project_tag', ['project_id', 'name']),
+ (
+ 'trust',
+ [
+ 'trustor_user_id',
+ 'trustee_user_id',
+ 'project_id',
+ 'impersonation',
+ 'expires_at',
+ ],
+ ),
+ ):
+ continue # skip
+
+ # FIXME(stephenfin): These have a different name on PostgreSQL.
+ # Resolve by renaming the constraint on the models.
+ if element[0] == 'remove_constraint':
+ if (
+ element[1].table.name,
+ [x.name for x in element[1].columns],
+ ) in (
+ ('access_rule', ['external_id']),
+ (
+ 'trust',
+ [
+ 'trustor_user_id',
+ 'trustee_user_id',
+ 'project_id',
+ 'impersonation',
+ 'expires_at',
+ 'expires_at_int',
+ ],
+ ),
+ ):
+ continue # skip
+
+ # FIXME(stephenfin): These indexes are present in the
+ # migrations but not on the equivalent models. Resolve by
+ # updating the models.
+ if element[0] == 'add_index':
+ if (
+ element[1].table.name,
+ [x.name for x in element[1].columns],
+ ) in (
+ ('access_rule', ['external_id']),
+ ('access_rule', ['user_id']),
+ ('revocation_event', ['revoked_at']),
+ ('system_assignment', ['actor_id']),
+ ('user', ['default_project_id']),
+ ):
+ continue # skip
+
+ # FIXME(stephenfin): These indexes are present on the models
+ # but not in the migrations. Resolve by either removing from
+ # the models or adding new migrations.
+ if element[0] == 'remove_index':
+ if (
+ element[1].table.name,
+ [x.name for x in element[1].columns],
+ ) in (
+ ('access_rule', ['external_id']),
+ ('access_rule', ['user_id']),
+ ('access_token', ['consumer_id']),
+ ('endpoint', ['service_id']),
+ ('revocation_event', ['revoked_at']),
+ ('user', ['default_project_id']),
+ ('user_group_membership', ['group_id']),
+ (
+ 'trust',
+ [
+ 'trustor_user_id',
+ 'trustee_user_id',
+ 'project_id',
+ 'impersonation',
+ 'expires_at',
+ 'expires_at_int',
+ ],
+ ),
+ (),
+ ):
+ continue # skip
+
+ # FIXME(stephenfin): These fks are present in the
+ # migrations but not on the equivalent models. Resolve by
+ # updating the models.
+ if element[0] == 'add_fk':
+ if (element[1].table.name, element[1].column_keys) in (
+ (
+ 'application_credential_access_rule',
+ ['access_rule_id'],
+ ),
+ ('limit', ['registered_limit_id']),
+ ('registered_limit', ['service_id']),
+ ('registered_limit', ['region_id']),
+ ('endpoint', ['region_id']),
+ ):
+ continue # skip
+
+ # FIXME(stephenfin): These indexes are present on the models
+ # but not in the migrations. Resolve by either removing from
+ # the models or adding new migrations.
+ if element[0] == 'remove_fk':
+ if (element[1].table.name, element[1].column_keys) in (
+ (
+ 'application_credential_access_rule',
+ ['access_rule_id'],
+ ),
+ ('endpoint', ['region_id']),
+ ('assignment', ['role_id']),
+ ):
+ continue # skip
+
+ new_diff.append(element)
+
+ return new_diff
+
+
+class TestModelsSyncSQLite(
+ KeystoneModelsMigrationsSync,
+ test_fixtures.OpportunisticDBTestMixin,
+ base.BaseTestCase,
):
+ pass
- def setUp(self):
- super().setUp()
- self.engine = enginefacade.writer.get_engine()
+class TestModelsSyncMySQL(
+ KeystoneModelsMigrationsSync,
+ test_fixtures.OpportunisticDBTestMixin,
+ base.BaseTestCase,
+):
+ FIXTURE = test_fixtures.MySQLOpportunisticFixture
- self.path = tempfile.mkdtemp('test_migration')
- self.path1 = tempfile.mkdtemp('test_migration')
- self.return_value = '/home/openstack/migrations'
- self.return_value1 = '/home/extension/migrations'
- self.init_version = 1
- self.test_version = 123
-
- self.patcher_repo = mock.patch.object(migrate_repository, 'Repository')
- self.repository = self.patcher_repo.start()
- self.repository.side_effect = [self.return_value, self.return_value1]
-
- self.mock_api_db = mock.patch.object(migrate_api, 'db_version')
- self.mock_api_db_version = self.mock_api_db.start()
- self.mock_api_db_version.return_value = self.test_version
-
- def tearDown(self):
- os.rmdir(self.path)
- self.mock_api_db.stop()
- self.patcher_repo.stop()
- super().tearDown()
-
- def test_find_migrate_repo_path_not_found(self):
- self.assertRaises(
- db_exception.DBMigrationError,
- upgrades._find_migrate_repo,
- "/foo/bar/",
- )
-
- def test_find_migrate_repo_called_once(self):
- my_repository = upgrades._find_migrate_repo(self.path)
- self.repository.assert_called_once_with(self.path)
- self.assertEqual(self.return_value, my_repository)
-
- def test_find_migrate_repo_called_few_times(self):
- repo1 = upgrades._find_migrate_repo(self.path)
- repo2 = upgrades._find_migrate_repo(self.path1)
- self.assertNotEqual(repo1, repo2)
-
- def test_db_version_control(self):
- with utils.nested_contexts(
- mock.patch.object(upgrades, '_find_migrate_repo'),
- mock.patch.object(migrate_api, 'version_control'),
- ) as (mock_find_repo, mock_version_control):
- mock_find_repo.return_value = self.return_value
-
- version = upgrades._migrate_db_version_control(
- self.engine, self.path, self.test_version)
-
- self.assertEqual(self.test_version, version)
- mock_version_control.assert_called_once_with(
- self.engine, self.return_value, self.test_version)
-
- @mock.patch.object(upgrades, '_find_migrate_repo')
- @mock.patch.object(migrate_api, 'version_control')
- def test_db_version_control_version_less_than_actual_version(
- self, mock_version_control, mock_find_repo,
- ):
- mock_find_repo.return_value = self.return_value
- mock_version_control.side_effect = \
- migrate_exception.DatabaseAlreadyControlledError
- self.assertRaises(
- db_exception.DBMigrationError,
- upgrades._migrate_db_version_control, self.engine,
- self.path, self.test_version - 1)
-
- @mock.patch.object(upgrades, '_find_migrate_repo')
- @mock.patch.object(migrate_api, 'version_control')
- def test_db_version_control_version_greater_than_actual_version(
- self, mock_version_control, mock_find_repo,
- ):
- mock_find_repo.return_value = self.return_value
- mock_version_control.side_effect = \
- migrate_exception.InvalidVersionError
- self.assertRaises(
- db_exception.DBMigrationError,
- upgrades._migrate_db_version_control, self.engine,
- self.path, self.test_version + 1)
-
- def test_db_version_return(self):
- ret_val = upgrades._migrate_db_version(
- self.engine, self.path, self.init_version)
- self.assertEqual(self.test_version, ret_val)
-
- def test_db_version_raise_not_controlled_error_first(self):
- with mock.patch.object(
- upgrades, '_migrate_db_version_control',
- ) as mock_ver:
- self.mock_api_db_version.side_effect = [
- migrate_exception.DatabaseNotControlledError('oups'),
- self.test_version]
-
- ret_val = upgrades._migrate_db_version(
- self.engine, self.path, self.init_version)
- self.assertEqual(self.test_version, ret_val)
- mock_ver.assert_called_once_with(
- self.engine, self.path, version=self.init_version)
-
- def test_db_version_raise_not_controlled_error_tables(self):
- with mock.patch.object(sqlalchemy, 'MetaData') as mock_meta:
- self.mock_api_db_version.side_effect = \
- migrate_exception.DatabaseNotControlledError('oups')
- my_meta = mock.MagicMock()
- my_meta.tables = {'a': 1, 'b': 2}
- mock_meta.return_value = my_meta
-
- self.assertRaises(
- db_exception.DBMigrationError, upgrades._migrate_db_version,
- self.engine, self.path, self.init_version)
-
- @mock.patch.object(migrate_api, 'version_control')
- def test_db_version_raise_not_controlled_error_no_tables(self, mock_vc):
- with mock.patch.object(sqlalchemy, 'MetaData') as mock_meta:
- self.mock_api_db_version.side_effect = (
- migrate_exception.DatabaseNotControlledError('oups'),
- self.init_version)
- my_meta = mock.MagicMock()
- my_meta.tables = {}
- mock_meta.return_value = my_meta
-
- upgrades._migrate_db_version(
- self.engine, self.path, self.init_version)
-
- mock_vc.assert_called_once_with(
- self.engine, self.return_value1, self.init_version)
-
- @mock.patch.object(migrate_api, 'version_control')
- def test_db_version_raise_not_controlled_alembic_tables(self, mock_vc):
- # When there are tables but the alembic control table
- # (alembic_version) is present, attempt to version the db.
- # This simulates the case where there is are multiple repos (different
- # abs_paths) and a different path has been versioned already.
- with mock.patch.object(sqlalchemy, 'MetaData') as mock_meta:
- self.mock_api_db_version.side_effect = [
- migrate_exception.DatabaseNotControlledError('oups'), None]
- my_meta = mock.MagicMock()
- my_meta.tables = {'alembic_version': 1, 'b': 2}
- mock_meta.return_value = my_meta
-
- upgrades._migrate_db_version(
- self.engine, self.path, self.init_version)
-
- mock_vc.assert_called_once_with(
- self.engine, self.return_value1, self.init_version)
-
- @mock.patch.object(migrate_api, 'version_control')
- def test_db_version_raise_not_controlled_migrate_tables(self, mock_vc):
- # When there are tables but the sqlalchemy-migrate control table
- # (migrate_version) is present, attempt to version the db.
- # This simulates the case where there is are multiple repos (different
- # abs_paths) and a different path has been versioned already.
- with mock.patch.object(sqlalchemy, 'MetaData') as mock_meta:
- self.mock_api_db_version.side_effect = [
- migrate_exception.DatabaseNotControlledError('oups'), None]
- my_meta = mock.MagicMock()
- my_meta.tables = {'migrate_version': 1, 'b': 2}
- mock_meta.return_value = my_meta
-
- upgrades._migrate_db_version(
- self.engine, self.path, self.init_version)
-
- mock_vc.assert_called_once_with(
- self.engine, self.return_value1, self.init_version)
-
- def test_db_sync_wrong_version(self):
- self.assertRaises(
- db_exception.DBMigrationError,
- upgrades._migrate_db_sync, self.engine, self.path, 'foo')
-
- @mock.patch.object(migrate_api, 'upgrade')
- def test_db_sync_script_not_present(self, upgrade):
- # For non existent upgrades script file sqlalchemy-migrate will raise
- # VersionNotFoundError which will be wrapped in DBMigrationError.
- upgrade.side_effect = migrate_exception.VersionNotFoundError
- self.assertRaises(
- db_exception.DBMigrationError,
- upgrades._migrate_db_sync, self.engine, self.path,
- self.test_version + 1)
-
- @mock.patch.object(migrate_api, 'upgrade')
- def test_db_sync_known_error_raised(self, upgrade):
- upgrade.side_effect = migrate_exception.KnownError
- self.assertRaises(
- db_exception.DBMigrationError,
- upgrades._migrate_db_sync, self.engine, self.path,
- self.test_version + 1)
-
- def test_db_sync_upgrade(self):
- init_ver = 55
- with utils.nested_contexts(
- mock.patch.object(upgrades, '_find_migrate_repo'),
- mock.patch.object(migrate_api, 'upgrade')
- ) as (mock_find_repo, mock_upgrade):
- mock_find_repo.return_value = self.return_value
- self.mock_api_db_version.return_value = self.test_version - 1
-
- upgrades._migrate_db_sync(
- self.engine, self.path, self.test_version, init_ver)
-
- mock_upgrade.assert_called_once_with(
- self.engine, self.return_value, self.test_version)
-
- def test_db_sync_downgrade(self):
- with utils.nested_contexts(
- mock.patch.object(upgrades, '_find_migrate_repo'),
- mock.patch.object(migrate_api, 'downgrade')
- ) as (mock_find_repo, mock_downgrade):
- mock_find_repo.return_value = self.return_value
- self.mock_api_db_version.return_value = self.test_version + 1
-
- upgrades._migrate_db_sync(
- self.engine, self.path, self.test_version)
-
- mock_downgrade.assert_called_once_with(
- self.engine, self.return_value, self.test_version)
+
+class TestModelsSyncPostgreSQL(
+ KeystoneModelsMigrationsSync,
+ test_fixtures.OpportunisticDBTestMixin,
+ base.BaseTestCase,
+):
+ FIXTURE = test_fixtures.PostgresqlOpportunisticFixture
+
+
+class KeystoneModelsMigrationsLegacySync(KeystoneModelsMigrationsSync):
+ """Test that the models match the database after old migrations are run."""
+
+ def db_sync(self, engine):
+ # the 'upgrades._db_sync' method will not use the legacy
+ # sqlalchemy-migrate-based migration flow unless the database is
+ # already controlled with sqlalchemy-migrate, so we need to manually
+ # enable version controlling with this tool to test this code path
+ for branch in (
+ upgrades.EXPAND_BRANCH,
+ upgrades.DATA_MIGRATION_BRANCH,
+ upgrades.CONTRACT_BRANCH,
+ ):
+ repository = upgrades._find_migrate_repo(branch)
+ migrate_api.version_control(
+ engine, repository, upgrades.MIGRATE_INIT_VERSION)
+
+ # now we can apply migrations as expected and the legacy path will be
+ # followed
+ super().db_sync(engine)
+
+
+class TestModelsLegacySyncSQLite(
+ KeystoneModelsMigrationsLegacySync,
+ test_fixtures.OpportunisticDBTestMixin,
+ base.BaseTestCase,
+):
+ pass
+
+
+class TestModelsLegacySyncMySQL(
+ KeystoneModelsMigrationsLegacySync,
+ test_fixtures.OpportunisticDBTestMixin,
+ base.BaseTestCase,
+):
+ FIXTURE = test_fixtures.MySQLOpportunisticFixture
+
+
+class TestModelsLegacySyncPostgreSQL(
+ KeystoneModelsMigrationsLegacySync,
+ test_fixtures.OpportunisticDBTestMixin,
+ base.BaseTestCase,
+):
+ FIXTURE = test_fixtures.PostgresqlOpportunisticFixture
diff --git a/keystone/tests/unit/ksfixtures/warnings.py b/keystone/tests/unit/ksfixtures/warnings.py
index 9e3a9c4d4..43519925f 100644
--- a/keystone/tests/unit/ksfixtures/warnings.py
+++ b/keystone/tests/unit/ksfixtures/warnings.py
@@ -35,6 +35,23 @@ class WarningsFixture(fixtures.Fixture):
module='^keystone\\.',
)
+ warnings.filterwarnings(
+ 'ignore',
+ message=(
+ 'Policy enforcement is depending on the value of '
+ '(token|group_ids). '
+ 'This key is deprecated. Please update your policy '
+ 'file to use the standard policy values.'
+ ),
+ )
+
+ # NOTE(stephenfin): Ignore scope check UserWarnings from oslo.policy.
+ warnings.filterwarnings(
+ 'ignore',
+ message="Policy .* failed scope check",
+ category=UserWarning,
+ )
+
# TODO(stephenfin): This will be fixed once we drop sqlalchemy-migrate
warnings.filterwarnings(
'ignore',
diff --git a/keystone/tests/unit/test_cli.py b/keystone/tests/unit/test_cli.py
index c94d8c196..2f9bed064 100644
--- a/keystone/tests/unit/test_cli.py
+++ b/keystone/tests/unit/test_cli.py
@@ -754,18 +754,28 @@ class CliDBSyncTestCase(unit.BaseTestCase):
self.version = None
def setUp(self):
- super(CliDBSyncTestCase, self).setUp()
+ super().setUp()
self.config_fixture = self.useFixture(oslo_config.fixture.Config(CONF))
self.config_fixture.register_cli_opt(cli.command_opt)
- upgrades.offline_sync_database_to_version = mock.Mock()
- upgrades.expand_schema = mock.Mock()
- upgrades.migrate_data = mock.Mock()
- upgrades.contract_schema = mock.Mock()
+
+ self.patchers = patchers = [
+ mock.patch.object(upgrades, "offline_sync_database_to_version"),
+ mock.patch.object(upgrades, "expand_schema"),
+ mock.patch.object(upgrades, "migrate_data"),
+ mock.patch.object(upgrades, "contract_schema"),
+ ]
+ for p in patchers:
+ p.start()
self.command_check = False
self.command_expand = False
self.command_migrate = False
self.command_contract = False
+ def tearDown(self):
+ for p in self.patchers:
+ p.stop()
+ super().tearDown()
+
def _assert_correct_call(self, mocked_function):
for func in [upgrades.offline_sync_database_to_version,
upgrades.expand_schema,
diff --git a/keystone/tests/unit/test_sql_banned_operations.py b/keystone/tests/unit/test_sql_banned_operations.py
index 2a9be1029..95ba2368d 100644
--- a/keystone/tests/unit/test_sql_banned_operations.py
+++ b/keystone/tests/unit/test_sql_banned_operations.py
@@ -1,10 +1,8 @@
-# Copyright 2016 Intel Corporation
-#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
@@ -14,20 +12,38 @@
import os
+from alembic import command as alembic_api
+from alembic import script as alembic_script
import fixtures
-from migrate.versioning import api as versioning_api
-from migrate.versioning import repository
from oslo_db.sqlalchemy import enginefacade
-from oslo_db.sqlalchemy import test_fixtures as db_fixtures
-from oslo_db.sqlalchemy import test_migrations
-from oslotest import base as test_base
-import sqlalchemy
-import testtools
-
-from keystone.common.sql.legacy_migrations import contract_repo
-from keystone.common.sql.legacy_migrations import data_migration_repo
-from keystone.common.sql.legacy_migrations import expand_repo
+from oslo_db.sqlalchemy import test_fixtures
+from oslo_log import log as logging
+
+from keystone.common import sql
from keystone.common.sql import upgrades
+import keystone.conf
+from keystone.tests import unit
+
+# We need to import all of these so the tables are registered. It would be
+# easier if these were all in a central location :(
+import keystone.application_credential.backends.sql # noqa: F401
+import keystone.assignment.backends.sql # noqa: F401
+import keystone.assignment.role_backends.sql_model # noqa: F401
+import keystone.catalog.backends.sql # noqa: F401
+import keystone.credential.backends.sql # noqa: F401
+import keystone.endpoint_policy.backends.sql # noqa: F401
+import keystone.federation.backends.sql # noqa: F401
+import keystone.identity.backends.sql_model # noqa: F401
+import keystone.identity.mapping_backends.sql # noqa: F401
+import keystone.limit.backends.sql # noqa: F401
+import keystone.oauth1.backends.sql # noqa: F401
+import keystone.policy.backends.sql # noqa: F401
+import keystone.resource.backends.sql_model # noqa: F401
+import keystone.resource.config_backends.sql # noqa: F401
+import keystone.revoke.backends.sql # noqa: F401
+import keystone.trust.backends.sql # noqa: F401
+
+LOG = logging.getLogger(__name__)
class DBOperationNotAllowed(Exception):
@@ -37,322 +53,228 @@ class DBOperationNotAllowed(Exception):
class BannedDBSchemaOperations(fixtures.Fixture):
"""Ban some operations for migrations."""
- def __init__(self, banned_ops, migration_repo):
+ def __init__(self, banned_ops, revision):
super().__init__()
self._banned_ops = banned_ops or {}
- self._migration_repo = migration_repo
+ self._revision = revision
@staticmethod
- def _explode(resource_op, repo):
- # Extract the repo name prior to the trailing '/__init__.py'
- repo_name = repo.split('/')[-2]
- raise DBOperationNotAllowed(
- 'Operation %s() is not allowed in %s database migrations' % (
- resource_op, repo_name))
+ def _explode(op, revision):
+ msg = "Operation '%s' is not allowed in migration %s"
+ raise DBOperationNotAllowed(msg % (op, revision))
def setUp(self):
super().setUp()
explode_lambda = {
- 'Table.create': lambda *a, **k: self._explode(
- 'Table.create', self._migration_repo),
- 'Table.alter': lambda *a, **k: self._explode(
- 'Table.alter', self._migration_repo),
- 'Table.drop': lambda *a, **k: self._explode(
- 'Table.drop', self._migration_repo),
- 'Table.insert': lambda *a, **k: self._explode(
- 'Table.insert', self._migration_repo),
- 'Table.update': lambda *a, **k: self._explode(
- 'Table.update', self._migration_repo),
- 'Table.delete': lambda *a, **k: self._explode(
- 'Table.delete', self._migration_repo),
- 'Column.create': lambda *a, **k: self._explode(
- 'Column.create', self._migration_repo),
- 'Column.alter': lambda *a, **k: self._explode(
- 'Column.alter', self._migration_repo),
- 'Column.drop': lambda *a, **k: self._explode(
- 'Column.drop', self._migration_repo)
+ x: lambda *a, **k: self._explode(x, self._revision)
+ for x in [
+ 'add_column',
+ 'alter_column',
+ 'batch_alter_table',
+ 'bulk_insert',
+ 'create_check_constraint',
+ 'create_exclude_constraint',
+ 'create_foreign_key',
+ 'create_index',
+ 'create_primary_key',
+ 'create_table',
+ 'create_table_comment',
+ 'create_unique_constraint',
+ 'drop_column',
+ 'drop_constraint',
+ 'drop_index',
+ 'drop_table',
+ 'drop_table_comment',
+ # 'execute',
+ 'rename_table',
+ ]
}
- for resource in self._banned_ops:
- for op in self._banned_ops[resource]:
- resource_op = '%(resource)s.%(op)s' % {
- 'resource': resource, 'op': op}
- self.useFixture(fixtures.MonkeyPatch(
- 'sqlalchemy.%s' % resource_op,
- explode_lambda[resource_op]))
-
-
-class TestBannedDBSchemaOperations(testtools.TestCase):
- """Test the BannedDBSchemaOperations fixture."""
-
- def test_column(self):
- """Test column operations raise DBOperationNotAllowed."""
- column = sqlalchemy.Column()
- with BannedDBSchemaOperations(
- banned_ops={'Column': ['create', 'alter', 'drop']},
- migration_repo=expand_repo.__file__,
- ):
- self.assertRaises(DBOperationNotAllowed, column.drop)
- self.assertRaises(DBOperationNotAllowed, column.alter)
- self.assertRaises(DBOperationNotAllowed, column.create)
-
- def test_table(self):
- """Test table operations raise DBOperationNotAllowed."""
- table = sqlalchemy.Table()
- with BannedDBSchemaOperations(
- banned_ops={'Table': ['create', 'alter', 'drop',
- 'insert', 'update', 'delete']},
- migration_repo=expand_repo.__file__,
- ):
- self.assertRaises(DBOperationNotAllowed, table.drop)
- self.assertRaises(DBOperationNotAllowed, table.alter)
- self.assertRaises(DBOperationNotAllowed, table.create)
- self.assertRaises(DBOperationNotAllowed, table.insert)
- self.assertRaises(DBOperationNotAllowed, table.update)
- self.assertRaises(DBOperationNotAllowed, table.delete)
-
-
-class KeystoneMigrationsCheckers(test_migrations.WalkVersionsMixin):
- """Walk over and test all sqlalchemy-migrate migrations."""
-
- migrate_file = None
- first_version = 1
- # A mapping of entity (Table, Column, ...) to operation
- banned_ops = {}
- exceptions = [
- # NOTE(xek): Reviewers: DO NOT ALLOW THINGS TO BE ADDED HERE UNLESS
- # JUSTIFICATION CAN BE PROVIDED AS TO WHY THIS WILL NOT CAUSE
- # PROBLEMS FOR ROLLING UPGRADES.
- ]
-
- @property
- def INIT_VERSION(self):
- return upgrades.INITIAL_VERSION
-
- @property
- def REPOSITORY(self):
- return repository.Repository(
- os.path.abspath(os.path.dirname(self.migrate_file))
- )
-
- @property
- def migration_api(self):
- temp = __import__('oslo_db.sqlalchemy.migration', globals(),
- locals(), ['versioning_api'], 0)
- return temp.versioning_api
-
- @property
- def migrate_engine(self):
- return self.engine
-
- def migrate_fully(self, repo_name):
- abs_path = os.path.abspath(os.path.dirname(repo_name))
- init_version = upgrades.get_init_version(abs_path=abs_path)
- schema = versioning_api.ControlledSchema.create(
- self.migrate_engine, abs_path, init_version)
- max_version = schema.repository.version().version
- upgrade = True
- err = ''
- version = versioning_api._migrate_version(
- schema, max_version, upgrade, err)
- schema.upgrade(version)
-
- def migrate_up(self, version, with_data=False):
- """Check that migrations don't cause downtime.
-
- Schema migrations can be done online, allowing for rolling upgrades.
- """
- # NOTE(xek):
- # self.exceptions contains a list of migrations where we allow the
- # banned operations. Only Migrations which don't cause
- # incompatibilities are allowed, for example dropping an index or
- # constraint.
- #
- # Please follow the guidelines outlined at:
- # https://docs.openstack.org/keystone/latest/contributor/database-migrations.html
-
- if version >= self.first_version and version not in self.exceptions:
- banned_ops = self.banned_ops
- else:
- banned_ops = None
- with BannedDBSchemaOperations(banned_ops, self.migrate_file):
- super().migrate_up(version, with_data)
-
- snake_walk = False
- downgrade = False
-
- def test_walk_versions(self):
- self.walk_versions(self.snake_walk, self.downgrade)
-
-
-class TestKeystoneExpandSchemaMigrations(KeystoneMigrationsCheckers):
-
- migrate_file = expand_repo.__file__
- first_version = 1
- # TODO(henry-nash): we should include Table update here as well, but this
- # causes the update of the migration version to appear as a banned
- # operation!
- banned_ops = {'Table': ['alter', 'drop', 'insert', 'delete'],
- 'Column': ['alter', 'drop']}
- exceptions = [
+ for op in self._banned_ops:
+ self.useFixture(
+ fixtures.MonkeyPatch('alembic.op.%s' % op, explode_lambda[op])
+ )
+
+
+class KeystoneMigrationsWalk(
+ test_fixtures.OpportunisticDBTestMixin,
+):
+ # Migrations can take a long time, particularly on underpowered CI nodes.
+ # Give them some breathing room.
+ TIMEOUT_SCALING_FACTOR = 4
+
+ BANNED_OPS = {
+ 'expand': [
+ 'alter_column',
+ 'batch_alter_table',
+ 'drop_column',
+ 'drop_constraint',
+ 'drop_index',
+ 'drop_table',
+ 'drop_table_comment',
+ # 'execute',
+ 'rename_table',
+ ],
+ 'contract': {
+ 'add_column',
+ 'bulk_insert',
+ 'create_check_constraint',
+ 'create_exclude_constraint',
+ 'create_foreign_key',
+ 'create_index',
+ 'create_primary_key',
+ 'create_table',
+ 'create_table_comment',
+ 'create_unique_constraint',
+ # 'execute',
+ 'rename_table',
+ },
+ }
+
+ BANNED_OP_EXCEPTIONS = [
# NOTE(xek, henry-nash): Reviewers: DO NOT ALLOW THINGS TO BE ADDED
# HERE UNLESS JUSTIFICATION CAN BE PROVIDED AS TO WHY THIS WILL NOT
# CAUSE PROBLEMS FOR ROLLING UPGRADES.
-
- # Migration 002 changes the column type, from datetime to timestamp in
- # the contract phase. Adding exception here to pass expand banned
- # tests, otherwise fails.
- 2,
- # NOTE(lbragstad): The expand 003 migration alters the credential table
- # to make `blob` nullable. This allows the triggers added in 003 to
- # catch writes when the `blob` attribute isn't populated. We do this so
- # that the triggers aren't aware of the encryption implementation.
- 3,
- # Migration 004 changes the password created_at column type, from
- # timestamp to datetime and updates the initial value in the contract
- # phase. Adding an exception here to pass expand banned tests,
- # otherwise fails.
- 4,
-
- # Migration 79 changes a varchar column length, doesn't
- # convert the data within that column/table and doesn't rebuild
- # indexes.
- 79
]
def setUp(self):
- super(TestKeystoneExpandSchemaMigrations, self).setUp()
-
-
-class TestKeystoneExpandSchemaMigrationsMySQL(
- db_fixtures.OpportunisticDBTestMixin,
- test_base.BaseTestCase,
- TestKeystoneExpandSchemaMigrations):
- FIXTURE = db_fixtures.MySQLOpportunisticFixture
-
- def setUp(self):
- super(TestKeystoneExpandSchemaMigrationsMySQL, self).setUp()
- self.engine = enginefacade.writer.get_engine()
- self.sessionmaker = enginefacade.writer.get_sessionmaker()
-
-
-class TestKeystoneExpandSchemaMigrationsPostgreSQL(
- db_fixtures.OpportunisticDBTestMixin,
- test_base.BaseTestCase,
- TestKeystoneExpandSchemaMigrations):
- FIXTURE = db_fixtures.PostgresqlOpportunisticFixture
-
- def setUp(self):
- super(TestKeystoneExpandSchemaMigrationsPostgreSQL, self).setUp()
+ super().setUp()
self.engine = enginefacade.writer.get_engine()
- self.sessionmaker = enginefacade.writer.get_sessionmaker()
+ self.config = upgrades._find_alembic_conf()
+ self.init_version = upgrades.ALEMBIC_INIT_VERSION
+
+ # TODO(stephenfin): Do we need this? I suspect not since we're using
+ # enginefacade.writer.get_engine() directly above
+ # Override keystone's context manager to be oslo.db's global context
+ # manager.
+ sql.core._TESTING_USE_GLOBAL_CONTEXT_MANAGER = True
+ self.addCleanup(setattr,
+ sql.core, '_TESTING_USE_GLOBAL_CONTEXT_MANAGER', False)
+ self.addCleanup(sql.cleanup)
+
+ def _migrate_up(self, connection, revision):
+ version = revision.revision
+
+ if version == self.init_version: # no tests for the initial revision
+ alembic_api.upgrade(self.config, version)
+ return
+
+ self.assertIsNotNone(
+ getattr(self, '_check_%s' % version, None),
+ (
+ 'DB Migration %s does not have a test; you must add one'
+ ) % version,
+ )
+ pre_upgrade = getattr(self, '_pre_upgrade_%s' % version, None)
+ if pre_upgrade:
+ pre_upgrade(connection)
-class TestKeystoneDataMigrations(
- KeystoneMigrationsCheckers):
+ banned_ops = []
+ if version not in self.BANNED_OP_EXCEPTIONS:
+ # there should only ever be one label, but this is safer
+ for branch_label in revision.branch_labels:
+ banned_ops.extend(self.BANNED_OPS[branch_label])
- migrate_file = data_migration_repo.__file__
- first_version = 1
- banned_ops = {'Table': ['create', 'alter', 'drop'],
- 'Column': ['create', 'alter', 'drop']}
- exceptions = [
- # NOTE(xek, henry-nash): Reviewers: DO NOT ALLOW THINGS TO BE ADDED
- # HERE UNLESS JUSTIFICATION CAN BE PROVIDED AS TO WHY THIS WILL NOT
- # CAUSE PROBLEMS FOR ROLLING UPGRADES.
+ with BannedDBSchemaOperations(banned_ops, version):
+ alembic_api.upgrade(self.config, version)
- # Migration 002 changes the column type, from datetime to timestamp in
- # the contract phase. Adding exception here to pass banned data
- # migration tests. Fails otherwise.
- 2,
- # Migration 004 changes the password created_at column type, from
- # timestamp to datetime and updates the initial value in the contract
- # phase. Adding an exception here to pass data migrations banned tests,
- # otherwise fails.
- 4
- ]
+ post_upgrade = getattr(self, '_check_%s' % version, None)
+ if post_upgrade:
+ post_upgrade(connection)
- def setUp(self):
- super(TestKeystoneDataMigrations, self).setUp()
- self.migrate_fully(expand_repo.__file__)
+ def _pre_upgrade_e25ffa003242(self, connection):
+ """This is a no-op migration."""
+ pass
+ def _check_e25ffa003242(self, connection):
+ """This is a no-op migration."""
+ pass
-class TestKeystoneDataMigrationsMySQL(
- TestKeystoneDataMigrations,
- db_fixtures.OpportunisticDBTestMixin):
- FIXTURE = db_fixtures.MySQLOpportunisticFixture
+ def _pre_upgrade_29e87d24a316(self, connection):
+ """This is a no-op migration."""
+ pass
+ def _check_29e87d24a316(self, connection):
+ """This is a no-op migration."""
+ pass
-class TestKeystoneDataMigrationsPostgreSQL(
- TestKeystoneDataMigrations,
- db_fixtures.OpportunisticDBTestMixin):
- FIXTURE = db_fixtures.PostgresqlOpportunisticFixture
+ def test_single_base_revision(self):
+ """Ensure we only have a single base revision.
+ There's no good reason for us to have diverging history, so validate
+ that only one base revision exists. This will prevent simple errors
+ where people forget to specify the base revision. If this fails for
+ your change, look for migrations that do not have a 'revises' line in
+ them.
+ """
+ script = alembic_script.ScriptDirectory.from_config(self.config)
+ self.assertEqual(1, len(script.get_bases()))
-class TestKeystoneDataMigrationsSQLite(
- TestKeystoneDataMigrations,
- db_fixtures.OpportunisticDBTestMixin):
- pass
+ def test_head_revisions(self):
+ """Ensure we only have two head revisions.
+ There's no good reason for us to have diverging history beyond the
+ expand and contract branches, so validate that only these head
+ revisions exist. This will prevent merge conflicts adding additional
+ head revision points. If this fails for your change, look for migrations
+ with a duplicate 'revises' line in them.
+ """
+ script = alembic_script.ScriptDirectory.from_config(self.config)
+ self.assertEqual(2, len(script.get_heads()))
-class TestKeystoneContractSchemaMigrations(
- KeystoneMigrationsCheckers):
+ def test_walk_versions(self):
+ with self.engine.begin() as connection:
+ self.config.attributes['connection'] = connection
+ script = alembic_script.ScriptDirectory.from_config(self.config)
+ revisions = [x for x in script.walk_revisions()]
+
+ # for some reason, 'walk_revisions' gives us the revisions in
+ # reverse chronological order so we have to invert this
+ revisions.reverse()
+ self.assertEqual(revisions[0].revision, self.init_version)
+
+ for revision in revisions:
+ LOG.info('Testing revision %s', revision.revision)
+ self._migrate_up(connection, revision)
+
+ def _get_head_from_file(self, branch):
+ path = os.path.join(
+ os.path.dirname(upgrades.__file__),
+ 'migrations',
+ 'versions',
+ f'{branch.upper()}_HEAD',
+ )
- migrate_file = contract_repo.__file__
- first_version = 1
- # TODO(henry-nash): we should include Table update here as well, but this
- # causes the update of the migration version to appear as a banned
- # operation!
- banned_ops = {'Table': ['create', 'insert', 'delete'],
- 'Column': ['create']}
- exceptions = [
- # NOTE(xek, henry-nash): Reviewers: DO NOT ALLOW THINGS TO BE ADDED
- # HERE UNLESS JUSTIFICATION CAN BE PROVIDED AS TO WHY THIS WILL NOT
- # CAUSE PROBLEMS FOR ROLLING UPGRADES.
+ with open(path) as fh:
+ return fh.read().strip()
- # Migration 002 changes the column type, from datetime to timestamp.
- # To do this, the column is first dropped and recreated. This should
- # not have any negative impact on a rolling upgrade deployment.
- 2,
- # Migration 004 changes the password created_at column type, from
- # timestamp to datetime and updates the created_at value. This is
- # likely not going to impact a rolling upgrade as the contract repo is
- # executed once the code has been updated; thus the created_at column
- # would be populated for any password changes. That being said, there
- # could be a performance issue for existing large password tables, as
- # the migration is not batched. However, it's a compromise and not
- # likely going to be a problem for operators.
- 4,
- # Migration 013 updates a foreign key constraint at the federated_user
- # table. It is a composite key pointing to the procotol.id and
- # protocol.idp_id columns. Since we can't create a new foreign key
- # before dropping the old one and the operations happens in the same
- # upgrade phase, adding an exception here to pass the contract
- # banned tests.
- 13
- ]
+ def test_db_version_alembic(self):
+ upgrades.offline_sync_database_to_version(engine=self.engine)
- def setUp(self):
- super(TestKeystoneContractSchemaMigrations, self).setUp()
- self.migrate_fully(expand_repo.__file__)
- self.migrate_fully(data_migration_repo.__file__)
+ for branch in (upgrades.EXPAND_BRANCH, upgrades.CONTRACT_BRANCH):
+ head = self._get_head_from_file(branch)
+ self.assertEqual(head, upgrades.get_db_version(branch))
-class TestKeystoneContractSchemaMigrationsMySQL(
- TestKeystoneContractSchemaMigrations,
- db_fixtures.OpportunisticDBTestMixin):
- FIXTURE = db_fixtures.MySQLOpportunisticFixture
+class TestMigrationsWalkSQLite(
+ KeystoneMigrationsWalk,
+ test_fixtures.OpportunisticDBTestMixin,
+ unit.TestCase,
+):
+ pass
-class TestKeystoneContractSchemaMigrationsPostgreSQL(
- TestKeystoneContractSchemaMigrations,
- db_fixtures.OpportunisticDBTestMixin):
- FIXTURE = db_fixtures.PostgresqlOpportunisticFixture
+class TestMigrationsWalkMySQL(
+ KeystoneMigrationsWalk,
+ test_fixtures.OpportunisticDBTestMixin,
+ unit.TestCase,
+):
+ FIXTURE = test_fixtures.MySQLOpportunisticFixture
-class TestKeystoneContractSchemaMigrationsSQLite(
- TestKeystoneContractSchemaMigrations,
- db_fixtures.OpportunisticDBTestMixin):
- # In Sqlite an alter will appear as a create, so if we check for creates
- # we will get false positives.
- def setUp(self):
- super(TestKeystoneContractSchemaMigrationsSQLite, self).setUp()
- self.banned_ops['Table'].remove('create')
+class TestMigrationsWalkPostgreSQL(
+ KeystoneMigrationsWalk,
+ test_fixtures.OpportunisticDBTestMixin,
+ unit.TestCase,
+):
+ FIXTURE = test_fixtures.PostgresqlOpportunisticFixture
diff --git a/keystone/tests/unit/test_sql_upgrade.py b/keystone/tests/unit/test_sql_upgrade.py
index 78f644977..55440c955 100644
--- a/keystone/tests/unit/test_sql_upgrade.py
+++ b/keystone/tests/unit/test_sql_upgrade.py
@@ -39,28 +39,23 @@ For further information, see `oslo.db documentation
all data will be lost.
"""
-import glob
-import os
-
import fixtures
-from migrate.versioning import api as migrate_api
from migrate.versioning import script
-from oslo_db import exception as db_exception
+from oslo_db import options as db_options
from oslo_db.sqlalchemy import enginefacade
from oslo_db.sqlalchemy import test_fixtures as db_fixtures
from oslo_log import fixture as log_fixture
from oslo_log import log
-from oslotest import base as test_base
import sqlalchemy.exc
from keystone.cmd import cli
from keystone.common import sql
from keystone.common.sql import upgrades
-from keystone.credential.providers import fernet as credential_fernet
+import keystone.conf
from keystone.tests import unit
from keystone.tests.unit import ksfixtures
-from keystone.tests.unit.ksfixtures import database
+CONF = keystone.conf.CONF
# NOTE(morganfainberg): This should be updated when each DB migration collapse
# is done to mirror the expected structure of the DB in the format of
@@ -229,54 +224,10 @@ INITIAL_TABLE_STRUCTURE = {
}
-class Repository:
-
- def __init__(self, engine, repo_name):
- self.repo_name = repo_name
-
- self.repo_path = upgrades._get_migrate_repo_path(self.repo_name)
- self.min_version = upgrades.INITIAL_VERSION
- self.schema_ = migrate_api.ControlledSchema.create(
- engine, self.repo_path, self.min_version,
- )
- self.max_version = self.schema_.repository.version().version
-
- def upgrade(self, version=None, current_schema=None):
- version = version or self.max_version
- err = ''
- upgrade = True
- version = migrate_api._migrate_version(
- self.schema_, version, upgrade, err,
- )
- upgrades._validate_upgrade_order(
- self.repo_name, target_repo_version=version,
- )
- if not current_schema:
- current_schema = self.schema_
- changeset = current_schema.changeset(version)
- for ver, change in changeset:
- self.schema_.runchange(ver, change, changeset.step)
-
- if self.schema_.version != version:
- raise Exception(
- 'Actual version (%s) of %s does not equal expected '
- 'version (%s)' % (
- self.schema_.version, self.repo_name, version,
- ),
- )
-
- @property
- def version(self):
- with sql.session_for_read() as session:
- return upgrades._migrate_db_version(
- session.get_bind(), self.repo_path, self.min_version,
- )
-
-
class MigrateBase(
db_fixtures.OpportunisticDBTestMixin,
- test_base.BaseTestCase,
):
+ """Test complete orchestration between all database phases."""
def setUp(self):
super().setUp()
@@ -292,10 +243,7 @@ class MigrateBase(
# modules have the same name (001_awesome.py).
self.addCleanup(script.PythonScript.clear)
- # NOTE(dstanek): SQLAlchemy's migrate makes some assumptions in the
- # SQLite driver about the lack of foreign key enforcement.
- database.initialize_sql_session(self.engine.url,
- enforce_sqlite_fks=False)
+ db_options.set_defaults(CONF, connection=self.engine.url)
# Override keystone's context manager to be oslo.db's global context
# manager.
@@ -304,29 +252,13 @@ class MigrateBase(
sql.core, '_TESTING_USE_GLOBAL_CONTEXT_MANAGER', False)
self.addCleanup(sql.cleanup)
- self.repos = {
- upgrades.EXPAND_BRANCH: Repository(
- self.engine, upgrades.EXPAND_BRANCH,
- ),
- upgrades.DATA_MIGRATION_BRANCH: Repository(
- self.engine, upgrades.DATA_MIGRATION_BRANCH,
- ),
- upgrades.CONTRACT_BRANCH: Repository(
- self.engine, upgrades.CONTRACT_BRANCH,
- ),
- }
-
- def expand(self, *args, **kwargs):
+ def expand(self):
"""Expand database schema."""
- self.repos[upgrades.EXPAND_BRANCH].upgrade(*args, **kwargs)
+ upgrades.expand_schema(engine=self.engine)
- def migrate(self, *args, **kwargs):
- """Migrate data."""
- self.repos[upgrades.DATA_MIGRATION_BRANCH].upgrade(*args, **kwargs)
-
- def contract(self, *args, **kwargs):
+ def contract(self):
"""Contract database schema."""
- self.repos[upgrades.CONTRACT_BRANCH].upgrade(*args, **kwargs)
+ upgrades.contract_schema(engine=self.engine)
@property
def metadata(self):
@@ -334,7 +266,9 @@ class MigrateBase(
return sqlalchemy.MetaData(self.engine)
def load_table(self, name):
- table = sqlalchemy.Table(name, self.metadata, autoload=True)
+ table = sqlalchemy.Table(
+ name, self.metadata, autoload_with=self.engine,
+ )
return table
def assertTableDoesNotExist(self, table_name):
@@ -342,7 +276,9 @@ class MigrateBase(
# Switch to a different metadata otherwise you might still
# detect renamed or dropped tables
try:
- sqlalchemy.Table(table_name, self.metadata, autoload=True)
+ sqlalchemy.Table(
+ table_name, self.metadata, autoload_with=self.engine,
+ )
except sqlalchemy.exc.NoSuchTableError:
pass
else:
@@ -357,210 +293,8 @@ class MigrateBase(
self.assertCountEqual(expected_cols, actual_cols,
'%s table' % table_name)
-
-class ExpandSchemaUpgradeTests(MigrateBase):
-
- def test_start_version_db_init_version(self):
- self.assertEqual(
- self.repos[upgrades.EXPAND_BRANCH].min_version,
- self.repos[upgrades.EXPAND_BRANCH].version)
-
- def test_blank_db_to_start(self):
- self.assertTableDoesNotExist('user')
-
- def test_upgrade_add_initial_tables(self):
- self.expand(upgrades.INITIAL_VERSION + 1)
- self.check_initial_table_structure()
-
- def check_initial_table_structure(self):
- for table in INITIAL_TABLE_STRUCTURE:
- self.assertTableColumns(table, INITIAL_TABLE_STRUCTURE[table])
-
-
-class MySQLOpportunisticExpandSchemaUpgradeTestCase(
- ExpandSchemaUpgradeTests,
-):
- FIXTURE = db_fixtures.MySQLOpportunisticFixture
-
-
-class PostgreSQLOpportunisticExpandSchemaUpgradeTestCase(
- ExpandSchemaUpgradeTests,
-):
- FIXTURE = db_fixtures.PostgresqlOpportunisticFixture
-
-
-class DataMigrationUpgradeTests(MigrateBase):
-
- def setUp(self):
- # Make sure the expand repo is fully upgraded, since the data migration
- # phase is only run after this is upgraded
- super().setUp()
- self.expand()
-
- def test_start_version_db_init_version(self):
- self.assertEqual(
- self.repos[upgrades.DATA_MIGRATION_BRANCH].min_version,
- self.repos[upgrades.DATA_MIGRATION_BRANCH].version,
- )
-
-
-class MySQLOpportunisticDataMigrationUpgradeTestCase(
- DataMigrationUpgradeTests,
-):
- FIXTURE = db_fixtures.MySQLOpportunisticFixture
-
-
-class PostgreSQLOpportunisticDataMigrationUpgradeTestCase(
- DataMigrationUpgradeTests,
-):
- FIXTURE = db_fixtures.PostgresqlOpportunisticFixture
-
-
-class ContractSchemaUpgradeTests(MigrateBase, unit.TestCase):
-
- def setUp(self):
- # Make sure the expand and data migration repos are fully
- # upgraded, since the contract phase is only run after these are
- # upgraded.
- super().setUp()
- self.useFixture(
- ksfixtures.KeyRepository(
- self.config_fixture,
- 'credential',
- credential_fernet.MAX_ACTIVE_KEYS
- )
- )
- self.expand()
- self.migrate()
-
- def test_start_version_db_init_version(self):
- self.assertEqual(
- self.repos[upgrades.CONTRACT_BRANCH].min_version,
- self.repos[upgrades.CONTRACT_BRANCH].version,
- )
-
-
-class MySQLOpportunisticContractSchemaUpgradeTestCase(
- ContractSchemaUpgradeTests,
-):
- FIXTURE = db_fixtures.MySQLOpportunisticFixture
-
-
-class PostgreSQLOpportunisticContractSchemaUpgradeTestCase(
- ContractSchemaUpgradeTests,
-):
- FIXTURE = db_fixtures.PostgresqlOpportunisticFixture
-
-
-class VersionTests(MigrateBase):
-
- def test_migrate_repos_stay_in_lockstep(self):
- """Rolling upgrade repositories should always stay in lockstep.
-
- By maintaining a single "latest" version number in each of the three
- migration repositories (expand, data migrate, and contract), we can
- trivially prevent operators from "doing the wrong thing", such as
- running upgrades operations out of order (for example, you should not
- be able to run data migration 5 until schema expansion 5 has been run).
-
- For example, even if your rolling upgrade task *only* involves adding a
- new column with a reasonable default, and doesn't require any triggers,
- data migration, etc, you still need to create "empty" upgrade steps in
- the data migration and contract repositories with the same version
- number as the expansion.
-
- For more information, see "Database Migrations" here:
-
- https://docs.openstack.org/keystone/latest/contributor/database-migrations.html
-
- """
- # Transitive comparison: expand == data migration == contract
- self.assertEqual(
- self.repos[upgrades.EXPAND_BRANCH].max_version,
- self.repos[upgrades.DATA_MIGRATION_BRANCH].max_version,
- )
- self.assertEqual(
- self.repos[upgrades.DATA_MIGRATION_BRANCH].max_version,
- self.repos[upgrades.CONTRACT_BRANCH].max_version,
- )
-
- def test_migrate_repos_file_names_have_prefix(self):
- """Migration files should be unique to avoid caching errors.
-
- This test enforces migration files to include a prefix (expand,
- migrate, contract) in order to keep them unique. Here is the required
- format: [version]_[prefix]_[description]. For example:
- 001_expand_add_created_column.py
-
- """
- versions_path = '/versions'
-
- # test for expand prefix, e.g. 001_expand_new_fk_constraint.py
- repo_path = self.repos[upgrades.EXPAND_BRANCH].repo_path
- expand_list = glob.glob(repo_path + versions_path + '/*.py')
- self.assertRepoFileNamePrefix(expand_list, 'expand')
-
- # test for migrate prefix, e.g. 001_migrate_new_fk_constraint.py
- repo_path = self.repos[upgrades.DATA_MIGRATION_BRANCH].repo_path
- migrate_list = glob.glob(repo_path + versions_path + '/*.py')
- self.assertRepoFileNamePrefix(migrate_list, 'migrate')
-
- # test for contract prefix, e.g. 001_contract_new_fk_constraint.py
- repo_path = self.repos[upgrades.CONTRACT_BRANCH].repo_path
- contract_list = glob.glob(repo_path + versions_path + '/*.py')
- self.assertRepoFileNamePrefix(contract_list, 'contract')
-
- def assertRepoFileNamePrefix(self, repo_list, prefix):
- if len(repo_list) > 1:
- # grab the file name for the max version
- file_name = os.path.basename(sorted(repo_list)[-2])
- # pattern for the prefix standard, ignoring placeholder, init files
- pattern = (
- '^[0-9]{3,}_PREFIX_|^[0-9]{3,}_placeholder.py|^__init__.py')
- pattern = pattern.replace('PREFIX', prefix)
- msg = 'Missing required prefix %s in $file_name' % prefix
- self.assertRegex(file_name, pattern, msg)
-
-
-class MigrationValidation(MigrateBase, unit.TestCase):
- """Test validation of database between database phases."""
-
- def _set_db_sync_command_versions(self):
- self.expand(upgrades.INITIAL_VERSION + 1)
- self.migrate(upgrades.INITIAL_VERSION + 1)
- self.contract(upgrades.INITIAL_VERSION + 1)
- for version in (
- upgrades.get_db_version('expand'),
- upgrades.get_db_version('data_migration'),
- upgrades.get_db_version('contract'),
- ):
- self.assertEqual(upgrades.INITIAL_VERSION + 1, version)
-
- def test_running_db_sync_migrate_ahead_of_expand_fails(self):
- self._set_db_sync_command_versions()
- self.assertRaises(
- db_exception.DBMigrationError,
- self.migrate,
- upgrades.INITIAL_VERSION + 2,
- "You are attempting to upgrade migrate ahead of expand",
- )
-
- def test_running_db_sync_contract_ahead_of_migrate_fails(self):
- self._set_db_sync_command_versions()
- self.assertRaises(
- db_exception.DBMigrationError,
- self.contract,
- upgrades.INITIAL_VERSION + 2,
- "You are attempting to upgrade contract ahead of migrate",
- )
-
-
-class FullMigration(MigrateBase, unit.TestCase):
- """Test complete orchestration between all database phases."""
-
def test_db_sync_check(self):
checker = cli.DbSync()
- latest_version = self.repos[upgrades.EXPAND_BRANCH].max_version
# If the expand repository doesn't exist yet, then we need to make sure
# we advertise that `--expand` must be run first.
@@ -569,25 +303,9 @@ class FullMigration(MigrateBase, unit.TestCase):
self.assertIn("keystone-manage db_sync --expand", log_info.output)
self.assertEqual(status, 2)
- # Assert the correct message is printed when expand is the first step
- # that needs to run
- self.expand(upgrades.INITIAL_VERSION + 1)
- log_info = self.useFixture(fixtures.FakeLogger(level=log.INFO))
- status = checker.check_db_sync_status()
- self.assertIn("keystone-manage db_sync --expand", log_info.output)
- self.assertEqual(status, 2)
-
- # Assert the correct message is printed when expand is farther than
- # migrate
- self.expand(latest_version)
- log_info = self.useFixture(fixtures.FakeLogger(level=log.INFO))
- status = checker.check_db_sync_status()
- self.assertIn("keystone-manage db_sync --migrate", log_info.output)
- self.assertEqual(status, 3)
-
- # Assert the correct message is printed when migrate is farther than
+ # Assert the correct message is printed when migrate is ahead of
# contract
- self.migrate(latest_version)
+ self.expand()
log_info = self.useFixture(fixtures.FakeLogger(level=log.INFO))
status = checker.check_db_sync_status()
self.assertIn("keystone-manage db_sync --contract", log_info.output)
@@ -595,47 +313,25 @@ class FullMigration(MigrateBase, unit.TestCase):
# Assert the correct message gets printed when all commands are on
# the same version
- self.contract(latest_version)
+ self.contract()
log_info = self.useFixture(fixtures.FakeLogger(level=log.INFO))
status = checker.check_db_sync_status()
self.assertIn("All db_sync commands are upgraded", log_info.output)
self.assertEqual(status, 0)
- def test_out_of_sync_db_migration_fails(self):
- # We shouldn't allow for operators to accidentally run migration out of
- # order. This test ensures we fail if we attempt to upgrade the
- # contract repository ahead of the expand or migrate repositories.
- self.expand(upgrades.INITIAL_VERSION + 1)
- self.migrate(upgrades.INITIAL_VERSION + 1)
- self.assertRaises(
- db_exception.DBMigrationError,
- self.contract,
- upgrades.INITIAL_VERSION + 2,
- )
-
- def test_migration_079_expand_update_local_id_limit(self):
- self.expand(78)
- self.migrate(78)
- self.contract(78)
-
- id_mapping_table = sqlalchemy.Table('id_mapping',
- self.metadata, autoload=True)
- # assert local_id column is a string of 64 characters (before)
- self.assertEqual('VARCHAR(64)', str(id_mapping_table.c.local_id.type))
+ def test_upgrade_add_initial_tables(self):
+ self.expand()
+ for table in INITIAL_TABLE_STRUCTURE:
+ self.assertTableColumns(table, INITIAL_TABLE_STRUCTURE[table])
- self.expand(79)
- self.migrate(79)
- self.contract(79)
- id_mapping_table = sqlalchemy.Table('id_mapping',
- self.metadata, autoload=True)
- # assert local_id column is a string of 255 characters (after)
- self.assertEqual('VARCHAR(255)', str(id_mapping_table.c.local_id.type))
+class FullMigrationSQLite(MigrateBase, unit.TestCase):
+ pass
-class MySQLOpportunisticFullMigration(FullMigration):
+class FullMigrationMySQL(MigrateBase, unit.TestCase):
FIXTURE = db_fixtures.MySQLOpportunisticFixture
-class PostgreSQLOpportunisticFullMigration(FullMigration):
+class FullMigrationPostgreSQL(MigrateBase, unit.TestCase):
FIXTURE = db_fixtures.PostgresqlOpportunisticFixture
diff --git a/keystone/tests/unit/token/test_fernet_provider.py b/keystone/tests/unit/token/test_fernet_provider.py
index cc2a49d0b..997b5e6f7 100644
--- a/keystone/tests/unit/token/test_fernet_provider.py
+++ b/keystone/tests/unit/token/test_fernet_provider.py
@@ -17,6 +17,8 @@ import os
from unittest import mock
import uuid
+import fixtures
+from oslo_log import log
from oslo_utils import timeutils
from keystone import auth
@@ -26,6 +28,7 @@ from keystone.common import utils
import keystone.conf
from keystone import exception
from keystone.federation import constants as federation_constants
+from keystone.models import token_model
from keystone.tests import unit
from keystone.tests.unit import default_fixtures
from keystone.tests.unit import ksfixtures
@@ -51,6 +54,59 @@ class TestFernetTokenProvider(unit.TestCase):
self.provider.validate_token,
token_id)
+ def test_log_warning_when_token_exceeds_max_token_size_default(self):
+ self.logging = self.useFixture(fixtures.FakeLogger(level=log.INFO))
+
+ token = token_model.TokenModel()
+ token.user_id = '0123456789abcdef0123456789abcdef0123456789abcdef'
+ token.project_id = '0123456789abcdef0123456789abcdef0123456789abcdef'
+ token.expires_at = utils.isotime(
+ provider.default_expire_time(), subsecond=True)
+ token.methods = ['password']
+ token.audit_id = provider.random_urlsafe_str()
+ token_id, issued_at = self.provider.generate_id_and_issued_at(token)
+ expected_output = (
+ f'Fernet token created with length of {len(token_id)} characters, '
+ 'which exceeds 255 characters'
+ )
+ self.assertIn(expected_output, self.logging.output)
+
+ def test_log_warning_when_token_exceeds_max_token_size_override(self):
+ self.logging = self.useFixture(fixtures.FakeLogger(level=log.INFO))
+ self.config_fixture.config(max_token_size=250)
+
+ token = token_model.TokenModel()
+ token.user_id = '0123456789abcdef0123456789abcdef0123456789abcdef'
+ token.project_id = '0123456789abcdef0123456789abcdef0123456789abcdef'
+ token.expires_at = utils.isotime(
+ provider.default_expire_time(), subsecond=True)
+ token.methods = ['password']
+ token.audit_id = provider.random_urlsafe_str()
+ token_id, issued_at = self.provider.generate_id_and_issued_at(token)
+ expected_output = (
+ f'Fernet token created with length of {len(token_id)} characters, '
+ 'which exceeds 250 characters'
+ )
+ self.assertIn(expected_output, self.logging.output)
+
+ def test_no_warning_when_token_does_not_exceed_max_token_size(self):
+ self.config_fixture.config(max_token_size=300)
+ self.logging = self.useFixture(fixtures.FakeLogger(level=log.INFO))
+
+ token = token_model.TokenModel()
+ token.user_id = '0123456789abcdef0123456789abcdef0123456789abcdef'
+ token.project_id = '0123456789abcdef0123456789abcdef0123456789abcdef'
+ token.expires_at = utils.isotime(
+ provider.default_expire_time(), subsecond=True)
+ token.methods = ['password']
+ token.audit_id = provider.random_urlsafe_str()
+ token_id, issued_at = self.provider.generate_id_and_issued_at(token)
+ expected_output = (
+ f'Fernet token created with length of {len(token_id)} characters, '
+ 'which exceeds 255 characters'
+ )
+ self.assertNotIn(expected_output, self.logging.output)
+
class TestValidate(unit.TestCase):
def setUp(self):
diff --git a/keystone/token/token_formatters.py b/keystone/token/token_formatters.py
index bb407ab09..76220b0ef 100644
--- a/keystone/token/token_formatters.py
+++ b/keystone/token/token_formatters.py
@@ -156,10 +156,11 @@ class TokenFormatter(object):
# characters. Even though Keystone isn't storing a Fernet token
# anywhere, we can't say it isn't being stored somewhere else with
# those kind of backend constraints.
- if len(token) > 255:
- LOG.info('Fernet token created with length of %d '
- 'characters, which exceeds 255 characters',
- len(token))
+ if len(token) > CONF.max_token_size:
+ LOG.info(
+ f'Fernet token created with length of {len(token)} '
+ f'characters, which exceeds {CONF.max_token_size} characters',
+ )
return token
diff --git a/lower-constraints.txt b/lower-constraints.txt
deleted file mode 100644
index 71f497fbd..000000000
--- a/lower-constraints.txt
+++ /dev/null
@@ -1,68 +0,0 @@
-amqp==5.0.0
-Babel==2.3.4
-bashate==0.5.1
-bcrypt==3.1.3
-coverage==4.0
-cryptography==2.7
-docutils==0.14
-dogpile.cache==1.0.2
-fixtures==3.0.0
-flake8-docstrings==0.2.1.post1
-flake8==2.6.0
-Flask===1.0.2
-Flask-RESTful===0.3.5
-freezegun==0.3.6
-hacking==1.1.0
-iso8601==0.1.12
-jsonschema==3.2.0
-keystoneauth1==3.4.0
-keystonemiddleware==7.0.0
-ldappool===2.3.1
-lxml==4.5.0
-mock==2.0.0
-msgpack==0.5.0
-oauthlib==0.6.2
-os-api-ref==1.4.0
-oslo.cache==1.26.0
-oslo.concurrency==3.26.0
-oslo.config==6.8.0
-oslo.context==2.22.0
-oslo.db==6.0.0
-oslo.i18n==3.15.3
-oslo.log==3.44.0
-oslo.messaging==5.29.0
-oslo.middleware==3.31.0
-oslo.policy==3.10.0
-oslo.serialization==2.18.0
-oslo.upgradecheck==1.3.0
-oslo.utils==3.33.0
-oslotest==3.2.0
-osprofiler==1.4.0
-passlib==1.7.0
-pbr==2.0.0
-pep257==0.7.0
-pika==0.10.0
-pycadf==1.1.0
-pycodestyle==2.0.0
-python-ldap===3.0.0
-pymongo===3.0.2
-pysaml2==5.0.0
-PyJWT==1.6.1
-PyMySQL==0.8.0
-python-keystoneclient==3.8.0
-python-memcached===1.56
-pytz==2013.6
-requests==2.14.2
-scrypt==0.8.0
-six==1.10.0
-sqlalchemy-migrate==0.13.0
-SQLAlchemy==1.3.0
-stestr==1.0.0
-stevedore==1.20.0
-tempest==17.1.0
-testtools==2.2.0
-urllib3==1.22
-vine==1.3.0
-WebOb==1.7.1
-WebTest==2.0.27
-Werkzeug==0.14.1
diff --git a/releasenotes/notes/bug-1926483-a77ab887e0e7f5c9.yaml b/releasenotes/notes/bug-1926483-a77ab887e0e7f5c9.yaml
new file mode 100644
index 000000000..040811b79
--- /dev/null
+++ b/releasenotes/notes/bug-1926483-a77ab887e0e7f5c9.yaml
@@ -0,0 +1,7 @@
+---
+fixes:
+ - |
+ [`bug 1926483 <https://bugs.launchpad.net/keystone/+bug/1926483>`_]
+ Keystone will only log warnings about token length for Fernet tokens when
+ the token length exceeds the value of `keystone.conf [DEFAULT]
+ max_token_size`.
diff --git a/releasenotes/notes/drop-python-3-6-and-3-7-dc90b86cedced92b.yaml b/releasenotes/notes/drop-python-3-6-and-3-7-dc90b86cedced92b.yaml
new file mode 100644
index 000000000..db420d739
--- /dev/null
+++ b/releasenotes/notes/drop-python-3-6-and-3-7-dc90b86cedced92b.yaml
@@ -0,0 +1,5 @@
+---
+upgrade:
+ - |
+ Python 3.6 & 3.7 support has been dropped. The minimum version of Python now
+ supported is Python 3.8.
diff --git a/releasenotes/notes/switch-to-alembic-1fa5248f0ce824ae.yaml b/releasenotes/notes/switch-to-alembic-1fa5248f0ce824ae.yaml
new file mode 100644
index 000000000..833837dcb
--- /dev/null
+++ b/releasenotes/notes/switch-to-alembic-1fa5248f0ce824ae.yaml
@@ -0,0 +1,23 @@
+---
+upgrade:
+ - |
+ The database migration engine has changed from `sqlalchemy-migrate`__ to
+ `alembic`__. For most deployments, this should have minimal to no impact
+ and the switch should be mostly transparent. The main user-facing impact is
+ the change in schema versioning. While sqlalchemy-migrate used a linear,
+ integer-based versioning scheme, which required placeholder migrations to
+ allow for potential migration backports, alembic uses a distributed version
+ control-like schema where a migration's ancestor is encoded in the file and
+ branches are possible. The alembic migration files therefore use an
+ arbitrary UUID-like naming scheme and the ``keystone-manage db_version``
+ command returns such a version.
+
+ When the ``keystone-manage db_sync`` command is run without options or
+ with the ``--expand`` or ``--contract`` options, all remaining
+ sqlalchemy-migrate-based migrations will be automatically applied.
+
+ Data migrations are now included in the expand phase and the ``--migrate``
+ option is now a no-op. It may be removed in a future release.
+
+ .. __: https://sqlalchemy-migrate.readthedocs.io/en/latest/
+ .. __: https://alembic.sqlalchemy.org/en/latest/
diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst
index aae197f1e..620554687 100644
--- a/releasenotes/source/index.rst
+++ b/releasenotes/source/index.rst
@@ -19,6 +19,7 @@
:maxdepth: 1
unreleased
+ yoga
xena
wallaby
victoria
diff --git a/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po b/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po
index 600d9e0b0..713bd7089 100644
--- a/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po
+++ b/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po
@@ -1,15 +1,16 @@
# Andi Chandler <andi@gowling.com>, 2017. #zanata
# Andi Chandler <andi@gowling.com>, 2018. #zanata
# Andi Chandler <andi@gowling.com>, 2020. #zanata
+# Andi Chandler <andi@gowling.com>, 2022. #zanata
msgid ""
msgstr ""
"Project-Id-Version: Keystone Release Notes\n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2021-01-08 19:54+0000\n"
+"POT-Creation-Date: 2022-07-01 18:09+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"PO-Revision-Date: 2020-12-19 01:35+0000\n"
+"PO-Revision-Date: 2022-06-20 11:10+0000\n"
"Last-Translator: Andi Chandler <andi@gowling.com>\n"
"Language-Team: English (United Kingdom)\n"
"Language: en_GB\n"
@@ -81,6 +82,9 @@ msgstr "13.0.2"
msgid "13.0.3"
msgstr "13.0.3"
+msgid "13.0.4-9"
+msgstr "13.0.4-9"
+
msgid "14.0.0"
msgstr "14.0.0"
@@ -93,8 +97,8 @@ msgstr "14.1.0"
msgid "14.2.0"
msgstr "14.2.0"
-msgid "14.2.0-4"
-msgstr "14.2.0-4"
+msgid "14.2.0-7"
+msgstr "14.2.0-7"
msgid "15.0.0"
msgstr "15.0.0"
@@ -102,17 +106,41 @@ msgstr "15.0.0"
msgid "15.0.1"
msgstr "15.0.1"
+msgid "15.0.1-9"
+msgstr "15.0.1-9"
+
msgid "16.0.0"
msgstr "16.0.0"
msgid "16.0.1"
msgstr "16.0.1"
+msgid "16.0.2"
+msgstr "16.0.2"
+
msgid "17.0.0"
msgstr "17.0.0"
-msgid "17.0.0-6"
-msgstr "17.0.0-6"
+msgid "17.0.1"
+msgstr "17.0.1"
+
+msgid "18.0.0"
+msgstr "18.0.0"
+
+msgid "18.1.0"
+msgstr "18.1.0"
+
+msgid "19.0.0"
+msgstr "19.0.0"
+
+msgid "19.0.0-8"
+msgstr "19.0.0-8"
+
+msgid "20.0.0"
+msgstr "20.0.0"
+
+msgid "21.0.0"
+msgstr "21.0.0"
msgid "8.0.1"
msgstr "8.0.1"
@@ -297,6 +325,15 @@ msgstr ""
"Certain variables in ``keystone.conf`` now have options, which determine if "
"the user's setting is valid."
+msgid ""
+"Change the min value of pool_retry_max to 1. Setting this value to 0 caused "
+"the pool to fail before connecting to ldap, always raising "
+"MaxConnectionReachedError."
+msgstr ""
+"Change the min value of pool_retry_max to 1. Setting this value to 0 caused "
+"the pool to fail before connecting to ldap, always raising "
+"MaxConnectionReachedError."
+
msgid "Configuring per-Identity Provider WebSSO is now supported."
msgstr "Configuring per-Identity Provider WebSSO is now supported."
@@ -463,6 +500,24 @@ msgstr ""
"this option is set back to `False`."
msgid ""
+"If you are affected by this bug, a fix in the keystone database will be "
+"needed so we recommend to dump the users' tables before doing this process:"
+msgstr ""
+"If you are affected by this bug, a fix in the keystone database will be "
+"needed so we recommend to dump the users' tables before doing this process:"
+
+msgid ""
+"If you are affected by this bug, you must remove stale role assignments "
+"manually. The following is an example SQL statement you can use to fix the "
+"issue, but you should verify it's applicability to your deployment's SQL "
+"implementation and version."
+msgstr ""
+"If you are affected by this bug, you must remove stale role assignments "
+"manually. The following is an example SQL statement you can use to fix the "
+"issue, but you should verify it's applicability to your deployment's SQL "
+"implementation and version."
+
+msgid ""
"In ``keystone-paste.ini``, using ``paste.filter_factory`` is deprecated in "
"favor of the \"use\" directive, specifying an entrypoint."
msgstr ""
@@ -681,6 +736,9 @@ msgstr ""
msgid "Queens Series Release Notes"
msgstr "Queens Series Release Notes"
+msgid "Rocky Series Release Notes"
+msgstr "Rocky Series Release Notes"
+
msgid ""
"Routes and SQL backends for the contrib extensions have been removed, they "
"have been incorporated into keystone and are no longer optional. This "
@@ -772,6 +830,9 @@ msgstr ""
"``validate_token(self, token_ref)``. If using a custom token provider, "
"update the custom provider accordingly."
+msgid "Stein Series Release Notes"
+msgstr "Stein Series Release Notes"
+
msgid ""
"Support for writing to LDAP has been removed. See ``Other Notes`` for more "
"details."
@@ -1398,6 +1459,9 @@ msgstr ""
msgid "Tokens can now be cached when issued."
msgstr "Tokens can now be cached when issued."
+msgid "Train Series Release Notes"
+msgstr "Train Series Release Notes"
+
msgid ""
"UUID token provider ``[token] provider=uuid`` has been deprecated in favor "
"of Fernet tokens ``[token] provider=fernet``. With Fernet tokens becoming "
@@ -1433,6 +1497,15 @@ msgstr ""
"Using the full path to the driver class is deprecated in favour of using the "
"entrypoint. In the Mitaka release, the entrypoint must be used."
+msgid "Ussuri Series Release Notes"
+msgstr "Ussuri Series Release Notes"
+
+msgid "Victoria Series Release Notes"
+msgstr "Victoria Series Release Notes"
+
+msgid "Wallaby Series Release Notes"
+msgstr "Wallaby Series Release Notes"
+
msgid ""
"We have added the ``password_expires_at`` attribute to the user response "
"object."
@@ -1454,6 +1527,12 @@ msgstr ""
"Write support for the LDAP has been removed in favour of read-only support. "
"The following operations are no longer supported for LDAP:"
+msgid "Xena Series Release Notes"
+msgstr "Xena Series Release Notes"
+
+msgid "Yoga Series Release Notes"
+msgstr "Yoga Series Release Notes"
+
msgid ""
"[`Bug 1645487 <https://bugs.launchpad.net/keystone/+bug/1645487>`_] Added a "
"new PCI-DSS feature that will require users to immediately change their "
@@ -1792,6 +1871,13 @@ msgstr "lt - password expires before the timestamp"
msgid "lte - password expires at or before timestamp"
msgstr "lte - password expires at or before timestamp"
+msgid ""
+"mysqldump -h <mysql host> -p -P <mysql port> -u keystone keystone "
+"federated_user local_user user > user_tables.sql"
+msgstr ""
+"mysqldump -h <mysql host> -p -P <mysql port> -u keystone keystone "
+"federated_user local_user user > user_tables.sql"
+
msgid "neq - password expires not at the timestamp"
msgstr "neq - password expires not at the timestamp"
diff --git a/releasenotes/source/yoga.rst b/releasenotes/source/yoga.rst
new file mode 100644
index 000000000..7cd5e908a
--- /dev/null
+++ b/releasenotes/source/yoga.rst
@@ -0,0 +1,6 @@
+=========================
+Yoga Series Release Notes
+=========================
+
+.. release-notes::
+ :branch: stable/yoga
diff --git a/requirements.txt b/requirements.txt
index c7e4605f3..5688af2ff 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,3 +1,7 @@
+# Requirements lower bounds listed here are our best effort to keep them up to
+# date but we do not test them so no guarantee of having them all correct. If
+# you find any incorrect lower bounds, let us know or propose a fix.
+
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
diff --git a/setup.cfg b/setup.cfg
index c5d1f2a18..be6b602f7 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -6,7 +6,7 @@ description_file =
author = OpenStack
author_email = openstack-discuss@lists.openstack.org
home_page = https://docs.openstack.org/keystone/latest
-python_requires = >=3.6
+python_requires = >=3.8
classifier =
Environment :: OpenStack
Intended Audience :: Information Technology
@@ -17,8 +17,8 @@ classifier =
Programming Language :: Python :: Implementation :: CPython
Programming Language :: Python :: 3 :: Only
Programming Language :: Python :: 3
- Programming Language :: Python :: 3.6
- Programming Language :: Python :: 3.7
+ Programming Language :: Python :: 3.8
+ Programming Language :: Python :: 3.9
[files]
data_files =
diff --git a/tox.ini b/tox.ini
index 402cea905..b1b1fad4c 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,12 +1,13 @@
[tox]
-minversion = 3.2.0
-envlist = py37,pep8,api-ref,docs,genconfig,genpolicy,releasenotes,protection
+minversion = 3.18.0
+envlist = py39,pep8,api-ref,docs,genconfig,genpolicy,releasenotes,protection
ignore_basepython_conflict = true
[testenv]
-usedevelop = True
basepython = python3
-setenv = VIRTUAL_ENV={envdir}
+usedevelop = True
+setenv =
+ PYTHONDONTWRITEBYTECODE=1
deps =
-c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
-r{toxinidir}/test-requirements.txt
@@ -14,7 +15,7 @@ deps =
commands =
find keystone -type f -name "*.pyc" -delete
stestr run {posargs}
-whitelist_externals =
+allowlist_externals =
bash
find
passenv = http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY PBR_VERSION
@@ -139,7 +140,7 @@ commands=
[testenv:pdf-docs]
envdir = {toxworkdir}/docs
deps = {[testenv:docs]deps}
-whitelist_externals =
+allowlist_externals =
make
mkdir
rm
@@ -188,12 +189,6 @@ paths = ./keystone/tests/hacking
deps = bindep
commands = bindep test
-[testenv:lower-constraints]
-deps =
- -c{toxinidir}/lower-constraints.txt
- -r{toxinidir}/test-requirements.txt
- .[ldap,memcache,mongodb]
-
[testenv:protection]
commands =
find keystone -type f -name "*.pyc" -delete