summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--.zuul.yaml25
-rw-r--r--api-ref/source/v3/roles.inc2
-rw-r--r--api-ref/source/v3/unified_limits.inc4
-rw-r--r--devstack/files/oidc/apache_oidc.conf47
-rw-r--r--devstack/lib/oidc.sh160
-rw-r--r--devstack/plugin.sh29
-rw-r--r--devstack/tools/oidc/__init__.py0
-rw-r--r--devstack/tools/oidc/docker-compose.yaml33
-rw-r--r--devstack/tools/oidc/setup_keycloak_client.py61
-rw-r--r--doc/source/admin/auth-totp.rst4
-rw-r--r--doc/source/admin/credential-encryption.rst41
-rw-r--r--doc/source/admin/service-api-protection.rst2
-rw-r--r--doc/source/admin/upgrading.rst52
-rw-r--r--doc/source/conf.py14
-rw-r--r--doc/source/contributor/database-migrations.rst61
-rw-r--r--doc/source/contributor/programming-exercises.rst4
-rw-r--r--doc/source/contributor/services.rst2
-rw-r--r--doc/source/contributor/testing-keystone.rst29
-rw-r--r--doc/source/install/index-obs.rst8
-rw-r--r--doc/source/install/index-rdo.rst8
-rw-r--r--doc/source/install/index-ubuntu.rst8
-rw-r--r--doc/source/user/application_credentials.rst43
-rw-r--r--keystone/api/__init__.py3
-rw-r--r--keystone/api/_shared/json_home_relations.py8
-rw-r--r--keystone/api/ec2tokens.py6
-rw-r--r--keystone/api/os_ep_filter.py2
-rw-r--r--keystone/api/os_oauth2.py390
-rw-r--r--keystone/cmd/cli.py56
-rw-r--r--keystone/cmd/doctor/database.py2
-rw-r--r--keystone/common/password_hashing.py22
-rw-r--r--keystone/common/render_token.py4
-rw-r--r--keystone/common/sql/migrations/env.py21
-rw-r--r--keystone/common/sql/migrations/versions/27e647c0fad4_initial_version.py4
-rw-r--r--keystone/common/sql/upgrades.py375
-rw-r--r--keystone/common/utils.py68
-rw-r--r--keystone/conf/__init__.py2
-rw-r--r--keystone/conf/default.py3
-rw-r--r--keystone/conf/identity.py6
-rw-r--r--keystone/conf/oauth2.py52
-rw-r--r--keystone/exception.py33
-rw-r--r--keystone/federation/utils.py40
-rw-r--r--keystone/identity/backends/ldap/common.py21
-rw-r--r--keystone/locale/de/LC_MESSAGES/keystone.po5
-rw-r--r--keystone/locale/en_GB/LC_MESSAGES/keystone.po37
-rw-r--r--keystone/locale/es/LC_MESSAGES/keystone.po5
-rw-r--r--keystone/locale/fr/LC_MESSAGES/keystone.po5
-rw-r--r--keystone/locale/it/LC_MESSAGES/keystone.po5
-rw-r--r--keystone/locale/ja/LC_MESSAGES/keystone.po5
-rw-r--r--keystone/locale/ko_KR/LC_MESSAGES/keystone.po5
-rw-r--r--keystone/locale/pt_BR/LC_MESSAGES/keystone.po5
-rw-r--r--keystone/locale/ru/LC_MESSAGES/keystone.po5
-rw-r--r--keystone/locale/tr_TR/LC_MESSAGES/keystone.po676
-rw-r--r--keystone/locale/zh_CN/LC_MESSAGES/keystone.po21
-rw-r--r--keystone/locale/zh_TW/LC_MESSAGES/keystone.po5
-rw-r--r--keystone/models/token_model.py6
-rw-r--r--keystone/oauth2/__init__.py0
-rw-r--r--keystone/oauth2/handlers.py30
-rw-r--r--keystone/revoke/backends/base.py4
-rw-r--r--keystone/server/flask/application.py10
-rw-r--r--keystone/server/flask/request_processing/json_body.py7
-rw-r--r--keystone/tests/unit/base_classes.py9
-rw-r--r--keystone/tests/unit/common/sql/__init__.py0
-rw-r--r--keystone/tests/unit/common/sql/test_upgrades.py546
-rw-r--r--keystone/tests/unit/common/test_utils.py119
-rw-r--r--keystone/tests/unit/contrib/federation/test_utils.py18
-rw-r--r--keystone/tests/unit/core.py75
-rw-r--r--keystone/tests/unit/fakeldap.py9
-rw-r--r--keystone/tests/unit/ksfixtures/warnings.py17
-rw-r--r--keystone/tests/unit/mapping_fixtures.py6
-rw-r--r--keystone/tests/unit/test_backend_ldap_pool.py118
-rw-r--r--keystone/tests/unit/test_cli.py20
-rw-r--r--keystone/tests/unit/test_sql_banned_operations.py512
-rw-r--r--keystone/tests/unit/test_sql_upgrade.py357
-rw-r--r--keystone/tests/unit/test_v3.py10
-rw-r--r--keystone/tests/unit/test_v3_auth.py198
-rw-r--r--keystone/tests/unit/test_v3_oauth2.py2071
-rw-r--r--keystone/tests/unit/test_versions.py3
-rw-r--r--keystone/tests/unit/token/test_fernet_provider.py68
-rw-r--r--keystone/token/provider.py25
-rw-r--r--keystone/token/providers/base.py1
-rw-r--r--keystone/token/providers/fernet/core.py5
-rw-r--r--keystone/token/providers/jws/core.py12
-rw-r--r--keystone/token/token_formatters.py115
-rw-r--r--lower-constraints.txt68
-rw-r--r--releasenotes/notes/bp-oauth2-client-credentials-ext-c8933f00a7b45be8.yaml9
-rw-r--r--releasenotes/notes/bp-support-oauth2-mtls-8552892a8e0c72d2.yaml13
-rw-r--r--releasenotes/notes/bug-1926483-a77ab887e0e7f5c9.yaml7
-rw-r--r--releasenotes/notes/drop-python-3-6-and-3-7-dc90b86cedced92b.yaml5
-rw-r--r--releasenotes/notes/max-password-length-truncation-and-warning-bd69090315ec18a7.yaml9
-rw-r--r--releasenotes/notes/switch-to-alembic-1fa5248f0ce824ae.yaml23
-rw-r--r--releasenotes/notes/token_expiration_to_match_application_credential-56d058355a9f240d.yaml10
-rw-r--r--releasenotes/source/2023.1.rst6
-rw-r--r--releasenotes/source/conf.py16
-rw-r--r--releasenotes/source/index.rst2
-rw-r--r--releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po114
-rw-r--r--releasenotes/source/locale/fr/LC_MESSAGES/releasenotes.po120
-rw-r--r--releasenotes/source/locale/ko_KR/LC_MESSAGES/releasenotes.po202
-rw-r--r--releasenotes/source/zed.rst6
-rw-r--r--requirements.txt4
-rw-r--r--setup.cfg6
-rw-r--r--test-requirements.txt11
-rw-r--r--tox.ini72
102 files changed, 5112 insertions, 2496 deletions
diff --git a/.zuul.yaml b/.zuul.yaml
index 05db30ae8..4a3ccf244 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -35,10 +35,12 @@
- job:
name: keystone-dsvm-py3-functional-fips
parent: keystone-dsvm-py3-functional
- nodeset: devstack-single-node-centos-8-stream
+ nodeset: devstack-single-node-centos-9-stream
description: |
- Functional testing for a FIPS enabled Centos 8 system
+ Functional testing for a FIPS enabled Centos 9 system
pre-run: playbooks/enable-fips.yaml
+ vars:
+ nslookup_target: 'opendev.org'
- job:
name: keystone-dsvm-functional-federation-opensuse15
@@ -200,10 +202,25 @@
name: keystone-dsvm-py35-functional-federation
parent: keystone-dsvm-py35-functional-federation-ubuntu-xenial
+# Experimental
+- job:
+ name: keystone-dsvm-functional-oidc-federation
+ parent: keystone-dsvm-functional
+ vars:
+ devstack_localrc:
+ TEMPEST_PLUGINS: '/opt/stack/keystone-tempest-plugin'
+ USE_PYTHON3: True
+ OS_CACERT: '/opt/stack/data/ca_bundle.pem'
+ devstack_services:
+ tls-proxy: true
+ keystone-oidc-federation: true
+ devstack_plugins:
+ keystone: https://opendev.org/openstack/keystone
+
- project:
templates:
- openstack-cover-jobs
- - openstack-python3-xena-jobs
+ - openstack-python3-jobs
- publish-openstack-docs-pti
- periodic-stable-jobs
- check-requirements
@@ -277,3 +294,5 @@
irrelevant-files: *irrelevant-files
- keystone-dsvm-py35-functional-federation-ubuntu-xenial:
irrelevant-files: *irrelevant-files
+ - keystone-dsvm-functional-oidc-federation:
+ irrelevant-files: *irrelevant-files
diff --git a/api-ref/source/v3/roles.inc b/api-ref/source/v3/roles.inc
index 3073e241d..80092ec82 100644
--- a/api-ref/source/v3/roles.inc
+++ b/api-ref/source/v3/roles.inc
@@ -1002,7 +1002,7 @@ Status Codes
.. rest_status_code:: success status.yaml
- - 201
+ - 204
.. rest_status_code:: error status.yaml
diff --git a/api-ref/source/v3/unified_limits.inc b/api-ref/source/v3/unified_limits.inc
index ce32a0f1c..bdb1d1959 100644
--- a/api-ref/source/v3/unified_limits.inc
+++ b/api-ref/source/v3/unified_limits.inc
@@ -614,8 +614,8 @@ Example
:language: javascript
-Delete Registered Limit
-=======================
+Delete Limit
+============
.. rest_method:: DELETE /v3/limits/{limit_id}
diff --git a/devstack/files/oidc/apache_oidc.conf b/devstack/files/oidc/apache_oidc.conf
new file mode 100644
index 000000000..eab84fd07
--- /dev/null
+++ b/devstack/files/oidc/apache_oidc.conf
@@ -0,0 +1,47 @@
+# DO NOT USE THIS IN PRODUCTION ENVIRONMENTS!
+OIDCSSLValidateServer Off
+OIDCOAuthSSLValidateServer Off
+OIDCCookieSameSite On
+
+OIDCClaimPrefix "OIDC-"
+OIDCResponseType "id_token"
+OIDCScope "openid email profile"
+OIDCProviderMetadataURL "%OIDC_METADATA_URL%"
+OIDCClientID "%OIDC_CLIENT_ID%"
+OIDCClientSecret "%OIDC_CLIENT_SECRET%"
+OIDCPKCEMethod "S256"
+OIDCCryptoPassphrase "openstack"
+
+OIDCRedirectURI "https://%HOST_IP%/identity/v3/auth/OS-FEDERATION/identity_providers/%IDP_ID%/protocols/openid/websso"
+OIDCRedirectURI "https://%HOST_IP%/identity/v3/auth/OS-FEDERATION/websso/openid"
+
+<LocationMatch "/v3/auth/OS-FEDERATION/websso/openid">
+ AuthType "openid-connect"
+ Require valid-user
+ LogLevel debug
+</LocationMatch>
+
+<LocationMatch "/v3/auth/OS-FEDERATION/identity_providers/%IDP_ID%/protocols/openid/websso">
+ AuthType "openid-connect"
+ Require valid-user
+ LogLevel debug
+</LocationMatch>
+
+<LocationMatch "/v3/auth/OS-FEDERATION/identity_providers/%IDP_ID%/protocols/openid/auth">
+ AuthType "openid-connect"
+ Require valid-user
+ LogLevel debug
+</LocationMatch>
+
+<Location ~ "/v3/OS-FEDERATION/identity_providers/%IDP_ID%/protocols/openid/auth">
+ AuthType oauth20
+ Require valid-user
+</Location>
+
+OIDCOAuthClientID "%OIDC_CLIENT_ID%"
+OIDCOAuthClientSecret "%OIDC_CLIENT_SECRET%"
+OIDCOAuthIntrospectionEndpoint "%OIDC_INTROSPECTION_URL%"
+
+# Horizon favors the referrer over the Keystone URL that is set.
+# https://github.com/openstack/horizon/blob/5e4ca1a9fdec04db08552e9e93fe372b8b8b45ae/openstack_auth/views.py#L192
+Header always set Referrer-Policy "no-referrer"
diff --git a/devstack/lib/oidc.sh b/devstack/lib/oidc.sh
new file mode 100644
index 000000000..ab8731d98
--- /dev/null
+++ b/devstack/lib/oidc.sh
@@ -0,0 +1,160 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+DOMAIN_NAME=${DOMAIN_NAME:-federated_domain}
+PROJECT_NAME=${PROJECT_NAME:-federated_project}
+GROUP_NAME=${GROUP_NAME:-federated_users}
+
+OIDC_CLIENT_ID=${CLIENT_ID:-devstack}
+OIDC_CLIENT_SECRET=${OIDC_CLIENT_SECRET:-nomoresecret}
+
+OIDC_ISSUER=${OIDC_ISSUER:-"https://$HOST_IP:8443"}
+OIDC_ISSUER_BASE="${OIDC_ISSUER}/realms/master"
+
+OIDC_METADATA_URL=${OIDC_METADATA_URL:-"https://$HOST_IP:8443/realms/master/.well-known/openid-configuration"}
+OIDC_INTROSPECTION_URL=${OIDC_INTROSPECTION_URL:-"https://$HOST_IP:8443/realms/master/protocol/openid-connect/token/introspect"}
+
+IDP_ID=${IDP_ID:-sso}
+IDP_USERNAME=${IDP_USERNAME:-admin}
+IDP_PASSWORD=${IDP_PASSWORD:-nomoresecret}
+
+MAPPING_REMOTE_TYPE=${MAPPING_REMOTE_TYPE:-OIDC-preferred_username}
+MAPPING_USER_NAME=${MAPPING_USER_NAME:-"{0}"}
+PROTOCOL_ID=${PROTOCOL_ID:-openid}
+
+REDIRECT_URI="https://$HOST_IP/identity/v3/auth/OS-FEDERATION/identity_providers/$IDP_ID/protocols/openid/websso"
+
+OIDC_PLUGIN="$DEST/keystone/devstack"
+
+function install_federation {
+ if is_ubuntu; then
+ install_package libapache2-mod-auth-openidc
+ sudo a2enmod headers
+ install_package docker.io
+ install_package docker-compose
+ elif is_fedora; then
+ install_package mod_auth_openidc
+ install_package podman
+ install_package podman-docker
+ install_package docker-compose
+ sudo systemctl start podman.socket
+ else
+ echo "Skipping installation. Only supported on Ubuntu and RHEL based."
+ fi
+}
+
+function configure_federation {
+ # Specify the header that contains information about the identity provider
+ iniset $KEYSTONE_CONF openid remote_id_attribute "HTTP_OIDC_ISS"
+ iniset $KEYSTONE_CONF auth methods "password,token,openid,application_credential"
+ iniset $KEYSTONE_CONF federation trusted_dashboard "https://$HOST_IP/auth/websso/"
+
+ cp $DEST/keystone/etc/sso_callback_template.html /etc/keystone/
+
+ if [[ "$WSGI_MODE" == "uwsgi" ]]; then
+ restart_service "devstack@keystone"
+ fi
+
+ if [[ "$OIDC_ISSUER_BASE" == "https://$HOST_IP:8443/realms/master" ]]; then
+ # Assuming we want to setup a local keycloak here.
+ sed -i "s#DEVSTACK_DEST#${DATA_DIR}#" ${OIDC_PLUGIN}/tools/oidc/docker-compose.yaml
+ sudo docker-compose --file ${OIDC_PLUGIN}/tools/oidc/docker-compose.yaml up -d
+
+ # wait for the server to be up
+ attempt_counter=0
+ max_attempts=100
+ until $(curl --output /dev/null --silent --fail $OIDC_METADATA_URL); do
+ if [ ${attempt_counter} -eq ${max_attempts} ];then
+ echo "Keycloak server failed to come up in time"
+ exit 1
+ fi
+
+ attempt_counter=$(($attempt_counter+1))
+ sleep 5
+ done
+
+ KEYCLOAK_URL="https://$HOST_IP:8443" \
+ KEYCLOAK_USERNAME="admin" \
+ KEYCLOAK_PASSWORD="nomoresecret" \
+ HOST_IP="$HOST_IP" \
+ python3 $OIDC_PLUGIN/tools/oidc/setup_keycloak_client.py
+ fi
+
+ local keystone_apache_conf=$(apache_site_config_for keystone-wsgi-public)
+ cat $OIDC_PLUGIN/files/oidc/apache_oidc.conf | sudo tee -a $keystone_apache_conf
+ sudo sed -i -e "
+ s|%OIDC_CLIENT_ID%|$OIDC_CLIENT_ID|g;
+ s|%OIDC_CLIENT_SECRET%|$OIDC_CLIENT_SECRET|g;
+ s|%OIDC_METADATA_URL%|$OIDC_METADATA_URL|g;
+ s|%OIDC_INTROSPECTION_URL%|$OIDC_INTROSPECTION_URL|g;
+ s|%HOST_IP%|$HOST_IP|g;
+ s|%IDP_ID%|$IDP_ID|g;
+ " $keystone_apache_conf
+
+ restart_apache_server
+}
+
+function register_federation {
+ local federated_domain=$(get_or_create_domain $DOMAIN_NAME)
+ local federated_project=$(get_or_create_project $PROJECT_NAME $DOMAIN_NAME)
+ local federated_users=$(get_or_create_group $GROUP_NAME $DOMAIN_NAME)
+ local member_role=$(get_or_create_role Member)
+
+ openstack role add --group $federated_users --domain $federated_domain $member_role
+ openstack role add --group $federated_users --project $federated_project $member_role
+
+ openstack identity provider create \
+ --remote-id $OIDC_ISSUER_BASE \
+ --domain $DOMAIN_NAME $IDP_ID
+}
+
+function configure_tests_settings {
+    # Here we set any settings that might be needed by the fed_scenario set of tests
+ iniset $TEMPEST_CONFIG identity-feature-enabled federation True
+
+ # we probably need an oidc version of this flag based on local oidc
+ iniset $TEMPEST_CONFIG identity-feature-enabled external_idp True
+
+ # Identity provider settings
+ iniset $TEMPEST_CONFIG fed_scenario idp_id $IDP_ID
+ iniset $TEMPEST_CONFIG fed_scenario idp_remote_ids $OIDC_ISSUER_BASE
+ iniset $TEMPEST_CONFIG fed_scenario idp_username $IDP_USERNAME
+ iniset $TEMPEST_CONFIG fed_scenario idp_password $IDP_PASSWORD
+ iniset $TEMPEST_CONFIG fed_scenario idp_oidc_url $OIDC_ISSUER
+ iniset $TEMPEST_CONFIG fed_scenario idp_client_id $OIDC_CLIENT_ID
+ iniset $TEMPEST_CONFIG fed_scenario idp_client_secret $OIDC_CLIENT_SECRET
+
+ # Mapping rules settings
+ iniset $TEMPEST_CONFIG fed_scenario mapping_remote_type $MAPPING_REMOTE_TYPE
+ iniset $TEMPEST_CONFIG fed_scenario mapping_user_name $MAPPING_USER_NAME
+ iniset $TEMPEST_CONFIG fed_scenario mapping_group_name $GROUP_NAME
+ iniset $TEMPEST_CONFIG fed_scenario mapping_group_domain_name $DOMAIN_NAME
+ iniset $TEMPEST_CONFIG fed_scenario enable_k2k_groups_mapping False
+
+ # Protocol settings
+ iniset $TEMPEST_CONFIG fed_scenario protocol_id $PROTOCOL_ID
+}
+
+function uninstall_federation {
+ # Ensure Keycloak is stopped and the containers are cleaned up
+ sudo docker-compose --file ${OIDC_PLUGIN}/tools/oidc/docker-compose.yaml down
+ if is_ubuntu; then
+ sudo docker rmi $(sudo docker images -a -q)
+ uninstall_package docker-compose
+ elif is_fedora; then
+ sudo podman rmi $(sudo podman images -a -q)
+ uninstall_package podman
+ else
+ echo "Skipping uninstallation of OIDC federation for non ubuntu nor fedora nor suse host"
+ fi
+}
+
diff --git a/devstack/plugin.sh b/devstack/plugin.sh
index 8f7a38535..eca1d1ac0 100644
--- a/devstack/plugin.sh
+++ b/devstack/plugin.sh
@@ -14,7 +14,13 @@
# under the License.
KEYSTONE_PLUGIN=$DEST/keystone/devstack
-source $KEYSTONE_PLUGIN/lib/federation.sh
+
+if is_service_enabled keystone-saml2-federation; then
+ source $KEYSTONE_PLUGIN/lib/federation.sh
+elif is_service_enabled keystone-oidc-federation; then
+ source $KEYSTONE_PLUGIN/lib/oidc.sh
+fi
+
source $KEYSTONE_PLUGIN/lib/scope.sh
# For more information on Devstack plugins, including a more detailed
@@ -25,6 +31,10 @@ if [[ "$1" == "stack" && "$2" == "install" ]]; then
# This phase is executed after the projects have been installed
echo "Keystone plugin - Install phase"
if is_service_enabled keystone-saml2-federation; then
+ echo "installing saml2 federation"
+ install_federation
+ elif is_service_enabled keystone-oidc-federation; then
+ echo "installing oidc federation"
install_federation
fi
@@ -33,6 +43,10 @@ elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
# before they are started
echo "Keystone plugin - Post-config phase"
if is_service_enabled keystone-saml2-federation; then
+ echo "configuring saml2 federation"
+ configure_federation
+ elif is_service_enabled keystone-oidc-federation; then
+ echo "configuring oidc federation"
configure_federation
fi
@@ -40,12 +54,21 @@ elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
# This phase is executed after the projects have been started
echo "Keystone plugin - Extra phase"
if is_service_enabled keystone-saml2-federation; then
+ echo "registering saml2 federation"
+ register_federation
+ elif is_service_enabled keystone-oidc-federation; then
+ echo "registering oidc federation"
register_federation
fi
+
elif [[ "$1" == "stack" && "$2" == "test-config" ]]; then
# This phase is executed after Tempest was configured
echo "Keystone plugin - Test-config phase"
if is_service_enabled keystone-saml2-federation; then
+ echo "config tests settings for saml"
+ configure_tests_settings
+ elif is_service_enabled keystone-oidc-federation; then
+ echo "config tests settings for oidc"
configure_tests_settings
fi
if [[ "$(trueorfalse False KEYSTONE_ENFORCE_SCOPE)" == "True" ]] ; then
@@ -66,6 +89,10 @@ if [[ "$1" == "clean" ]]; then
# Called by clean.sh after the "unstack" phase
# Undo what was performed during the "install" phase
if is_service_enabled keystone-saml2-federation; then
+ echo "uninstalling saml"
+ uninstall_federation
+ elif is_service_enabled keystone-oidc-federation; then
+ echo "uninstalling oidc"
uninstall_federation
fi
fi
diff --git a/devstack/tools/oidc/__init__.py b/devstack/tools/oidc/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/devstack/tools/oidc/__init__.py
diff --git a/devstack/tools/oidc/docker-compose.yaml b/devstack/tools/oidc/docker-compose.yaml
new file mode 100644
index 000000000..6e4a428c9
--- /dev/null
+++ b/devstack/tools/oidc/docker-compose.yaml
@@ -0,0 +1,33 @@
+version: "3"
+
+services:
+ keycloak:
+ image: quay.io/keycloak/keycloak:latest
+ command: start-dev --log-level debug --log=console,file --https-certificate-file=/etc/certs/devstack-cert.pem --https-certificate-key-file=/etc/certs/devstack-cert.pem
+ container_name: oidc_keycloak_1
+ environment:
+ KEYCLOAK_ADMIN: admin
+ KEYCLOAK_ADMIN_PASSWORD: nomoresecret
+ KEYCLOAK_USER: admin
+ KEYCLOAK_PASSWORD: nomoresecret
+ KEYCLOAK_LOG_LEVEL: DEBUG
+ DB_VENDOR: mariadb
+ DB_DATABASE: keycloak
+ DB_USER: keycloak
+ DB_PASSWORD: "nomoresecret"
+ DB_ADDR: "keycloak-database"
+ DB_PORT: "3306"
+ JAVA_OPTS: "-server -Xms128m -Xmx1024m -XX:MetaspaceSize=128M -XX:MaxMetaspaceSize=512m -Djava.net.preferIPv4Stack=true -Djboss.modules.system.pkgs=org.jboss.byteman -Djava.awt.headless=true"
+ ports:
+ - "8088:8080" # host:container
+ - "8443:8443"
+ volumes:
+ - DEVSTACK_DEST:/etc/certs:rw
+
+ keycloak-database:
+ image: quay.io/metal3-io/mariadb:latest
+ environment:
+ MYSQL_ROOT_PASSWORD: nomoresecret
+ MYSQL_DATABASE: keycloak
+ MYSQL_USER: keycloak
+ MYSQL_PASSWORD: nomoresecret
diff --git a/devstack/tools/oidc/setup_keycloak_client.py b/devstack/tools/oidc/setup_keycloak_client.py
new file mode 100644
index 000000000..15fa37b41
--- /dev/null
+++ b/devstack/tools/oidc/setup_keycloak_client.py
@@ -0,0 +1,61 @@
+import os
+import requests
+
+KEYCLOAK_USERNAME = os.environ.get('KEYCLOAK_USERNAME')
+KEYCLOAK_PASSWORD = os.environ.get('KEYCLOAK_PASSWORD')
+KEYCLOAK_URL = os.environ.get('KEYCLOAK_URL')
+HOST_IP = os.environ.get('HOST_IP', 'localhost')
+
+class KeycloakClient(object):
+ def __init__(self):
+ self.session = requests.session()
+
+ @staticmethod
+ def construct_url(realm, path):
+ return f'{KEYCLOAK_URL}/admin/realms/{realm}/{path}'
+
+ @staticmethod
+ def token_endpoint(realm):
+ return f'{KEYCLOAK_URL}/realms/{realm}/protocol/openid-connect/token'
+
+ def _admin_auth(self, realm):
+ params = {
+ 'grant_type': 'password',
+ 'client_id': 'admin-cli',
+ 'username': KEYCLOAK_USERNAME,
+ 'password': KEYCLOAK_PASSWORD,
+ 'scope': 'openid',
+ }
+ r = requests.post(self.token_endpoint(realm), data=params).json()
+ headers = {
+ 'Authorization': ("Bearer %s" % r['access_token']),
+ 'Content-Type': 'application/json'
+ }
+ self.session.headers.update(headers)
+ return r
+
+ def create_client(self, realm, client_id, client_secret, redirect_uris):
+ self._admin_auth(realm)
+ data = {
+ 'clientId': client_id,
+ 'secret': client_secret,
+ 'redirectUris': redirect_uris,
+ 'implicitFlowEnabled': True,
+ 'directAccessGrantsEnabled': True,
+ }
+ return self.session.post(self.construct_url(realm, 'clients'), json=data)
+
+
+def main():
+ c = KeycloakClient()
+
+ redirect_uris = [
+ f'http://{HOST_IP}/identity/v3/auth/OS-FEDERATION/identity_providers/sso/protocols/openid/websso',
+ f'http://{HOST_IP}/identity/v3/auth/OS-FEDERATION/websso/openid'
+ ]
+
+ c.create_client('master', 'devstack', 'nomoresecret', redirect_uris)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/doc/source/admin/auth-totp.rst b/doc/source/admin/auth-totp.rst
index 3c331be96..c77ca64a0 100644
--- a/doc/source/admin/auth-totp.rst
+++ b/doc/source/admin/auth-totp.rst
@@ -40,8 +40,8 @@ secret:
.. code-block:: python
import base64
- message = '1234567890123456'
- print base64.b32encode(message).rstrip('=')
+ message = b'1234567890123456'
+ print(base64.b32encode(message).rstrip(b'='))
Example output::
diff --git a/doc/source/admin/credential-encryption.rst b/doc/source/admin/credential-encryption.rst
index 7a721ff70..d54209be3 100644
--- a/doc/source/admin/credential-encryption.rst
+++ b/doc/source/admin/credential-encryption.rst
@@ -60,47 +60,6 @@ the response. Neither the cipher text, nor the hash of the key used to encrypt
the ``blob`` are exposed through the API. Furthermore, the key is only used
internally to keystone.
-Encrypting existing credentials
--------------------------------
-
-When upgrading a Mitaka deployment to Newton, three database migrations will
-ensure all credentials are encrypted. The process is as follows:
-
-1. An additive schema change is made to create the new ``encrypted_blob`` and
- ``key_hash`` columns in the existing ``credential`` table using
- ``keystone-manage db_sync --expand``.
-2. A data migration will loop through all existing credentials, encrypt each
- ``blob`` and store the result in the new ``encrypted_blob`` column. The hash
- of the key used is also written to the ``key_hash`` column for that specific
- credential. This step is done using ``keystone-manage db_sync --migrate``.
-3. A contractive schema will remove the ``blob`` column that held the plain
- text representations of the credential using ``keystone-manage db_sync
- --contract``. This should only be done after all nodes in the deployment are
- running Newton. If any Mitaka nodes are running after the database is
- contracted, they won't be able to read credentials since they are looking
- for the ``blob`` column that no longer exists.
-
-.. NOTE::
-
- You may also use ``keystone-manage db_sync --check`` in order to check the
- current status of your rolling upgrades.
-
-If performing a rolling upgrade, please note that a limited service outage will
-take affect during this migration. When the migration is in place, credentials
-will become read-only until the database is contracted. After the contract
-phase is complete, credentials will be writeable to the backend. A
-``[credential] key_repository`` location must be specified through
-configuration and bootstrapped with keys using ``keystone-manage
-credential_setup`` prior to migrating any existing credentials. If a new key
-repository isn't setup using ``keystone-manage credential_setup`` keystone will
-assume a null key to encrypt and decrypt credentials until a proper key
-repository is present. The null key is a key consisting of all null bytes and
-its only purpose is to ease the upgrade process from Mitaka to Newton. It is
-highly recommended that the null key isn't used. It is no more secure than
-storing credentials in plain text. If the null key is used, you should migrate
-to a proper key repository using ``keystone-manage credential_setup`` and
-``keystone-manage credential_migrate``.
-
Encryption key management
-------------------------
diff --git a/doc/source/admin/service-api-protection.rst b/doc/source/admin/service-api-protection.rst
index 47886aeb0..249944354 100644
--- a/doc/source/admin/service-api-protection.rst
+++ b/doc/source/admin/service-api-protection.rst
@@ -31,7 +31,7 @@ custom policies.
Roles Definitions
-----------------
-The default roles provided by keystone, via ``keystone-manage boostrap``, are
+The default roles provided by keystone, via ``keystone-manage bootstrap``, are
related through role implications. The ``admin`` role implies the ``member``
role, and the ``member`` role implies the ``reader`` role. These implications
mean users with the ``admin`` role automatically have the ``member`` and
diff --git a/doc/source/admin/upgrading.rst b/doc/source/admin/upgrading.rst
index 709d98dac..e20071436 100644
--- a/doc/source/admin/upgrading.rst
+++ b/doc/source/admin/upgrading.rst
@@ -155,7 +155,7 @@ downtime if it is required.
Upgrading without downtime
--------------------------
-.. NOTE:
+.. versionadded:: 10.0.0 (Newton)
Upgrading without downtime is only supported in deployments upgrading
*from* Newton or a newer release.
@@ -166,6 +166,12 @@ Upgrading without downtime
``keystone-manage db_sync``), as it runs legacy (downtime-incurring)
migrations prior to running schema expansions.
+.. versionchanged:: 21.0.0 (Yoga)
+
+ The migration tooling was changed from *SQLAlchemy-Migrate* to *Alembic*.
+ As part of this change, the data migration phase of the database upgrades
+ was dropped.
+
This is a high-level description of our upgrade strategy built around
additional options in ``keystone-manage db_sync``. Although it is much more
complex than the upgrade process described above, it assumes that you are not
@@ -187,11 +193,11 @@ authenticate requests normally.
#. Update your configuration files on the first node (``/etc/keystone/``) with
those corresponding to the latest release.
-#. (*New in Newton*) Run ``keystone-manage doctor`` on the first node to
+#. Run ``keystone-manage doctor`` on the first node to
diagnose symptoms of common deployment issues and receive instructions for
resolving them.
-#. (*New in Newton*) Run ``keystone-manage db_sync --expand`` on the first node
+#. Run ``keystone-manage db_sync --expand`` on the first node
to expand the database schema to a superset of what both the previous and
next release can utilize, and create triggers to facilitate the live
migration process.
@@ -210,14 +216,12 @@ authenticate requests normally.
triggers will live migrate the data to the new schema so it can be read by
the next release.
-#. (*New in Newton*) Run ``keystone-manage db_sync --migrate`` on the first
- node to forcefully perform data migrations. This process will migrate all
- data from the old schema to the new schema while the previous release
- continues to operate normally.
+ .. note::
- When this process completes, all data will be available in both the new
- schema and the old schema, so both the previous release and the next release
- will be capable of operating normally.
+      Prior to Yoga, data migrations were treated separately and required the
+ use of the ``keystone-manage db_sync --migrate`` command after applying
+ the expand migrations. This is no longer necessary and the
+ ``keystone-manage db_sync --migrate`` command is now a no-op.
#. Update your configuration files (``/etc/keystone/``) on all nodes (except
the first node, which you've already done) with those corresponding to the
@@ -230,20 +234,27 @@ authenticate requests normally.
As the next release begins writing to the new schema, database triggers will
also migrate the data to the old schema, keeping both data schemas in sync.
-#. (*New in Newton*) Run ``keystone-manage db_sync --contract`` to remove the
- old schema and all data migration triggers.
+#. Run ``keystone-manage db_sync --contract`` to remove the old schema and all
+ data migration triggers.
When this process completes, the database will no longer be able to support
the previous release.
-Using db_sync check
-~~~~~~~~~~~~~~~~~~~
+Using ``db_sync check``
+~~~~~~~~~~~~~~~~~~~~~~~
+
+.. versionadded:: 12.0.0 (Pike)
-(*New in Pike*) In order to check the current state of your rolling upgrades,
-you may run the command ``keystone-manage db_sync --check``. This will inform
-you of any outstanding actions you have left to take as well as any possible
-upgrades you can make from your current version. Here are a list of possible
-return codes.
+.. versionchanged:: 21.0.0 (Yoga)
+
+ Previously this command would return ``3`` if data migrations were
+ required. Data migrations are now part of the expand schema migrations,
+ therefore this step is no longer necessary.
+
+In order to check the current state of your rolling upgrades, you may run the
+command ``keystone-manage db_sync --check``. This will inform you of any
+outstanding actions you have left to take as well as any possible upgrades you
+can make from your current version. Here are a list of possible return codes.
* A return code of ``0`` means you are currently up to date with the latest
migration script version and all ``db_sync`` commands are complete.
@@ -256,8 +267,5 @@ return codes.
or the database is already under control. Your first step is to run
``keystone-manage db_sync --expand``.
-* A return code of ``3`` means that the expansion stage is complete, and the
- next step is to run ``keystone-manage db_sync --migrate``.
-
* A return code of ``4`` means that the expansion and data migration stages are
complete, and the next step is to run ``keystone-manage db_sync --contract``.
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 41a245632..1eb530ae3 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -88,7 +88,7 @@ source_suffix = '.rst'
master_doc = 'index'
# General information about the project.
-copyright = u'2012, OpenStack Foundation'
+copyright = '2012, OpenStack Foundation'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
@@ -131,8 +131,8 @@ modindex_common_prefix = ['keystone.']
# List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual'
man_pages = [
- ('cli/keystone-manage', 'keystone-manage', u'Keystone Management Utility',
- [u'OpenStack'], 1)
+ ('cli/keystone-manage', 'keystone-manage', 'Keystone Management Utility',
+ ['OpenStack'], 1)
]
@@ -232,8 +232,8 @@ latex_elements = {
# NOTE(gyee): Specify toctree_only=True for a better document structure of
# the generated PDF file.
latex_documents = [
- ('index', 'doc-keystone.tex', u'Keystone Documentation',
- u'OpenStack', 'manual', True)
+ ('index', 'doc-keystone.tex', 'Keystone Documentation',
+ 'OpenStack', 'manual', True)
]
# The name of an image file (relative to this directory) to place at the top of
@@ -263,8 +263,8 @@ latex_documents = [
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
- ('index', 'keystone', u'Keystone Documentation',
- u'OpenStack', 'keystone', 'One line description of project.',
+ ('index', 'keystone', 'Keystone Documentation',
+ 'OpenStack', 'keystone', 'One line description of project.',
'Miscellaneous'),
]
diff --git a/doc/source/contributor/database-migrations.rst b/doc/source/contributor/database-migrations.rst
index 3827ea8e6..59afb2d0c 100644
--- a/doc/source/contributor/database-migrations.rst
+++ b/doc/source/contributor/database-migrations.rst
@@ -17,52 +17,45 @@
Database Migrations
===================
-.. note::
+.. versionchanged:: 21.0.0 (Yoga)
- The framework being used is currently being migrated from
- SQLAlchemy-Migrate to Alembic, meaning this information will change in the
- near-term.
+ The database migration framework was changed from SQLAlchemy-Migrate to
+ Alembic in the Yoga release. Previously there were three SQLAlchemy-Migrate
+repos, corresponding to different types of migration operations: the *expand*
+ repo, the *data migration* repo, and the *contract* repo. There are now
+ only two Alembic branches, the *expand* branch and the *contract* branch,
+ and data migration operations have been folded into the former
Starting with Newton, keystone supports upgrading both with and without
-downtime. In order to support this, there are three separate migration
-repositories (all under ``keystone/common/sql/legacy_migrations``) that match
-the three phases of an upgrade (schema expansion, data migration, and schema
-contraction):
+downtime. In order to support this, there are two separate branches (all under
+``keystone/common/sql/migrations``): the *expand* and the *contract* branch.
-``expand_repo``
+*expand*
For additive schema modifications and triggers to ensure data is kept in
sync between the old and new schema until the point when there are no
keystone instances running old code.
-``data_migration_repo``
- To ensure new tables/columns are fully populated with data from the old
- schema.
+ May also contain data migrations to ensure new tables/columns are fully
+ populated with data from the old schema.
-``contract_repo``
+*contract*
Run after all old code versions have been upgraded to running the new code,
so remove any old schema columns/tables that are not used by the new
version of the code. Drop any triggers added in the expand phase.
-All migrations are required to have a migration script in each of these repos,
-each with the same version number (which is indicated by the first three digits
-of the name of the script, e.g. ``003_add_X_table.py``). If there is no work to
-do in a specific phase, then include a no-op migration to simply ``pass`` (in
-fact the ``001`` migration in each of these repositories is a no-op migration,
-so that can be used as a template).
+A migration script must belong to one branch. If a migration has both additive
+and destructive operations, it must be split into two migration scripts, one
+in each branch.
In order to support rolling upgrades, where two releases of keystone briefly
operate side-by-side using the same database without downtime, each phase of
the migration must adhere to following constraints:
-These triggers should be removed in the contract phase. There are further
-restrictions as to what can and cannot be included in migration scripts in each
-phase:
-
Expand phase:
- Only additive schema changes are allowed, such as new columns, tables,
- indices, and triggers.
+ Only additive schema changes, such as new columns, tables, indices, and
+ triggers, and data insertion are allowed.
- Data insertion, modification, and removal is not allowed.
+ Data modification or removal is not allowed.
Triggers must be created to keep data in sync between the previous release
and the next release. Data written by the previous release must be readable
@@ -72,20 +65,14 @@ Expand phase:
In cases it is not possible for triggers to maintain data integrity across
multiple schemas, writing data should be forbidden using triggers.
-Data Migration phase:
- Data is allowed to be inserted, updated, and deleted.
-
- No schema changes are allowed.
-
Contract phase:
- Only destructive schema changes are allowed, such as dropping or altering
- columns, tables, indices, and triggers.
-
- Data insertion, modification, and removal is not allowed.
+ Only destructive schema changes, such as dropping or altering
+ columns, tables, indices, and triggers, or data modification or removal are
+ allowed.
Triggers created during the expand phase must be dropped.
For more information on writing individual migration scripts refer to
-`SQLAlchemy-migrate`_.
+`Alembic`_.
-.. _SQLAlchemy-migrate: https://opendev.org/openstack/sqlalchemy-migrate
+.. _Alembic: https://alembic.sqlalchemy.org/
diff --git a/doc/source/contributor/programming-exercises.rst b/doc/source/contributor/programming-exercises.rst
index b51725d08..77a91bc74 100644
--- a/doc/source/contributor/programming-exercises.rst
+++ b/doc/source/contributor/programming-exercises.rst
@@ -53,9 +53,7 @@ Refer to the :doc:`API Change tutorial <api_change_tutorial>`. In short, you wil
steps:
#. Create a SQL migration to add the parameter to the database table
- (:py:mod:`keystone.common.sql.legacy_migration.expand_repo.versions`,
- :py:mod:`keystone.common.sql.legacy_migration.data_migration_repo.versions`,
- :py:mod:`keystone.common.sql.legacy_migration.contract_repo.versions`)
+ (:py:mod:`keystone.common.sql.migrations.versions`)
#. Add a SQL migration unit test (`keystone/tests/unit/test_sql_upgrade.py`)
diff --git a/doc/source/contributor/services.rst b/doc/source/contributor/services.rst
index bdca28b15..c1c397e30 100644
--- a/doc/source/contributor/services.rst
+++ b/doc/source/contributor/services.rst
@@ -99,7 +99,7 @@ The "default" domain
The v2.0 API has been removed as of the Queens release. While this section
references the v2.0 API, it is purely for historical reasons that clarify
- the existance of the *default* domain.
+ the existence of the *default* domain.
Domains were introduced as a v3-only feature. As a result, the v2.0 API didn't
understand the concept of domains. To allow for both versions of the Identity
diff --git a/doc/source/contributor/testing-keystone.rst b/doc/source/contributor/testing-keystone.rst
index 72575fbcb..721562a3a 100644
--- a/doc/source/contributor/testing-keystone.rst
+++ b/doc/source/contributor/testing-keystone.rst
@@ -138,32 +138,9 @@ Identity module.
Testing Schema Migrations
-------------------------
-.. note::
-
- The framework being used is currently being migrated from
- SQLAlchemy-Migrate to Alembic, meaning this information will change in the
- near-term.
-
-The application of schema migrations can be tested using SQLAlchemy Migrate's
-built-in test runner, one migration at a time.
-
-.. WARNING::
-
- This may leave your database in an inconsistent state; attempt this in
- non-production environments only!
-
-This is useful for testing the *next* migration in sequence in a database under
-version control:
-
-.. code-block:: bash
-
- $ python keystone/common/sql/legacy_migrations/expand_repo/manage.py test \
- --url=sqlite:///test.db \
- --repository=keystone/common/sql/legacy_migrations/expand_repo/
-
-This command references to a SQLite database (test.db) to be used. Depending on
-the migration, this command alone does not make assertions as to the integrity
-of your data during migration.
+Tests for database migrations can be found in
+``keystone/tests/unit/test_sql_upgrade.py`` and
+``keystone/tests/unit/test_sql_banned_operations.py``.
LDAP Tests
----------
diff --git a/doc/source/install/index-obs.rst b/doc/source/install/index-obs.rst
index c67974d74..46129285a 100644
--- a/doc/source/install/index-obs.rst
+++ b/doc/source/install/index-obs.rst
@@ -12,14 +12,6 @@ both SP1 and SP2 - through the Open Build Service Cloud repository.
Explanations of configuration options and sample configuration files
are included.
-.. note::
- The Training Labs scripts provide an automated way of deploying the
- cluster described in this Installation Guide into VirtualBox or KVM
- VMs. You will need a desktop computer or a laptop with at least 8
- GB memory and 20 GB free storage running Linux, MacOS, or Windows.
- Please see the
- `OpenStack Training Labs <https://docs.openstack.org/training_labs/>`_.
-
.. warning::
This guide is a work-in-progress and is subject to updates frequently.
diff --git a/doc/source/install/index-rdo.rst b/doc/source/install/index-rdo.rst
index 6e0e3984f..dc48e890f 100644
--- a/doc/source/install/index-rdo.rst
+++ b/doc/source/install/index-rdo.rst
@@ -12,14 +12,6 @@ the RDO repository.
Explanations of configuration options and sample configuration files
are included.
-.. note::
- The Training Labs scripts provide an automated way of deploying the
- cluster described in this Installation Guide into VirtualBox or KVM
- VMs. You will need a desktop computer or a laptop with at least 8
- GB memory and 20 GB free storage running Linux, MacOS, or Windows.
- Please see the
- `OpenStack Training Labs <https://docs.openstack.org/training_labs/>`_.
-
.. warning::
This guide is a work-in-progress and is subject to updates frequently.
diff --git a/doc/source/install/index-ubuntu.rst b/doc/source/install/index-ubuntu.rst
index b3e5cb064..d1c7fe138 100644
--- a/doc/source/install/index-ubuntu.rst
+++ b/doc/source/install/index-ubuntu.rst
@@ -12,14 +12,6 @@ Ubuntu 16.04 (LTS).
Explanations of configuration options and sample configuration files
are included.
-.. note::
- The Training Labs scripts provide an automated way of deploying the
- cluster described in this Installation Guide into VirtualBox or KVM
- VMs. You will need a desktop computer or a laptop with at least 8
- GB memory and 20 GB free storage running Linux, MacOS, or Windows.
- Please see the
- `OpenStack Training Labs <https://docs.openstack.org/training_labs/>`_.
-
.. warning::
This guide is a work-in-progress and is subject to updates frequently.
diff --git a/doc/source/user/application_credentials.rst b/doc/source/user/application_credentials.rst
index eff86f7b3..5455a04e7 100644
--- a/doc/source/user/application_credentials.rst
+++ b/doc/source/user/application_credentials.rst
@@ -174,8 +174,47 @@ Access Rules
============
In addition to delegating a subset of roles to an application credential, you
-may also delegate more fine-grained access control by using access rules. For
-example, to create an application credential that is constricted to creating
+may also delegate more fine-grained access control by using access rules.
+
+.. note::
+
+ Application credentials with access rules require additional configuration
+   of each service that will use them. See below for details.
+
+If application credentials with access rules are required, an OpenStack
+service using keystonemiddleware to authenticate with keystone needs to
+define ``service_type`` in its configuration file. The following is an example for the
+cinder V3 service:
+
+.. code-block:: ini
+
+ [keystone_authtoken]
+ service_type = volumev3
+
+For other OpenStack services, their types can be obtained using the OpenStack
+client. For example:
+
+.. code-block:: console
+
+ $ openstack service list -c Name -c Type
+ +-----------+-----------+
+ | Name | Type |
+ +-----------+-----------+
+ | glance | image |
+ | cinderv3 | volumev3 |
+ | cinderv2 | volumev2 |
+ | keystone | identity |
+ | nova | compute |
+ | neutron | network |
+ | placement | placement |
+ +-----------+-----------+
+
+.. note::
+
+ Updates to the configuration files of a service require restart of the appropriate
+ services for the changes to take effect.
+
+In order to create an example application credential that is constricted to creating
servers in nova, the user can add the following access rules:
.. code-block:: console
diff --git a/keystone/api/__init__.py b/keystone/api/__init__.py
index c3c5628a3..9c0e01050 100644
--- a/keystone/api/__init__.py
+++ b/keystone/api/__init__.py
@@ -22,6 +22,7 @@ from keystone.api import os_ep_filter
from keystone.api import os_federation
from keystone.api import os_inherit
from keystone.api import os_oauth1
+from keystone.api import os_oauth2
from keystone.api import os_revoke
from keystone.api import os_simple_cert
from keystone.api import policy
@@ -50,6 +51,7 @@ __all__ = (
'os_federation',
'os_inherit',
'os_oauth1',
+ 'os_oauth2',
'os_revoke',
'os_simple_cert',
'policy',
@@ -79,6 +81,7 @@ __apis__ = (
os_federation,
os_inherit,
os_oauth1,
+ os_oauth2,
os_revoke,
os_simple_cert,
policy,
diff --git a/keystone/api/_shared/json_home_relations.py b/keystone/api/_shared/json_home_relations.py
index d37ec27fb..997fcca52 100644
--- a/keystone/api/_shared/json_home_relations.py
+++ b/keystone/api/_shared/json_home_relations.py
@@ -45,6 +45,14 @@ os_oauth1_parameter_rel_func = functools.partial(
json_home.build_v3_extension_parameter_relation,
extension_name='OS-OAUTH1', extension_version='1.0')
+# OS-OAUTH2 "extension"
+os_oauth2_resource_rel_func = functools.partial(
+ json_home.build_v3_extension_resource_relation,
+ extension_name='OS-OAUTH2', extension_version='1.0')
+os_oauth2_parameter_rel_func = functools.partial(
+ json_home.build_v3_extension_parameter_relation,
+ extension_name='OS-OAUTH2', extension_version='1.0')
+
# OS-REVOKE "extension"
os_revoke_resource_rel_func = functools.partial(
json_home.build_v3_extension_resource_relation,
diff --git a/keystone/api/ec2tokens.py b/keystone/api/ec2tokens.py
index 12096db9e..d21673a03 100644
--- a/keystone/api/ec2tokens.py
+++ b/keystone/api/ec2tokens.py
@@ -12,6 +12,8 @@
# This file handles all flask-restful resources for /v3/ec2tokens
+import urllib.parse
+
import flask
import http.client
from keystoneclient.contrib.ec2 import utils as ec2_utils
@@ -42,8 +44,8 @@ class EC2TokensResource(EC2_S3_Resource.ResourceBase):
# NOTE(vish): Some client libraries don't use the port when
# signing requests, so try again without the port.
elif ':' in credentials['host']:
- hostname, _port = credentials.split(':')
- credentials['host'] = hostname
+ parsed = urllib.parse.urlsplit('//' + credentials['host'])
+ credentials['host'] = parsed.hostname
# NOTE(davechen): we need to reinitialize 'signer' to avoid
# contaminated status of signature, this is similar with
# other programming language libraries, JAVA for example.
diff --git a/keystone/api/os_ep_filter.py b/keystone/api/os_ep_filter.py
index c26098347..055d21deb 100644
--- a/keystone/api/os_ep_filter.py
+++ b/keystone/api/os_ep_filter.py
@@ -110,7 +110,7 @@ class EndpointGroupsResource(ks_flask.ResourceBase):
class EPFilterEndpointProjectsResource(flask_restful.Resource):
def get(self, endpoint_id):
- """"Return a list of projects associated with the endpoint."""
+ """Return a list of projects associated with the endpoint."""
ENFORCER.enforce_call(action='identity:list_projects_for_endpoint')
PROVIDERS.catalog_api.get_endpoint(endpoint_id)
refs = PROVIDERS.catalog_api.list_projects_for_endpoint(endpoint_id)
diff --git a/keystone/api/os_oauth2.py b/keystone/api/os_oauth2.py
new file mode 100644
index 000000000..81f3dbd3d
--- /dev/null
+++ b/keystone/api/os_oauth2.py
@@ -0,0 +1,390 @@
+# Copyright 2022 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import flask
+from flask import make_response
+import http.client
+from oslo_log import log
+from oslo_serialization import jsonutils
+
+from keystone.api._shared import authentication
+from keystone.api._shared import json_home_relations
+from keystone.common import provider_api
+from keystone.common import utils
+from keystone.conf import CONF
+from keystone import exception
+from keystone.federation import utils as federation_utils
+from keystone.i18n import _
+from keystone.server import flask as ks_flask
+
+LOG = log.getLogger(__name__)
+
+PROVIDERS = provider_api.ProviderAPIs
+
+_build_resource_relation = json_home_relations.os_oauth2_resource_rel_func
+
+
+class AccessTokenResource(ks_flask.ResourceBase):
+
+ def _method_not_allowed(self):
+ """Raise a method not allowed error."""
+ raise exception.OAuth2OtherError(
+ int(http.client.METHOD_NOT_ALLOWED),
+ http.client.responses[http.client.METHOD_NOT_ALLOWED],
+ _('The method is not allowed for the requested URL.'))
+
+ @ks_flask.unenforced_api
+ def get(self):
+ """The method is not allowed."""
+ self._method_not_allowed()
+
+ @ks_flask.unenforced_api
+ def head(self):
+ """The method is not allowed."""
+ self._method_not_allowed()
+
+ @ks_flask.unenforced_api
+ def put(self):
+ """The method is not allowed."""
+ self._method_not_allowed()
+
+ @ks_flask.unenforced_api
+ def patch(self):
+ """The method is not allowed."""
+ self._method_not_allowed()
+
+ @ks_flask.unenforced_api
+ def delete(self):
+ """The method is not allowed."""
+ self._method_not_allowed()
+
+ @ks_flask.unenforced_api
+ def post(self):
+ """Get an OAuth2.0 Access Token.
+
+ POST /v3/OS-OAUTH2/token
+ """
+ grant_type = flask.request.form.get('grant_type')
+ if grant_type is None:
+ error = exception.OAuth2InvalidRequest(
+ int(http.client.BAD_REQUEST),
+ http.client.responses[http.client.BAD_REQUEST],
+ _('The parameter grant_type is required.'))
+ LOG.info('Get OAuth2.0 Access Token API: '
+ f'{error.message_format}')
+ raise error
+ if grant_type != 'client_credentials':
+ error = exception.OAuth2UnsupportedGrantType(
+ int(http.client.BAD_REQUEST),
+ http.client.responses[http.client.BAD_REQUEST],
+ _('The parameter grant_type %s is not supported.'
+ ) % grant_type)
+ LOG.info('Get OAuth2.0 Access Token API: '
+ f'{error.message_format}')
+ raise error
+
+ auth_method = ''
+ client_id = flask.request.form.get('client_id')
+ client_secret = flask.request.form.get('client_secret')
+ client_cert = flask.request.environ.get("SSL_CLIENT_CERT")
+ client_auth = flask.request.authorization
+ if not client_cert and client_auth and client_auth.type == 'basic':
+ client_id = client_auth.username
+ client_secret = client_auth.password
+
+ if not client_id:
+ error = exception.OAuth2InvalidClient(
+ int(http.client.UNAUTHORIZED),
+ http.client.responses[http.client.UNAUTHORIZED],
+ _('Client authentication failed.'))
+ LOG.info('Get OAuth2.0 Access Token API: '
+ 'failed to get a client_id from the request.')
+ raise error
+ if client_cert:
+ auth_method = 'tls_client_auth'
+ elif client_secret:
+ auth_method = 'client_secret_basic'
+
+ if auth_method in CONF.oauth2.oauth2_authn_methods:
+ if auth_method == 'tls_client_auth':
+ return self._tls_client_auth(client_id, client_cert)
+ if auth_method == 'client_secret_basic':
+ return self._client_secret_basic(client_id, client_secret)
+
+ error = exception.OAuth2InvalidClient(
+ int(http.client.UNAUTHORIZED),
+ http.client.responses[http.client.UNAUTHORIZED],
+ _('Client authentication failed.'))
+ LOG.info('Get OAuth2.0 Access Token API: '
+ 'failed to get client credentials from the request.')
+ raise error
+
+ def _client_secret_basic(self, client_id, client_secret):
+ """Get an OAuth2.0 basic Access Token."""
+ auth_data = {
+ 'identity': {
+ 'methods': ['application_credential'],
+ 'application_credential': {
+ 'id': client_id,
+ 'secret': client_secret
+ }
+ }
+ }
+ try:
+ token = authentication.authenticate_for_token(auth_data)
+ except exception.Error as error:
+ if error.code == 401:
+ error = exception.OAuth2InvalidClient(
+ error.code, error.title,
+ str(error))
+ elif error.code == 400:
+ error = exception.OAuth2InvalidRequest(
+ error.code, error.title,
+ str(error))
+ else:
+ error = exception.OAuth2OtherError(
+ error.code, error.title,
+ 'An unknown error occurred and failed to get an OAuth2.0 '
+ 'access token.')
+ LOG.exception(error)
+ raise error
+ except Exception as error:
+ error = exception.OAuth2OtherError(
+ int(http.client.INTERNAL_SERVER_ERROR),
+ http.client.responses[http.client.INTERNAL_SERVER_ERROR],
+ str(error))
+ LOG.exception(error)
+ raise error
+
+ resp = make_response({
+ 'access_token': token.id,
+ 'token_type': 'Bearer',
+ 'expires_in': CONF.token.expiration
+ })
+ resp.status = '200 OK'
+ return resp
+
+ def _check_mapped_properties(self, cert_dn, user, user_domain):
+ mapping_id = CONF.oauth2.get('oauth2_cert_dn_mapping_id')
+ try:
+ mapping = PROVIDERS.federation_api.get_mapping(mapping_id)
+ except exception.MappingNotFound:
+ error = exception.OAuth2InvalidClient(
+ int(http.client.UNAUTHORIZED),
+ http.client.responses[http.client.UNAUTHORIZED],
+ _('Client authentication failed.'))
+ LOG.info('Get OAuth2.0 Access Token API: '
+ 'mapping id %s is not found. ',
+ mapping_id)
+ raise error
+
+ rule_processor = federation_utils.RuleProcessor(
+ mapping.get('id'), mapping.get('rules'))
+ try:
+ mapped_properties = rule_processor.process(cert_dn)
+ except exception.Error as error:
+ LOG.exception(error)
+ error = exception.OAuth2InvalidClient(
+ int(http.client.UNAUTHORIZED),
+ http.client.responses[http.client.UNAUTHORIZED],
+ _('Client authentication failed.'))
+ LOG.info('Get OAuth2.0 Access Token API: '
+ 'mapping rule process failed. '
+ 'mapping_id: %s, rules: %s, data: %s.',
+ mapping_id, mapping.get('rules'),
+ jsonutils.dumps(cert_dn))
+ raise error
+ except Exception as error:
+ LOG.exception(error)
+ error = exception.OAuth2OtherError(
+ int(http.client.INTERNAL_SERVER_ERROR),
+ http.client.responses[http.client.INTERNAL_SERVER_ERROR],
+ str(error))
+ LOG.info('Get OAuth2.0 Access Token API: '
+ 'mapping rule process failed. '
+ 'mapping_id: %s, rules: %s, data: %s.',
+ mapping_id, mapping.get('rules'),
+ jsonutils.dumps(cert_dn))
+ raise error
+
+ mapping_user = mapped_properties.get('user', {})
+ mapping_user_name = mapping_user.get('name')
+ mapping_user_id = mapping_user.get('id')
+ mapping_user_email = mapping_user.get('email')
+ mapping_domain = mapping_user.get('domain', {})
+ mapping_user_domain_id = mapping_domain.get('id')
+ mapping_user_domain_name = mapping_domain.get('name')
+ if mapping_user_name and mapping_user_name != user.get('name'):
+ error = exception.OAuth2InvalidClient(
+ int(http.client.UNAUTHORIZED),
+ http.client.responses[http.client.UNAUTHORIZED],
+ _('Client authentication failed.'))
+ LOG.info('Get OAuth2.0 Access Token API: %s check failed. '
+ 'DN value: %s, DB value: %s.',
+ 'user name', mapping_user_name, user.get('name'))
+ raise error
+ if mapping_user_id and mapping_user_id != user.get('id'):
+ error = exception.OAuth2InvalidClient(
+ int(http.client.UNAUTHORIZED),
+ http.client.responses[http.client.UNAUTHORIZED],
+ _('Client authentication failed.'))
+ LOG.info('Get OAuth2.0 Access Token API: %s check failed. '
+ 'DN value: %s, DB value: %s.',
+ 'user id', mapping_user_id, user.get('id'))
+ raise error
+ if mapping_user_email and mapping_user_email != user.get('email'):
+ error = exception.OAuth2InvalidClient(
+ int(http.client.UNAUTHORIZED),
+ http.client.responses[http.client.UNAUTHORIZED],
+ _('Client authentication failed.'))
+ LOG.info('Get OAuth2.0 Access Token API: %s check failed. '
+ 'DN value: %s, DB value: %s.',
+ 'user email', mapping_user_email, user.get('email'))
+ raise error
+ if (mapping_user_domain_id and
+ mapping_user_domain_id != user_domain.get('id')):
+ error = exception.OAuth2InvalidClient(
+ int(http.client.UNAUTHORIZED),
+ http.client.responses[http.client.UNAUTHORIZED],
+ _('Client authentication failed.'))
+ LOG.info('Get OAuth2.0 Access Token API: %s check failed. '
+ 'DN value: %s, DB value: %s.',
+ 'user domain id', mapping_user_domain_id,
+ user_domain.get('id'))
+ raise error
+ if (mapping_user_domain_name and
+ mapping_user_domain_name != user_domain.get('name')):
+ error = exception.OAuth2InvalidClient(
+ int(http.client.UNAUTHORIZED),
+ http.client.responses[http.client.UNAUTHORIZED],
+ _('Client authentication failed.'))
+ LOG.info('Get OAuth2.0 Access Token API: %s check failed. '
+ 'DN value: %s, DB value: %s.',
+ 'user domain name', mapping_user_domain_name,
+ user_domain.get('name'))
+ raise error
+
+ def _tls_client_auth(self, client_id, client_cert):
+ """Get an OAuth2.0 certificate-bound Access Token."""
+ try:
+ cert_subject_dn = utils.get_certificate_subject_dn(client_cert)
+ except exception.ValidationError:
+ error = exception.OAuth2InvalidClient(
+ int(http.client.UNAUTHORIZED),
+ http.client.responses[http.client.UNAUTHORIZED],
+ _('Client authentication failed.'))
+ LOG.info('Get OAuth2.0 Access Token API: '
+ 'failed to get the subject DN from the certificate.')
+ raise error
+ try:
+ cert_issuer_dn = utils.get_certificate_issuer_dn(client_cert)
+ except exception.ValidationError:
+ error = exception.OAuth2InvalidClient(
+ int(http.client.UNAUTHORIZED),
+ http.client.responses[http.client.UNAUTHORIZED],
+ _('Client authentication failed.'))
+ LOG.info('Get OAuth2.0 Access Token API: '
+ 'failed to get the issuer DN from the certificate.')
+ raise error
+ client_cert_dn = {}
+ for key in cert_subject_dn:
+ client_cert_dn['SSL_CLIENT_SUBJECT_DN_%s' %
+ key.upper()] = cert_subject_dn.get(key)
+ for key in cert_issuer_dn:
+ client_cert_dn['SSL_CLIENT_ISSUER_DN_%s' %
+ key.upper()] = cert_issuer_dn.get(key)
+
+ try:
+ user = PROVIDERS.identity_api.get_user(client_id)
+ except exception.UserNotFound:
+ error = exception.OAuth2InvalidClient(
+ int(http.client.UNAUTHORIZED),
+ http.client.responses[http.client.UNAUTHORIZED],
+ _('Client authentication failed.'))
+ LOG.info('Get OAuth2.0 Access Token API: '
+ 'the user does not exist. user id: %s.',
+ client_id)
+ raise error
+ project_id = user.get('default_project_id')
+ if not project_id:
+ error = exception.OAuth2InvalidClient(
+ int(http.client.UNAUTHORIZED),
+ http.client.responses[http.client.UNAUTHORIZED],
+ _('Client authentication failed.'))
+ LOG.info('Get OAuth2.0 Access Token API: '
+ 'the user does not have default project. user id: %s.',
+ client_id)
+ raise error
+
+ user_domain = PROVIDERS.resource_api.get_domain(
+ user.get('domain_id'))
+ self._check_mapped_properties(client_cert_dn, user, user_domain)
+ thumbprint = utils.get_certificate_thumbprint(client_cert)
+ LOG.debug(f'The mTLS certificate thumbprint: {thumbprint}')
+ try:
+ token = PROVIDERS.token_provider_api.issue_token(
+ user_id=client_id,
+ method_names=['oauth2_credential'],
+ project_id=project_id,
+ thumbprint=thumbprint
+ )
+ except exception.Error as error:
+ if error.code == 401:
+ error = exception.OAuth2InvalidClient(
+ error.code, error.title,
+ str(error))
+ elif error.code == 400:
+ error = exception.OAuth2InvalidRequest(
+ error.code, error.title,
+ str(error))
+ else:
+ error = exception.OAuth2OtherError(
+ error.code, error.title,
+ 'An unknown error occurred and failed to get an OAuth2.0 '
+ 'access token.')
+ LOG.exception(error)
+ raise error
+ except Exception as error:
+ error = exception.OAuth2OtherError(
+ int(http.client.INTERNAL_SERVER_ERROR),
+ http.client.responses[http.client.INTERNAL_SERVER_ERROR],
+ str(error))
+ LOG.exception(error)
+ raise error
+
+ resp = make_response({
+ 'access_token': token.id,
+ 'token_type': 'Bearer',
+ 'expires_in': CONF.token.expiration
+ })
+ resp.status = '200 OK'
+ return resp
+
+
+class OSAuth2API(ks_flask.APIBase):
+ _name = 'OS-OAUTH2'
+ _import_name = __name__
+ _api_url_prefix = '/OS-OAUTH2'
+
+ resource_mapping = [
+ ks_flask.construct_resource_map(
+ resource=AccessTokenResource,
+ url='/token',
+ rel='token',
+ resource_kwargs={},
+ resource_relation_func=_build_resource_relation
+ )]
+
+
+APIs = (OSAuth2API,)
diff --git a/keystone/cmd/cli.py b/keystone/cmd/cli.py
index 1e866d76a..ad65b2622 100644
--- a/keystone/cmd/cli.py
+++ b/keystone/cmd/cli.py
@@ -281,61 +281,53 @@ class DbSync(BaseApp):
except db_exception.DBMigrationError:
LOG.info(
'Your database is not currently under version '
- 'control or the database is already controlled. Your '
- 'first step is to run `keystone-manage db_sync --expand`.'
+ 'control or the database is already controlled. '
+ 'Your first step is to run `keystone-manage db_sync --expand`.'
)
return 2
- try:
- migrate_version = upgrades.get_db_version(
- branch='data_migration')
- except db_exception.DBMigrationError:
- migrate_version = 0
+ if isinstance(expand_version, int):
+ # we're still using sqlalchemy-migrate
+ LOG.info(
+ 'Your database is currently using legacy version control. '
+ 'Your first step is to run `keystone-manage db_sync --expand`.'
+ )
+ return 2
try:
contract_version = upgrades.get_db_version(branch='contract')
except db_exception.DBMigrationError:
- contract_version = 0
+ contract_version = None
- migration_script_version = upgrades.LATEST_VERSION
+ heads = upgrades.get_current_heads()
if (
- contract_version > migrate_version or
- migrate_version > expand_version
+ upgrades.EXPAND_BRANCH not in heads or
+ heads[upgrades.EXPAND_BRANCH] != expand_version
):
- LOG.info('Your database is out of sync. For more information '
- 'refer to https://docs.openstack.org/keystone/'
- 'latest/admin/identity-upgrading.html')
- status = 1
- elif migration_script_version > expand_version:
LOG.info('Your database is not up to date. Your first step is '
'to run `keystone-manage db_sync --expand`.')
status = 2
- elif expand_version > migrate_version:
- LOG.info('Expand version is ahead of migrate. Your next step '
- 'is to run `keystone-manage db_sync --migrate`.')
- status = 3
- elif migrate_version > contract_version:
- LOG.info('Migrate version is ahead of contract. Your next '
- 'step is to run `keystone-manage db_sync --contract`.')
- status = 4
elif (
- migration_script_version == expand_version == migrate_version ==
- contract_version
+ upgrades.CONTRACT_BRANCH not in heads or
+ heads[upgrades.CONTRACT_BRANCH] != contract_version
):
+ LOG.info('Expand version is ahead of contract. Your next '
+ 'step is to run `keystone-manage db_sync --contract`.')
+ status = 4
+ else:
LOG.info('All db_sync commands are upgraded to the same '
'version and up-to-date.')
+
LOG.info(
- 'The latest installed migration script version is: %(script)d.\n'
'Current repository versions:\n'
- 'Expand: %(expand)d\n'
- 'Migrate: %(migrate)d\n'
- 'Contract: %(contract)d',
+ 'Expand: %(expand)s (head: %(expand_head)s)\n'
+ 'Contract: %(contract)s (head: %(contract_head)s)',
{
- 'script': migration_script_version,
'expand': expand_version,
- 'migrate': migrate_version,
+ 'expand_head': heads.get(upgrades.EXPAND_BRANCH),
'contract': contract_version,
+ 'contract_head': heads.get(upgrades.CONTRACT_BRANCH),
},
)
return status
diff --git a/keystone/cmd/doctor/database.py b/keystone/cmd/doctor/database.py
index e0def5d63..95c5bdd87 100644
--- a/keystone/cmd/doctor/database.py
+++ b/keystone/cmd/doctor/database.py
@@ -23,7 +23,7 @@ def symptom_database_connection_is_not_SQLite():
migrations, making it unsuitable for use in keystone. Please change your
`keystone.conf [database] connection` value to point to a supported
database driver, such as MySQL.
- """
+ """ # noqa: D403
return (
CONF.database.connection is not None
and 'sqlite' in CONF.database.connection)
diff --git a/keystone/common/password_hashing.py b/keystone/common/password_hashing.py
index 4e62d9c38..b38d3cba7 100644
--- a/keystone/common/password_hashing.py
+++ b/keystone/common/password_hashing.py
@@ -57,8 +57,26 @@ def _get_hasher_from_ident(hashed):
def verify_length_and_trunc_password(password):
- """Verify and truncate the provided password to the max_password_length."""
- max_length = CONF.identity.max_password_length
+ """Verify and truncate the provided password to the max_password_length.
+
+ We also need to check that the configured password hashing algorithm does
+ not silently truncate the password. For example, passlib.hash.bcrypt does
+ this:
+ https://passlib.readthedocs.io/en/stable/lib/passlib.hash.bcrypt.html#security-issues
+
+ """
+ # When using bcrypt, we limit the password length to 54 to ensure all
+ # bytes are fully mixed. See:
+ # https://passlib.readthedocs.io/en/stable/lib/passlib.hash.bcrypt.html#security-issues
+ BCRYPT_MAX_LENGTH = 54
+ if (CONF.identity.password_hash_algorithm == 'bcrypt' and # nosec: B105
+ CONF.identity.max_password_length > BCRYPT_MAX_LENGTH):
+ msg = "Truncating password to algorithm specific maximum length %d characters."
+ LOG.warning(msg, BCRYPT_MAX_LENGTH)
+ max_length = BCRYPT_MAX_LENGTH
+ else:
+ max_length = CONF.identity.max_password_length
+
try:
if len(password) > max_length:
if CONF.strict_password_check:
diff --git a/keystone/common/render_token.py b/keystone/common/render_token.py
index 320260b1f..4a84f5c0c 100644
--- a/keystone/common/render_token.py
+++ b/keystone/common/render_token.py
@@ -142,5 +142,9 @@ def render_token_response_from_model(token, include_catalog=True):
token_reference['token'][key]['access_rules'] = (
token.application_credential['access_rules']
)
+ if token.oauth2_thumbprint:
+ token_reference['token']['oauth2_credential'] = {
+ 'x5t#S256': token.oauth2_thumbprint
+ }
return token_reference
diff --git a/keystone/common/sql/migrations/env.py b/keystone/common/sql/migrations/env.py
index 2d116f1bd..f5547a4e4 100644
--- a/keystone/common/sql/migrations/env.py
+++ b/keystone/common/sql/migrations/env.py
@@ -59,15 +59,24 @@ def run_migrations_online():
In this scenario we need to create an Engine and associate a connection
with the context.
"""
- connectable = engine_from_config(
- config.get_section(config.config_ini_section),
- prefix="sqlalchemy.",
- poolclass=pool.NullPool,
- )
+ connectable = config.attributes.get('connection', None)
+
+ if connectable is None:
+ # only create Engine if we don't have a Connection from the outside
+ connectable = engine_from_config(
+ config.get_section(config.config_ini_section),
+ prefix="sqlalchemy.",
+ poolclass=pool.NullPool,
+ )
+
+ # when connectable is already a Connection object, calling connect() gives
+ # us a *branched connection*.
with connectable.connect() as connection:
context.configure(
- connection=connection, target_metadata=target_metadata
+ connection=connection,
+ target_metadata=target_metadata,
+ render_as_batch=True,
)
with context.begin_transaction():
diff --git a/keystone/common/sql/migrations/versions/27e647c0fad4_initial_version.py b/keystone/common/sql/migrations/versions/27e647c0fad4_initial_version.py
index c57cdf13d..0f4994903 100644
--- a/keystone/common/sql/migrations/versions/27e647c0fad4_initial_version.py
+++ b/keystone/common/sql/migrations/versions/27e647c0fad4_initial_version.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""Initial version
+"""Initial version.
Revision ID: 27e647c0fad4
Revises:
@@ -1100,7 +1100,7 @@ def upgrade():
bind = op.get_bind()
meta = sql.MetaData()
- project = sql.Table('project', meta, autoload_with=bind.engine)
+ project = sql.Table('project', meta, autoload_with=bind)
root_domain_project = _generate_root_domain_project()
op.execute(project.insert().values(**root_domain_project))
diff --git a/keystone/common/sql/upgrades.py b/keystone/common/sql/upgrades.py
index f463771f2..a075716e9 100644
--- a/keystone/common/sql/upgrades.py
+++ b/keystone/common/sql/upgrades.py
@@ -16,24 +16,47 @@
import os
+from alembic import command as alembic_api
+from alembic import config as alembic_config
+from alembic import migration as alembic_migration
+from alembic import script as alembic_script
from migrate import exceptions as migrate_exceptions
from migrate.versioning import api as migrate_api
from migrate.versioning import repository as migrate_repository
from oslo_db import exception as db_exception
-import sqlalchemy as sa
+from oslo_log import log as logging
from keystone.common import sql
-from keystone import exception
-from keystone.i18n import _
+import keystone.conf
+
+CONF = keystone.conf.CONF
+LOG = logging.getLogger(__name__)
+
+ALEMBIC_INIT_VERSION = '27e647c0fad4'
+MIGRATE_INIT_VERSION = 72
-INITIAL_VERSION = 72
-LATEST_VERSION = 79
EXPAND_BRANCH = 'expand'
DATA_MIGRATION_BRANCH = 'data_migration'
CONTRACT_BRANCH = 'contract'
+RELEASES = (
+ 'yoga',
+)
+MIGRATION_BRANCHES = (EXPAND_BRANCH, CONTRACT_BRANCH)
+VERSIONS_PATH = os.path.join(
+ os.path.dirname(sql.__file__),
+ 'migrations',
+ 'versions',
+)
+
-def _get_migrate_repo_path(branch):
+def _find_migrate_repo(branch):
+ """Get the project's change script repository.
+
+ :param branch: Name of the repository "branch" to be used; this will be
+ transformed to repository path.
+ :returns: An instance of ``migrate.versioning.repository.Repository``
+ """
abs_path = os.path.abspath(
os.path.join(
os.path.dirname(sql.__file__),
@@ -41,203 +64,273 @@ def _get_migrate_repo_path(branch):
f'{branch}_repo',
)
)
+ if not os.path.exists(abs_path):
+ raise db_exception.DBMigrationError("Path %s not found" % abs_path)
+ return migrate_repository.Repository(abs_path)
- if not os.path.isdir(abs_path):
- raise exception.MigrationNotProvided(sql.__name__, abs_path)
- return abs_path
+def _find_alembic_conf():
+ """Get the project's alembic configuration.
+ :returns: An instance of ``alembic.config.Config``
+ """
+ path = os.path.join(
+ os.path.abspath(os.path.dirname(__file__)), 'alembic.ini',
+ )
-def _find_migrate_repo(abs_path):
- """Get the project's change script repository
+ config = alembic_config.Config(os.path.abspath(path))
- :param abs_path: Absolute path to migrate repository
- """
- if not os.path.exists(abs_path):
- raise db_exception.DBMigrationError("Path %s not found" % abs_path)
- return migrate_repository.Repository(abs_path)
+ config.set_main_option('sqlalchemy.url', CONF.database.connection)
+ # we don't want to use the logger configuration from the file, which is
+ # only really intended for the CLI
+ # https://stackoverflow.com/a/42691781/613428
+ config.attributes['configure_logger'] = False
-def _migrate_db_version_control(engine, abs_path, version=None):
- """Mark a database as under this repository's version control.
+ # we want to scan all the versioned subdirectories
+ version_paths = [VERSIONS_PATH]
+ for release in RELEASES:
+ for branch in MIGRATION_BRANCHES:
+ version_path = os.path.join(VERSIONS_PATH, release, branch)
+ version_paths.append(version_path)
+ config.set_main_option('version_locations', ' '.join(version_paths))
- Once a database is under version control, schema changes should
- only be done via change scripts in this repository.
+ return config
- :param engine: SQLAlchemy engine instance for a given database
- :param abs_path: Absolute path to migrate repository
- :param version: Initial database version
- """
- repository = _find_migrate_repo(abs_path)
- try:
- migrate_api.version_control(engine, repository, version)
- except migrate_exceptions.InvalidVersionError as ex:
- raise db_exception.DBMigrationError("Invalid version : %s" % ex)
- except migrate_exceptions.DatabaseAlreadyControlledError:
- raise db_exception.DBMigrationError("Database is already controlled.")
+def _get_current_heads(engine, config):
+ script = alembic_script.ScriptDirectory.from_config(config)
- return version
+ with engine.connect() as conn:
+ context = alembic_migration.MigrationContext.configure(conn)
+ heads = context.get_current_heads()
+ heads_map = {}
-def _migrate_db_version(engine, abs_path, init_version):
- """Show the current version of the repository.
+ for head in heads:
+ if CONTRACT_BRANCH in script.get_revision(head).branch_labels:
+ heads_map[CONTRACT_BRANCH] = head
+ else:
+ heads_map[EXPAND_BRANCH] = head
- :param engine: SQLAlchemy engine instance for a given database
- :param abs_path: Absolute path to migrate repository
- :param init_version: Initial database version
- """
- repository = _find_migrate_repo(abs_path)
- try:
- return migrate_api.db_version(engine, repository)
- except migrate_exceptions.DatabaseNotControlledError:
- pass
+ return heads_map
- meta = sa.MetaData()
- meta.reflect(bind=engine)
- tables = meta.tables
- if (
- len(tables) == 0 or
- 'alembic_version' in tables or
- 'migrate_version' in tables
- ):
- _migrate_db_version_control(engine, abs_path, version=init_version)
- return migrate_api.db_version(engine, repository)
- msg = _(
- "The database is not under version control, but has tables. "
- "Please stamp the current version of the schema manually."
- )
- raise db_exception.DBMigrationError(msg)
+def get_current_heads():
+ """Get the current head of each the expand and contract branches."""
+ config = _find_alembic_conf()
+ with sql.session_for_read() as session:
+ engine = session.get_bind()
-def _migrate_db_sync(engine, abs_path, version=None, init_version=0):
- """Upgrade or downgrade a database.
+ # discard the URL encoded in alembic.ini in favour of the URL
+ # configured for the engine by the database fixtures, casting from
+ # 'sqlalchemy.engine.url.URL' to str in the process. This returns a
+ # RFC-1738 quoted URL, which means that a password like "foo@" will be
+ # turned into "foo%40". This in turns causes a problem for
+ # set_main_option() because that uses ConfigParser.set, which (by
+ # design) uses *python* interpolation to write the string out ... where
+ # "%" is the special python interpolation character! Avoid this
+ # mismatch by quoting all %'s for the set below.
+ engine_url = str(engine.url).replace('%', '%%')
+ config.set_main_option('sqlalchemy.url', str(engine_url))
- Function runs the upgrade() or downgrade() functions in change scripts.
+ heads = _get_current_heads(engine, config)
- :param engine: SQLAlchemy engine instance for a given database
- :param abs_path: Absolute path to migrate repository.
- :param version: Database will upgrade/downgrade until this version.
- If None - database will update to the latest available version.
- :param init_version: Initial database version
- """
+ return heads
- if version is not None:
- try:
- version = int(version)
- except ValueError:
- msg = _("version should be an integer")
- raise db_exception.DBMigrationError(msg)
- current_version = _migrate_db_version(engine, abs_path, init_version)
- repository = _find_migrate_repo(abs_path)
+def _is_database_under_migrate_control(engine):
+ # if any of the repos is present, they're all present (in theory, at least)
+ repository = _find_migrate_repo('expand')
+ try:
+ migrate_api.db_version(engine, repository)
+ return True
+ except migrate_exceptions.DatabaseNotControlledError:
+ return False
- if version is None or version > current_version:
- try:
- return migrate_api.upgrade(engine, repository, version)
- except Exception as ex:
- raise db_exception.DBMigrationError(ex)
- else:
- return migrate_api.downgrade(engine, repository, version)
+def _is_database_under_alembic_control(engine):
+ with engine.connect() as conn:
+ context = alembic_migration.MigrationContext.configure(conn)
+ return bool(context.get_current_heads())
-def get_db_version(branch=EXPAND_BRANCH):
- abs_path = _get_migrate_repo_path(branch)
- with sql.session_for_read() as session:
- return _migrate_db_version(
- session.get_bind(),
- abs_path,
- INITIAL_VERSION,
- )
+def _init_alembic_on_legacy_database(engine, config):
+ """Init alembic in an existing environment with sqlalchemy-migrate."""
+ LOG.info(
+ 'The database is still under sqlalchemy-migrate control; '
+ 'applying any remaining sqlalchemy-migrate-based migrations '
+ 'and fake applying the initial alembic migration'
+ )
-def _db_sync(branch):
- abs_path = _get_migrate_repo_path(branch)
- with sql.session_for_write() as session:
- engine = session.get_bind()
- _migrate_db_sync(
- engine=engine,
- abs_path=abs_path,
- init_version=INITIAL_VERSION,
- )
+ # bring all repos up to date; note that we're relying on the fact that
+ # there aren't any "real" contract migrations left (since the great squash
+ # of migrations in yoga) so we're really only applying the expand side of
+ # '079_expand_update_local_id_limit' and the rest are for completeness'
+ # sake
+ for branch in (EXPAND_BRANCH, DATA_MIGRATION_BRANCH, CONTRACT_BRANCH):
+ repository = _find_migrate_repo(branch or 'expand')
+ migrate_api.upgrade(engine, repository)
+
+ # re-use the connection rather than creating a new one
+ with engine.begin() as connection:
+ config.attributes['connection'] = connection
+ alembic_api.stamp(config, ALEMBIC_INIT_VERSION)
+
+
+def _upgrade_alembic(engine, config, branch):
+ revision = 'heads'
+ if branch:
+ revision = f'{branch}@head'
+
+ # re-use the connection rather than creating a new one
+ with engine.begin() as connection:
+ config.attributes['connection'] = connection
+ alembic_api.upgrade(config, revision)
+
+
+def get_db_version(branch=EXPAND_BRANCH, *, engine=None):
+ config = _find_alembic_conf()
+
+ if engine is None:
+ with sql.session_for_read() as session:
+ engine = session.get_bind()
+
+ # discard the URL encoded in alembic.ini in favour of the URL
+ # configured for the engine by the database fixtures, casting from
+ # 'sqlalchemy.engine.url.URL' to str in the process. This returns a
+ # RFC-1738 quoted URL, which means that a password like "foo@" will be
+ # turned into "foo%40". This in turns causes a problem for
+ # set_main_option() because that uses ConfigParser.set, which (by
+ # design) uses *python* interpolation to write the string out ... where
+ # "%" is the special python interpolation character! Avoid this
+ # mismatch by quoting all %'s for the set below.
+ engine_url = str(engine.url).replace('%', '%%')
+ config.set_main_option('sqlalchemy.url', str(engine_url))
+
+ migrate_version = None
+ if _is_database_under_migrate_control(engine):
+ repository = _find_migrate_repo(branch)
+ migrate_version = migrate_api.db_version(engine, repository)
+
+ alembic_version = None
+ if _is_database_under_alembic_control(engine):
+ # we use '.get' since the particular branch might not have been created
+ alembic_version = _get_current_heads(engine, config).get(branch)
+
+ return alembic_version or migrate_version
+
+
+def _db_sync(branch=None, *, engine=None):
+ config = _find_alembic_conf()
+
+ if engine is None:
+ with sql.session_for_write() as session:
+ engine = session.get_bind()
+
+ # discard the URL encoded in alembic.ini in favour of the URL
+ # configured for the engine by the database fixtures, casting from
+ # 'sqlalchemy.engine.url.URL' to str in the process. This returns a
+ # RFC-1738 quoted URL, which means that a password like "foo@" will be
+ # turned into "foo%40". This in turns causes a problem for
+ # set_main_option() because that uses ConfigParser.set, which (by
+ # design) uses *python* interpolation to write the string out ... where
+ # "%" is the special python interpolation character! Avoid this
+ # mismatch by quoting all %'s for the set below.
+ engine_url = str(engine.url).replace('%', '%%')
+ config.set_main_option('sqlalchemy.url', str(engine_url))
+
+ # if we're in a deployment where sqlalchemy-migrate is already present,
+ # then apply all the updates for that and fake apply the initial
+ # alembic migration; if we're not then 'upgrade' will take care of
+    # everything; this should be a one-time operation
+ if (
+ not _is_database_under_alembic_control(engine) and
+ _is_database_under_migrate_control(engine)
+ ):
+ _init_alembic_on_legacy_database(engine, config)
+
+ _upgrade_alembic(engine, config, branch)
-def _validate_upgrade_order(branch, target_repo_version=None):
- """Validate the state of the migration repositories.
+def _validate_upgrade_order(branch, *, engine=None):
+ """Validate the upgrade order of the migration branches.
This is run before allowing the db_sync command to execute. Ensure the
- upgrade step and version specified by the operator remains consistent with
- the upgrade process. I.e. expand's version is greater or equal to
- migrate's, migrate's version is greater or equal to contract's.
-
- :param branch: The name of the repository that the user is trying to
- upgrade.
- :param target_repo_version: The version to upgrade the repo. Otherwise, the
- version will be upgraded to the latest version
- available.
- """
- # Initialize a dict to have each key assigned a repo with their value being
- # the repo that comes before.
- db_sync_order = {
- DATA_MIGRATION_BRANCH: EXPAND_BRANCH,
- CONTRACT_BRANCH: DATA_MIGRATION_BRANCH,
- }
+ expand steps have been run before the contract steps.
+ :param branch: The name of the branch that the user is trying to
+ upgrade.
+ """
if branch == EXPAND_BRANCH:
return
- # find the latest version that the current command will upgrade to if there
- # wasn't a version specified for upgrade.
- if not target_repo_version:
- abs_path = _get_migrate_repo_path(branch)
- repo = _find_migrate_repo(abs_path)
- target_repo_version = int(repo.latest)
+ if branch == DATA_MIGRATION_BRANCH:
+ # this is a no-op in alembic land
+ return
+
+ config = _find_alembic_conf()
+
+ if engine is None:
+ with sql.session_for_read() as session:
+ engine = session.get_bind()
+
+ script = alembic_script.ScriptDirectory.from_config(config)
+ expand_head = None
+ for head in script.get_heads():
+ if EXPAND_BRANCH in script.get_revision(head).branch_labels:
+ expand_head = head
+ break
- # get current version of the command that runs before the current command.
- dependency_repo_version = get_db_version(branch=db_sync_order[branch])
+ with engine.connect() as conn:
+ context = alembic_migration.MigrationContext.configure(conn)
+ current_heads = context.get_current_heads()
- if dependency_repo_version < target_repo_version:
+ if expand_head not in current_heads:
raise db_exception.DBMigrationError(
- 'You are attempting to upgrade %s ahead of %s. Please refer to '
+ 'You are attempting to upgrade contract ahead of expand. '
+ 'Please refer to '
'https://docs.openstack.org/keystone/latest/admin/'
'identity-upgrading.html '
- 'to see the proper steps for rolling upgrades.' % (
- branch, db_sync_order[branch]))
+ 'to see the proper steps for rolling upgrades.'
+ )
-def expand_schema():
+def expand_schema(engine=None):
"""Expand the database schema ahead of data migration.
This is run manually by the keystone-manage command before the first
keystone node is migrated to the latest release.
"""
- _validate_upgrade_order(EXPAND_BRANCH)
- _db_sync(branch=EXPAND_BRANCH)
+ _validate_upgrade_order(EXPAND_BRANCH, engine=engine)
+ _db_sync(EXPAND_BRANCH, engine=engine)
-def migrate_data():
+def migrate_data(engine=None):
"""Migrate data to match the new schema.
This is run manually by the keystone-manage command once the keystone
schema has been expanded for the new release.
"""
- _validate_upgrade_order(DATA_MIGRATION_BRANCH)
- _db_sync(branch=DATA_MIGRATION_BRANCH)
+ print(
+ 'Data migrations are no longer supported with alembic. '
+ 'This is now a no-op.'
+ )
-def contract_schema():
+def contract_schema(engine=None):
"""Contract the database.
This is run manually by the keystone-manage command once the keystone
nodes have been upgraded to the latest release and will remove any old
tables/columns that are no longer required.
"""
- _validate_upgrade_order(CONTRACT_BRANCH)
- _db_sync(branch=CONTRACT_BRANCH)
+ _validate_upgrade_order(CONTRACT_BRANCH, engine=engine)
+ _db_sync(CONTRACT_BRANCH, engine=engine)
-def offline_sync_database_to_version(version=None):
+def offline_sync_database_to_version(version=None, *, engine=None):
"""Perform and off-line sync of the database.
Migrate the database up to the latest version, doing the equivalent of
@@ -252,6 +345,4 @@ def offline_sync_database_to_version(version=None):
if version:
raise Exception('Specifying a version is no longer supported')
- expand_schema()
- migrate_data()
- contract_schema()
+ _db_sync(engine=engine)
diff --git a/keystone/common/utils.py b/keystone/common/utils.py
index 70d277e52..3f8088f27 100644
--- a/keystone/common/utils.py
+++ b/keystone/common/utils.py
@@ -15,7 +15,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-
+import base64
import collections.abc
import contextlib
import grp
@@ -25,6 +25,7 @@ import os
import pwd
import uuid
+from cryptography import x509
from oslo_log import log
from oslo_serialization import jsonutils
from oslo_utils import reflection
@@ -60,6 +61,14 @@ hash_user_password = password_hashing.hash_user_password
check_password = password_hashing.check_password
+# NOTE(hiromu): This dict defines alternative DN strings for X.509. When
+# retrieving a DN from an X.509 certificate, attribute types that are not
+# listed in RFC 4514 are converted to the corresponding alternative string.
+ATTR_NAME_OVERRIDES = {
+ x509.NameOID.EMAIL_ADDRESS: "emailAddress",
+}
+
+
def resource_uuid(value):
"""Convert input to valid UUID hex digits."""
try:
@@ -458,6 +467,63 @@ def check_endpoint_url(url):
raise exception.URLValidationError(url=url)
+def get_certificate_subject_dn(cert_pem):
+ """Get subject DN from the PEM certificate content.
+
+ :param str cert_pem: the PEM certificate content
+ :rtype: JSON data for subject DN
+ :raises keystone.exception.ValidationError: if the PEM certificate content
+ is invalid
+ """
+ dn_dict = {}
+ try:
+ cert = x509.load_pem_x509_certificate(cert_pem.encode('utf-8'))
+ for item in cert.subject:
+ name, value = item.rfc4514_string().split('=')
+ if item.oid in ATTR_NAME_OVERRIDES:
+ name = ATTR_NAME_OVERRIDES[item.oid]
+ dn_dict[name] = value
+ except Exception as error:
+ LOG.exception(error)
+ message = _('The certificate content is not PEM format.')
+ raise exception.ValidationError(message=message)
+ return dn_dict
+
+
+def get_certificate_issuer_dn(cert_pem):
+ """Get issuer DN from the PEM certificate content.
+
+ :param str cert_pem: the PEM certificate content
+ :rtype: JSON data for issuer DN
+ :raises keystone.exception.ValidationError: if the PEM certificate content
+ is invalid
+ """
+ dn_dict = {}
+ try:
+ cert = x509.load_pem_x509_certificate(cert_pem.encode('utf-8'))
+ for item in cert.issuer:
+ name, value = item.rfc4514_string().split('=')
+ if item.oid in ATTR_NAME_OVERRIDES:
+ name = ATTR_NAME_OVERRIDES[item.oid]
+ dn_dict[name] = value
+ except Exception as error:
+ LOG.exception(error)
+ message = _('The certificate content is not PEM format.')
+ raise exception.ValidationError(message=message)
+ return dn_dict
+
+
+def get_certificate_thumbprint(cert_pem):
+ """Get certificate thumbprint from the PEM certificate content.
+
+ :param str cert_pem: the PEM certificate content
+ :rtype: certificate thumbprint
+ """
+ thumb_sha256 = hashlib.sha256(cert_pem.encode('ascii')).digest()
+ thumbprint = base64.urlsafe_b64encode(thumb_sha256).decode('ascii')
+ return thumbprint
+
+
def create_directory(directory, keystone_user_id=None, keystone_group_id=None):
"""Attempt to create a directory if it doesn't exist.
diff --git a/keystone/conf/__init__.py b/keystone/conf/__init__.py
index 5de0ec183..de4e745d6 100644
--- a/keystone/conf/__init__.py
+++ b/keystone/conf/__init__.py
@@ -40,6 +40,7 @@ from keystone.conf import jwt_tokens
from keystone.conf import ldap
from keystone.conf import memcache
from keystone.conf import oauth1
+from keystone.conf import oauth2
from keystone.conf import policy
from keystone.conf import receipt
from keystone.conf import resource
@@ -78,6 +79,7 @@ conf_modules = [
ldap,
memcache,
oauth1,
+ oauth2,
policy,
receipt,
resource,
diff --git a/keystone/conf/default.py b/keystone/conf/default.py
index 0fa17d7a0..529a46986 100644
--- a/keystone/conf/default.py
+++ b/keystone/conf/default.py
@@ -120,8 +120,7 @@ auditing use cases.
notification_opt_out = cfg.MultiStrOpt(
'notification_opt_out',
default=["identity.authenticate.success",
- "identity.authenticate.pending",
- "identity.authenticate.failed"],
+ "identity.authenticate.pending"],
help=utils.fmt("""
You can reduce the number of notifications keystone emits by explicitly
opting out. Keystone will not emit notifications that match the patterns
diff --git a/keystone/conf/identity.py b/keystone/conf/identity.py
index 0dffe58d6..5cce78cf9 100644
--- a/keystone/conf/identity.py
+++ b/keystone/conf/identity.py
@@ -99,7 +99,11 @@ max_password_length = cfg.IntOpt(
max=passlib.utils.MAX_PASSWORD_SIZE,
help=utils.fmt("""
Maximum allowed length for user passwords. Decrease this value to improve
-performance. Changing this value does not effect existing passwords.
+performance. Changing this value does not affect existing passwords. This value
+can also be overridden by certain hashing algorithms' maximum allowed length,
+which takes precedence over the configured value.
+
+The bcrypt max_password_length is 54.
"""))
list_limit = cfg.IntOpt(
diff --git a/keystone/conf/oauth2.py b/keystone/conf/oauth2.py
new file mode 100644
index 000000000..dbe26cf59
--- /dev/null
+++ b/keystone/conf/oauth2.py
@@ -0,0 +1,52 @@
+# Copyright 2022 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_config import cfg
+
+from keystone.conf import utils
+
+oauth2_authn_methods = cfg.ListOpt(
+ 'oauth2_authn_methods',
+ default=['tls_client_auth', 'client_secret_basic'],
+ help=utils.fmt("""
+The OAuth2.0 client authentication methods supported by the system when a
+user obtains an access token through the OAuth2.0 token endpoint. Valid
+values are tls_client_auth and client_secret_basic; both are enabled by
+default. With client_secret_basic the token endpoint authenticates the
+client by its ID and secret, while with tls_client_auth it authenticates
+the client by an X.509 certificate (mutual TLS).
+"""))
+
+oauth2_cert_dn_mapping_id = cfg.StrOpt(
+ 'oauth2_cert_dn_mapping_id',
+ default='oauth2_mapping',
+ help=utils.fmt("""
+Used to define the mapping rule id. When not set, the mapping rule id is
+oauth2_mapping.
+"""))
+
+
+GROUP_NAME = __name__.split('.')[-1]
+ALL_OPTS = [
+ oauth2_authn_methods,
+ oauth2_cert_dn_mapping_id
+]
+
+
+def register_opts(conf):
+ conf.register_opts(ALL_OPTS, group=GROUP_NAME)
+
+
+def list_opts():
+ return {GROUP_NAME: ALL_OPTS}
diff --git a/keystone/exception.py b/keystone/exception.py
index c62338b89..43e55beb9 100644
--- a/keystone/exception.py
+++ b/keystone/exception.py
@@ -722,3 +722,36 @@ class ResourceDeleteForbidden(ForbiddenNotSecurity):
message_format = _('Unable to delete immutable %(type)s resource: '
'`%(resource_id)s. Set resource option "immutable" '
'to false first.')
+
+
+class OAuth2Error(Error):
+
+ def __init__(self, code, title, error_title, message):
+ self.code = code
+ self.title = title
+ self.error_title = error_title
+ self.message_format = message
+
+
+class OAuth2InvalidClient(OAuth2Error):
+ def __init__(self, code, title, message):
+ error_title = 'invalid_client'
+ super().__init__(code, title, error_title, message)
+
+
+class OAuth2InvalidRequest(OAuth2Error):
+ def __init__(self, code, title, message):
+ error_title = 'invalid_request'
+ super().__init__(code, title, error_title, message)
+
+
+class OAuth2UnsupportedGrantType(OAuth2Error):
+ def __init__(self, code, title, message):
+ error_title = 'unsupported_grant_type'
+ super().__init__(code, title, error_title, message)
+
+
+class OAuth2OtherError(OAuth2Error):
+ def __init__(self, code, title, message):
+ error_title = 'other_error'
+ super().__init__(code, title, error_title, message)
diff --git a/keystone/federation/utils.py b/keystone/federation/utils.py
index 5f53dfbb5..71e6318a4 100644
--- a/keystone/federation/utils.py
+++ b/keystone/federation/utils.py
@@ -251,7 +251,7 @@ class DirectMaps(object):
self._matches = []
def __str__(self):
- """return the direct map array as a string."""
+ """Return the direct map array as a string."""
return '%s' % self._matches
def add(self, values):
@@ -562,17 +562,31 @@ class RuleProcessor(object):
LOG.debug('mapped_properties: %s', mapped_properties)
return mapped_properties
+ def _ast_literal_eval(self, value):
+ # This is a workaround for the fact that ast.literal_eval handles the
+ # case of either a string or a list of strings, but not a potential
+ # list of ints.
+
+ try:
+ values = ast.literal_eval(value)
+ # NOTE(mnaser): It's possible that the group_names_list is a
+ # numerical value which would successfully parse
+ # and not raise an exception, so we forcefully
+ # raise is here.
+ if not isinstance(values, list):
+ raise ValueError
+ except (ValueError, SyntaxError):
+ values = [value]
+
+ return values
+
def _normalize_groups(self, identity_value):
# In this case, identity_value['groups'] is a string
# representation of a list, and we want a real list. This is
# due to the way we do direct mapping substitutions today (see
# function _update_local_mapping() )
if 'name' in identity_value['groups']:
- try:
- group_names_list = ast.literal_eval(
- identity_value['groups'])
- except (ValueError, SyntaxError):
- group_names_list = [identity_value['groups']]
+ group_names_list = self._ast_literal_eval(identity_value['groups'])
def convert_json(group):
if group.startswith('JSON:'):
@@ -594,11 +608,8 @@ class RuleProcessor(object):
"specified.")
msg = msg % {'identity_value': identity_value}
raise exception.ValidationError(msg)
- try:
- group_names_list = ast.literal_eval(
- identity_value['groups'])
- except (ValueError, SyntaxError):
- group_names_list = [identity_value['groups']]
+ group_names_list = self._ast_literal_eval(
+ identity_value['groups'])
domain = identity_value['domain']
group_dicts = [{'name': name, 'domain': domain} for name in
group_names_list]
@@ -699,11 +710,8 @@ class RuleProcessor(object):
# group_ids parameter contains only one element, it will be
# parsed as a simple string, and not a list or the
# representation of a list.
- try:
- group_ids.update(
- ast.literal_eval(identity_value['group_ids']))
- except (ValueError, SyntaxError):
- group_ids.update([identity_value['group_ids']])
+ group_ids.update(
+ self._ast_literal_eval(identity_value['group_ids']))
if 'projects' in identity_value:
projects = identity_value['projects']
diff --git a/keystone/identity/backends/ldap/common.py b/keystone/identity/backends/ldap/common.py
index e1399ec6a..7a366ef01 100644
--- a/keystone/identity/backends/ldap/common.py
+++ b/keystone/identity/backends/ldap/common.py
@@ -865,11 +865,22 @@ class PooledLDAPHandler(LDAPHandler):
cleaned up when message.clean() is called.
"""
- results = message.connection.result3(message.id, all, timeout)
-
- # Now that we have the results from the LDAP server for the message, we
- # don't need the the context manager used to create the connection.
- message.clean()
+ # message.connection.result3 might throw an exception
+ # so the code must ensure that message.clean() is invoked
+ # regardless of the result3's result. Otherwise, the
+ # connection will be marked as active forever, which
+ # ultimately renders the pool unusable, causing a DoS.
+ try:
+ results = message.connection.result3(message.id, all, timeout)
+ except Exception:
+ # We don't want to ignore thrown
+ # exceptions, raise them
+ raise
+ finally:
+ # Now that we have the results from the LDAP server for
+            # the message, we don't need the context manager used
+ # to create the connection.
+ message.clean()
return results
diff --git a/keystone/locale/de/LC_MESSAGES/keystone.po b/keystone/locale/de/LC_MESSAGES/keystone.po
index 8c3b16303..a126f83dc 100644
--- a/keystone/locale/de/LC_MESSAGES/keystone.po
+++ b/keystone/locale/de/LC_MESSAGES/keystone.po
@@ -12,7 +12,7 @@ msgid ""
msgstr ""
"Project-Id-Version: keystone VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2020-04-25 10:31+0000\n"
+"POT-Creation-Date: 2022-05-20 04:41+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -806,9 +806,6 @@ msgstr ""
"Region %(region_id)s kann nicht gelöscht werden, da sie oder ihr "
"untergeordnete Regionen über zugeordnete Endpunkte verfügen. "
-msgid "Unable to downgrade schema"
-msgstr "Das Schema konnte nicht herabgestuft werden."
-
#, python-format
msgid "Unable to locate domain config directory: %s"
msgstr "Domänenkonfigurationsverzeichnis wurde nicht gefunden: %s"
diff --git a/keystone/locale/en_GB/LC_MESSAGES/keystone.po b/keystone/locale/en_GB/LC_MESSAGES/keystone.po
index 191ed5596..160da437e 100644
--- a/keystone/locale/en_GB/LC_MESSAGES/keystone.po
+++ b/keystone/locale/en_GB/LC_MESSAGES/keystone.po
@@ -8,15 +8,16 @@
# Andi Chandler <andi@gowling.com>, 2018. #zanata
# Andi Chandler <andi@gowling.com>, 2019. #zanata
# Andi Chandler <andi@gowling.com>, 2020. #zanata
+# Andi Chandler <andi@gowling.com>, 2022. #zanata
msgid ""
msgstr ""
"Project-Id-Version: keystone VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2021-01-08 19:57+0000\n"
+"POT-Creation-Date: 2022-09-07 16:14+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"PO-Revision-Date: 2020-10-28 02:12+0000\n"
+"PO-Revision-Date: 2022-09-05 10:31+0000\n"
"Last-Translator: Andi Chandler <andi@gowling.com>\n"
"Language: en_GB\n"
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
@@ -134,15 +135,6 @@ msgstr "Access token not found"
msgid "Additional authentications steps required."
msgstr "Additional authentications steps required."
-msgid ""
-"All extensions have been moved into keystone core and as such its migrations "
-"are maintained by the main keystone database control. Use the command: "
-"keystone-manage db_sync"
-msgstr ""
-"All extensions have been moved into Keystone core and as such its migrations "
-"are maintained by the main Keystone database control. Use the command: "
-"keystone-manage db_sync"
-
msgid "An unexpected error occurred when retrieving domain configs"
msgstr "An unexpected error occurred when retrieving domain configs"
@@ -982,6 +974,16 @@ msgstr ""
"Number of User/Group entities returned by LDAP exceeded size limit. Contact "
"your LDAP administrator."
+msgid "OAuth2.0 client authorization is invalid."
+msgstr "OAuth2.0 client authorisation is invalid."
+
+msgid "OAuth2.0 client authorization is required."
+msgstr "OAuth2.0 client authorisation is required."
+
+#, python-format
+msgid "OAuth2.0 client authorization type %s is not supported."
+msgstr "OAuth2.0 client authorisation type %s is not supported."
+
msgid "One of the trust agents is disabled or deleted"
msgstr "One of the trust agents is disabled or deleted"
@@ -1213,6 +1215,9 @@ msgstr ""
"The given operator %(_op)s is not valid. It must be one of the following: "
"'eq', 'neq', 'lt', 'lte', 'gt', or 'gte'."
+msgid "The method is not allowed for the requested URL."
+msgstr "The method is not allowed for the requested URL."
+
#, python-format
msgid ""
"The new password cannot be identical to a previous password. The total "
@@ -1222,6 +1227,13 @@ msgstr ""
"number which includes the new password must be unique is %(unique_count)s."
#, python-format
+msgid "The parameter grant_type %s is not supported."
+msgstr "The parameter grant_type %s is not supported."
+
+msgid "The parameter grant_type is required."
+msgstr "The parameter grant_type is required."
+
+#, python-format
msgid "The password does not match the requirements: %(detail)s."
msgstr "The password does not match the requirements: %(detail)s."
@@ -1406,9 +1418,6 @@ msgstr ""
"Unable to delete region %(region_id)s because it or its child regions have "
"associated endpoints."
-msgid "Unable to downgrade schema"
-msgstr "Unable to downgrade schema"
-
#, python-format
msgid "Unable to establish a connection to LDAP Server (%(url)s)."
msgstr "Unable to establish a connection to LDAP Server (%(url)s)."
diff --git a/keystone/locale/es/LC_MESSAGES/keystone.po b/keystone/locale/es/LC_MESSAGES/keystone.po
index 6bce54265..d585f728a 100644
--- a/keystone/locale/es/LC_MESSAGES/keystone.po
+++ b/keystone/locale/es/LC_MESSAGES/keystone.po
@@ -15,7 +15,7 @@ msgid ""
msgstr ""
"Project-Id-Version: keystone VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2019-09-30 20:17+0000\n"
+"POT-Creation-Date: 2022-05-20 04:41+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -748,9 +748,6 @@ msgstr ""
"No se puede suprimir la región %(region_id)s porque sus regiones secundarias "
"tienen puntos finales asociados."
-msgid "Unable to downgrade schema"
-msgstr "No se ha podido degradar el esquema"
-
#, python-format
msgid "Unable to locate domain config directory: %s"
msgstr "No se ha podido localizar el directorio config de dominio: %s"
diff --git a/keystone/locale/fr/LC_MESSAGES/keystone.po b/keystone/locale/fr/LC_MESSAGES/keystone.po
index 66540fd97..6d69341b5 100644
--- a/keystone/locale/fr/LC_MESSAGES/keystone.po
+++ b/keystone/locale/fr/LC_MESSAGES/keystone.po
@@ -14,7 +14,7 @@ msgid ""
msgstr ""
"Project-Id-Version: keystone VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2019-09-30 20:17+0000\n"
+"POT-Creation-Date: 2022-05-20 04:41+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -741,9 +741,6 @@ msgstr ""
"Impossible de supprimer la région %(region_id)s car la région ou ses régions "
"enfant ont des noeuds finals associés."
-msgid "Unable to downgrade schema"
-msgstr "Impossible de rétrograder le schéma"
-
#, python-format
msgid "Unable to locate domain config directory: %s"
msgstr "Impossible de localiser le répertoire de configuration domaine: %s"
diff --git a/keystone/locale/it/LC_MESSAGES/keystone.po b/keystone/locale/it/LC_MESSAGES/keystone.po
index 2bc580c20..c9384b0b8 100644
--- a/keystone/locale/it/LC_MESSAGES/keystone.po
+++ b/keystone/locale/it/LC_MESSAGES/keystone.po
@@ -8,7 +8,7 @@ msgid ""
msgstr ""
"Project-Id-Version: keystone VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2019-09-30 20:17+0000\n"
+"POT-Creation-Date: 2022-05-20 04:41+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -740,9 +740,6 @@ msgstr ""
"Impossibile eliminare la regione %(region_id)s perché la regione o le "
"relative regioni child hanno degli endpoint associati."
-msgid "Unable to downgrade schema"
-msgstr "Impossibile eseguire il downgrade dello schema"
-
#, python-format
msgid "Unable to locate domain config directory: %s"
msgstr "Impossibile individuare la directory config del dominio: %s"
diff --git a/keystone/locale/ja/LC_MESSAGES/keystone.po b/keystone/locale/ja/LC_MESSAGES/keystone.po
index 433c673b8..e62f4f492 100644
--- a/keystone/locale/ja/LC_MESSAGES/keystone.po
+++ b/keystone/locale/ja/LC_MESSAGES/keystone.po
@@ -9,7 +9,7 @@ msgid ""
msgstr ""
"Project-Id-Version: keystone VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2019-09-30 20:17+0000\n"
+"POT-Creation-Date: 2022-05-20 04:41+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -729,9 +729,6 @@ msgstr ""
"リージョン %(region_id)s またはその子リージョンがエンドポイントに関連付けられ"
"ているため、このリージョンを削除できません。"
-msgid "Unable to downgrade schema"
-msgstr "スキーマをダウングレードすることができません"
-
#, python-format
msgid "Unable to locate domain config directory: %s"
msgstr "ドメイン設定ディレクトリーが見つかりません: %s"
diff --git a/keystone/locale/ko_KR/LC_MESSAGES/keystone.po b/keystone/locale/ko_KR/LC_MESSAGES/keystone.po
index 102b67fa6..8c278558c 100644
--- a/keystone/locale/ko_KR/LC_MESSAGES/keystone.po
+++ b/keystone/locale/ko_KR/LC_MESSAGES/keystone.po
@@ -11,7 +11,7 @@ msgid ""
msgstr ""
"Project-Id-Version: keystone VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2019-09-30 20:17+0000\n"
+"POT-Creation-Date: 2022-05-20 04:41+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -712,9 +712,6 @@ msgstr ""
"리젼 %(region_id)s 또는 하위 리젼에 연관된 엔드포인트가 있어 삭제할 수 없습니"
"다."
-msgid "Unable to downgrade schema"
-msgstr "스키마를 다운그레이드할 수 없음"
-
#, python-format
msgid "Unable to locate domain config directory: %s"
msgstr "%s: 도메인 설정 디렉토리를 찾을 수 없습니다."
diff --git a/keystone/locale/pt_BR/LC_MESSAGES/keystone.po b/keystone/locale/pt_BR/LC_MESSAGES/keystone.po
index 853478f93..7516816b7 100644
--- a/keystone/locale/pt_BR/LC_MESSAGES/keystone.po
+++ b/keystone/locale/pt_BR/LC_MESSAGES/keystone.po
@@ -13,7 +13,7 @@ msgid ""
msgstr ""
"Project-Id-Version: keystone VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2019-09-30 20:17+0000\n"
+"POT-Creation-Date: 2022-05-20 04:41+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -728,9 +728,6 @@ msgstr ""
"Não foi possível excluir a região %(region_id)s, uma vez que ela ou suas "
"regiões filhas possuem terminais associados."
-msgid "Unable to downgrade schema"
-msgstr "Não é possível fazer downgrade do esquema"
-
#, python-format
msgid "Unable to locate domain config directory: %s"
msgstr "Não é possível localizar diretório de configuração de domínio: %s"
diff --git a/keystone/locale/ru/LC_MESSAGES/keystone.po b/keystone/locale/ru/LC_MESSAGES/keystone.po
index 542b138f2..56e50d9c0 100644
--- a/keystone/locale/ru/LC_MESSAGES/keystone.po
+++ b/keystone/locale/ru/LC_MESSAGES/keystone.po
@@ -11,7 +11,7 @@ msgid ""
msgstr ""
"Project-Id-Version: keystone VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2019-09-30 20:17+0000\n"
+"POT-Creation-Date: 2022-05-20 04:41+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -723,9 +723,6 @@ msgstr ""
"Не удалось удалить регион %(region_id)s: регион или его дочерние регионы "
"имеют связанные конечные точки."
-msgid "Unable to downgrade schema"
-msgstr "Не удается понизить версию схемы"
-
#, python-format
msgid "Unable to locate domain config directory: %s"
msgstr "Не удалось найти каталог конфигурации домена: %s"
diff --git a/keystone/locale/tr_TR/LC_MESSAGES/keystone.po b/keystone/locale/tr_TR/LC_MESSAGES/keystone.po
deleted file mode 100644
index 9100fed51..000000000
--- a/keystone/locale/tr_TR/LC_MESSAGES/keystone.po
+++ /dev/null
@@ -1,676 +0,0 @@
-# Translations template for keystone.
-# Copyright (C) 2015 OpenStack Foundation
-# This file is distributed under the same license as the keystone project.
-#
-# Translators:
-# Alper Çiftçi <alprciftci@gmail.com>, 2015
-# Andreas Jaeger <jaegerandi@gmail.com>, 2015
-# catborise <muhammetalisag@gmail.com>, 2013
-# catborise <muhammetalisag@gmail.com>, 2013
-# Andreas Jaeger <jaegerandi@gmail.com>, 2016. #zanata
-msgid ""
-msgstr ""
-"Project-Id-Version: keystone VERSION\n"
-"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2019-09-30 20:17+0000\n"
-"MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=UTF-8\n"
-"Content-Transfer-Encoding: 8bit\n"
-"PO-Revision-Date: 2016-04-07 06:27+0000\n"
-"Last-Translator: Copied by Zanata <copied-by-zanata@zanata.org>\n"
-"Language: tr_TR\n"
-"Plural-Forms: nplurals=1; plural=0;\n"
-"Generated-By: Babel 2.0\n"
-"X-Generator: Zanata 4.3.3\n"
-"Language-Team: Turkish (Turkey)\n"
-
-#, python-format
-msgid "%(detail)s"
-msgstr "%(detail)s"
-
-#, python-format
-msgid ""
-"%(event)s is not a valid notification event, must be one of: %(actions)s"
-msgstr ""
-"%(event)s geçerli bir bilgilendirme olayı değil, şunlardan biri olmalı: "
-"%(actions)s"
-
-#, python-format
-msgid "%(host)s is not a trusted dashboard host"
-msgstr "%(host)s güvenilir bir gösterge paneli istemcisi değil"
-
-#, python-format
-msgid "%(message)s %(amendment)s"
-msgstr "%(message)s %(amendment)s"
-
-#, python-format
-msgid ""
-"%(mod_name)s doesn't provide database migrations. The migration repository "
-"path at %(path)s doesn't exist or isn't a directory."
-msgstr ""
-"%(mod_name)s veri tabanı göçü sağlamıyor. %(path)s yolundaki göç deposu yolu "
-"mevcut değil ya da bir dizin değil."
-
-#, python-format
-msgid "%s field is required and cannot be empty"
-msgstr "%s alanı gerekli ve boş olamaz"
-
-msgid "--all option cannot be mixed with other options"
-msgstr "--all seçeneği diğer seçeneklerle birleştirilemez"
-
-msgid "A project-scoped token is required to produce a service catalog."
-msgstr "Servis kataloğu oluşturmak için proje-kapsamlı bir jeton gerekli."
-
-msgid "Access token is expired"
-msgstr "Erişim jetonunun süresi dolmuş"
-
-msgid "Access token not found"
-msgstr "Erişim jetonu bulunamadı"
-
-msgid "Additional authentications steps required."
-msgstr "Ek kimlik doğrulama adımları gerekli."
-
-msgid "An unexpected error occurred when retrieving domain configs"
-msgstr "Alan yapılandırmaları alınırken beklenmedik hata oluştu"
-
-#, python-format
-msgid "An unexpected error occurred when trying to store %s"
-msgstr "%s depolanırken beklenmedik bir hata oluştu"
-
-msgid "An unexpected error prevented the server from fulfilling your request."
-msgstr "Beklenmedik bir hata sunucunun isteğinizi tamamlamasını engelledi."
-
-msgid "At least one option must be provided"
-msgstr "En az bir seçenek sağlanmalıdır"
-
-msgid "At least one option must be provided, use either --all or --domain-name"
-msgstr "En az bir seçenek sağlanmalıdır, ya --all ya da --domain-name kullanın"
-
-msgid "Attempted to authenticate with an unsupported method."
-msgstr "Desteklenmeyen yöntem ile doğrulama girişiminde bulunuldu."
-
-msgid "Authentication plugin error."
-msgstr "Kimlik doğrulama eklenti hatası."
-
-msgid "Cannot authorize a request token with a token issued via delegation."
-msgstr "Vekil ile sağlanan bir jeton ile istek yetkilendirilemez."
-
-#, python-format
-msgid "Cannot change %(option_name)s %(attr)s"
-msgstr "%(option_name)s %(attr)s değiştirilemiyor"
-
-msgid "Cannot change Domain ID"
-msgstr "Alan ID'si değiştirilemez"
-
-msgid "Cannot change user ID"
-msgstr "Kullanıcı ID'si değiştirilemiyor"
-
-msgid "Cannot change user name"
-msgstr "Kullanıcı adı değiştirilemiyor"
-
-msgid "Cannot list request tokens with a token issued via delegation."
-msgstr "Vekalet ile sağlanan bir jeton ile istek jetonları listelenemez."
-
-#, python-format
-msgid "Cannot remove role that has not been granted, %s"
-msgstr "Verilmemiş rol silinemez, %s"
-
-msgid ""
-"Cannot truncate a driver call without hints list as first parameter after "
-"self "
-msgstr ""
-"self'den sonra ilk parametre olarak ipucu listesi verilmeden bir sürücü "
-"çağrısı kırpılamıyor "
-
-msgid ""
-"Cannot use parents_as_list and parents_as_ids query params at the same time."
-msgstr ""
-"parents_as_list ve parents_as_ids sorgu parametreleri aynı anda kullanılamaz."
-
-msgid ""
-"Cannot use subtree_as_list and subtree_as_ids query params at the same time."
-msgstr ""
-"subtree_as_list ve subtree_as_ids sorgu parametreleri aynı anda kullanılamaz."
-
-msgid ""
-"Combining effective and group filter will always result in an empty list."
-msgstr ""
-"Efektif ve grup filtresini birleştirmek her zaman boş bir listeye yol açar."
-
-msgid ""
-"Combining effective, domain and inherited filters will always result in an "
-"empty list."
-msgstr ""
-"Efektif, alan ve miras filtrelerin birleştirilmesi her zaman boş bir listeye "
-"yol açar."
-
-#, python-format
-msgid "Conflicting region IDs specified: \"%(url_id)s\" != \"%(ref_id)s\""
-msgstr "Çatışan bölge kimlikleri belirtildi: \"%(url_id)s\" != \"%(ref_id)s\""
-
-msgid "Consumer not found"
-msgstr "Tüketici bulunamadı"
-
-msgid "Could not find Identity Provider identifier in environment"
-msgstr "Kimlik Sağlayıcı tanımlayıcısı ortamda bulunamıyor"
-
-msgid "Could not validate the access token"
-msgstr "Erişim jetonu doğrulanamadı"
-
-msgid ""
-"Disabling an entity where the 'enable' attribute is ignored by configuration."
-msgstr ""
-"'enable' özniteliği yapılandırma tarafından göz ardı edilen bir öğe "
-"kapatılıyor."
-
-#, python-format
-msgid "Domain cannot be named %s"
-msgstr "Alan %s olarak adlandırılamaz"
-
-#, python-format
-msgid "Domain cannot have ID %s"
-msgstr "Alan %s ID'sine sahip olamaz"
-
-#, python-format
-msgid "Domain is disabled: %s"
-msgstr "Alan kapalı: %s"
-
-#, python-format
-msgid ""
-"Domain: %(domain)s already has a configuration defined - ignoring file: "
-"%(file)s."
-msgstr ""
-"Alan: %(domain)s zaten tanımlanmış bir yapılandırmaya sahip - dosya "
-"atlanıyor: %(file)s."
-
-#, python-format
-msgid "Duplicate ID, %s."
-msgstr "Kopya ID, %s"
-
-#, python-format
-msgid "Duplicate name, %s."
-msgstr "Kopya isim, %s."
-
-#, python-format
-msgid "Endpoint %(endpoint_id)s not found in project %(project_id)s"
-msgstr "Bitiş noktası %(endpoint_id)s %(project_id)s projesinde bulunamadı"
-
-msgid "Endpoint Group Project Association not found"
-msgstr "Bitiş Noktası Grup Proje İlişkisi bulunamadı"
-
-msgid "Ensure configuration option idp_entity_id is set."
-msgstr "idp_entity_id yapılandırma seçeneğinin ayarlandığına emin olun."
-
-msgid "Ensure configuration option idp_sso_endpoint is set."
-msgstr "idp_sso_endpoint yapılandırma seçeneğinin ayarlandığına emin olun."
-
-#, python-format
-msgid ""
-"Error parsing configuration file for domain: %(domain)s, file: %(file)s."
-msgstr ""
-"Alan: %(domain)s için yapılandırma dosyası ayrıştırılırken hata, dosya: "
-"%(file)s."
-
-#, python-format
-msgid "Error while opening file %(path)s: %(err)s"
-msgstr "Dosya açılırken hata %(path)s: %(err)s"
-
-#, python-format
-msgid "Error while parsing rules %(path)s: %(err)s"
-msgstr "Kurallar ayrıştırılırken hata %(path)s: %(err)s"
-
-#, python-format
-msgid "Expected dict or list: %s"
-msgstr "Sözlük ya da liste beklendi: %s"
-
-msgid "Failed to validate token"
-msgstr "Jeton doğrulama başarısız"
-
-msgid "Federation token is expired"
-msgstr "Federasyon jetonunun süresi dolmuş"
-
-#, python-format
-msgid ""
-"Field \"remaining_uses\" is set to %(value)s while it must not be set in "
-"order to redelegate a trust"
-msgstr ""
-"\"remaining_uses\" alanı %(value)s olarak ayarlanmış, bir güvene tekrar "
-"yetki vermek için böyle ayarlanmamalı"
-
-#, python-format
-msgid "Group %(group)s is not supported for domain specific configurations"
-msgstr "%(group)s grubu alana özel yapılandırmalar için desteklenmiyor"
-
-#, python-format
-msgid ""
-"Group %(group_id)s returned by mapping %(mapping_id)s was not found in the "
-"backend."
-msgstr ""
-"%(mapping_id)s eşleştirmesi tarafından döndürülen %(group_id)s grubu arka "
-"uçta bulunamadı."
-
-#, python-format
-msgid "ID attribute %(id_attr)s not found in LDAP object %(dn)s"
-msgstr "ID özniteliği %(id_attr)s %(dn)s LDAP nesnesinde bulunamadı"
-
-#, python-format
-msgid "Identity Provider %(idp)s is disabled"
-msgstr "Kimlik Sağlayıcı %(idp)s kapalı"
-
-msgid ""
-"Incoming identity provider identifier not included among the accepted "
-"identifiers."
-msgstr ""
-"Gelen kimlik sağlayıcı tanımlayıcısı kabul edilen tanımlayıcılar arasında "
-"yok."
-
-#, python-format
-msgid "Invalid LDAP TLS certs option: %(option)s. Choose one of: %(options)s"
-msgstr ""
-"Geçersiz LDAP TLS sertifika seçeneği: %(option)s. Şunlardan birini seçin: "
-"%(options)s"
-
-#, python-format
-msgid "Invalid LDAP TLS_AVAIL option: %s. TLS not available"
-msgstr "Geçersiz LDAP TLS_AVAIL seçeneği: %s. TLS kullanılabilir değil"
-
-#, python-format
-msgid "Invalid LDAP deref option: %(option)s. Choose one of: %(options)s"
-msgstr ""
-"Geçersiz LDAP referans kaldırma seçeneği: %(option)s. Şunlardan birini "
-"seçin: %(options)s"
-
-#, python-format
-msgid "Invalid LDAP scope: %(scope)s. Choose one of: %(options)s"
-msgstr "Geçersiz LDAP kapsamı: %(scope)s. Şunlardan birini seçin: %(options)s"
-
-msgid "Invalid TLS / LDAPS combination"
-msgstr "Geçersiz TLS / LDAPS kombinasyonu"
-
-msgid "Invalid blob in credential"
-msgstr "Kimlik bilgisinde geçersiz düğüm"
-
-#, python-format
-msgid ""
-"Invalid domain name: %(domain)s found in config file name: %(file)s - "
-"ignoring this file."
-msgstr ""
-"Yapılandırma dosyası isminde: %(file)s geçersiz alan adı: %(domain)s bulundu "
-"- bu dosya atlanıyor."
-
-#, python-format
-msgid ""
-"Invalid rule: %(identity_value)s. Both 'groups' and 'domain' keywords must "
-"be specified."
-msgstr ""
-"Geçersiz kural: %(identity_value)s. Hem 'gruplar' hem 'alan' anahtar "
-"kelimeleri belirtilmeli."
-
-msgid "Invalid signature"
-msgstr "Geçersiz imza"
-
-msgid "Invalid user / password"
-msgstr "Geçersiz kullanıcı / parola"
-
-msgid "Invalid username or password"
-msgstr "Geçersiz kullanıcı adı ve parola"
-
-#, python-format
-msgid "Malformed endpoint URL (%(endpoint)s), see ERROR log for details."
-msgstr ""
-"Kusurlu bitiş noktası URL'si (%(endpoint)s), detaylar için HATA kaydına "
-"bakın."
-
-#, python-format
-msgid "Member %(member)s is already a member of group %(group)s"
-msgstr "Üye %(member)s zaten %(group)s grubunun üyesi"
-
-#, python-format
-msgid "Method not callable: %s"
-msgstr "Metod çağrılabilir değil: %s"
-
-msgid "Missing entity ID from environment"
-msgstr "Öğe kimliği ortamdan eksik"
-
-msgid ""
-"Modifying \"redelegation_count\" upon redelegation is forbidden. Omitting "
-"this parameter is advised."
-msgstr ""
-"Tekrar yetkilendirme üzerine \"redelegation_count\" değiştirmeye izin "
-"verilmez. Tavsiye edildiği gibi bu parametre atlanıyor."
-
-msgid "Multiple domains are not supported"
-msgstr "Birden çok alan desteklenmiyor"
-
-msgid "Must specify either domain or project"
-msgstr "Alan ya da projeden biri belirtilmelidir"
-
-msgid "No authenticated user"
-msgstr "Kimlik denetimi yapılmamış kullanıcı"
-
-msgid ""
-"No encryption keys found; run keystone-manage fernet_setup to bootstrap one."
-msgstr ""
-"Şifreleme anahtarları bulundu; birini yükletmek için keystone-manage "
-"fernet_setup çalıştırın."
-
-msgid "No options specified"
-msgstr "Hiçbir seçenek belirtilmedi"
-
-#, python-format
-msgid "No policy is associated with endpoint %(endpoint_id)s."
-msgstr "Hiçbir ilke %(endpoint_id)s bitiş noktasıyla ilişkilendirilmemiş."
-
-msgid "One of the trust agents is disabled or deleted"
-msgstr "Güven ajanlarından biri kapalı ya da silinmiş"
-
-#, python-format
-msgid ""
-"Option %(option)s found with no group specified while checking domain "
-"configuration request"
-msgstr ""
-"%(option)s seçeneği alan yapılandırma isteği kontrol edilirken hiçbir grup "
-"belirtilmemiş şekilde bulundu"
-
-#, python-format
-msgid ""
-"Option %(option)s in group %(group)s is not supported for domain specific "
-"configurations"
-msgstr ""
-"%(group)s grubundaki %(option)s seçeneği alana özel yapılandırmalarda "
-"desteklenmiyor"
-
-msgid "Project field is required and cannot be empty."
-msgstr "Proje alanı gerekli ve boş olamaz."
-
-#, python-format
-msgid "Project is disabled: %s"
-msgstr "Proje kapalı: %s"
-
-msgid "Redelegation allowed for delegated by trust only"
-msgstr ""
-"Tekrar yetki vermeye yalnızca güven tarafından yetki verilenler için izin "
-"verilir"
-
-#, python-format
-msgid ""
-"Remaining redelegation depth of %(redelegation_depth)d out of allowed range "
-"of [0..%(max_count)d]"
-msgstr ""
-"izin verilen [0..%(max_count)d] aralığı içinden %(redelegation_depth)d izin "
-"verilen tekrar yetki verme derinliği"
-
-msgid "Request must have an origin query parameter"
-msgstr "İstek bir başlangıç noktası sorgu parametresine sahip olmalı"
-
-msgid "Request token is expired"
-msgstr "İstek jetonunun süresi dolmuş"
-
-msgid "Request token not found"
-msgstr "İstek jetonu bulunamadı"
-
-msgid "Requested expiration time is more than redelegated trust can provide"
-msgstr ""
-"İstenen zaman bitim süresi tekrar yetkilendirilen güvenin "
-"sağlayabileceğinden fazla"
-
-#, python-format
-msgid ""
-"Requested redelegation depth of %(requested_count)d is greater than allowed "
-"%(max_count)d"
-msgstr ""
-"%(requested_count)d istenen tekrar yetki verme derinliği izin verilen "
-"%(max_count)d den fazla"
-
-msgid "Scoping to both domain and project is not allowed"
-msgstr "Hem alan hem projeye kapsamlamaya izin verilmez"
-
-msgid "Scoping to both domain and trust is not allowed"
-msgstr "Hem alan hem güvene kapsamlamaya izin verilmez"
-
-msgid "Scoping to both project and trust is not allowed"
-msgstr "Hem proje hem güvene kapsamlamaya izin verilmez"
-
-#, python-format
-msgid "Service Provider %(sp)s is disabled"
-msgstr "Servis Sağlayıcı %(sp)s kapalı"
-
-msgid "Some of requested roles are not in redelegated trust"
-msgstr "İstenen rollerin bazıları tekrar yetki verilen güven içinde değil"
-
-msgid "Specify a domain or project, not both"
-msgstr "Bir alan ya da proje belirtin, ya da her ikisini"
-
-msgid "Specify a user or group, not both"
-msgstr "Bir kullanıcı ya da grup belirtin, ikisini birden değil"
-
-msgid ""
-"The 'expires_at' must not be before now. The server could not comply with "
-"the request since it is either malformed or otherwise incorrect. The client "
-"is assumed to be in error."
-msgstr ""
-"'expires_at' şu andan önce olmamalı. Sunucu talebi yerine getiremedi çünkü "
-"istek ya kusurlu ya da geçersiz. İstemcinin hata durumunda olduğu "
-"varsayılıyor."
-
-msgid "The --all option cannot be used with the --domain-name option"
-msgstr "--all seçeneği --domain-name seçeneğiyle kullanılamaz"
-
-#, python-format
-msgid "The Keystone configuration file %(config_file)s could not be found."
-msgstr "Keystone yapılandırma dosyası %(config_file)s bulunamadı."
-
-#, python-format
-msgid ""
-"The Keystone domain-specific configuration has specified more than one SQL "
-"driver (only one is permitted): %(source)s."
-msgstr ""
-"Keystone alana özel yapılandırması birden fazla SQL sürücüsü belirtti "
-"(yalnızca birine izin verilir): %(source)s."
-
-msgid "The action you have requested has not been implemented."
-msgstr "İstediğiniz eylem uygulanmamış."
-
-#, python-format
-msgid ""
-"The password length must be less than or equal to %(size)i. The server could "
-"not comply with the request because the password is invalid."
-msgstr ""
-"Parola uzunluğu %(size)i ye eşit ya da daha küçük olmalı. Sunucu talebe "
-"cevap veremedi çünkü parola geçersiz."
-
-msgid "The request you have made requires authentication."
-msgstr "Yaptığınız istek kimlik doğrulama gerektiriyor."
-
-msgid ""
-"The revoke call must not have both domain_id and project_id. This is a bug "
-"in the Keystone server. The current request is aborted."
-msgstr ""
-"İptal etme çağrısı hem domain_id hem project_id'ye sahip olmamalı. Bu "
-"Keystone sunucudaki bir hata. Mevcut istek iptal edildi."
-
-msgid "The service you have requested is no longer available on this server."
-msgstr "İstediğiniz servis artık bu sunucu üzerinde kullanılabilir değil."
-
-#, python-format
-msgid ""
-"The specified parent region %(parent_region_id)s would create a circular "
-"region hierarchy."
-msgstr ""
-"Belirtilen üst bölge %(parent_region_id)s dairesel bölge sıralı dizisi "
-"oluştururdu."
-
-#, python-format
-msgid ""
-"The value of group %(group)s specified in the config should be a dictionary "
-"of options"
-msgstr ""
-"Yapılandırmada belirtilen %(group)s grubunun değeri seçenekler sözlüğü olmalı"
-
-#, python-format
-msgid "This is not a recognized Fernet payload version: %s"
-msgstr "Bu bilinen bir Fernet faydalı yük sürümü değil: %s"
-
-msgid ""
-"Timestamp not in expected format. The server could not comply with the "
-"request since it is either malformed or otherwise incorrect. The client is "
-"assumed to be in error."
-msgstr ""
-"Zaman damgası beklenen biçimde değil. Sunucu talebi yerine getiremedi çünkü "
-"istek ya kusurlu ya da geçersiz. İstemcinin hata durumunda olduğu "
-"varsayılıyor."
-
-msgid "Trustee has no delegated roles."
-msgstr "Yedieminin emanet edilen kuralları yok."
-
-msgid "Trustor is disabled."
-msgstr "Güven kurucu kapalı."
-
-#, python-format
-msgid ""
-"Trying to update group %(group)s, so that, and only that, group must be "
-"specified in the config"
-msgstr ""
-"%(group)s grubu güncellenmeye çalışılıyor, böylece yapılandırmada yalnızca "
-"grup belirtilmeli"
-
-#, python-format
-msgid ""
-"Trying to update option %(option)s in group %(group)s, but config provided "
-"contains option %(option_other)s instead"
-msgstr ""
-"%(group)s grubundaki %(option)s seçeneği güncellenmeye çalışılıyor, ama "
-"sağlanan yapılandırma %(option_other)s seçeneğini içeriyor"
-
-#, python-format
-msgid ""
-"Trying to update option %(option)s in group %(group)s, so that, and only "
-"that, option must be specified in the config"
-msgstr ""
-"%(group)s grubundaki %(option)s seçeneği güncellenmeye çalışıldı, böylece, "
-"yapılandırmada yalnızca bu seçenek belirtilmeli"
-
-msgid ""
-"Unable to access the keystone database, please check it is configured "
-"correctly."
-msgstr ""
-"Keystone veri tabanına erişilemiyor, lütfen doğru yapılandırıldığından emin "
-"olun."
-
-#, python-format
-msgid ""
-"Unable to delete region %(region_id)s because it or its child regions have "
-"associated endpoints."
-msgstr ""
-"Bölge %(region_id)s silinemedi çünkü kendisi ya da alt bölgelerinin "
-"ilişkilendirilmiş bitiş noktaları var."
-
-#, python-format
-msgid "Unable to locate domain config directory: %s"
-msgstr "Alan yapılandırma dizini bulunamıyor: %s"
-
-#, python-format
-msgid "Unable to lookup user %s"
-msgstr "%s kullanıcısı aranamadı"
-
-#, python-format
-msgid ""
-"Unable to reconcile identity attribute %(attribute)s as it has conflicting "
-"values %(new)s and %(old)s"
-msgstr ""
-"Kimlik özniteliği %(attribute)s bağdaştırılamıyor çünkü çatışan değerleri "
-"var %(new)s ve %(old)s"
-
-#, python-format
-msgid "Unexpected assignment type encountered, %s"
-msgstr "Beklenmedik atama türüyle karşılaşıldı, %s"
-
-#, python-format
-msgid "Unexpected status requested for JSON Home response, %s"
-msgstr "JSON Home yanıtı için beklenmedik durum istendi, %s"
-
-#, python-format
-msgid "Unknown domain '%(name)s' specified by --domain-name"
-msgstr "--domain-name ile bilinmeyen alan '%(name)s' belirtilmiş"
-
-msgid "Update of `parent_id` is not allowed."
-msgstr "`parent_id` güncellemesine izin verilmiyor."
-
-#, python-format
-msgid "User %(user_id)s has no access to domain %(domain_id)s"
-msgstr "%(user_id)s kullanıcısının %(domain_id)s alanına erişimi yok"
-
-#, python-format
-msgid "User %(user_id)s has no access to project %(project_id)s"
-msgstr "%(user_id)s kullanıcısının %(project_id)s projesine erişimi yok"
-
-#, python-format
-msgid "User %(user_id)s is already a member of group %(group_id)s"
-msgstr "Kullanıcı %(user_id)s zaten %(group_id)s grubu üyesi"
-
-#, python-format
-msgid "User '%(user_id)s' not found in group '%(group_id)s'"
-msgstr "Kullanıcı '%(user_id)s' '%(group_id)s' grubunda bulunamadı"
-
-msgid "User IDs do not match"
-msgstr "Kullanıcı ID leri uyuşmuyor"
-
-#, python-format
-msgid "User is disabled: %s"
-msgstr "Kullanıcı kapalı: %s"
-
-msgid "User is not a trustee."
-msgstr "Kullanıcı güvenilir değil."
-
-#, python-format
-msgid "User type %s not supported"
-msgstr "Kullanıcı türü %s desteklenmiyor"
-
-msgid "You are not authorized to perform the requested action."
-msgstr "İstenen eylemi gerçekleştirmek için yetkili değilsiniz."
-
-msgid "any options"
-msgstr "herhangi bir seçenek"
-
-msgid "auth_type is not Negotiate"
-msgstr "auth_type Negotiate değil"
-
-msgid "authorizing user does not have role required"
-msgstr "yetkilendiren kullanıcı gerekli role sahip değil"
-
-#, python-format
-msgid "cannot create a project in a branch containing a disabled project: %s"
-msgstr "kapalı bir proje içeren bir alt grupta proje oluşturulamaz: %s"
-
-#, python-format
-msgid "group %(group)s"
-msgstr "grup %(group)s"
-
-#, python-format
-msgid "option %(option)s in group %(group)s"
-msgstr "%(group)s grubundaki %(option)s seçeneği"
-
-msgid "remaining_uses must be a positive integer or null."
-msgstr "remaining_uses pozitif bir değer ya da null olmalı."
-
-msgid "remaining_uses must not be set if redelegation is allowed"
-msgstr "tekrar yetkilendirmeye izin veriliyorsa remaining_uses ayarlanmamalı"
-
-#, python-format
-msgid ""
-"request to update group %(group)s, but config provided contains group "
-"%(group_other)s instead"
-msgstr ""
-"%(group)s grubunu güncelleme isteği, ama sağlanan yapılandırma "
-"%(group_other)s grubunu içeriyor"
-
-msgid "rescope a scoped token"
-msgstr "kapsamlı bir jeton tekrar kapsamlandı"
-
-#, python-format
-msgid "tls_cacertdir %s not found or is not a directory"
-msgstr "tls_cacertdir %s bulunamadı ya da bir dizin"
-
-#, python-format
-msgid "tls_cacertfile %s not found or is not a file"
-msgstr "tls_cacertfile %s bulunamadı ya da bir dosya değil"
diff --git a/keystone/locale/zh_CN/LC_MESSAGES/keystone.po b/keystone/locale/zh_CN/LC_MESSAGES/keystone.po
index 27b9c6f4e..cb194dc71 100644
--- a/keystone/locale/zh_CN/LC_MESSAGES/keystone.po
+++ b/keystone/locale/zh_CN/LC_MESSAGES/keystone.po
@@ -11,16 +11,18 @@
# 颜海峰 <yanheven@gmail.com>, 2014
# Andreas Jaeger <jaegerandi@gmail.com>, 2016. #zanata
# Eric Lei <1165970798@qq.com>, 2016. #zanata
+# Research and Development Center UnitedStack <dev@unitedstack.com>, 2022. #zanata
msgid ""
msgstr ""
"Project-Id-Version: keystone VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2019-09-30 20:17+0000\n"
+"POT-Creation-Date: 2022-07-01 18:11+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"PO-Revision-Date: 2016-09-28 03:23+0000\n"
-"Last-Translator: Eric Lei <1165970798@qq.com>\n"
+"PO-Revision-Date: 2022-06-14 12:29+0000\n"
+"Last-Translator: Research and Development Center UnitedStack "
+"<dev@unitedstack.com>\n"
"Language: zh_CN\n"
"Plural-Forms: nplurals=1; plural=0;\n"
"Generated-By: Babel 2.0\n"
@@ -654,9 +656,6 @@ msgid ""
"associated endpoints."
msgstr "无法删除区域 %(region_id)s,因为它或它的子区域具有关联的端点。"
-msgid "Unable to downgrade schema"
-msgstr "无法对模式进行降级"
-
#, python-format
msgid "Unable to locate domain config directory: %s"
msgstr "找不到指定的域配置目录:%s"
@@ -732,6 +731,14 @@ msgstr "用户类型 %s 不受支持"
msgid "You are not authorized to perform the requested action."
msgstr "您没有授权完成所请求的操作。"
+msgid ""
+"You cannot change your password at this time due to password policy "
+"disallowing password changes. Please contact your administrator to reset "
+"your password."
+msgstr ""
+"因为密码策略被设置为禁止修改密码,目前您不能更改密码。请联系管理员重置您的密"
+"码。"
+
#, python-format
msgid ""
"You cannot change your password at this time due to the minimum password "
@@ -740,7 +747,7 @@ msgid ""
"contact your administrator to reset your password."
msgstr ""
"没有达到密码最小使用时长,目前您不能更改密码。一旦您修改了密码,在下次可被修"
-"改前该密码必须使用%(min_age_days)d天.请在%(days_left)d天后重试,或者联系管理"
+"改前该密码必须使用%(min_age_days)d天。请在%(days_left)d天后重试,或者联系管理"
"员重置您的密码。"
msgid ""
diff --git a/keystone/locale/zh_TW/LC_MESSAGES/keystone.po b/keystone/locale/zh_TW/LC_MESSAGES/keystone.po
index 4d0399c9d..4529c4bc0 100644
--- a/keystone/locale/zh_TW/LC_MESSAGES/keystone.po
+++ b/keystone/locale/zh_TW/LC_MESSAGES/keystone.po
@@ -8,7 +8,7 @@ msgid ""
msgstr ""
"Project-Id-Version: keystone VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2019-09-30 20:17+0000\n"
+"POT-Creation-Date: 2022-05-20 04:41+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -638,9 +638,6 @@ msgid ""
"associated endpoints."
msgstr "無法刪除區域 %(region_id)s,因為此區域或其子區域具有相關聯的端點。"
-msgid "Unable to downgrade schema"
-msgstr "無法將綱目降級"
-
#, python-format
msgid "Unable to locate domain config directory: %s"
msgstr "找不到網域配置目錄:%s"
diff --git a/keystone/models/token_model.py b/keystone/models/token_model.py
index d68b8eb96..78146295d 100644
--- a/keystone/models/token_model.py
+++ b/keystone/models/token_model.py
@@ -79,6 +79,9 @@ class TokenModel(object):
self.application_credential_id = None
self.__application_credential = None
+ self.oauth2_credential_id = None
+ self.oauth2_thumbprint = None
+
def __repr__(self):
"""Return string representation of TokenModel."""
desc = ('<%(type)s (audit_id=%(audit_id)s, '
@@ -440,6 +443,9 @@ class TokenModel(object):
return roles
+ def _get_oauth2_credential_roles(self):
+ return self._get_project_roles()
+
@property
def roles(self):
if self.system_scoped:
diff --git a/keystone/oauth2/__init__.py b/keystone/oauth2/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/keystone/oauth2/__init__.py
diff --git a/keystone/oauth2/handlers.py b/keystone/oauth2/handlers.py
new file mode 100644
index 000000000..e2c16c5cf
--- /dev/null
+++ b/keystone/oauth2/handlers.py
@@ -0,0 +1,30 @@
+# Copyright 2022 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import flask
+from keystone.server import flask as ks_flask
+
+
+def build_response(error):
+ response = flask.make_response((
+ {
+ 'error': error.error_title,
+ 'error_description': error.message_format
+ },
+ f"{error.code} {error.title}"))
+
+ if error.code == 401:
+ response.headers['WWW-Authenticate'] = \
+ 'Keystone uri="%s"' % ks_flask.base_url()
+ return response
diff --git a/keystone/revoke/backends/base.py b/keystone/revoke/backends/base.py
index 228db4d5c..52ee957dc 100644
--- a/keystone/revoke/backends/base.py
+++ b/keystone/revoke/backends/base.py
@@ -36,7 +36,7 @@ class RevokeDriverBase(object, metaclass=abc.ABCMeta):
@abc.abstractmethod
def list_events(self, last_fetch=None, token=None):
- """return the revocation events, as a list of objects.
+ """Return the revocation events, as a list of objects.
:param last_fetch: Time of last fetch. Return all events newer.
:param token: dictionary of values from a token, normalized for
@@ -52,7 +52,7 @@ class RevokeDriverBase(object, metaclass=abc.ABCMeta):
@abc.abstractmethod
def revoke(self, event):
- """register a revocation event.
+ """Register a revocation event.
:param event: An instance of
keystone.revoke.model.RevocationEvent
diff --git a/keystone/server/flask/application.py b/keystone/server/flask/application.py
index 12d59b289..5c4c0b065 100644
--- a/keystone/server/flask/application.py
+++ b/keystone/server/flask/application.py
@@ -27,12 +27,12 @@ except ImportError:
import keystone.api
from keystone import exception
+from keystone.oauth2 import handlers as oauth2_handlers
+from keystone.receipt import handlers as receipt_handlers
from keystone.server.flask import common as ks_flask
from keystone.server.flask.request_processing import json_body
from keystone.server.flask.request_processing import req_logging
-from keystone.receipt import handlers as receipt_handlers
-
LOG = log.getLogger(__name__)
@@ -75,16 +75,16 @@ def _handle_keystone_exception(error):
# TODO(adriant): register this with its own specific handler:
if isinstance(error, exception.InsufficientAuthMethods):
return receipt_handlers.build_receipt(error)
+ elif isinstance(error, exception.OAuth2Error):
+ return oauth2_handlers.build_response(error)
# Handle logging
if isinstance(error, exception.Unauthorized):
LOG.warning(
"Authorization failed. %(exception)s from %(remote_addr)s",
{'exception': error, 'remote_addr': flask.request.remote_addr})
- elif isinstance(error, exception.UnexpectedError):
- LOG.exception(str(error))
else:
- LOG.warning(str(error))
+ LOG.exception(str(error))
# Render the exception to something user "friendly"
error_message = error.args[0]
diff --git a/keystone/server/flask/request_processing/json_body.py b/keystone/server/flask/request_processing/json_body.py
index cce0763d3..746d88cfd 100644
--- a/keystone/server/flask/request_processing/json_body.py
+++ b/keystone/server/flask/request_processing/json_body.py
@@ -29,6 +29,13 @@ def json_body_before_request():
# exit if there is nothing to be done, (no body)
if not flask.request.get_data():
return None
+ elif flask.request.path and flask.request.path.startswith(
+ '/v3/OS-OAUTH2/'):
+ # When the user makes a request to the OAuth2.0 token endpoint,
+ # the user should use the "application/x-www-form-urlencoded" format
+ # with a character encoding of UTF-8 in the HTTP request entity-body.
+        # In that scenario there is nothing to be done, so simply exit.
+ return None
try:
# flask does loading for us for json, use the flask default loader
diff --git a/keystone/tests/unit/base_classes.py b/keystone/tests/unit/base_classes.py
index 95bf7fa02..9bf3b50eb 100644
--- a/keystone/tests/unit/base_classes.py
+++ b/keystone/tests/unit/base_classes.py
@@ -31,7 +31,7 @@ class TestCaseWithBootstrap(core.BaseTestCase):
Re-implementation of TestCase that doesn't load a bunch of fixtures by
hand and instead uses the bootstrap process. This makes it so that our base
tests have the same things available to us as operators after they run
- boostrap. It also makes our tests DRY and pushes setup required for
+ bootstrap. It also makes our tests DRY and pushes setup required for
specific tests into the actual test class, instead of pushing it into a
generic structure that gets loaded for every test.
@@ -46,7 +46,7 @@ class TestCaseWithBootstrap(core.BaseTestCase):
ksfixtures.KeyRepository(
self.config_fixture,
'fernet_tokens',
- CONF.fernet_tokens.max_active_keys
+ CONF.fernet_tokens.max_active_keys,
)
)
@@ -54,7 +54,7 @@ class TestCaseWithBootstrap(core.BaseTestCase):
ksfixtures.KeyRepository(
self.config_fixture,
'fernet_receipts',
- CONF.fernet_receipts.max_active_keys
+ CONF.fernet_receipts.max_active_keys,
)
)
@@ -72,7 +72,8 @@ class TestCaseWithBootstrap(core.BaseTestCase):
try:
PROVIDERS.resource_api.create_domain(
default_fixtures.ROOT_DOMAIN['id'],
- default_fixtures.ROOT_DOMAIN)
+ default_fixtures.ROOT_DOMAIN,
+ )
except exception.Conflict:
pass
diff --git a/keystone/tests/unit/common/sql/__init__.py b/keystone/tests/unit/common/sql/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/keystone/tests/unit/common/sql/__init__.py
diff --git a/keystone/tests/unit/common/sql/test_upgrades.py b/keystone/tests/unit/common/sql/test_upgrades.py
index c6c4a2e56..bb53cbd23 100644
--- a/keystone/tests/unit/common/sql/test_upgrades.py
+++ b/keystone/tests/unit/common/sql/test_upgrades.py
@@ -10,243 +10,331 @@
# License for the specific language governing permissions and limitations
# under the License.
-import os
-import tempfile
-from unittest import mock
+"""Tests for the database migrations.
-from migrate import exceptions as migrate_exception
+These are "opportunistic" tests which allow testing against all three databases
+(sqlite in memory, mysql, pg) in a properly configured unit test environment.
+
+For the opportunistic testing you need to set up DBs named 'openstack_citest'
+with user 'openstack_citest' and password 'openstack_citest' on localhost. The
+test will then use that DB and username/password combo to run the tests.
+"""
+
+import fixtures
from migrate.versioning import api as migrate_api
-from migrate.versioning import repository as migrate_repository
-from oslo_db import exception as db_exception
+from oslo_db import options as db_options
from oslo_db.sqlalchemy import enginefacade
-from oslo_db.sqlalchemy import test_fixtures as db_fixtures
-from oslotest import base as test_base
-import sqlalchemy
+from oslo_db.sqlalchemy import test_fixtures
+from oslo_db.sqlalchemy import test_migrations
+from oslo_log.fixture import logging_error as log_fixture
+from oslo_log import log as logging
+from oslotest import base
+from keystone.common import sql
from keystone.common.sql import upgrades
-from keystone.common import utils
+import keystone.conf
+from keystone.tests.unit import ksfixtures
+
+# We need to import all of these so the tables are registered. It would be
+# easier if these were all in a central location :(
+import keystone.application_credential.backends.sql # noqa: F401
+import keystone.assignment.backends.sql # noqa: F401
+import keystone.assignment.role_backends.sql_model # noqa: F401
+import keystone.catalog.backends.sql # noqa: F401
+import keystone.credential.backends.sql # noqa: F401
+import keystone.endpoint_policy.backends.sql # noqa: F401
+import keystone.federation.backends.sql # noqa: F401
+import keystone.identity.backends.sql_model # noqa: F401
+import keystone.identity.mapping_backends.sql # noqa: F401
+import keystone.limit.backends.sql # noqa: F401
+import keystone.oauth1.backends.sql # noqa: F401
+import keystone.policy.backends.sql # noqa: F401
+import keystone.resource.backends.sql_model # noqa: F401
+import keystone.resource.config_backends.sql # noqa: F401
+import keystone.revoke.backends.sql # noqa: F401
+import keystone.trust.backends.sql # noqa: F401
+
+CONF = keystone.conf.CONF
+LOG = logging.getLogger(__name__)
+
+
+class KeystoneModelsMigrationsSync(test_migrations.ModelsMigrationsSync):
+ """Test sqlalchemy-migrate migrations."""
+
+ # Migrations can take a long time, particularly on underpowered CI nodes.
+ # Give them some breathing room.
+ TIMEOUT_SCALING_FACTOR = 4
+ def setUp(self):
+ # Ensure BaseTestCase's ConfigureLogging fixture is disabled since
+ # we're using our own (StandardLogging).
+ with fixtures.EnvironmentVariable('OS_LOG_CAPTURE', '0'):
+ super().setUp()
-class TestMigrationCommon(
- db_fixtures.OpportunisticDBTestMixin, test_base.BaseTestCase,
+ self.useFixture(log_fixture.get_logging_handle_error_fixture())
+ self.useFixture(ksfixtures.WarningsFixture())
+ self.useFixture(ksfixtures.StandardLogging())
+
+ self.engine = enginefacade.writer.get_engine()
+
+ # Configure our connection string in CONF and enable SQLite fkeys
+ db_options.set_defaults(CONF, connection=self.engine.url)
+
+ # TODO(stephenfin): Do we need this? I suspect not since we're using
+        # enginefacade.writer.get_engine() directly above
+ # Override keystone's context manager to be oslo.db's global context
+ # manager.
+ sql.core._TESTING_USE_GLOBAL_CONTEXT_MANAGER = True
+ self.addCleanup(setattr,
+ sql.core, '_TESTING_USE_GLOBAL_CONTEXT_MANAGER', False)
+ self.addCleanup(sql.cleanup)
+
+ def db_sync(self, engine):
+ upgrades.offline_sync_database_to_version(engine=engine)
+
+ def get_engine(self):
+ return self.engine
+
+ def get_metadata(self):
+ return sql.ModelBase.metadata
+
+ def include_object(self, object_, name, type_, reflected, compare_to):
+ if type_ == 'table':
+ # migrate_version is a sqlalchemy-migrate control table and
+ # isn't included in the models
+ if name == 'migrate_version':
+ return False
+
+ # This is created in tests and isn't a "real" table
+ if name == 'test_table':
+ return False
+
+ # FIXME(stephenfin): This was dropped in commit 93aff6e42 but the
+ # migrations were never adjusted
+ if name == 'token':
+ return False
+
+ return True
+
+ def filter_metadata_diff(self, diff):
+ """Filter changes before assert in test_models_sync().
+
+ :param diff: a list of differences (see `compare_metadata()` docs for
+ details on format)
+ :returns: a list of differences
+ """
+ new_diff = []
+ for element in diff:
+ # The modify_foo elements are lists; everything else is a tuple
+ if isinstance(element, list):
+ if element[0][0] == 'modify_nullable':
+ if (element[0][2], element[0][3]) in (
+ ('credential', 'encrypted_blob'),
+ ('credential', 'key_hash'),
+ ('federated_user', 'user_id'),
+ ('federated_user', 'idp_id'),
+ ('local_user', 'user_id'),
+ ('nonlocal_user', 'user_id'),
+ ('password', 'local_user_id'),
+ ):
+ continue # skip
+
+ if element[0][0] == 'modify_default':
+ if (element[0][2], element[0][3]) in (
+ ('password', 'created_at_int'),
+ ('password', 'self_service'),
+ ('project', 'is_domain'),
+ ('service_provider', 'relay_state_prefix'),
+ ):
+ continue # skip
+ else:
+ if element[0] == 'add_constraint':
+ if (
+ element[1].table.name,
+ [x.name for x in element[1].columns],
+ ) in (
+ ('project_tag', ['project_id', 'name']),
+ (
+ 'trust',
+ [
+ 'trustor_user_id',
+ 'trustee_user_id',
+ 'project_id',
+ 'impersonation',
+ 'expires_at',
+ ],
+ ),
+ ):
+ continue # skip
+
+ # FIXME(stephenfin): These have a different name on PostgreSQL.
+ # Resolve by renaming the constraint on the models.
+ if element[0] == 'remove_constraint':
+ if (
+ element[1].table.name,
+ [x.name for x in element[1].columns],
+ ) in (
+ ('access_rule', ['external_id']),
+ (
+ 'trust',
+ [
+ 'trustor_user_id',
+ 'trustee_user_id',
+ 'project_id',
+ 'impersonation',
+ 'expires_at',
+ 'expires_at_int',
+ ],
+ ),
+ ):
+ continue # skip
+
+ # FIXME(stephenfin): These indexes are present in the
+ # migrations but not on the equivalent models. Resolve by
+ # updating the models.
+ if element[0] == 'add_index':
+ if (
+ element[1].table.name,
+ [x.name for x in element[1].columns],
+ ) in (
+ ('access_rule', ['external_id']),
+ ('access_rule', ['user_id']),
+ ('revocation_event', ['revoked_at']),
+ ('system_assignment', ['actor_id']),
+ ('user', ['default_project_id']),
+ ):
+ continue # skip
+
+ # FIXME(stephenfin): These indexes are present on the models
+ # but not in the migrations. Resolve by either removing from
+ # the models or adding new migrations.
+ if element[0] == 'remove_index':
+ if (
+ element[1].table.name,
+ [x.name for x in element[1].columns],
+ ) in (
+ ('access_rule', ['external_id']),
+ ('access_rule', ['user_id']),
+ ('access_token', ['consumer_id']),
+ ('endpoint', ['service_id']),
+ ('revocation_event', ['revoked_at']),
+ ('user', ['default_project_id']),
+ ('user_group_membership', ['group_id']),
+ (
+ 'trust',
+ [
+ 'trustor_user_id',
+ 'trustee_user_id',
+ 'project_id',
+ 'impersonation',
+ 'expires_at',
+ 'expires_at_int',
+ ],
+ ),
+ (),
+ ):
+ continue # skip
+
+ # FIXME(stephenfin): These fks are present in the
+ # migrations but not on the equivalent models. Resolve by
+ # updating the models.
+ if element[0] == 'add_fk':
+ if (element[1].table.name, element[1].column_keys) in (
+ (
+ 'application_credential_access_rule',
+ ['access_rule_id'],
+ ),
+ ('limit', ['registered_limit_id']),
+ ('registered_limit', ['service_id']),
+ ('registered_limit', ['region_id']),
+ ('endpoint', ['region_id']),
+ ):
+ continue # skip
+
+ # FIXME(stephenfin): These indexes are present on the models
+ # but not in the migrations. Resolve by either removing from
+ # the models or adding new migrations.
+ if element[0] == 'remove_fk':
+ if (element[1].table.name, element[1].column_keys) in (
+ (
+ 'application_credential_access_rule',
+ ['access_rule_id'],
+ ),
+ ('endpoint', ['region_id']),
+ ('assignment', ['role_id']),
+ ):
+ continue # skip
+
+ new_diff.append(element)
+
+ return new_diff
+
+
+class TestModelsSyncSQLite(
+ KeystoneModelsMigrationsSync,
+ test_fixtures.OpportunisticDBTestMixin,
+ base.BaseTestCase,
):
+ pass
- def setUp(self):
- super().setUp()
- self.engine = enginefacade.writer.get_engine()
+class TestModelsSyncMySQL(
+ KeystoneModelsMigrationsSync,
+ test_fixtures.OpportunisticDBTestMixin,
+ base.BaseTestCase,
+):
+ FIXTURE = test_fixtures.MySQLOpportunisticFixture
- self.path = tempfile.mkdtemp('test_migration')
- self.path1 = tempfile.mkdtemp('test_migration')
- self.return_value = '/home/openstack/migrations'
- self.return_value1 = '/home/extension/migrations'
- self.init_version = 1
- self.test_version = 123
-
- self.patcher_repo = mock.patch.object(migrate_repository, 'Repository')
- self.repository = self.patcher_repo.start()
- self.repository.side_effect = [self.return_value, self.return_value1]
-
- self.mock_api_db = mock.patch.object(migrate_api, 'db_version')
- self.mock_api_db_version = self.mock_api_db.start()
- self.mock_api_db_version.return_value = self.test_version
-
- def tearDown(self):
- os.rmdir(self.path)
- self.mock_api_db.stop()
- self.patcher_repo.stop()
- super().tearDown()
-
- def test_find_migrate_repo_path_not_found(self):
- self.assertRaises(
- db_exception.DBMigrationError,
- upgrades._find_migrate_repo,
- "/foo/bar/",
- )
-
- def test_find_migrate_repo_called_once(self):
- my_repository = upgrades._find_migrate_repo(self.path)
- self.repository.assert_called_once_with(self.path)
- self.assertEqual(self.return_value, my_repository)
-
- def test_find_migrate_repo_called_few_times(self):
- repo1 = upgrades._find_migrate_repo(self.path)
- repo2 = upgrades._find_migrate_repo(self.path1)
- self.assertNotEqual(repo1, repo2)
-
- def test_db_version_control(self):
- with utils.nested_contexts(
- mock.patch.object(upgrades, '_find_migrate_repo'),
- mock.patch.object(migrate_api, 'version_control'),
- ) as (mock_find_repo, mock_version_control):
- mock_find_repo.return_value = self.return_value
-
- version = upgrades._migrate_db_version_control(
- self.engine, self.path, self.test_version)
-
- self.assertEqual(self.test_version, version)
- mock_version_control.assert_called_once_with(
- self.engine, self.return_value, self.test_version)
-
- @mock.patch.object(upgrades, '_find_migrate_repo')
- @mock.patch.object(migrate_api, 'version_control')
- def test_db_version_control_version_less_than_actual_version(
- self, mock_version_control, mock_find_repo,
- ):
- mock_find_repo.return_value = self.return_value
- mock_version_control.side_effect = \
- migrate_exception.DatabaseAlreadyControlledError
- self.assertRaises(
- db_exception.DBMigrationError,
- upgrades._migrate_db_version_control, self.engine,
- self.path, self.test_version - 1)
-
- @mock.patch.object(upgrades, '_find_migrate_repo')
- @mock.patch.object(migrate_api, 'version_control')
- def test_db_version_control_version_greater_than_actual_version(
- self, mock_version_control, mock_find_repo,
- ):
- mock_find_repo.return_value = self.return_value
- mock_version_control.side_effect = \
- migrate_exception.InvalidVersionError
- self.assertRaises(
- db_exception.DBMigrationError,
- upgrades._migrate_db_version_control, self.engine,
- self.path, self.test_version + 1)
-
- def test_db_version_return(self):
- ret_val = upgrades._migrate_db_version(
- self.engine, self.path, self.init_version)
- self.assertEqual(self.test_version, ret_val)
-
- def test_db_version_raise_not_controlled_error_first(self):
- with mock.patch.object(
- upgrades, '_migrate_db_version_control',
- ) as mock_ver:
- self.mock_api_db_version.side_effect = [
- migrate_exception.DatabaseNotControlledError('oups'),
- self.test_version]
-
- ret_val = upgrades._migrate_db_version(
- self.engine, self.path, self.init_version)
- self.assertEqual(self.test_version, ret_val)
- mock_ver.assert_called_once_with(
- self.engine, self.path, version=self.init_version)
-
- def test_db_version_raise_not_controlled_error_tables(self):
- with mock.patch.object(sqlalchemy, 'MetaData') as mock_meta:
- self.mock_api_db_version.side_effect = \
- migrate_exception.DatabaseNotControlledError('oups')
- my_meta = mock.MagicMock()
- my_meta.tables = {'a': 1, 'b': 2}
- mock_meta.return_value = my_meta
-
- self.assertRaises(
- db_exception.DBMigrationError, upgrades._migrate_db_version,
- self.engine, self.path, self.init_version)
-
- @mock.patch.object(migrate_api, 'version_control')
- def test_db_version_raise_not_controlled_error_no_tables(self, mock_vc):
- with mock.patch.object(sqlalchemy, 'MetaData') as mock_meta:
- self.mock_api_db_version.side_effect = (
- migrate_exception.DatabaseNotControlledError('oups'),
- self.init_version)
- my_meta = mock.MagicMock()
- my_meta.tables = {}
- mock_meta.return_value = my_meta
-
- upgrades._migrate_db_version(
- self.engine, self.path, self.init_version)
-
- mock_vc.assert_called_once_with(
- self.engine, self.return_value1, self.init_version)
-
- @mock.patch.object(migrate_api, 'version_control')
- def test_db_version_raise_not_controlled_alembic_tables(self, mock_vc):
- # When there are tables but the alembic control table
- # (alembic_version) is present, attempt to version the db.
- # This simulates the case where there is are multiple repos (different
- # abs_paths) and a different path has been versioned already.
- with mock.patch.object(sqlalchemy, 'MetaData') as mock_meta:
- self.mock_api_db_version.side_effect = [
- migrate_exception.DatabaseNotControlledError('oups'), None]
- my_meta = mock.MagicMock()
- my_meta.tables = {'alembic_version': 1, 'b': 2}
- mock_meta.return_value = my_meta
-
- upgrades._migrate_db_version(
- self.engine, self.path, self.init_version)
-
- mock_vc.assert_called_once_with(
- self.engine, self.return_value1, self.init_version)
-
- @mock.patch.object(migrate_api, 'version_control')
- def test_db_version_raise_not_controlled_migrate_tables(self, mock_vc):
- # When there are tables but the sqlalchemy-migrate control table
- # (migrate_version) is present, attempt to version the db.
- # This simulates the case where there is are multiple repos (different
- # abs_paths) and a different path has been versioned already.
- with mock.patch.object(sqlalchemy, 'MetaData') as mock_meta:
- self.mock_api_db_version.side_effect = [
- migrate_exception.DatabaseNotControlledError('oups'), None]
- my_meta = mock.MagicMock()
- my_meta.tables = {'migrate_version': 1, 'b': 2}
- mock_meta.return_value = my_meta
-
- upgrades._migrate_db_version(
- self.engine, self.path, self.init_version)
-
- mock_vc.assert_called_once_with(
- self.engine, self.return_value1, self.init_version)
-
- def test_db_sync_wrong_version(self):
- self.assertRaises(
- db_exception.DBMigrationError,
- upgrades._migrate_db_sync, self.engine, self.path, 'foo')
-
- @mock.patch.object(migrate_api, 'upgrade')
- def test_db_sync_script_not_present(self, upgrade):
- # For non existent upgrades script file sqlalchemy-migrate will raise
- # VersionNotFoundError which will be wrapped in DBMigrationError.
- upgrade.side_effect = migrate_exception.VersionNotFoundError
- self.assertRaises(
- db_exception.DBMigrationError,
- upgrades._migrate_db_sync, self.engine, self.path,
- self.test_version + 1)
-
- @mock.patch.object(migrate_api, 'upgrade')
- def test_db_sync_known_error_raised(self, upgrade):
- upgrade.side_effect = migrate_exception.KnownError
- self.assertRaises(
- db_exception.DBMigrationError,
- upgrades._migrate_db_sync, self.engine, self.path,
- self.test_version + 1)
-
- def test_db_sync_upgrade(self):
- init_ver = 55
- with utils.nested_contexts(
- mock.patch.object(upgrades, '_find_migrate_repo'),
- mock.patch.object(migrate_api, 'upgrade')
- ) as (mock_find_repo, mock_upgrade):
- mock_find_repo.return_value = self.return_value
- self.mock_api_db_version.return_value = self.test_version - 1
-
- upgrades._migrate_db_sync(
- self.engine, self.path, self.test_version, init_ver)
-
- mock_upgrade.assert_called_once_with(
- self.engine, self.return_value, self.test_version)
-
- def test_db_sync_downgrade(self):
- with utils.nested_contexts(
- mock.patch.object(upgrades, '_find_migrate_repo'),
- mock.patch.object(migrate_api, 'downgrade')
- ) as (mock_find_repo, mock_downgrade):
- mock_find_repo.return_value = self.return_value
- self.mock_api_db_version.return_value = self.test_version + 1
-
- upgrades._migrate_db_sync(
- self.engine, self.path, self.test_version)
-
- mock_downgrade.assert_called_once_with(
- self.engine, self.return_value, self.test_version)
+
+class TestModelsSyncPostgreSQL(
+ KeystoneModelsMigrationsSync,
+ test_fixtures.OpportunisticDBTestMixin,
+ base.BaseTestCase,
+):
+ FIXTURE = test_fixtures.PostgresqlOpportunisticFixture
+
+
+class KeystoneModelsMigrationsLegacySync(KeystoneModelsMigrationsSync):
+ """Test that the models match the database after old migrations are run."""
+
+ def db_sync(self, engine):
+ # the 'upgrades._db_sync' method will not use the legacy
+ # sqlalchemy-migrate-based migration flow unless the database is
+ # already controlled with sqlalchemy-migrate, so we need to manually
+ # enable version controlling with this tool to test this code path
+ for branch in (
+ upgrades.EXPAND_BRANCH,
+ upgrades.DATA_MIGRATION_BRANCH,
+ upgrades.CONTRACT_BRANCH,
+ ):
+ repository = upgrades._find_migrate_repo(branch)
+ migrate_api.version_control(
+ engine, repository, upgrades.MIGRATE_INIT_VERSION)
+
+ # now we can apply migrations as expected and the legacy path will be
+ # followed
+ super().db_sync(engine)
+
+
+class TestModelsLegacySyncSQLite(
+ KeystoneModelsMigrationsLegacySync,
+ test_fixtures.OpportunisticDBTestMixin,
+ base.BaseTestCase,
+):
+ pass
+
+
+class TestModelsLegacySyncMySQL(
+ KeystoneModelsMigrationsLegacySync,
+ test_fixtures.OpportunisticDBTestMixin,
+ base.BaseTestCase,
+):
+ FIXTURE = test_fixtures.MySQLOpportunisticFixture
+
+
+class TestModelsLegacySyncPostgreSQL(
+ KeystoneModelsMigrationsLegacySync,
+ test_fixtures.OpportunisticDBTestMixin,
+ base.BaseTestCase,
+):
+ FIXTURE = test_fixtures.PostgresqlOpportunisticFixture
diff --git a/keystone/tests/unit/common/test_utils.py b/keystone/tests/unit/common/test_utils.py
index 39962b4f6..673175aea 100644
--- a/keystone/tests/unit/common/test_utils.py
+++ b/keystone/tests/unit/common/test_utils.py
@@ -134,6 +134,17 @@ class UtilsTestCase(unit.BaseTestCase):
common_utils.hash_password,
invalid_length_password)
+ def test_max_algo_length_truncates_password(self):
+ self.config_fixture.config(strict_password_check=True)
+ self.config_fixture.config(group='identity',
+ password_hash_algorithm='bcrypt')
+ self.config_fixture.config(group='identity',
+ max_password_length='64')
+ invalid_length_password = '0' * 64
+ self.assertRaises(exception.PasswordVerificationError,
+ common_utils.hash_password,
+ invalid_length_password)
+
def _create_test_user(self, password=OPTIONAL):
user = {"name": "hthtest"}
if password is not self.OPTIONAL:
@@ -214,6 +225,114 @@ class UtilsTestCase(unit.BaseTestCase):
expected_string_ending = str(time.second) + 'Z'
self.assertTrue(string_time.endswith(expected_string_ending))
+ def test_get_certificate_subject_dn(self):
+ cert_pem = unit.create_pem_certificate(
+ unit.create_dn(
+ common_name='test',
+ organization_name='dev',
+ locality_name='suzhou',
+ state_or_province_name='jiangsu',
+ country_name='cn',
+ user_id='user_id',
+ domain_component='test.com',
+ email_address='user@test.com'
+ ))
+
+ dn = common_utils.get_certificate_subject_dn(cert_pem)
+ self.assertEqual('test', dn.get('CN'))
+ self.assertEqual('dev', dn.get('O'))
+ self.assertEqual('suzhou', dn.get('L'))
+ self.assertEqual('jiangsu', dn.get('ST'))
+ self.assertEqual('cn', dn.get('C'))
+ self.assertEqual('user_id', dn.get('UID'))
+ self.assertEqual('test.com', dn.get('DC'))
+ self.assertEqual('user@test.com', dn.get('emailAddress'))
+
+ def test_get_certificate_issuer_dn(self):
+ root_cert, root_key = unit.create_certificate(
+ unit.create_dn(
+ country_name='jp',
+ state_or_province_name='kanagawa',
+ locality_name='kawasaki',
+ organization_name='fujitsu',
+ organizational_unit_name='test',
+ common_name='root'
+ ))
+
+ cert_pem = unit.create_pem_certificate(
+ unit.create_dn(
+ common_name='test',
+ organization_name='dev',
+ locality_name='suzhou',
+ state_or_province_name='jiangsu',
+ country_name='cn',
+ user_id='user_id',
+ domain_component='test.com',
+ email_address='user@test.com'
+ ), ca=root_cert, ca_key=root_key)
+
+ dn = common_utils.get_certificate_subject_dn(cert_pem)
+ self.assertEqual('test', dn.get('CN'))
+ self.assertEqual('dev', dn.get('O'))
+ self.assertEqual('suzhou', dn.get('L'))
+ self.assertEqual('jiangsu', dn.get('ST'))
+ self.assertEqual('cn', dn.get('C'))
+ self.assertEqual('user_id', dn.get('UID'))
+ self.assertEqual('test.com', dn.get('DC'))
+ self.assertEqual('user@test.com', dn.get('emailAddress'))
+
+ dn = common_utils.get_certificate_issuer_dn(cert_pem)
+ self.assertEqual('root', dn.get('CN'))
+ self.assertEqual('fujitsu', dn.get('O'))
+ self.assertEqual('kawasaki', dn.get('L'))
+ self.assertEqual('kanagawa', dn.get('ST'))
+ self.assertEqual('jp', dn.get('C'))
+ self.assertEqual('test', dn.get('OU'))
+
+ def test_get_certificate_subject_dn_not_pem_format(self):
+ self.assertRaises(
+ exception.ValidationError,
+ common_utils.get_certificate_subject_dn,
+ 'MIIEkTCCAnkCFDIzsgpdRGF//5ukMuueXnRxQALhMA0GCSqGSIb3DQEBCwUAMIGC')
+
+ def test_get_certificate_issuer_dn_not_pem_format(self):
+ self.assertRaises(
+ exception.ValidationError,
+ common_utils.get_certificate_issuer_dn,
+ 'MIIEkTCCAnkCFDIzsgpdRGF//5ukMuueXnRxQALhMA0GCSqGSIb3DQEBCwUAMIGC')
+
+ def test_get_certificate_thumbprint(self):
+ cert_pem = '''-----BEGIN CERTIFICATE-----
+ MIIEkTCCAnkCFDIzsgpdRGF//5ukMuueXnRxQALhMA0GCSqGSIb3DQEBCwUAMIGC
+ MQswCQYDVQQGEwJjbjEQMA4GA1UECAwHamlhbmdzdTEPMA0GA1UEBwwGc3V6aG91
+ MQ0wCwYDVQQKDARqZnR0MQwwCgYDVQQLDANkZXYxEzARBgNVBAMMCnJvb3QubG9j
+ YWwxHjAcBgkqhkiG9w0BCQEWD3Rlc3RAcm9vdC5sb2NhbDAeFw0yMjA2MTYwNzM3
+ NTZaFw0yMjEyMTMwNzM3NTZaMIGGMQswCQYDVQQGEwJjbjEQMA4GA1UECAwHamlh
+ bmdzdTEPMA0GA1UEBwwGc3V6aG91MQ0wCwYDVQQKDARqZnR0MQwwCgYDVQQLDANk
+ ZXYxFTATBgNVBAMMDGNsaWVudC5sb2NhbDEgMB4GCSqGSIb3DQEJARYRdGVzdEBj
+ bGllbnQubG9jYWwwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCah1Uz
+ 2OVbk8zLslxxGV+AR6FTy9b/VoinmB6A0jJA1Zz2D6rsjN2S5xQ5wHIO2WSVX9Ry
+ SonOmeZZqRA9faNJcNNcrBhJICScAhMGHCuli3EUMry/6xK0OYHGgI2X6mcTaIjv
+ tFKHO1BCb5YGdNBa+ff+ncTeVX/PeN3nKjA4xvQb9JZxJTgY0JVhledbaoepFSdW
+ EFW0nbUF+8lj1gCo5E4cAX1eTcUKs43FnWGCJcJT6FB1vP9x8e4h9p0RWbb9GMrU
+ DXKbzF5e28qIiCkYHv2/A/G/J+aeg2K4Cbqy+8908I5BdWZEsJBhWJ0+CEtC3n91
+ fU6dnAyipO496aa/AgMBAAEwDQYJKoZIhvcNAQELBQADggIBABoOOmLrWNlQzodS
+ n2wfkiF0Lz+pj3FKFPz3sYUYWkAiKXU/6RRu1Md7INRo0MFau4iAN8Raq4JFdbnU
+ HRN9G/UU58ETqi/8cYfOA2+MHHRif1Al9YSvTgHQa6ljZPttGeigOqmGlovPd+7R
+ vLXlKtcr5XBVk9pWPmVpwtAN3bMVlphgEqBO26Ff9J3G5PaNQ6UdpwXC19mRqk6r
+ BUsFBRwy7EeeGNy8DvoHTJfMc2JUbLjesSMOmIkaOGbhe327iRd/GJe4dO91+prE
+ HNWVR/bVoGiUZvSLPqrwU173XbdNd6yMKC+fULICI34eaWDe1zHrg9XdRxtessUx
+ OyJw5bgH09lOs8DSYXjFyx5lDxtERKHaLRgpSNd5foQO/mHiegC2qmdtxqKyOwub
+ V/h6vziDsFZfciwmo6iw3ZpdBvjbYqw32joURQ1IVh1naY6ZzMwq/PsyYVhMYUNB
+ XYPKvm68YfKuYmpwF7Z5Wll4EWm5DTq1dbmjdo+OQsMyiwWepWE0WV7Ng+AEbTqP
+ /akzUXt/AEbbBpZskB6v5q/YOcglWuAQVXs2viguyDvOQVbEB7JKDi4xzlZg3kQP
+ apjt17fip7wQi2jJkwdyAqvrdi/xLhK5+6BSo04lNc8sGZ9wToIoNkgv0cG+BrVU
+ 4cJHNiTQl8bxfSgwemgSYnnyXM4k
+ -----END CERTIFICATE-----'''
+ thumbprint = common_utils.get_certificate_thumbprint(cert_pem)
+ self.assertEqual('dMmoJKE9MIJK9VcyahYCb417JDhDfdtTiq_krco8-tk=',
+ thumbprint)
+
class ServiceHelperTests(unit.BaseTestCase):
diff --git a/keystone/tests/unit/contrib/federation/test_utils.py b/keystone/tests/unit/contrib/federation/test_utils.py
index f9153cb09..4d9f98f2d 100644
--- a/keystone/tests/unit/contrib/federation/test_utils.py
+++ b/keystone/tests/unit/contrib/federation/test_utils.py
@@ -764,6 +764,24 @@ class MappingRuleEngineTests(unit.BaseTestCase):
self.assertEqual('ALL USERS',
mapped_properties['group_names'][0]['name'])
+ def test_rule_engine_groups_mapping_only_one_numerical_group(self):
+ """Test mapping engine when groups is explicitly set.
+
+ If the groups list has only one group,
+ test if the transformation is done correctly
+
+ """
+ mapping = mapping_fixtures.MAPPING_GROUPS_WITH_EMAIL
+ assertion = mapping_fixtures.GROUPS_ASSERTION_ONLY_ONE_NUMERICAL_GROUP
+ rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules'])
+ mapped_properties = rp.process(assertion)
+ self.assertIsNotNone(mapped_properties)
+ self.assertEqual('jsmith', mapped_properties['user']['name'])
+ self.assertEqual('jill@example.com',
+ mapped_properties['user']['email'])
+ self.assertEqual('1234',
+ mapped_properties['group_names'][0]['name'])
+
def test_rule_engine_group_ids_mapping_whitelist(self):
"""Test mapping engine when group_ids is explicitly set.
diff --git a/keystone/tests/unit/core.py b/keystone/tests/unit/core.py
index 2a6c12038..6e0cad62e 100644
--- a/keystone/tests/unit/core.py
+++ b/keystone/tests/unit/core.py
@@ -28,6 +28,10 @@ import socket
import sys
import uuid
+from cryptography.hazmat.primitives.asymmetric import rsa
+from cryptography.hazmat.primitives import hashes
+from cryptography.hazmat.primitives.serialization import Encoding
+from cryptography import x509
import fixtures
import flask
from flask import testing as flask_testing
@@ -433,6 +437,77 @@ def new_totp_credential(user_id, project_id=None, blob=None):
return credential
+def create_dn(
+ common_name=None,
+ locality_name=None,
+ state_or_province_name=None,
+ organization_name=None,
+ organizational_unit_name=None,
+ country_name=None,
+ street_address=None,
+ domain_component=None,
+ user_id=None,
+ email_address=None,
+):
+ oid = x509.NameOID
+ attr = x509.NameAttribute
+ dn = []
+ if common_name:
+ dn.append(attr(oid.COMMON_NAME, common_name))
+ if locality_name:
+ dn.append(attr(oid.LOCALITY_NAME, locality_name))
+ if state_or_province_name:
+ dn.append(attr(oid.STATE_OR_PROVINCE_NAME, state_or_province_name))
+ if organization_name:
+ dn.append(attr(oid.ORGANIZATION_NAME, organization_name))
+ if organizational_unit_name:
+ dn.append(attr(oid.ORGANIZATIONAL_UNIT_NAME, organizational_unit_name))
+ if country_name:
+ dn.append(attr(oid.COUNTRY_NAME, country_name))
+ if street_address:
+ dn.append(attr(oid.STREET_ADDRESS, street_address))
+ if domain_component:
+ dn.append(attr(oid.DOMAIN_COMPONENT, domain_component))
+ if user_id:
+ dn.append(attr(oid.USER_ID, user_id))
+ if email_address:
+ dn.append(attr(oid.EMAIL_ADDRESS, email_address))
+ return x509.Name(dn)
+
+
+def update_dn(dn1, dn2):
+ dn1_attrs = {attr.oid: attr for attr in dn1}
+ dn2_attrs = {attr.oid: attr for attr in dn2}
+ dn1_attrs.update(dn2_attrs)
+ return x509.Name([attr for attr in dn1_attrs.values()])
+
+
+def create_certificate(subject_dn, ca=None, ca_key=None):
+ private_key = rsa.generate_private_key(
+ public_exponent=65537,
+ key_size=2048,
+ )
+ issuer = ca.subject if ca else subject_dn
+ if not ca_key:
+ ca_key = private_key
+ today = datetime.datetime.today()
+ cert = x509.CertificateBuilder(
+ issuer_name=issuer,
+ subject_name=subject_dn,
+ public_key=private_key.public_key(),
+ serial_number=x509.random_serial_number(),
+ not_valid_before=today,
+ not_valid_after=today + datetime.timedelta(365, 0, 0),
+ ).sign(ca_key, hashes.SHA256())
+
+ return cert, private_key
+
+
+def create_pem_certificate(subject_dn, ca=None, ca_key=None):
+ cert, _ = create_certificate(subject_dn, ca=ca, ca_key=ca_key)
+ return cert.public_bytes(Encoding.PEM).decode('ascii')
+
+
def new_application_credential_ref(roles=None,
name=None,
expires=None,
diff --git a/keystone/tests/unit/fakeldap.py b/keystone/tests/unit/fakeldap.py
index f374322d1..5119305a7 100644
--- a/keystone/tests/unit/fakeldap.py
+++ b/keystone/tests/unit/fakeldap.py
@@ -296,6 +296,9 @@ class FakeLdap(common.LDAPHandler):
raise ldap.SERVER_DOWN
whos = ['cn=Admin', CONF.ldap.user]
if (who in whos and cred in ['password', CONF.ldap.password]):
+ self.connected = True
+ self.who = who
+ self.cred = cred
return
attrs = self.db.get(self.key(who))
@@ -316,6 +319,9 @@ class FakeLdap(common.LDAPHandler):
def unbind_s(self):
"""Provide for compatibility but this method is ignored."""
+ self.connected = False
+ self.who = None
+ self.cred = None
if server_fail:
raise ldap.SERVER_DOWN
@@ -534,7 +540,7 @@ class FakeLdap(common.LDAPHandler):
raise exception.NotImplemented()
# only passing a single server control is supported by this fake ldap
- if len(serverctrls) > 1:
+ if serverctrls and len(serverctrls) > 1:
raise exception.NotImplemented()
# search_ext is async and returns an identifier used for
@@ -589,6 +595,7 @@ class FakeLdapPool(FakeLdap):
def __init__(self, uri, retry_max=None, retry_delay=None, conn=None):
super(FakeLdapPool, self).__init__(conn=conn)
self.url = uri
+ self._uri = uri
self.connected = None
self.conn = self
self._connection_time = 5 # any number greater than 0
diff --git a/keystone/tests/unit/ksfixtures/warnings.py b/keystone/tests/unit/ksfixtures/warnings.py
index 9e3a9c4d4..43519925f 100644
--- a/keystone/tests/unit/ksfixtures/warnings.py
+++ b/keystone/tests/unit/ksfixtures/warnings.py
@@ -35,6 +35,23 @@ class WarningsFixture(fixtures.Fixture):
module='^keystone\\.',
)
+ warnings.filterwarnings(
+ 'ignore',
+ message=(
+ 'Policy enforcement is depending on the value of '
+ '(token|group_ids). '
+ 'This key is deprecated. Please update your policy '
+ 'file to use the standard policy values.'
+ ),
+ )
+
+ # NOTE(stephenfin): Ignore scope check UserWarnings from oslo.policy.
+ warnings.filterwarnings(
+ 'ignore',
+ message="Policy .* failed scope check",
+ category=UserWarning,
+ )
+
# TODO(stephenfin): This will be fixed once we drop sqlalchemy-migrate
warnings.filterwarnings(
'ignore',
diff --git a/keystone/tests/unit/mapping_fixtures.py b/keystone/tests/unit/mapping_fixtures.py
index 51f1526bb..5a6dbf8c3 100644
--- a/keystone/tests/unit/mapping_fixtures.py
+++ b/keystone/tests/unit/mapping_fixtures.py
@@ -1735,6 +1735,12 @@ GROUPS_ASSERTION_ONLY_ONE_GROUP = {
'groups': 'ALL USERS'
}
+GROUPS_ASSERTION_ONLY_ONE_NUMERICAL_GROUP = {
+ 'userEmail': 'jill@example.com',
+ 'UserName': 'jsmith',
+ 'groups': '1234'
+}
+
GROUPS_DOMAIN_ASSERTION = {
'openstack_user': 'bwilliams',
'openstack_user_domain': 'default',
diff --git a/keystone/tests/unit/test_backend_ldap_pool.py b/keystone/tests/unit/test_backend_ldap_pool.py
index 9b5e92748..1c4b19804 100644
--- a/keystone/tests/unit/test_backend_ldap_pool.py
+++ b/keystone/tests/unit/test_backend_ldap_pool.py
@@ -163,12 +163,23 @@ class LdapPoolCommonTestMixin(object):
# Then open 3 connections again and make sure size does not grow
# over 3
- with _get_conn() as _: # conn1
+ with _get_conn() as c1: # conn1
+ self.assertEqual(3, len(ldappool_cm))
+ c1.connected = False
+ with _get_conn() as c2: # conn2
+ self.assertEqual(3, len(ldappool_cm))
+ c2.connected = False
+ with _get_conn() as c3: # conn3
+ c3.connected = False
+ c3.unbind_ext_s()
+ self.assertEqual(3, len(ldappool_cm))
+
+ with _get_conn() as c1: # conn1
self.assertEqual(1, len(ldappool_cm))
- with _get_conn() as _: # conn2
+ with _get_conn() as c2: # conn2
self.assertEqual(2, len(ldappool_cm))
- with _get_conn() as _: # conn3
- _.unbind_ext_s()
+ with _get_conn() as c3: # conn3
+ c3.unbind_ext_s()
self.assertEqual(3, len(ldappool_cm))
def test_password_change_with_pool(self):
@@ -209,6 +220,105 @@ class LdapPoolCommonTestMixin(object):
user_id=self.user_sna['id'],
password=old_password)
+ @mock.patch.object(fakeldap.FakeLdap, 'search_ext')
+ def test_search_ext_ensure_pool_connection_released(self, mock_search_ext):
+ """Test search_ext exception resiliency.
+
+ Call search_ext function in isolation. Doing so will cause
+ search_ext to borrow a connection from the pool and associate
+ it with an AsynchronousMessage object. Borrowed connection ought
+ to be released if anything goes wrong during LDAP API call. This
+ test case intentionally throws an exception to ensure everything
+ goes as expected when LDAP connection raises an exception.
+ """
+ class CustomDummyException(Exception):
+ pass
+
+ # Throw an exception intentionally when LDAP
+ # connection search_ext function is called
+ mock_search_ext.side_effect = CustomDummyException()
+ self.config_fixture.config(group='ldap', pool_size=1)
+ pool = self.conn_pools[CONF.ldap.url]
+ user_api = ldap.UserApi(CONF)
+
+ # setUp primes the pool so pool
+ # must have one connection
+ self.assertEqual(1, len(pool))
+ for i in range(1, 10):
+ handler = user_api.get_connection()
+ # Just to ensure that we're using pooled connections
+ self.assertIsInstance(handler.conn, common_ldap.PooledLDAPHandler)
+ # LDAP API will throw CustomDummyException. In this scenario
+ # we expect LDAP connection to be made available back to the
+ # pool.
+ self.assertRaises(
+ CustomDummyException,
+ lambda: handler.search_ext(
+ 'dc=example,dc=test',
+ 'dummy',
+ 'objectclass=*',
+ ['mail', 'userPassword']
+ )
+ )
+ # Pooled connection must not be evicted from the pool
+ self.assertEqual(1, len(pool))
+ # Ensure that the connection is inactive afterwards
+ with pool._pool_lock:
+ for slot, conn in enumerate(pool._pool):
+ self.assertFalse(conn.active)
+
+ self.assertEqual(mock_search_ext.call_count, i)
+
+ @mock.patch.object(fakeldap.FakeLdap, 'result3')
+ def test_result3_ensure_pool_connection_released(self, mock_result3):
+ """Test search_ext-->result3 exception resiliency.
+
+ Call search_ext function, grab an AsynchronousMessage object and
+ call result3 with it. During the result3 call, LDAP API will throw
+ an exception. The expectation is that the associated LDAP pool
+ connection for AsynchronousMessage must be released back to the
+ LDAP connection pool.
+ """
+ class CustomDummyException(Exception):
+ pass
+
+ # Throw an exception intentionally when LDAP
+ # connection result3 function is called
+ mock_result3.side_effect = CustomDummyException()
+ self.config_fixture.config(group='ldap', pool_size=1)
+ pool = self.conn_pools[CONF.ldap.url]
+ user_api = ldap.UserApi(CONF)
+
+ # setUp primes the pool so pool
+ # must have one connection
+ self.assertEqual(1, len(pool))
+ for i in range(1, 10):
+ handler = user_api.get_connection()
+ # Just to ensure that we're using pooled connections
+ self.assertIsInstance(handler.conn, common_ldap.PooledLDAPHandler)
+ msg = handler.search_ext(
+ 'dc=example,dc=test',
+ 'dummy',
+ 'objectclass=*',
+ ['mail', 'userPassword']
+ )
+ # Connection is in use, must be already marked active
+ self.assertTrue(msg.connection.active)
+ # Pooled connection must not be evicted from the pool
+ self.assertEqual(1, len(pool))
+ # LDAP API will throw CustomDummyException. In this
+ # scenario we expect LDAP connection to be made
+ # available back to the pool.
+ self.assertRaises(
+ CustomDummyException,
+ lambda: handler.result3(msg)
+ )
+ # Connection must be set inactive
+ self.assertFalse(msg.connection.active)
+ # Pooled connection must not be evicted from the pool
+ self.assertEqual(1, len(pool))
+ self.assertEqual(mock_result3.call_count, i)
+
class LDAPIdentity(LdapPoolCommonTestMixin,
test_backend_ldap.LDAPIdentity,
diff --git a/keystone/tests/unit/test_cli.py b/keystone/tests/unit/test_cli.py
index c94d8c196..2f9bed064 100644
--- a/keystone/tests/unit/test_cli.py
+++ b/keystone/tests/unit/test_cli.py
@@ -754,18 +754,28 @@ class CliDBSyncTestCase(unit.BaseTestCase):
self.version = None
def setUp(self):
- super(CliDBSyncTestCase, self).setUp()
+ super().setUp()
self.config_fixture = self.useFixture(oslo_config.fixture.Config(CONF))
self.config_fixture.register_cli_opt(cli.command_opt)
- upgrades.offline_sync_database_to_version = mock.Mock()
- upgrades.expand_schema = mock.Mock()
- upgrades.migrate_data = mock.Mock()
- upgrades.contract_schema = mock.Mock()
+
+ self.patchers = patchers = [
+ mock.patch.object(upgrades, "offline_sync_database_to_version"),
+ mock.patch.object(upgrades, "expand_schema"),
+ mock.patch.object(upgrades, "migrate_data"),
+ mock.patch.object(upgrades, "contract_schema"),
+ ]
+ for p in patchers:
+ p.start()
self.command_check = False
self.command_expand = False
self.command_migrate = False
self.command_contract = False
+ def tearDown(self):
+ for p in self.patchers:
+ p.stop()
+ super().tearDown()
+
def _assert_correct_call(self, mocked_function):
for func in [upgrades.offline_sync_database_to_version,
upgrades.expand_schema,
diff --git a/keystone/tests/unit/test_sql_banned_operations.py b/keystone/tests/unit/test_sql_banned_operations.py
index 2a9be1029..95ba2368d 100644
--- a/keystone/tests/unit/test_sql_banned_operations.py
+++ b/keystone/tests/unit/test_sql_banned_operations.py
@@ -1,10 +1,8 @@
-# Copyright 2016 Intel Corporation
-#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
@@ -14,20 +12,38 @@
import os
+from alembic import command as alembic_api
+from alembic import script as alembic_script
import fixtures
-from migrate.versioning import api as versioning_api
-from migrate.versioning import repository
from oslo_db.sqlalchemy import enginefacade
-from oslo_db.sqlalchemy import test_fixtures as db_fixtures
-from oslo_db.sqlalchemy import test_migrations
-from oslotest import base as test_base
-import sqlalchemy
-import testtools
-
-from keystone.common.sql.legacy_migrations import contract_repo
-from keystone.common.sql.legacy_migrations import data_migration_repo
-from keystone.common.sql.legacy_migrations import expand_repo
+from oslo_db.sqlalchemy import test_fixtures
+from oslo_log import log as logging
+
+from keystone.common import sql
from keystone.common.sql import upgrades
+import keystone.conf
+from keystone.tests import unit
+
+# We need to import all of these so the tables are registered. It would be
+# easier if these were all in a central location :(
+import keystone.application_credential.backends.sql # noqa: F401
+import keystone.assignment.backends.sql # noqa: F401
+import keystone.assignment.role_backends.sql_model # noqa: F401
+import keystone.catalog.backends.sql # noqa: F401
+import keystone.credential.backends.sql # noqa: F401
+import keystone.endpoint_policy.backends.sql # noqa: F401
+import keystone.federation.backends.sql # noqa: F401
+import keystone.identity.backends.sql_model # noqa: F401
+import keystone.identity.mapping_backends.sql # noqa: F401
+import keystone.limit.backends.sql # noqa: F401
+import keystone.oauth1.backends.sql # noqa: F401
+import keystone.policy.backends.sql # noqa: F401
+import keystone.resource.backends.sql_model # noqa: F401
+import keystone.resource.config_backends.sql # noqa: F401
+import keystone.revoke.backends.sql # noqa: F401
+import keystone.trust.backends.sql # noqa: F401
+
+LOG = logging.getLogger(__name__)
class DBOperationNotAllowed(Exception):
@@ -37,322 +53,228 @@ class DBOperationNotAllowed(Exception):
class BannedDBSchemaOperations(fixtures.Fixture):
"""Ban some operations for migrations."""
- def __init__(self, banned_ops, migration_repo):
+ def __init__(self, banned_ops, revision):
super().__init__()
self._banned_ops = banned_ops or {}
- self._migration_repo = migration_repo
+ self._revision = revision
@staticmethod
- def _explode(resource_op, repo):
- # Extract the repo name prior to the trailing '/__init__.py'
- repo_name = repo.split('/')[-2]
- raise DBOperationNotAllowed(
- 'Operation %s() is not allowed in %s database migrations' % (
- resource_op, repo_name))
+ def _explode(op, revision):
+ msg = "Operation '%s' is not allowed in migration %s"
+ raise DBOperationNotAllowed(msg % (op, revision))
def setUp(self):
super().setUp()
explode_lambda = {
- 'Table.create': lambda *a, **k: self._explode(
- 'Table.create', self._migration_repo),
- 'Table.alter': lambda *a, **k: self._explode(
- 'Table.alter', self._migration_repo),
- 'Table.drop': lambda *a, **k: self._explode(
- 'Table.drop', self._migration_repo),
- 'Table.insert': lambda *a, **k: self._explode(
- 'Table.insert', self._migration_repo),
- 'Table.update': lambda *a, **k: self._explode(
- 'Table.update', self._migration_repo),
- 'Table.delete': lambda *a, **k: self._explode(
- 'Table.delete', self._migration_repo),
- 'Column.create': lambda *a, **k: self._explode(
- 'Column.create', self._migration_repo),
- 'Column.alter': lambda *a, **k: self._explode(
- 'Column.alter', self._migration_repo),
- 'Column.drop': lambda *a, **k: self._explode(
- 'Column.drop', self._migration_repo)
+ x: lambda *a, **k: self._explode(x, self._revision)
+ for x in [
+ 'add_column',
+ 'alter_column',
+ 'batch_alter_table',
+ 'bulk_insert',
+ 'create_check_constraint',
+ 'create_exclude_constraint',
+ 'create_foreign_key',
+ 'create_index',
+ 'create_primary_key',
+ 'create_table',
+ 'create_table_comment',
+ 'create_unique_constraint',
+ 'drop_column',
+ 'drop_constraint',
+ 'drop_index',
+ 'drop_table',
+ 'drop_table_comment',
+ # 'execute',
+ 'rename_table',
+ ]
}
- for resource in self._banned_ops:
- for op in self._banned_ops[resource]:
- resource_op = '%(resource)s.%(op)s' % {
- 'resource': resource, 'op': op}
- self.useFixture(fixtures.MonkeyPatch(
- 'sqlalchemy.%s' % resource_op,
- explode_lambda[resource_op]))
-
-
-class TestBannedDBSchemaOperations(testtools.TestCase):
- """Test the BannedDBSchemaOperations fixture."""
-
- def test_column(self):
- """Test column operations raise DBOperationNotAllowed."""
- column = sqlalchemy.Column()
- with BannedDBSchemaOperations(
- banned_ops={'Column': ['create', 'alter', 'drop']},
- migration_repo=expand_repo.__file__,
- ):
- self.assertRaises(DBOperationNotAllowed, column.drop)
- self.assertRaises(DBOperationNotAllowed, column.alter)
- self.assertRaises(DBOperationNotAllowed, column.create)
-
- def test_table(self):
- """Test table operations raise DBOperationNotAllowed."""
- table = sqlalchemy.Table()
- with BannedDBSchemaOperations(
- banned_ops={'Table': ['create', 'alter', 'drop',
- 'insert', 'update', 'delete']},
- migration_repo=expand_repo.__file__,
- ):
- self.assertRaises(DBOperationNotAllowed, table.drop)
- self.assertRaises(DBOperationNotAllowed, table.alter)
- self.assertRaises(DBOperationNotAllowed, table.create)
- self.assertRaises(DBOperationNotAllowed, table.insert)
- self.assertRaises(DBOperationNotAllowed, table.update)
- self.assertRaises(DBOperationNotAllowed, table.delete)
-
-
-class KeystoneMigrationsCheckers(test_migrations.WalkVersionsMixin):
- """Walk over and test all sqlalchemy-migrate migrations."""
-
- migrate_file = None
- first_version = 1
- # A mapping of entity (Table, Column, ...) to operation
- banned_ops = {}
- exceptions = [
- # NOTE(xek): Reviewers: DO NOT ALLOW THINGS TO BE ADDED HERE UNLESS
- # JUSTIFICATION CAN BE PROVIDED AS TO WHY THIS WILL NOT CAUSE
- # PROBLEMS FOR ROLLING UPGRADES.
- ]
-
- @property
- def INIT_VERSION(self):
- return upgrades.INITIAL_VERSION
-
- @property
- def REPOSITORY(self):
- return repository.Repository(
- os.path.abspath(os.path.dirname(self.migrate_file))
- )
-
- @property
- def migration_api(self):
- temp = __import__('oslo_db.sqlalchemy.migration', globals(),
- locals(), ['versioning_api'], 0)
- return temp.versioning_api
-
- @property
- def migrate_engine(self):
- return self.engine
-
- def migrate_fully(self, repo_name):
- abs_path = os.path.abspath(os.path.dirname(repo_name))
- init_version = upgrades.get_init_version(abs_path=abs_path)
- schema = versioning_api.ControlledSchema.create(
- self.migrate_engine, abs_path, init_version)
- max_version = schema.repository.version().version
- upgrade = True
- err = ''
- version = versioning_api._migrate_version(
- schema, max_version, upgrade, err)
- schema.upgrade(version)
-
- def migrate_up(self, version, with_data=False):
- """Check that migrations don't cause downtime.
-
- Schema migrations can be done online, allowing for rolling upgrades.
- """
- # NOTE(xek):
- # self.exceptions contains a list of migrations where we allow the
- # banned operations. Only Migrations which don't cause
- # incompatibilities are allowed, for example dropping an index or
- # constraint.
- #
- # Please follow the guidelines outlined at:
- # https://docs.openstack.org/keystone/latest/contributor/database-migrations.html
-
- if version >= self.first_version and version not in self.exceptions:
- banned_ops = self.banned_ops
- else:
- banned_ops = None
- with BannedDBSchemaOperations(banned_ops, self.migrate_file):
- super().migrate_up(version, with_data)
-
- snake_walk = False
- downgrade = False
-
- def test_walk_versions(self):
- self.walk_versions(self.snake_walk, self.downgrade)
-
-
-class TestKeystoneExpandSchemaMigrations(KeystoneMigrationsCheckers):
-
- migrate_file = expand_repo.__file__
- first_version = 1
- # TODO(henry-nash): we should include Table update here as well, but this
- # causes the update of the migration version to appear as a banned
- # operation!
- banned_ops = {'Table': ['alter', 'drop', 'insert', 'delete'],
- 'Column': ['alter', 'drop']}
- exceptions = [
+ for op in self._banned_ops:
+ self.useFixture(
+ fixtures.MonkeyPatch('alembic.op.%s' % op, explode_lambda[op])
+ )
+
+
+class KeystoneMigrationsWalk(
+ test_fixtures.OpportunisticDBTestMixin,
+):
+ # Migrations can take a long time, particularly on underpowered CI nodes.
+ # Give them some breathing room.
+ TIMEOUT_SCALING_FACTOR = 4
+
+ BANNED_OPS = {
+ 'expand': [
+ 'alter_column',
+ 'batch_alter_table',
+ 'drop_column',
+ 'drop_constraint',
+ 'drop_index',
+ 'drop_table',
+ 'drop_table_comment',
+ # 'execute',
+ 'rename_table',
+ ],
+ 'contract': {
+ 'add_column',
+ 'bulk_insert',
+ 'create_check_constraint',
+ 'create_exclude_constraint',
+ 'create_foreign_key',
+ 'create_index',
+ 'create_primary_key',
+ 'create_table',
+ 'create_table_comment',
+ 'create_unique_constraint',
+ # 'execute',
+ 'rename_table',
+ },
+ }
+
+ BANNED_OP_EXCEPTIONS = [
# NOTE(xek, henry-nash): Reviewers: DO NOT ALLOW THINGS TO BE ADDED
# HERE UNLESS JUSTIFICATION CAN BE PROVIDED AS TO WHY THIS WILL NOT
# CAUSE PROBLEMS FOR ROLLING UPGRADES.
-
- # Migration 002 changes the column type, from datetime to timestamp in
- # the contract phase. Adding exception here to pass expand banned
- # tests, otherwise fails.
- 2,
- # NOTE(lbragstad): The expand 003 migration alters the credential table
- # to make `blob` nullable. This allows the triggers added in 003 to
- # catch writes when the `blob` attribute isn't populated. We do this so
- # that the triggers aren't aware of the encryption implementation.
- 3,
- # Migration 004 changes the password created_at column type, from
- # timestamp to datetime and updates the initial value in the contract
- # phase. Adding an exception here to pass expand banned tests,
- # otherwise fails.
- 4,
-
- # Migration 79 changes a varchar column length, doesn't
- # convert the data within that column/table and doesn't rebuild
- # indexes.
- 79
]
def setUp(self):
- super(TestKeystoneExpandSchemaMigrations, self).setUp()
-
-
-class TestKeystoneExpandSchemaMigrationsMySQL(
- db_fixtures.OpportunisticDBTestMixin,
- test_base.BaseTestCase,
- TestKeystoneExpandSchemaMigrations):
- FIXTURE = db_fixtures.MySQLOpportunisticFixture
-
- def setUp(self):
- super(TestKeystoneExpandSchemaMigrationsMySQL, self).setUp()
- self.engine = enginefacade.writer.get_engine()
- self.sessionmaker = enginefacade.writer.get_sessionmaker()
-
-
-class TestKeystoneExpandSchemaMigrationsPostgreSQL(
- db_fixtures.OpportunisticDBTestMixin,
- test_base.BaseTestCase,
- TestKeystoneExpandSchemaMigrations):
- FIXTURE = db_fixtures.PostgresqlOpportunisticFixture
-
- def setUp(self):
- super(TestKeystoneExpandSchemaMigrationsPostgreSQL, self).setUp()
+ super().setUp()
self.engine = enginefacade.writer.get_engine()
- self.sessionmaker = enginefacade.writer.get_sessionmaker()
+ self.config = upgrades._find_alembic_conf()
+ self.init_version = upgrades.ALEMBIC_INIT_VERSION
+
+ # TODO(stephenfin): Do we need this? I suspect not since we're using
+ # enginefacade.write.get_engine() directly above
+ # Override keystone's context manager to be oslo.db's global context
+ # manager.
+ sql.core._TESTING_USE_GLOBAL_CONTEXT_MANAGER = True
+ self.addCleanup(setattr,
+ sql.core, '_TESTING_USE_GLOBAL_CONTEXT_MANAGER', False)
+ self.addCleanup(sql.cleanup)
+
+ def _migrate_up(self, connection, revision):
+ version = revision.revision
+
+ if version == self.init_version: # no tests for the initial revision
+ alembic_api.upgrade(self.config, version)
+ return
+
+ self.assertIsNotNone(
+ getattr(self, '_check_%s' % version, None),
+ (
+ 'DB Migration %s does not have a test; you must add one'
+ ) % version,
+ )
+ pre_upgrade = getattr(self, '_pre_upgrade_%s' % version, None)
+ if pre_upgrade:
+ pre_upgrade(connection)
-class TestKeystoneDataMigrations(
- KeystoneMigrationsCheckers):
+ banned_ops = []
+ if version not in self.BANNED_OP_EXCEPTIONS:
+ # there should only ever be one label, but this is safer
+ for branch_label in revision.branch_labels:
+ banned_ops.extend(self.BANNED_OPS[branch_label])
- migrate_file = data_migration_repo.__file__
- first_version = 1
- banned_ops = {'Table': ['create', 'alter', 'drop'],
- 'Column': ['create', 'alter', 'drop']}
- exceptions = [
- # NOTE(xek, henry-nash): Reviewers: DO NOT ALLOW THINGS TO BE ADDED
- # HERE UNLESS JUSTIFICATION CAN BE PROVIDED AS TO WHY THIS WILL NOT
- # CAUSE PROBLEMS FOR ROLLING UPGRADES.
+ with BannedDBSchemaOperations(banned_ops, version):
+ alembic_api.upgrade(self.config, version)
- # Migration 002 changes the column type, from datetime to timestamp in
- # the contract phase. Adding exception here to pass banned data
- # migration tests. Fails otherwise.
- 2,
- # Migration 004 changes the password created_at column type, from
- # timestamp to datetime and updates the initial value in the contract
- # phase. Adding an exception here to pass data migrations banned tests,
- # otherwise fails.
- 4
- ]
+ post_upgrade = getattr(self, '_check_%s' % version, None)
+ if post_upgrade:
+ post_upgrade(connection)
- def setUp(self):
- super(TestKeystoneDataMigrations, self).setUp()
- self.migrate_fully(expand_repo.__file__)
+ def _pre_upgrade_e25ffa003242(self, connection):
+ """This is a no-op migration."""
+ pass
+ def _check_e25ffa003242(self, connection):
+ """This is a no-op migration."""
+ pass
-class TestKeystoneDataMigrationsMySQL(
- TestKeystoneDataMigrations,
- db_fixtures.OpportunisticDBTestMixin):
- FIXTURE = db_fixtures.MySQLOpportunisticFixture
+ def _pre_upgrade_29e87d24a316(self, connection):
+ """This is a no-op migration."""
+ pass
+ def _check_29e87d24a316(self, connection):
+ """This is a no-op migration."""
+ pass
-class TestKeystoneDataMigrationsPostgreSQL(
- TestKeystoneDataMigrations,
- db_fixtures.OpportunisticDBTestMixin):
- FIXTURE = db_fixtures.PostgresqlOpportunisticFixture
+ def test_single_base_revision(self):
+ """Ensure we only have a single base revision.
+ There's no good reason for us to have diverging history, so validate
+ that only one base revision exists. This will prevent simple errors
+ where people forget to specify the base revision. If this fail for
+ your change, look for migrations that do not have a 'revises' line in
+ them.
+ """
+ script = alembic_script.ScriptDirectory.from_config(self.config)
+ self.assertEqual(1, len(script.get_bases()))
-class TestKeystoneDataMigrationsSQLite(
- TestKeystoneDataMigrations,
- db_fixtures.OpportunisticDBTestMixin):
- pass
+ def test_head_revisions(self):
+ """Ensure we only have a two head revisions.
+ There's no good reason for us to have diverging history beyond the
+ expand and contract branches, so validate that only these head
+ revisions exist. This will prevent merge conflicts adding additional
+ head revision points. If this fail for your change, look for migrations
+ with the duplicate 'revises' line in them.
+ """
+ script = alembic_script.ScriptDirectory.from_config(self.config)
+ self.assertEqual(2, len(script.get_heads()))
-class TestKeystoneContractSchemaMigrations(
- KeystoneMigrationsCheckers):
+ def test_walk_versions(self):
+ with self.engine.begin() as connection:
+ self.config.attributes['connection'] = connection
+ script = alembic_script.ScriptDirectory.from_config(self.config)
+ revisions = [x for x in script.walk_revisions()]
+
+ # for some reason, 'walk_revisions' gives us the revisions in
+ # reverse chronological order so we have to invert this
+ revisions.reverse()
+ self.assertEqual(revisions[0].revision, self.init_version)
+
+ for revision in revisions:
+ LOG.info('Testing revision %s', revision.revision)
+ self._migrate_up(connection, revision)
+
+ def _get_head_from_file(self, branch):
+ path = os.path.join(
+ os.path.dirname(upgrades.__file__),
+ 'migrations',
+ 'versions',
+ f'{branch.upper()}_HEAD',
+ )
- migrate_file = contract_repo.__file__
- first_version = 1
- # TODO(henry-nash): we should include Table update here as well, but this
- # causes the update of the migration version to appear as a banned
- # operation!
- banned_ops = {'Table': ['create', 'insert', 'delete'],
- 'Column': ['create']}
- exceptions = [
- # NOTE(xek, henry-nash): Reviewers: DO NOT ALLOW THINGS TO BE ADDED
- # HERE UNLESS JUSTIFICATION CAN BE PROVIDED AS TO WHY THIS WILL NOT
- # CAUSE PROBLEMS FOR ROLLING UPGRADES.
+ with open(path) as fh:
+ return fh.read().strip()
- # Migration 002 changes the column type, from datetime to timestamp.
- # To do this, the column is first dropped and recreated. This should
- # not have any negative impact on a rolling upgrade deployment.
- 2,
- # Migration 004 changes the password created_at column type, from
- # timestamp to datetime and updates the created_at value. This is
- # likely not going to impact a rolling upgrade as the contract repo is
- # executed once the code has been updated; thus the created_at column
- # would be populated for any password changes. That being said, there
- # could be a performance issue for existing large password tables, as
- # the migration is not batched. However, it's a compromise and not
- # likely going to be a problem for operators.
- 4,
- # Migration 013 updates a foreign key constraint at the federated_user
- # table. It is a composite key pointing to the procotol.id and
- # protocol.idp_id columns. Since we can't create a new foreign key
- # before dropping the old one and the operations happens in the same
- # upgrade phase, adding an exception here to pass the contract
- # banned tests.
- 13
- ]
+ def test_db_version_alembic(self):
+ upgrades.offline_sync_database_to_version(engine=self.engine)
- def setUp(self):
- super(TestKeystoneContractSchemaMigrations, self).setUp()
- self.migrate_fully(expand_repo.__file__)
- self.migrate_fully(data_migration_repo.__file__)
+ for branch in (upgrades.EXPAND_BRANCH, upgrades.CONTRACT_BRANCH):
+ head = self._get_head_from_file(branch)
+ self.assertEqual(head, upgrades.get_db_version(branch))
-class TestKeystoneContractSchemaMigrationsMySQL(
- TestKeystoneContractSchemaMigrations,
- db_fixtures.OpportunisticDBTestMixin):
- FIXTURE = db_fixtures.MySQLOpportunisticFixture
+class TestMigrationsWalkSQLite(
+ KeystoneMigrationsWalk,
+ test_fixtures.OpportunisticDBTestMixin,
+ unit.TestCase,
+):
+ pass
-class TestKeystoneContractSchemaMigrationsPostgreSQL(
- TestKeystoneContractSchemaMigrations,
- db_fixtures.OpportunisticDBTestMixin):
- FIXTURE = db_fixtures.PostgresqlOpportunisticFixture
+class TestMigrationsWalkMySQL(
+ KeystoneMigrationsWalk,
+ test_fixtures.OpportunisticDBTestMixin,
+ unit.TestCase,
+):
+ FIXTURE = test_fixtures.MySQLOpportunisticFixture
-class TestKeystoneContractSchemaMigrationsSQLite(
- TestKeystoneContractSchemaMigrations,
- db_fixtures.OpportunisticDBTestMixin):
- # In Sqlite an alter will appear as a create, so if we check for creates
- # we will get false positives.
- def setUp(self):
- super(TestKeystoneContractSchemaMigrationsSQLite, self).setUp()
- self.banned_ops['Table'].remove('create')
+class TestMigrationsWalkPostgreSQL(
+ KeystoneMigrationsWalk,
+ test_fixtures.OpportunisticDBTestMixin,
+ unit.TestCase,
+):
+ FIXTURE = test_fixtures.PostgresqlOpportunisticFixture
diff --git a/keystone/tests/unit/test_sql_upgrade.py b/keystone/tests/unit/test_sql_upgrade.py
index 78f644977..5a8211881 100644
--- a/keystone/tests/unit/test_sql_upgrade.py
+++ b/keystone/tests/unit/test_sql_upgrade.py
@@ -39,28 +39,23 @@ For further information, see `oslo.db documentation
all data will be lost.
"""
-import glob
-import os
-
import fixtures
-from migrate.versioning import api as migrate_api
from migrate.versioning import script
-from oslo_db import exception as db_exception
+from oslo_db import options as db_options
from oslo_db.sqlalchemy import enginefacade
from oslo_db.sqlalchemy import test_fixtures as db_fixtures
from oslo_log import fixture as log_fixture
from oslo_log import log
-from oslotest import base as test_base
import sqlalchemy.exc
from keystone.cmd import cli
from keystone.common import sql
from keystone.common.sql import upgrades
-from keystone.credential.providers import fernet as credential_fernet
+import keystone.conf
from keystone.tests import unit
from keystone.tests.unit import ksfixtures
-from keystone.tests.unit.ksfixtures import database
+CONF = keystone.conf.CONF
# NOTE(morganfainberg): This should be updated when each DB migration collapse
# is done to mirror the expected structure of the DB in the format of
@@ -229,54 +224,11 @@ INITIAL_TABLE_STRUCTURE = {
}
-class Repository:
-
- def __init__(self, engine, repo_name):
- self.repo_name = repo_name
-
- self.repo_path = upgrades._get_migrate_repo_path(self.repo_name)
- self.min_version = upgrades.INITIAL_VERSION
- self.schema_ = migrate_api.ControlledSchema.create(
- engine, self.repo_path, self.min_version,
- )
- self.max_version = self.schema_.repository.version().version
-
- def upgrade(self, version=None, current_schema=None):
- version = version or self.max_version
- err = ''
- upgrade = True
- version = migrate_api._migrate_version(
- self.schema_, version, upgrade, err,
- )
- upgrades._validate_upgrade_order(
- self.repo_name, target_repo_version=version,
- )
- if not current_schema:
- current_schema = self.schema_
- changeset = current_schema.changeset(version)
- for ver, change in changeset:
- self.schema_.runchange(ver, change, changeset.step)
-
- if self.schema_.version != version:
- raise Exception(
- 'Actual version (%s) of %s does not equal expected '
- 'version (%s)' % (
- self.schema_.version, self.repo_name, version,
- ),
- )
-
- @property
- def version(self):
- with sql.session_for_read() as session:
- return upgrades._migrate_db_version(
- session.get_bind(), self.repo_path, self.min_version,
- )
-
-
class MigrateBase(
db_fixtures.OpportunisticDBTestMixin,
- test_base.BaseTestCase,
):
+ """Test complete orchestration between all database phases."""
+
def setUp(self):
super().setUp()
@@ -292,10 +244,7 @@ class MigrateBase(
# modules have the same name (001_awesome.py).
self.addCleanup(script.PythonScript.clear)
- # NOTE(dstanek): SQLAlchemy's migrate makes some assumptions in the
- # SQLite driver about the lack of foreign key enforcement.
- database.initialize_sql_session(self.engine.url,
- enforce_sqlite_fks=False)
+ db_options.set_defaults(CONF, connection=self.engine.url)
# Override keystone's context manager to be oslo.db's global context
# manager.
@@ -304,29 +253,13 @@ class MigrateBase(
sql.core, '_TESTING_USE_GLOBAL_CONTEXT_MANAGER', False)
self.addCleanup(sql.cleanup)
- self.repos = {
- upgrades.EXPAND_BRANCH: Repository(
- self.engine, upgrades.EXPAND_BRANCH,
- ),
- upgrades.DATA_MIGRATION_BRANCH: Repository(
- self.engine, upgrades.DATA_MIGRATION_BRANCH,
- ),
- upgrades.CONTRACT_BRANCH: Repository(
- self.engine, upgrades.CONTRACT_BRANCH,
- ),
- }
-
- def expand(self, *args, **kwargs):
+ def expand(self):
"""Expand database schema."""
- self.repos[upgrades.EXPAND_BRANCH].upgrade(*args, **kwargs)
-
- def migrate(self, *args, **kwargs):
- """Migrate data."""
- self.repos[upgrades.DATA_MIGRATION_BRANCH].upgrade(*args, **kwargs)
+ upgrades.expand_schema(engine=self.engine)
- def contract(self, *args, **kwargs):
+ def contract(self):
"""Contract database schema."""
- self.repos[upgrades.CONTRACT_BRANCH].upgrade(*args, **kwargs)
+ upgrades.contract_schema(engine=self.engine)
@property
def metadata(self):
@@ -334,7 +267,9 @@ class MigrateBase(
return sqlalchemy.MetaData(self.engine)
def load_table(self, name):
- table = sqlalchemy.Table(name, self.metadata, autoload=True)
+ table = sqlalchemy.Table(
+ name, self.metadata, autoload_with=self.engine,
+ )
return table
def assertTableDoesNotExist(self, table_name):
@@ -342,7 +277,9 @@ class MigrateBase(
# Switch to a different metadata otherwise you might still
# detect renamed or dropped tables
try:
- sqlalchemy.Table(table_name, self.metadata, autoload=True)
+ sqlalchemy.Table(
+ table_name, self.metadata, autoload_with=self.engine,
+ )
except sqlalchemy.exc.NoSuchTableError:
pass
else:
@@ -357,210 +294,8 @@ class MigrateBase(
self.assertCountEqual(expected_cols, actual_cols,
'%s table' % table_name)
-
-class ExpandSchemaUpgradeTests(MigrateBase):
-
- def test_start_version_db_init_version(self):
- self.assertEqual(
- self.repos[upgrades.EXPAND_BRANCH].min_version,
- self.repos[upgrades.EXPAND_BRANCH].version)
-
- def test_blank_db_to_start(self):
- self.assertTableDoesNotExist('user')
-
- def test_upgrade_add_initial_tables(self):
- self.expand(upgrades.INITIAL_VERSION + 1)
- self.check_initial_table_structure()
-
- def check_initial_table_structure(self):
- for table in INITIAL_TABLE_STRUCTURE:
- self.assertTableColumns(table, INITIAL_TABLE_STRUCTURE[table])
-
-
-class MySQLOpportunisticExpandSchemaUpgradeTestCase(
- ExpandSchemaUpgradeTests,
-):
- FIXTURE = db_fixtures.MySQLOpportunisticFixture
-
-
-class PostgreSQLOpportunisticExpandSchemaUpgradeTestCase(
- ExpandSchemaUpgradeTests,
-):
- FIXTURE = db_fixtures.PostgresqlOpportunisticFixture
-
-
-class DataMigrationUpgradeTests(MigrateBase):
-
- def setUp(self):
- # Make sure the expand repo is fully upgraded, since the data migration
- # phase is only run after this is upgraded
- super().setUp()
- self.expand()
-
- def test_start_version_db_init_version(self):
- self.assertEqual(
- self.repos[upgrades.DATA_MIGRATION_BRANCH].min_version,
- self.repos[upgrades.DATA_MIGRATION_BRANCH].version,
- )
-
-
-class MySQLOpportunisticDataMigrationUpgradeTestCase(
- DataMigrationUpgradeTests,
-):
- FIXTURE = db_fixtures.MySQLOpportunisticFixture
-
-
-class PostgreSQLOpportunisticDataMigrationUpgradeTestCase(
- DataMigrationUpgradeTests,
-):
- FIXTURE = db_fixtures.PostgresqlOpportunisticFixture
-
-
-class ContractSchemaUpgradeTests(MigrateBase, unit.TestCase):
-
- def setUp(self):
- # Make sure the expand and data migration repos are fully
- # upgraded, since the contract phase is only run after these are
- # upgraded.
- super().setUp()
- self.useFixture(
- ksfixtures.KeyRepository(
- self.config_fixture,
- 'credential',
- credential_fernet.MAX_ACTIVE_KEYS
- )
- )
- self.expand()
- self.migrate()
-
- def test_start_version_db_init_version(self):
- self.assertEqual(
- self.repos[upgrades.CONTRACT_BRANCH].min_version,
- self.repos[upgrades.CONTRACT_BRANCH].version,
- )
-
-
-class MySQLOpportunisticContractSchemaUpgradeTestCase(
- ContractSchemaUpgradeTests,
-):
- FIXTURE = db_fixtures.MySQLOpportunisticFixture
-
-
-class PostgreSQLOpportunisticContractSchemaUpgradeTestCase(
- ContractSchemaUpgradeTests,
-):
- FIXTURE = db_fixtures.PostgresqlOpportunisticFixture
-
-
-class VersionTests(MigrateBase):
-
- def test_migrate_repos_stay_in_lockstep(self):
- """Rolling upgrade repositories should always stay in lockstep.
-
- By maintaining a single "latest" version number in each of the three
- migration repositories (expand, data migrate, and contract), we can
- trivially prevent operators from "doing the wrong thing", such as
- running upgrades operations out of order (for example, you should not
- be able to run data migration 5 until schema expansion 5 has been run).
-
- For example, even if your rolling upgrade task *only* involves adding a
- new column with a reasonable default, and doesn't require any triggers,
- data migration, etc, you still need to create "empty" upgrade steps in
- the data migration and contract repositories with the same version
- number as the expansion.
-
- For more information, see "Database Migrations" here:
-
- https://docs.openstack.org/keystone/latest/contributor/database-migrations.html
-
- """
- # Transitive comparison: expand == data migration == contract
- self.assertEqual(
- self.repos[upgrades.EXPAND_BRANCH].max_version,
- self.repos[upgrades.DATA_MIGRATION_BRANCH].max_version,
- )
- self.assertEqual(
- self.repos[upgrades.DATA_MIGRATION_BRANCH].max_version,
- self.repos[upgrades.CONTRACT_BRANCH].max_version,
- )
-
- def test_migrate_repos_file_names_have_prefix(self):
- """Migration files should be unique to avoid caching errors.
-
- This test enforces migration files to include a prefix (expand,
- migrate, contract) in order to keep them unique. Here is the required
- format: [version]_[prefix]_[description]. For example:
- 001_expand_add_created_column.py
-
- """
- versions_path = '/versions'
-
- # test for expand prefix, e.g. 001_expand_new_fk_constraint.py
- repo_path = self.repos[upgrades.EXPAND_BRANCH].repo_path
- expand_list = glob.glob(repo_path + versions_path + '/*.py')
- self.assertRepoFileNamePrefix(expand_list, 'expand')
-
- # test for migrate prefix, e.g. 001_migrate_new_fk_constraint.py
- repo_path = self.repos[upgrades.DATA_MIGRATION_BRANCH].repo_path
- migrate_list = glob.glob(repo_path + versions_path + '/*.py')
- self.assertRepoFileNamePrefix(migrate_list, 'migrate')
-
- # test for contract prefix, e.g. 001_contract_new_fk_constraint.py
- repo_path = self.repos[upgrades.CONTRACT_BRANCH].repo_path
- contract_list = glob.glob(repo_path + versions_path + '/*.py')
- self.assertRepoFileNamePrefix(contract_list, 'contract')
-
- def assertRepoFileNamePrefix(self, repo_list, prefix):
- if len(repo_list) > 1:
- # grab the file name for the max version
- file_name = os.path.basename(sorted(repo_list)[-2])
- # pattern for the prefix standard, ignoring placeholder, init files
- pattern = (
- '^[0-9]{3,}_PREFIX_|^[0-9]{3,}_placeholder.py|^__init__.py')
- pattern = pattern.replace('PREFIX', prefix)
- msg = 'Missing required prefix %s in $file_name' % prefix
- self.assertRegex(file_name, pattern, msg)
-
-
-class MigrationValidation(MigrateBase, unit.TestCase):
- """Test validation of database between database phases."""
-
- def _set_db_sync_command_versions(self):
- self.expand(upgrades.INITIAL_VERSION + 1)
- self.migrate(upgrades.INITIAL_VERSION + 1)
- self.contract(upgrades.INITIAL_VERSION + 1)
- for version in (
- upgrades.get_db_version('expand'),
- upgrades.get_db_version('data_migration'),
- upgrades.get_db_version('contract'),
- ):
- self.assertEqual(upgrades.INITIAL_VERSION + 1, version)
-
- def test_running_db_sync_migrate_ahead_of_expand_fails(self):
- self._set_db_sync_command_versions()
- self.assertRaises(
- db_exception.DBMigrationError,
- self.migrate,
- upgrades.INITIAL_VERSION + 2,
- "You are attempting to upgrade migrate ahead of expand",
- )
-
- def test_running_db_sync_contract_ahead_of_migrate_fails(self):
- self._set_db_sync_command_versions()
- self.assertRaises(
- db_exception.DBMigrationError,
- self.contract,
- upgrades.INITIAL_VERSION + 2,
- "You are attempting to upgrade contract ahead of migrate",
- )
-
-
-class FullMigration(MigrateBase, unit.TestCase):
- """Test complete orchestration between all database phases."""
-
def test_db_sync_check(self):
checker = cli.DbSync()
- latest_version = self.repos[upgrades.EXPAND_BRANCH].max_version
# If the expand repository doesn't exist yet, then we need to make sure
# we advertise that `--expand` must be run first.
@@ -569,25 +304,9 @@ class FullMigration(MigrateBase, unit.TestCase):
self.assertIn("keystone-manage db_sync --expand", log_info.output)
self.assertEqual(status, 2)
- # Assert the correct message is printed when expand is the first step
- # that needs to run
- self.expand(upgrades.INITIAL_VERSION + 1)
- log_info = self.useFixture(fixtures.FakeLogger(level=log.INFO))
- status = checker.check_db_sync_status()
- self.assertIn("keystone-manage db_sync --expand", log_info.output)
- self.assertEqual(status, 2)
-
- # Assert the correct message is printed when expand is farther than
- # migrate
- self.expand(latest_version)
- log_info = self.useFixture(fixtures.FakeLogger(level=log.INFO))
- status = checker.check_db_sync_status()
- self.assertIn("keystone-manage db_sync --migrate", log_info.output)
- self.assertEqual(status, 3)
-
- # Assert the correct message is printed when migrate is farther than
+ # Assert the correct message is printed when migrate is ahead of
# contract
- self.migrate(latest_version)
+ self.expand()
log_info = self.useFixture(fixtures.FakeLogger(level=log.INFO))
status = checker.check_db_sync_status()
self.assertIn("keystone-manage db_sync --contract", log_info.output)
@@ -595,47 +314,25 @@ class FullMigration(MigrateBase, unit.TestCase):
# Assert the correct message gets printed when all commands are on
# the same version
- self.contract(latest_version)
+ self.contract()
log_info = self.useFixture(fixtures.FakeLogger(level=log.INFO))
status = checker.check_db_sync_status()
self.assertIn("All db_sync commands are upgraded", log_info.output)
self.assertEqual(status, 0)
- def test_out_of_sync_db_migration_fails(self):
- # We shouldn't allow for operators to accidentally run migration out of
- # order. This test ensures we fail if we attempt to upgrade the
- # contract repository ahead of the expand or migrate repositories.
- self.expand(upgrades.INITIAL_VERSION + 1)
- self.migrate(upgrades.INITIAL_VERSION + 1)
- self.assertRaises(
- db_exception.DBMigrationError,
- self.contract,
- upgrades.INITIAL_VERSION + 2,
- )
-
- def test_migration_079_expand_update_local_id_limit(self):
- self.expand(78)
- self.migrate(78)
- self.contract(78)
-
- id_mapping_table = sqlalchemy.Table('id_mapping',
- self.metadata, autoload=True)
- # assert local_id column is a string of 64 characters (before)
- self.assertEqual('VARCHAR(64)', str(id_mapping_table.c.local_id.type))
+ def test_upgrade_add_initial_tables(self):
+ self.expand()
+ for table in INITIAL_TABLE_STRUCTURE:
+ self.assertTableColumns(table, INITIAL_TABLE_STRUCTURE[table])
- self.expand(79)
- self.migrate(79)
- self.contract(79)
- id_mapping_table = sqlalchemy.Table('id_mapping',
- self.metadata, autoload=True)
- # assert local_id column is a string of 255 characters (after)
- self.assertEqual('VARCHAR(255)', str(id_mapping_table.c.local_id.type))
+class FullMigrationSQLite(MigrateBase, unit.TestCase):
+ pass
-class MySQLOpportunisticFullMigration(FullMigration):
+class FullMigrationMySQL(MigrateBase, unit.TestCase):
FIXTURE = db_fixtures.MySQLOpportunisticFixture
-class PostgreSQLOpportunisticFullMigration(FullMigration):
+class FullMigrationPostgreSQL(MigrateBase, unit.TestCase):
FIXTURE = db_fixtures.PostgresqlOpportunisticFixture
diff --git a/keystone/tests/unit/test_v3.py b/keystone/tests/unit/test_v3.py
index 951a8f83f..f3f943215 100644
--- a/keystone/tests/unit/test_v3.py
+++ b/keystone/tests/unit/test_v3.py
@@ -13,12 +13,11 @@
# under the License.
import datetime
-import uuid
-
import http.client
import oslo_context.context
from oslo_serialization import jsonutils
from testtools import matchers
+import uuid
import webtest
from keystone.common import authorization
@@ -1238,6 +1237,13 @@ class RestfulTestCase(unit.SQLDriverOverrides, rest.RestfulTestCase,
return environment
+class OAuth2RestfulTestCase(RestfulTestCase):
+ def assertValidErrorResponse(self, response):
+ resp = response.result
+ self.assertIsNotNone(resp.get('error'))
+ self.assertIsNotNone(resp.get('error_description'))
+
+
class VersionTestCase(RestfulTestCase):
def test_get_version(self):
pass
diff --git a/keystone/tests/unit/test_v3_auth.py b/keystone/tests/unit/test_v3_auth.py
index e710634d0..eb7ea0e29 100644
--- a/keystone/tests/unit/test_v3_auth.py
+++ b/keystone/tests/unit/test_v3_auth.py
@@ -19,8 +19,10 @@ import itertools
import operator
import re
from unittest import mock
+from urllib import parse
import uuid
+from cryptography.hazmat.primitives.serialization import Encoding
import freezegun
import http.client
from oslo_serialization import jsonutils as json
@@ -2645,6 +2647,187 @@ class TokenAPITests(object):
r = self._validate_token(token, allow_expired=True)
self.assertValidProjectScopedTokenResponse(r)
+ def _create_project_user(self):
+ new_domain_ref = unit.new_domain_ref()
+ PROVIDERS.resource_api.create_domain(
+ new_domain_ref['id'], new_domain_ref
+ )
+ new_project_ref = unit.new_project_ref(domain_id=self.domain_id)
+ PROVIDERS.resource_api.create_project(
+ new_project_ref['id'], new_project_ref
+ )
+ new_user = unit.create_user(PROVIDERS.identity_api,
+ domain_id=new_domain_ref['id'],
+ project_id=new_project_ref['id'])
+ PROVIDERS.assignment_api.create_grant(
+ self.role['id'],
+ user_id=new_user['id'],
+ project_id=new_project_ref['id'])
+ return new_user, new_domain_ref, new_project_ref
+
+ def _create_certificates(self,
+ root_dn=None,
+ server_dn=None,
+ client_dn=None):
+ root_subj = unit.create_dn(
+ country_name='jp',
+ state_or_province_name='kanagawa',
+ locality_name='kawasaki',
+ organization_name='fujitsu',
+ organizational_unit_name='test',
+ common_name='root'
+ )
+ if root_dn:
+ root_subj = unit.update_dn(root_subj, root_dn)
+
+ root_cert, root_key = unit.create_certificate(root_subj)
+ keystone_subj = unit.create_dn(
+ country_name='jp',
+ state_or_province_name='kanagawa',
+ locality_name='kawasaki',
+ organization_name='fujitsu',
+ organizational_unit_name='test',
+ common_name='keystone.local'
+ )
+ if server_dn:
+ keystone_subj = unit.update_dn(keystone_subj, server_dn)
+
+ ks_cert, ks_key = unit.create_certificate(
+ keystone_subj, ca=root_cert, ca_key=root_key)
+ client_subj = unit.create_dn(
+ country_name='jp',
+ state_or_province_name='kanagawa',
+ locality_name='kawasaki',
+ organization_name='fujitsu',
+ organizational_unit_name='test',
+ common_name='client'
+ )
+ if client_dn:
+ client_subj = unit.update_dn(client_subj, client_dn)
+
+ client_cert, client_key = unit.create_certificate(
+ client_subj, ca=root_cert, ca_key=root_key)
+ return root_cert, root_key, ks_cert, ks_key, client_cert, client_key
+
+ def _get_cert_content(self, cert):
+ return cert.public_bytes(Encoding.PEM).decode('ascii')
+
+ def _get_oauth2_access_token(self, client_id, client_cert_content,
+ expected_status=http.client.OK):
+ headers = {
+ 'Content-Type': 'application/x-www-form-urlencoded',
+ }
+ data = {
+ 'grant_type': 'client_credentials',
+ 'client_id': client_id
+ }
+ extra_environ = {
+ 'SSL_CLIENT_CERT': client_cert_content
+ }
+ data = parse.urlencode(data).encode()
+ resp = self.post(
+ '/OS-OAUTH2/token',
+ headers=headers,
+ noauth=True,
+ convert=False,
+ body=data,
+ environ=extra_environ,
+ expected_status=expected_status)
+ return resp
+
+ def _create_mapping(self):
+ mapping = {
+ 'id': 'oauth2_mapping',
+ 'rules': [
+ {
+ 'local': [
+ {
+ 'user': {
+ 'name': '{0}',
+ 'id': '{1}',
+ 'email': '{2}',
+ 'domain': {
+ 'name': '{3}',
+ 'id': '{4}'
+ }
+ }
+ }
+ ],
+ 'remote': [
+ {
+ 'type': 'SSL_CLIENT_SUBJECT_DN_CN'
+ },
+ {
+ 'type': 'SSL_CLIENT_SUBJECT_DN_UID'
+ },
+ {
+ 'type': 'SSL_CLIENT_SUBJECT_DN_EMAILADDRESS'
+ },
+ {
+ 'type': 'SSL_CLIENT_SUBJECT_DN_O'
+ },
+ {
+ 'type': 'SSL_CLIENT_SUBJECT_DN_DC'
+ },
+ {
+ 'type': 'SSL_CLIENT_ISSUER_DN_CN',
+ 'any_one_of': [
+ 'root'
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ PROVIDERS.federation_api.create_mapping(mapping['id'], mapping)
+
+ def test_verify_oauth2_token_project_scope_ok(self):
+ cache_on_issue = CONF.token.cache_on_issue
+ caching = CONF.token.caching
+ self._create_mapping()
+ user, user_domain, _ = self._create_project_user()
+
+ *_, client_cert, _ = self._create_certificates(
+ root_dn=unit.create_dn(
+ common_name='root'
+ ),
+ client_dn=unit.create_dn(
+ common_name=user['name'],
+ user_id=user['id'],
+ email_address=user['email'],
+ organization_name=user_domain['name'],
+ domain_component=user_domain['id']
+ )
+ )
+
+ cert_content = self._get_cert_content(client_cert)
+ CONF.token.cache_on_issue = False
+ CONF.token.caching = False
+ resp = self._get_oauth2_access_token(user['id'], cert_content)
+
+ json_resp = json.loads(resp.body)
+ self.assertIn('access_token', json_resp)
+ self.assertEqual('Bearer', json_resp['token_type'])
+ self.assertEqual(3600, json_resp['expires_in'])
+
+ verify_resp = self.get(
+ '/auth/tokens',
+ headers={
+ 'X-Subject-Token': json_resp['access_token'],
+ 'X-Auth-Token': json_resp['access_token']
+ },
+ expected_status=http.client.OK)
+ self.assertIn('token', verify_resp.result)
+ self.assertIn('oauth2_credential', verify_resp.result['token'])
+ self.assertIn('roles', verify_resp.result['token'])
+ self.assertIn('project', verify_resp.result['token'])
+ self.assertIn('catalog', verify_resp.result['token'])
+ check_oauth2 = verify_resp.result['token']['oauth2_credential']
+ self.assertEqual(utils.get_certificate_thumbprint(cert_content),
+ check_oauth2['x5t#S256'])
+ CONF.token.cache_on_issue = cache_on_issue
+ CONF.token.caching = caching
+
class TokenDataTests(object):
"""Test the data in specific token types."""
@@ -5577,6 +5760,21 @@ class ApplicationCredentialAuth(test_v3.RestfulTestCase):
self.v3_create_token(auth_data,
expected_status=http.client.UNAUTHORIZED)
+ def test_application_credential_expiration_limits_token_expiration(self):
+ expires_at = datetime.datetime.utcnow() + datetime.timedelta(minutes=1)
+ app_cred = self._make_app_cred(expires=expires_at)
+ app_cred_ref = self.app_cred_api.create_application_credential(
+ app_cred)
+ auth_data = self.build_authentication_request(
+ app_cred_id=app_cred_ref['id'], secret=app_cred_ref['secret'])
+ resp = self.v3_create_token(auth_data,
+ expected_status=http.client.CREATED)
+ token = resp.headers.get('X-Subject-Token')
+ future = datetime.datetime.utcnow() + datetime.timedelta(minutes=2)
+ with freezegun.freeze_time(future):
+ self._validate_token(token,
+ expected_status=http.client.UNAUTHORIZED)
+
def test_application_credential_fails_when_user_deleted(self):
app_cred = self._make_app_cred()
app_cred_ref = self.app_cred_api.create_application_credential(
diff --git a/keystone/tests/unit/test_v3_oauth2.py b/keystone/tests/unit/test_v3_oauth2.py
new file mode 100644
index 000000000..6eaa8560f
--- /dev/null
+++ b/keystone/tests/unit/test_v3_oauth2.py
@@ -0,0 +1,2071 @@
+# Copyright 2022 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from base64 import b64encode
+from cryptography.hazmat.primitives.serialization import Encoding
+import fixtures
+import http
+from http import client
+from oslo_log import log
+from oslo_serialization import jsonutils
+from unittest import mock
+from urllib import parse
+
+from keystone.api.os_oauth2 import AccessTokenResource
+from keystone.common import provider_api
+from keystone.common import utils
+from keystone import conf
+from keystone import exception
+from keystone.federation.utils import RuleProcessor
+from keystone.tests import unit
+from keystone.tests.unit import test_v3
+from keystone.token.provider import Manager
+
+PROVIDERS = provider_api.ProviderAPIs
+LOG = log.getLogger(__name__)
+CONF = conf.CONF
+
+
+class FakeUserAppCredListCreateResource(mock.Mock):
+ pass
+
+
+class OAuth2AuthnMethodsTests(test_v3.OAuth2RestfulTestCase):
+ ACCESS_TOKEN_URL = '/OS-OAUTH2/token'
+
+ def setUp(self):
+ super(OAuth2AuthnMethodsTests, self).setUp()
+ self.config_fixture.config(
+ group='oauth2',
+ oauth2_authn_methods=['client_secret_basic', 'tls_client_auth'],
+ )
+
+ def _get_access_token(
+ self,
+ headers,
+ data,
+ expected_status,
+ client_cert_content=None):
+ data = parse.urlencode(data).encode()
+ kwargs = {
+ 'headers': headers,
+ 'noauth': True,
+ 'convert': False,
+ 'body': data,
+ 'expected_status': expected_status
+ }
+ if client_cert_content:
+ kwargs.update({'environ': {
+ 'SSL_CLIENT_CERT': client_cert_content
+ }})
+ resp = self.post(
+ self.ACCESS_TOKEN_URL,
+ **kwargs)
+ return resp
+
+ def _create_certificates(self):
+ return unit.create_certificate(
+ subject_dn=unit.create_dn(
+ country_name='jp',
+ state_or_province_name='tokyo',
+ locality_name='musashino',
+ organizational_unit_name='test'
+ )
+ )
+
+ def _get_cert_content(self, cert):
+ return cert.public_bytes(Encoding.PEM).decode('ascii')
+
+ @mock.patch.object(AccessTokenResource, '_client_secret_basic')
+ def test_secret_basic_header(self, mock_client_secret_basic):
+        """client_secret_basic is used if a client secret is found."""
+ client_id = 'client_id'
+ client_secret = 'client_secret'
+ b64str = b64encode(
+ f'{client_id}:{client_secret}'.encode()).decode().strip()
+ headers = {
+ 'Content-Type': 'application/x-www-form-urlencoded',
+ 'Authorization': f'Basic {b64str}'
+ }
+ data = {
+ 'grant_type': 'client_credentials'
+ }
+
+ _ = self._get_access_token(
+ headers=headers,
+ data=data,
+ expected_status=client.OK)
+ mock_client_secret_basic.assert_called_once_with(
+ client_id, client_secret)
+
+ @mock.patch.object(AccessTokenResource, '_client_secret_basic')
+ def test_secret_basic_form(self, mock_client_secret_basic):
+        """client_secret_basic is used if a client secret is found."""
+ client_id = 'client_id'
+ client_secret = 'client_secret'
+ headers = {
+ 'Content-Type': 'application/x-www-form-urlencoded',
+ }
+ data = {
+ 'grant_type': 'client_credentials',
+ 'client_id': client_id,
+ 'client_secret': client_secret
+ }
+
+ _ = self._get_access_token(
+ headers=headers,
+ data=data,
+ expected_status=client.OK)
+ mock_client_secret_basic.assert_called_once_with(
+ client_id, client_secret)
+
+ @mock.patch.object(AccessTokenResource, '_client_secret_basic')
+ def test_secret_basic_header_and_form(self, mock_client_secret_basic):
+ """A header is used if secrets are found in a header and body."""
+ client_id_h = 'client_id_h'
+ client_secret_h = 'client_secret_h'
+ client_id_d = 'client_id_d'
+ client_secret_d = 'client_secret_d'
+ b64str = b64encode(
+ f'{client_id_h}:{client_secret_h}'.encode()).decode().strip()
+ headers = {
+ 'Content-Type': 'application/x-www-form-urlencoded',
+ 'Authorization': f'Basic {b64str}'
+ }
+ data = {
+ 'grant_type': 'client_credentials',
+ 'client_id': client_id_d,
+ 'client_secret': client_secret_d
+ }
+
+ _ = self._get_access_token(
+ headers=headers,
+ data=data,
+ expected_status=client.OK)
+ mock_client_secret_basic.assert_called_once_with(
+ client_id_h, client_secret_h)
+
+ @mock.patch.object(AccessTokenResource, '_tls_client_auth')
+ def test_client_cert(self, mock_tls_client_auth):
+ """tls_client_auth is used if a certificate is found."""
+ client_id = 'client_id'
+ client_cert, _ = self._create_certificates()
+ cert_content = self._get_cert_content(client_cert)
+ headers = {
+ 'Content-Type': 'application/x-www-form-urlencoded',
+ }
+ data = {
+ 'grant_type': 'client_credentials',
+ 'client_id': client_id
+ }
+ _ = self._get_access_token(
+ headers=headers,
+ data=data,
+ expected_status=client.OK,
+ client_cert_content=cert_content)
+ mock_tls_client_auth.assert_called_once_with(client_id, cert_content)
+
+ @mock.patch.object(AccessTokenResource, '_tls_client_auth')
+ def test_secret_basic_and_client_cert(self, mock_tls_client_auth):
+ """tls_client_auth is used if a certificate and secret are found."""
+ client_id_s = 'client_id_s'
+ client_secret = 'client_secret'
+ client_id_c = 'client_id_c'
+ client_cert, _ = self._create_certificates()
+ cert_content = self._get_cert_content(client_cert)
+ b64str = b64encode(
+ f'{client_id_s}:{client_secret}'.encode()).decode().strip()
+ headers = {
+ 'Content-Type': 'application/x-www-form-urlencoded',
+ 'Authorization': f'Basic {b64str}'
+ }
+ data = {
+ 'grant_type': 'client_credentials',
+ 'client_id': client_id_c,
+ }
+
+ _ = self._get_access_token(
+ headers=headers,
+ data=data,
+ expected_status=client.OK,
+ client_cert_content=cert_content)
+ mock_tls_client_auth.assert_called_once_with(client_id_c, cert_content)
+
+
+class OAuth2SecretBasicTests(test_v3.OAuth2RestfulTestCase):
+ APP_CRED_CREATE_URL = '/users/%(user_id)s/application_credentials'
+ APP_CRED_LIST_URL = '/users/%(user_id)s/application_credentials'
+ APP_CRED_DELETE_URL = '/users/%(user_id)s/application_credentials/' \
+ '%(app_cred_id)s'
+ APP_CRED_SHOW_URL = '/users/%(user_id)s/application_credentials/' \
+ '%(app_cred_id)s'
+ ACCESS_TOKEN_URL = '/OS-OAUTH2/token'
+
+ def setUp(self):
+ super(OAuth2SecretBasicTests, self).setUp()
+ log.set_defaults(
+ logging_context_format_string='%(asctime)s.%(msecs)03d %('
+ 'color)s%(levelname)s %(name)s [^[['
+ '01;36m%(request_id)s ^[[00;36m%('
+ 'project_name)s %(user_name)s%('
+ 'color)s] ^[[01;35m%(instance)s%('
+ 'color)s%(message)s^[[00m',
+ default_log_levels=log.DEBUG)
+ CONF.log_opt_values(LOG, log.DEBUG)
+ LOG.debug(f'is_debug_enabled: {log.is_debug_enabled(CONF)}')
+ LOG.debug(f'get_default_log_levels: {log.get_default_log_levels()}')
+ self.config_fixture.config(
+ group='oauth2',
+ oauth2_authn_methods=['client_secret_basic'],
+ )
+
+ def _assert_error_resp(self, error_resp, error_msg, error_description):
+ resp_keys = (
+ 'error', 'error_description'
+ )
+ for key in resp_keys:
+ self.assertIsNotNone(error_resp.get(key, None))
+ self.assertEqual(error_msg, error_resp.get('error'))
+ self.assertEqual(error_description,
+ error_resp.get('error_description'))
+
+ def _create_app_cred(self, user_id, app_cred_name):
+ resp = self.post(
+ self.APP_CRED_CREATE_URL % {'user_id': user_id},
+ body={'application_credential': {'name': app_cred_name}}
+ )
+ LOG.debug(f'resp: {resp}')
+ app_ref = resp.result['application_credential']
+ return app_ref
+
+ def _delete_app_cred(self, user_id, app_cred_id):
+ resp = self.delete(
+ self.APP_CRED_CREATE_URL % {'user_id': user_id,
+ 'app_cred_id': app_cred_id})
+ LOG.debug(f'resp: {resp}')
+
+ def _get_access_token(self, app_cred, b64str, headers, data,
+ expected_status):
+ if b64str is None:
+ client_id = app_cred.get('id')
+ client_secret = app_cred.get('secret')
+ b64str = b64encode(
+ f'{client_id}:{client_secret}'.encode()).decode().strip()
+ if headers is None:
+ headers = {
+ 'Content-Type': 'application/x-www-form-urlencoded',
+ 'Authorization': f'Basic {b64str}'
+ }
+ if data is None:
+ data = {
+ 'grant_type': 'client_credentials'
+ }
+ data = parse.urlencode(data).encode()
+ resp = self.post(
+ self.ACCESS_TOKEN_URL,
+ headers=headers,
+ convert=False,
+ body=data,
+ expected_status=expected_status)
+ return resp
+
+ def _get_access_token_method_not_allowed(self, app_cred,
+ http_func):
+ client_id = app_cred.get('id')
+ client_secret = app_cred.get('secret')
+ b64str = b64encode(
+ f'{client_id}:{client_secret}'.encode()).decode().strip()
+ headers = {
+ 'Content-Type': 'application/x-www-form-urlencoded',
+ 'Authorization': f'Basic {b64str}'
+ }
+ data = {
+ 'grant_type': 'client_credentials'
+ }
+ data = parse.urlencode(data).encode()
+ resp = http_func(
+ self.ACCESS_TOKEN_URL,
+ headers=headers,
+ convert=False,
+ body=data,
+ expected_status=client.METHOD_NOT_ALLOWED)
+ LOG.debug(f'response: {resp}')
+ json_resp = jsonutils.loads(resp.body)
+ return json_resp
+
+ def test_get_access_token(self):
+        """Test case when an access token can be successfully obtained."""
+ client_name = 'client_name_test'
+ app_cred = self._create_app_cred(self.user_id, client_name)
+ resp = self._get_access_token(
+ app_cred,
+ b64str=None,
+ headers=None,
+ data=None,
+ expected_status=client.OK)
+ json_resp = jsonutils.loads(resp.body)
+ self.assertIn('access_token', json_resp)
+ self.assertEqual('Bearer', json_resp['token_type'])
+ self.assertEqual(3600, json_resp['expires_in'])
+
+ def test_get_access_token_form(self):
+ """Test case when there is no client authorization."""
+ client_name = 'client_name_test'
+ app_cred = self._create_app_cred(self.user_id, client_name)
+ headers = {
+ 'Content-Type': 'application/x-www-form-urlencoded',
+ }
+ data = {
+ 'grant_type': 'client_credentials',
+ 'client_id': app_cred.get('id'),
+ 'client_secret': app_cred.get('secret'),
+ }
+ resp = self._get_access_token(
+ app_cred,
+ b64str=None,
+ headers=headers,
+ data=data,
+ expected_status=client.OK)
+ json_resp = jsonutils.loads(resp.body)
+ self.assertIn('access_token', json_resp)
+ self.assertEqual('Bearer', json_resp['token_type'])
+ self.assertEqual(3600, json_resp['expires_in'])
+
+ def test_get_access_token_auth_type_is_not_basic(self):
+ """Test case when auth_type is not basic."""
+ client_name = 'client_name_test'
+ app_cred = self._create_app_cred(self.user_id, client_name)
+ client_id = app_cred.get('id')
+
+ base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \
+ 'response="%s"' % (
+ client_id, 'realm', 'nonce', 'path', 'responding')
+
+ headers = {
+ 'Content-Type': 'application/x-www-form-urlencoded',
+ 'Authorization': f'Digest {base}'
+ }
+ error = 'invalid_client'
+ error_description = 'Client authentication failed.'
+ resp = self._get_access_token(app_cred,
+ b64str=None,
+ headers=headers,
+ data=None,
+ expected_status=client.UNAUTHORIZED)
+ self.assertNotEmpty(resp.headers.get("WWW-Authenticate"))
+ self.assertEqual('Keystone uri="http://localhost/v3"',
+ resp.headers.get("WWW-Authenticate"))
+ json_resp = jsonutils.loads(resp.body)
+ LOG.debug(f'error: {json_resp.get("error")}')
+ LOG.debug(f'error_description: {json_resp.get("error_description")}')
+ self.assertEqual(error,
+ json_resp.get('error'))
+ self.assertEqual(error_description,
+ json_resp.get('error_description'))
+
+ def test_get_access_token_without_client_id(self):
+ """Test case when there is no client_id."""
+ client_name = 'client_name_test'
+ app_cred = self._create_app_cred(self.user_id, client_name)
+ client_secret = app_cred.get('secret')
+ b64str = b64encode(
+ f':{client_secret}'.encode()).decode().strip()
+ error = 'invalid_client'
+ error_description = 'Client authentication failed.'
+ resp = self._get_access_token(app_cred,
+ b64str=b64str,
+ headers=None,
+ data=None,
+ expected_status=client.UNAUTHORIZED)
+ self.assertNotEmpty(resp.headers.get("WWW-Authenticate"))
+ self.assertEqual('Keystone uri="http://localhost/v3"',
+ resp.headers.get("WWW-Authenticate"))
+ json_resp = jsonutils.loads(resp.body)
+ LOG.debug(f'error: {json_resp.get("error")}')
+ LOG.debug(f'error_description: {json_resp.get("error_description")}')
+ self.assertEqual(error,
+ json_resp.get('error'))
+ self.assertEqual(error_description,
+ json_resp.get('error_description'))
+
+ def test_get_access_token_without_client_secret(self):
+ """Test case when there is no client_secret."""
+ client_name = 'client_name_test'
+ app_cred = self._create_app_cred(self.user_id, client_name)
+ client_id = app_cred.get('id')
+ b64str = b64encode(
+ f'{client_id}:'.encode()).decode().strip()
+ error = 'invalid_client'
+ error_description = 'Client authentication failed.'
+ resp = self._get_access_token(app_cred,
+ b64str=b64str,
+ headers=None,
+ data=None,
+ expected_status=client.UNAUTHORIZED)
+ self.assertNotEmpty(resp.headers.get("WWW-Authenticate"))
+ self.assertEqual('Keystone uri="http://localhost/v3"',
+ resp.headers.get("WWW-Authenticate"))
+ json_resp = jsonutils.loads(resp.body)
+ LOG.debug(f'error: {json_resp.get("error")}')
+ LOG.debug(f'error_description: {json_resp.get("error_description")}')
+ self.assertEqual(error,
+ json_resp.get('error'))
+ self.assertEqual(error_description,
+ json_resp.get('error_description'))
+
+ def test_get_access_token_without_grant_type(self):
+ """Test case when there is no grant_type."""
+ client_name = 'client_name_test'
+ app_cred = self._create_app_cred(self.user_id, client_name)
+ data = {}
+ error = 'invalid_request'
+ error_description = 'The parameter grant_type is required.'
+ resp = self._get_access_token(app_cred,
+ b64str=None,
+ headers=None,
+ data=data,
+ expected_status=client.BAD_REQUEST)
+ json_resp = jsonutils.loads(resp.body)
+ LOG.debug(f'error: {json_resp.get("error")}')
+ LOG.debug(f'error_description: {json_resp.get("error_description")}')
+ self.assertEqual(error,
+ json_resp.get('error'))
+ self.assertEqual(error_description,
+ json_resp.get('error_description'))
+
+ def test_get_access_token_blank_grant_type(self):
+ """Test case when grant_type is blank."""
+ client_name = 'client_name_test'
+ app_cred = self._create_app_cred(self.user_id, client_name)
+ data = {
+ 'grant_type': ''
+ }
+ error = 'unsupported_grant_type'
+ error_description = 'The parameter grant_type ' \
+ ' is not supported.'
+ resp = self._get_access_token(app_cred,
+ b64str=None,
+ headers=None,
+ data=data,
+ expected_status=client.BAD_REQUEST)
+ json_resp = jsonutils.loads(resp.body)
+ LOG.debug(f'error: {json_resp.get("error")}')
+ LOG.debug(f'error_description: {json_resp.get("error_description")}')
+ self.assertEqual(error,
+ json_resp.get('error'))
+ self.assertEqual(error_description,
+ json_resp.get('error_description'))
+
+ def test_get_access_token_grant_type_is_not_client_credentials(self):
+ """Test case when grant_type is not client_credentials."""
+ client_name = 'client_name_test'
+ app_cred = self._create_app_cred(self.user_id, client_name)
+ data = {
+ 'grant_type': 'not_client_credentials'
+ }
+ error = 'unsupported_grant_type'
+ error_description = 'The parameter grant_type ' \
+ 'not_client_credentials is not supported.'
+ resp = self._get_access_token(app_cred,
+ b64str=None,
+ headers=None,
+ data=data,
+ expected_status=client.BAD_REQUEST)
+ json_resp = jsonutils.loads(resp.body)
+ LOG.debug(f'error: {json_resp.get("error")}')
+ LOG.debug(f'error_description: {json_resp.get("error_description")}')
+ self.assertEqual(error,
+ json_resp.get('error'))
+ self.assertEqual(error_description,
+ json_resp.get('error_description'))
+
+ def test_get_access_token_failed_401(self):
+ """Test case when client authentication failed."""
+ client_name = 'client_name_test'
+ app_cred = self._create_app_cred(self.user_id, client_name)
+ error = 'invalid_client'
+
+ client_id = app_cred.get('id')
+ client_secret = app_cred.get('secret')
+ b64str = b64encode(
+ f'{client_id}:{client_secret}'.encode()).decode().strip()
+ headers = {
+ 'Content-Type': 'application/x-www-form-urlencoded',
+ 'Authorization': f'Basic {b64str}'
+ }
+ data = {
+ 'grant_type': 'client_credentials'
+ }
+ data = parse.urlencode(data).encode()
+ with mock.patch(
+ 'keystone.api._shared.authentication.'
+ 'authenticate_for_token') as co_mock:
+ co_mock.side_effect = exception.Unauthorized(
+ 'client is unauthorized')
+ resp = self.post(
+ self.ACCESS_TOKEN_URL,
+ headers=headers,
+ convert=False,
+ body=data,
+ noauth=True,
+ expected_status=client.UNAUTHORIZED)
+ self.assertNotEmpty(resp.headers.get("WWW-Authenticate"))
+ self.assertEqual('Keystone uri="http://localhost/v3"',
+ resp.headers.get("WWW-Authenticate"))
+ LOG.debug(f'response: {resp}')
+ json_resp = jsonutils.loads(resp.body)
+ self.assertEqual(error,
+ json_resp.get('error'))
+ LOG.debug(f'error: {json_resp.get("error")}')
+
+ def test_get_access_token_failed_400(self):
+ """Test case when the called API is incorrect."""
+ client_name = 'client_name_test'
+ app_cred = self._create_app_cred(self.user_id, client_name)
+ error = 'invalid_request'
+ client_id = app_cred.get('id')
+ client_secret = app_cred.get('secret')
+ b64str = b64encode(
+ f'{client_id}:{client_secret}'.encode()).decode().strip()
+ headers = {
+ 'Content-Type': 'application/x-www-form-urlencoded',
+ 'Authorization': f'Basic {b64str}'
+ }
+ data = {
+ 'grant_type': 'client_credentials'
+ }
+ data = parse.urlencode(data).encode()
+ with mock.patch(
+ 'keystone.api._shared.authentication.'
+ 'authenticate_for_token') as co_mock:
+ co_mock.side_effect = exception.ValidationError(
+ 'Auth method is invalid')
+ resp = self.post(
+ self.ACCESS_TOKEN_URL,
+ headers=headers,
+ convert=False,
+ body=data,
+ noauth=True,
+ expected_status=client.BAD_REQUEST)
+ LOG.debug(f'response: {resp}')
+ json_resp = jsonutils.loads(resp.body)
+ self.assertEqual(error,
+ json_resp.get('error'))
+ LOG.debug(f'error: {json_resp.get("error")}')
+
+ def test_get_access_token_failed_500_other(self):
+ """Test case when unexpected error."""
+ client_name = 'client_name_test'
+ app_cred = self._create_app_cred(self.user_id, client_name)
+ error = 'other_error'
+ client_id = app_cred.get('id')
+ client_secret = app_cred.get('secret')
+ b64str = b64encode(
+ f'{client_id}:{client_secret}'.encode()).decode().strip()
+ headers = {
+ 'Content-Type': 'application/x-www-form-urlencoded',
+ 'Authorization': f'Basic {b64str}'
+ }
+ data = {
+ 'grant_type': 'client_credentials'
+ }
+ data = parse.urlencode(data).encode()
+ with mock.patch(
+ 'keystone.api._shared.authentication.'
+ 'authenticate_for_token') as co_mock:
+ co_mock.side_effect = exception.UnexpectedError(
+ 'unexpected error.')
+ resp = self.post(
+ self.ACCESS_TOKEN_URL,
+ headers=headers,
+ convert=False,
+ body=data,
+ noauth=True,
+ expected_status=client.INTERNAL_SERVER_ERROR)
+
+ LOG.debug(f'response: {resp}')
+ json_resp = jsonutils.loads(resp.body)
+ self.assertEqual(error,
+ json_resp.get('error'))
+
+ def test_get_access_token_failed_500(self):
+ """Test case when internal server error."""
+ client_name = 'client_name_test'
+ app_cred = self._create_app_cred(self.user_id, client_name)
+ error = 'other_error'
+ client_id = app_cred.get('id')
+ client_secret = app_cred.get('secret')
+ b64str = b64encode(
+ f'{client_id}:{client_secret}'.encode()).decode().strip()
+ headers = {
+ 'Content-Type': 'application/x-www-form-urlencoded',
+ 'Authorization': f'Basic {b64str}'
+ }
+ data = {
+ 'grant_type': 'client_credentials'
+ }
+ data = parse.urlencode(data).encode()
+ with mock.patch(
+ 'keystone.api._shared.authentication.'
+ 'authenticate_for_token') as co_mock:
+ co_mock.side_effect = Exception(
+ 'Internal server is invalid')
+ resp = self.post(
+ self.ACCESS_TOKEN_URL,
+ headers=headers,
+ convert=False,
+ body=data,
+ noauth=True,
+ expected_status=client.INTERNAL_SERVER_ERROR)
+
+ LOG.debug(f'response: {resp}')
+ json_resp = jsonutils.loads(resp.body)
+ self.assertEqual(error,
+ json_resp.get('error'))
+
+ def test_get_access_token_method_get_not_allowed(self):
+ """Test case when the request is get method that is not allowed."""
+ client_name = 'client_name_test'
+ app_cred = self._create_app_cred(self.user_id, client_name)
+ json_resp = self._get_access_token_method_not_allowed(
+ app_cred, self.get)
+ self.assertEqual('other_error',
+ json_resp.get('error'))
+ self.assertEqual('The method is not allowed for the requested URL.',
+ json_resp.get('error_description'))
+
+ def test_get_access_token_method_patch_not_allowed(self):
+ """Test case when the request is patch method that is not allowed."""
+ client_name = 'client_name_test'
+ app_cred = self._create_app_cred(self.user_id, client_name)
+ json_resp = self._get_access_token_method_not_allowed(
+ app_cred, self.patch)
+ self.assertEqual('other_error',
+ json_resp.get('error'))
+ self.assertEqual('The method is not allowed for the requested URL.',
+ json_resp.get('error_description'))
+
+ def test_get_access_token_method_put_not_allowed(self):
+ """Test case when the request is put method that is not allowed."""
+ client_name = 'client_name_test'
+ app_cred = self._create_app_cred(self.user_id, client_name)
+ json_resp = self._get_access_token_method_not_allowed(
+ app_cred, self.put)
+ self.assertEqual('other_error',
+ json_resp.get('error'))
+ self.assertEqual('The method is not allowed for the requested URL.',
+ json_resp.get('error_description'))
+
+ def test_get_access_token_method_delete_not_allowed(self):
+ """Test case when the request is delete method that is not allowed."""
+ client_name = 'client_name_test'
+ app_cred = self._create_app_cred(self.user_id, client_name)
+ json_resp = self._get_access_token_method_not_allowed(
+ app_cred, self.delete)
+ self.assertEqual('other_error',
+ json_resp.get('error'))
+ self.assertEqual('The method is not allowed for the requested URL.',
+ json_resp.get('error_description'))
+
+ def test_get_access_token_method_head_not_allowed(self):
+ """Test case when the request is head method that is not allowed."""
+ client_name = 'client_name_test'
+ app_cred = self._create_app_cred(self.user_id, client_name)
+ client_id = app_cred.get('id')
+ client_secret = app_cred.get('secret')
+ b64str = b64encode(
+ f'{client_id}:{client_secret}'.encode()).decode().strip()
+ headers = {
+ 'Content-Type': 'application/x-www-form-urlencoded',
+ 'Authorization': f'Basic {b64str}'
+ }
+ self.head(
+ self.ACCESS_TOKEN_URL,
+ headers=headers,
+ convert=False,
+ expected_status=client.METHOD_NOT_ALLOWED)
+
+
class OAuth2CertificateTests(test_v3.OAuth2RestfulTestCase):
    """Tests for OAuth2.0 mutual-TLS (tls_client_auth) token issuance."""

    ACCESS_TOKEN_URL = '/OS-OAUTH2/token'

    def setUp(self):
        """Enable TLS client auth and prepare a user plus a client cert."""
        super().setUp()
        self.log_fix = self.useFixture(fixtures.FakeLogger(level=log.DEBUG))
        self.config_fixture.config(group='oauth2',
                                   oauth2_authn_methods=['tls_client_auth'])
        self.config_fixture.config(group='oauth2',
                                   oauth2_cert_dn_mapping_id='oauth2_mapping')
        self.oauth2_user, self.oauth2_user_domain, _ = (
            self._create_project_user())
        # Client certificate whose subject DN encodes the user's identity.
        *_, self.client_cert, self.client_key = self._create_certificates(
            client_dn=unit.create_dn(
                user_id=self.oauth2_user.get('id'),
                common_name=self.oauth2_user.get('name'),
                email_address=self.oauth2_user.get('email'),
                domain_component=self.oauth2_user_domain.get('id'),
                organization_name=self.oauth2_user_domain.get('name')
            )
        )
+
    def _create_project_user(self, no_roles=False):
        """Create a user in a fresh domain along with a new project.

        :param no_roles: when True, skip granting ``self.role`` on the
            project, yielding a user with no role assignments.
        :returns: tuple of (user dict, new domain ref, new project ref)
        """
        new_domain_ref = unit.new_domain_ref()
        PROVIDERS.resource_api.create_domain(
            new_domain_ref['id'], new_domain_ref
        )
        # NOTE(review): the project is created under self.domain_id rather
        # than the domain created above -- presumably intentional for these
        # tests, but confirm against callers.
        new_project_ref = unit.new_project_ref(domain_id=self.domain_id)
        PROVIDERS.resource_api.create_project(
            new_project_ref['id'], new_project_ref
        )
        # The user lives in the new domain with the new project as default.
        new_user = unit.create_user(PROVIDERS.identity_api,
                                    domain_id=new_domain_ref['id'],
                                    project_id=new_project_ref['id'])
        if not no_roles:
            PROVIDERS.assignment_api.create_grant(
                self.role['id'],
                user_id=new_user['id'],
                project_id=new_project_ref['id'])
        return new_user, new_domain_ref, new_project_ref
+
+ def _create_certificates(self,
+ root_dn=None,
+ server_dn=None,
+ client_dn=None):
+ root_subj = unit.create_dn(
+ country_name='jp',
+ state_or_province_name='kanagawa',
+ locality_name='kawasaki',
+ organization_name='fujitsu',
+ organizational_unit_name='test',
+ common_name='root'
+ )
+ if root_dn:
+ root_subj = unit.update_dn(root_subj, root_dn)
+
+ root_cert, root_key = unit.create_certificate(root_subj)
+ keystone_subj = unit.create_dn(
+ country_name='jp',
+ state_or_province_name='kanagawa',
+ locality_name='kawasaki',
+ organization_name='fujitsu',
+ organizational_unit_name='test',
+ common_name='keystone.local'
+ )
+ if server_dn:
+ keystone_subj = unit.update_dn(keystone_subj, server_dn)
+
+ ks_cert, ks_key = unit.create_certificate(
+ keystone_subj, ca=root_cert, ca_key=root_key)
+ client_subj = unit.create_dn(
+ country_name='jp',
+ state_or_province_name='kanagawa',
+ locality_name='kawasaki',
+ organizational_unit_name='test'
+ )
+ if client_dn:
+ client_subj = unit.update_dn(client_subj, client_dn)
+
+ client_cert, client_key = unit.create_certificate(
+ client_subj, ca=root_cert, ca_key=root_key)
+ return root_cert, root_key, ks_cert, ks_key, client_cert, client_key
+
    def _create_mapping(self, id='oauth2_mapping', dn_rules=None):
        """Register a federation mapping built from DN-attribute rules.

        :param id: mapping id to create.  NOTE: the parameter name shadows
            the ``id`` builtin; kept for caller compatibility.
        :param dn_rules: list of rule dicts.  Keys starting with ``user.``
            name the local user attribute to populate from the given
            SSL_CLIENT_* remote attribute; any other key becomes a remote
            attribute constrained with ``any_one_of``.
        """
        rules = []
        if not dn_rules:
            # Default rule: identify the user from the client subject DN
            # and require the issuer CN to be 'root'.
            dn_rules = [
                {
                    'user.name': 'SSL_CLIENT_SUBJECT_DN_CN',
                    'user.id': 'SSL_CLIENT_SUBJECT_DN_UID',
                    'user.email': 'SSL_CLIENT_SUBJECT_DN_EMAILADDRESS',
                    'user.domain.id': 'SSL_CLIENT_SUBJECT_DN_DC',
                    'user.domain.name': 'SSL_CLIENT_SUBJECT_DN_O',
                    'SSL_CLIENT_ISSUER_DN_CN': ['root']
                }
            ]
        for info in dn_rules:
            # ``index`` numbers the remote attributes in insertion order so
            # the local side can reference them as '{0}', '{1}', ...
            index = 0
            local_user = {}
            remote = []
            for k in info:
                if k == 'user.name':
                    local_user['name'] = '{%s}' % index
                    remote.append({'type': info.get(k)})
                    index += 1
                elif k == 'user.id':
                    local_user['id'] = '{%s}' % index
                    remote.append({'type': info.get(k)})
                    index += 1
                elif k == 'user.email':
                    local_user['email'] = '{%s}' % index
                    remote.append({'type': info.get(k)})
                    index += 1
                elif k == 'user.domain.name' or k == 'user.domain.id':
                    # Both domain keys share the nested 'domain' dict.
                    if not local_user.get('domain'):
                        local_user['domain'] = {}
                    if k == 'user.domain.name':
                        local_user['domain']['name'] = '{%s}' % index
                        remote.append({'type': info.get(k)})
                        index += 1
                    else:
                        local_user['domain']['id'] = '{%s}' % index
                        remote.append({'type': info.get(k)})
                        index += 1
                else:
                    # Non user.* keys act purely as matching constraints.
                    remote.append({
                        'type': k,
                        'any_one_of': info.get(k)
                    })
            rule = {
                'local': [
                    {
                        'user': local_user
                    }
                ],
                'remote': remote
            }
            rules.append(rule)

        mapping = {
            'id': id,
            'rules': rules
        }

        PROVIDERS.federation_api.create_mapping(mapping['id'], mapping)
+
+ def _get_access_token(self, client_id=None, client_cert_content=None,
+ expected_status=http.client.OK):
+ headers = {
+ 'Content-Type': 'application/x-www-form-urlencoded',
+ }
+ data = {
+ 'grant_type': 'client_credentials'
+ }
+ if client_id:
+ data.update({'client_id': client_id})
+ data = parse.urlencode(data).encode()
+ kwargs = {
+ 'headers': headers,
+ 'noauth': True,
+ 'convert': False,
+ 'body': data,
+ 'expected_status': expected_status
+ }
+ if client_cert_content:
+ kwargs.update({'environ': {
+ 'SSL_CLIENT_CERT': client_cert_content
+ }})
+ resp = self.post(
+ self.ACCESS_TOKEN_URL,
+ **kwargs)
+ return resp
+
+ def _get_cert_content(self, cert):
+ return cert.public_bytes(Encoding.PEM).decode('ascii')
+
+ def assertUnauthorizedResp(self, resp):
+ LOG.debug(resp)
+ json_resp = jsonutils.loads(resp.body)
+ self.assertEqual('invalid_client', json_resp['error'])
+ self.assertEqual(
+ 'Client authentication failed.',
+ json_resp['error_description'])
+
+ def test_get_access_token_project_scope(self):
+ """Test case when an access token can be successfully obtain."""
+ self._create_mapping()
+ user, user_domain, user_project = self._create_project_user()
+ *_, client_cert, _ = self._create_certificates(
+ client_dn=unit.create_dn(
+ country_name='jp',
+ state_or_province_name='kanagawa',
+ locality_name='kawasaki',
+ organizational_unit_name='test',
+ user_id=user.get('id'),
+ common_name=user.get('name'),
+ email_address=user.get('email'),
+ domain_component=user_domain.get('id'),
+ organization_name=user_domain.get('name')
+ )
+ )
+
+ cert_content = self._get_cert_content(client_cert)
+ resp = self._get_access_token(
+ client_id=user.get('id'),
+ client_cert_content=cert_content)
+ LOG.debug(resp)
+ json_resp = jsonutils.loads(resp.body)
+ self.assertIn('access_token', json_resp)
+ self.assertEqual('Bearer', json_resp['token_type'])
+ self.assertEqual(3600, json_resp['expires_in'])
+
+ verify_resp = self.get(
+ '/auth/tokens',
+ headers={
+ 'X-Subject-Token': json_resp['access_token'],
+ 'X-Auth-Token': json_resp['access_token']
+ }
+ )
+ self.assertIn('token', verify_resp.result)
+ self.assertIn('oauth2_credential', verify_resp.result['token'])
+ self.assertIn('roles', verify_resp.result['token'])
+ self.assertIn('project', verify_resp.result['token'])
+ self.assertIn('catalog', verify_resp.result['token'])
+ self.assertEqual(user_project.get('id'),
+ verify_resp.result['token']['project']['id'])
+ check_oauth2 = verify_resp.result['token']['oauth2_credential']
+ self.assertEqual(utils.get_certificate_thumbprint(cert_content),
+ check_oauth2['x5t#S256'])
+
+ def test_get_access_token_mapping_config(self):
+ """Test case when an access token can be successfully obtain."""
+ self.config_fixture.config(group='oauth2',
+ oauth2_cert_dn_mapping_id='oauth2_custom')
+ self._create_mapping(
+ id='oauth2_custom',
+ dn_rules=[
+ {
+ 'user.name': 'SSL_CLIENT_SUBJECT_DN_CN',
+ 'user.domain.name': 'SSL_CLIENT_SUBJECT_DN_DC',
+ 'SSL_CLIENT_ISSUER_DN_CN': ['root']
+ }
+ ])
+
+ user, user_domain, user_project = self._create_project_user()
+ *_, client_cert, _ = self._create_certificates(
+ client_dn=unit.create_dn(
+ country_name='jp',
+ state_or_province_name='kanagawa',
+ locality_name='kawasaki',
+ organizational_unit_name='test',
+ user_id='test_UID',
+ common_name=user.get('name'),
+ domain_component=user_domain.get('name'),
+ organization_name='test_O'
+ )
+ )
+
+ cert_content = self._get_cert_content(client_cert)
+ resp = self._get_access_token(
+ client_id=user.get('id'),
+ client_cert_content=cert_content)
+ LOG.debug(resp)
+ json_resp = jsonutils.loads(resp.body)
+ self.assertIn('access_token', json_resp)
+ self.assertEqual('Bearer', json_resp['token_type'])
+ self.assertEqual(3600, json_resp['expires_in'])
+
+ verify_resp = self.get(
+ '/auth/tokens',
+ headers={
+ 'X-Subject-Token': json_resp['access_token'],
+ 'X-Auth-Token': json_resp['access_token']
+ }
+ )
+ self.assertIn('token', verify_resp.result)
+ self.assertIn('oauth2_credential', verify_resp.result['token'])
+ self.assertIn('roles', verify_resp.result['token'])
+ self.assertIn('project', verify_resp.result['token'])
+ self.assertIn('catalog', verify_resp.result['token'])
+ self.assertEqual(user_project.get('id'),
+ verify_resp.result['token']['project']['id'])
+ check_oauth2 = verify_resp.result['token']['oauth2_credential']
+ self.assertEqual(utils.get_certificate_thumbprint(cert_content),
+ check_oauth2['x5t#S256'])
+
+ self.config_fixture.config(group='oauth2',
+ oauth2_cert_dn_mapping_id='oauth2_mapping')
+
+ def test_get_access_token_mapping_multi_ca(self):
+ """Test case when an access token can be successfully obtain."""
+ self.config_fixture.config(group='oauth2',
+ oauth2_cert_dn_mapping_id='oauth2_custom')
+ self._create_mapping(
+ id='oauth2_custom',
+ dn_rules=[
+ {
+ 'user.name': 'SSL_CLIENT_SUBJECT_DN_CN',
+ 'user.id': 'SSL_CLIENT_SUBJECT_DN_UID',
+ 'user.email': 'SSL_CLIENT_SUBJECT_DN_EMAILADDRESS',
+ 'user.domain.id': 'SSL_CLIENT_SUBJECT_DN_DC',
+ 'user.domain.name': 'SSL_CLIENT_SUBJECT_DN_O',
+ 'SSL_CLIENT_ISSUER_DN_CN': ['rootA', 'rootB']
+ },
+ {
+ 'user.name': 'SSL_CLIENT_SUBJECT_DN_CN',
+ 'user.domain.name': 'SSL_CLIENT_SUBJECT_DN_DC',
+ 'SSL_CLIENT_ISSUER_DN_CN': ['rootC']
+ }
+ ])
+
+ # CA rootA OK
+ user, user_domain, user_project = self._create_project_user()
+ *_, client_cert, _ = self._create_certificates(
+ root_dn=unit.create_dn(
+ common_name='rootA'
+ ),
+ client_dn=unit.create_dn(
+ country_name='jp',
+ state_or_province_name='kanagawa',
+ locality_name='kawasaki',
+ organizational_unit_name='test',
+ user_id=user.get('id'),
+ common_name=user.get('name'),
+ email_address=user.get('email'),
+ domain_component=user_domain.get('id'),
+ organization_name=user_domain.get('name')
+ )
+ )
+
+ cert_content = self._get_cert_content(client_cert)
+ resp = self._get_access_token(
+ client_id=user.get('id'),
+ client_cert_content=cert_content)
+ LOG.debug(resp)
+ json_resp = jsonutils.loads(resp.body)
+ self.assertIn('access_token', json_resp)
+ self.assertEqual('Bearer', json_resp['token_type'])
+ self.assertEqual(3600, json_resp['expires_in'])
+
+ verify_resp = self.get(
+ '/auth/tokens',
+ headers={
+ 'X-Subject-Token': json_resp['access_token'],
+ 'X-Auth-Token': json_resp['access_token']
+ }
+ )
+ self.assertIn('token', verify_resp.result)
+ self.assertIn('oauth2_credential', verify_resp.result['token'])
+ self.assertIn('roles', verify_resp.result['token'])
+ self.assertIn('project', verify_resp.result['token'])
+ self.assertIn('catalog', verify_resp.result['token'])
+ self.assertEqual(user_project.get('id'),
+ verify_resp.result['token']['project']['id'])
+ check_oauth2 = verify_resp.result['token']['oauth2_credential']
+ self.assertEqual(utils.get_certificate_thumbprint(cert_content),
+ check_oauth2['x5t#S256'])
+
+ # CA rootB OK
+ user, user_domain, user_project = self._create_project_user()
+ *_, client_cert, _ = self._create_certificates(
+ root_dn=unit.create_dn(
+ common_name='rootB'
+ ),
+ client_dn=unit.create_dn(
+ country_name='jp',
+ state_or_province_name='kanagawa',
+ locality_name='kawasaki',
+ organizational_unit_name='test',
+ user_id=user.get('id'),
+ common_name=user.get('name'),
+ email_address=user.get('email'),
+ domain_component=user_domain.get('id'),
+ organization_name=user_domain.get('name')
+ )
+ )
+
+ cert_content = self._get_cert_content(client_cert)
+ resp = self._get_access_token(
+ client_id=user.get('id'),
+ client_cert_content=cert_content)
+ LOG.debug(resp)
+ json_resp = jsonutils.loads(resp.body)
+ self.assertIn('access_token', json_resp)
+ self.assertEqual('Bearer', json_resp['token_type'])
+ self.assertEqual(3600, json_resp['expires_in'])
+
+ verify_resp = self.get(
+ '/auth/tokens',
+ headers={
+ 'X-Subject-Token': json_resp['access_token'],
+ 'X-Auth-Token': json_resp['access_token']
+ }
+ )
+ self.assertIn('token', verify_resp.result)
+ self.assertIn('oauth2_credential', verify_resp.result['token'])
+ self.assertIn('roles', verify_resp.result['token'])
+ self.assertIn('project', verify_resp.result['token'])
+ self.assertIn('catalog', verify_resp.result['token'])
+ self.assertEqual(user_project.get('id'),
+ verify_resp.result['token']['project']['id'])
+ check_oauth2 = verify_resp.result['token']['oauth2_credential']
+ self.assertEqual(utils.get_certificate_thumbprint(cert_content),
+ check_oauth2['x5t#S256'])
+
+ # CA rootC OK
+ user, user_domain, user_project = self._create_project_user()
+ *_, client_cert, _ = self._create_certificates(
+ root_dn=unit.create_dn(
+ common_name='rootC'
+ ),
+ client_dn=unit.create_dn(
+ country_name='jp',
+ state_or_province_name='kanagawa',
+ locality_name='kawasaki',
+ organizational_unit_name='test',
+ user_id='test_UID',
+ common_name=user.get('name'),
+ domain_component=user_domain.get('name'),
+ organization_name='test_O'
+ )
+ )
+
+ cert_content = self._get_cert_content(client_cert)
+ resp = self._get_access_token(
+ client_id=user.get('id'),
+ client_cert_content=cert_content)
+ LOG.debug(resp)
+ json_resp = jsonutils.loads(resp.body)
+ self.assertIn('access_token', json_resp)
+ self.assertEqual('Bearer', json_resp['token_type'])
+ self.assertEqual(3600, json_resp['expires_in'])
+
+ verify_resp = self.get(
+ '/auth/tokens',
+ headers={
+ 'X-Subject-Token': json_resp['access_token'],
+ 'X-Auth-Token': json_resp['access_token']
+ }
+ )
+ self.assertIn('token', verify_resp.result)
+ self.assertIn('oauth2_credential', verify_resp.result['token'])
+ self.assertIn('roles', verify_resp.result['token'])
+ self.assertIn('project', verify_resp.result['token'])
+ self.assertIn('catalog', verify_resp.result['token'])
+ self.assertEqual(user_project.get('id'),
+ verify_resp.result['token']['project']['id'])
+ check_oauth2 = verify_resp.result['token']['oauth2_credential']
+ self.assertEqual(utils.get_certificate_thumbprint(cert_content),
+ check_oauth2['x5t#S256'])
+
+ # CA not found NG
+ user, user_domain, user_project = self._create_project_user()
+ *_, client_cert, _ = self._create_certificates(
+ root_dn=unit.create_dn(
+ common_name='root_other'
+ ),
+ client_dn=unit.create_dn(
+ country_name='jp',
+ state_or_province_name='kanagawa',
+ locality_name='kawasaki',
+ organizational_unit_name='test',
+ user_id=user.get('id'),
+ common_name=user.get('name'),
+ email_address=user.get('email'),
+ domain_component=user_domain.get('id'),
+ organization_name=user_domain.get('name')
+ )
+ )
+ cert_content = self._get_cert_content(client_cert)
+ resp = self._get_access_token(
+ client_id=user.get('id'),
+ client_cert_content=cert_content,
+ expected_status=http.client.UNAUTHORIZED
+ )
+ self.assertUnauthorizedResp(resp)
+ self.assertIn(
+ 'Get OAuth2.0 Access Token API: '
+ 'mapping rule process failed.',
+ self.log_fix.output)
+
+ self.config_fixture.config(group='oauth2',
+ oauth2_cert_dn_mapping_id='oauth2_mapping')
+
+ def test_get_access_token_no_default_mapping(self):
+ user, user_domain, _ = self._create_project_user()
+ *_, client_cert, _ = self._create_certificates(
+ client_dn=unit.create_dn(
+ country_name='jp',
+ state_or_province_name='kanagawa',
+ locality_name='kawasaki',
+ organizational_unit_name='test',
+ user_id=user.get('id'),
+ common_name=user.get('name'),
+ email_address=user.get('email'),
+ domain_component=user_domain.get('id'),
+ organization_name=user_domain.get('name')
+ )
+ )
+
+ cert_content = self._get_cert_content(client_cert)
+ resp = self._get_access_token(
+ client_id=user.get('id'),
+ client_cert_content=cert_content,
+ expected_status=http.client.UNAUTHORIZED
+ )
+ self.assertUnauthorizedResp(resp)
+ self.assertIn(
+ 'Get OAuth2.0 Access Token API: '
+ 'mapping id %s is not found. ' % 'oauth2_mapping',
+ self.log_fix.output)
+
+ def test_get_access_token_no_custom_mapping(self):
+ self.config_fixture.config(group='oauth2',
+ oauth2_cert_dn_mapping_id='oauth2_custom')
+ self._create_mapping()
+ user, user_domain, _ = self._create_project_user()
+ *_, client_cert, _ = self._create_certificates(
+ client_dn=unit.create_dn(
+ country_name='jp',
+ state_or_province_name='kanagawa',
+ locality_name='kawasaki',
+ organizational_unit_name='test',
+ user_id=user.get('id'),
+ common_name=user.get('name'),
+ email_address=user.get('email'),
+ domain_component=user_domain.get('id'),
+ organization_name=user_domain.get('name')
+ )
+ )
+
+ cert_content = self._get_cert_content(client_cert)
+ resp = self._get_access_token(
+ client_id=user.get('id'),
+ client_cert_content=cert_content,
+ expected_status=http.client.UNAUTHORIZED
+ )
+ self.assertUnauthorizedResp(resp)
+ self.assertIn(
+ 'Get OAuth2.0 Access Token API: '
+ 'mapping id %s is not found. ' % 'oauth2_custom',
+ self.log_fix.output)
+ self.config_fixture.config(group='oauth2',
+ oauth2_cert_dn_mapping_id='oauth2_mapping')
+
+ def test_get_access_token_ignore_userid(self):
+ """Test case when an access token can be successfully obtain."""
+ self._create_mapping(dn_rules=[
+ {
+ 'user.name': 'SSL_CLIENT_SUBJECT_DN_CN',
+ 'user.email': 'SSL_CLIENT_SUBJECT_DN_EMAILADDRESS',
+ 'user.domain.id': 'SSL_CLIENT_SUBJECT_DN_DC',
+ 'user.domain.name': 'SSL_CLIENT_SUBJECT_DN_O',
+ 'SSL_CLIENT_ISSUER_DN_CN': ['root']
+ }])
+
+ user, user_domain, user_project = self._create_project_user()
+ *_, client_cert, _ = self._create_certificates(
+ client_dn=unit.create_dn(
+ country_name='jp',
+ state_or_province_name='kanagawa',
+ locality_name='kawasaki',
+ organizational_unit_name='test',
+ user_id=user.get('id') + "_diff",
+ common_name=user.get('name'),
+ email_address=user.get('email'),
+ domain_component=user_domain.get('id'),
+ organization_name=user_domain.get('name')
+ )
+ )
+
+ cert_content = self._get_cert_content(client_cert)
+ resp = self._get_access_token(
+ client_id=user.get('id'),
+ client_cert_content=cert_content)
+ LOG.debug(resp)
+ json_resp = jsonutils.loads(resp.body)
+ self.assertIn('access_token', json_resp)
+ self.assertEqual('Bearer', json_resp['token_type'])
+ self.assertEqual(3600, json_resp['expires_in'])
+
+ verify_resp = self.get(
+ '/auth/tokens',
+ headers={
+ 'X-Subject-Token': json_resp['access_token'],
+ 'X-Auth-Token': json_resp['access_token']
+ }
+ )
+ self.assertIn('token', verify_resp.result)
+ self.assertIn('oauth2_credential', verify_resp.result['token'])
+ self.assertIn('roles', verify_resp.result['token'])
+ self.assertIn('project', verify_resp.result['token'])
+ self.assertIn('catalog', verify_resp.result['token'])
+ self.assertEqual(user_project.get('id'),
+ verify_resp.result['token']['project']['id'])
+ check_oauth2 = verify_resp.result['token']['oauth2_credential']
+ self.assertEqual(utils.get_certificate_thumbprint(cert_content),
+ check_oauth2['x5t#S256'])
+
+ def test_get_access_token_ignore_username(self):
+ """Test case when an access token can be successfully obtain."""
+ self._create_mapping(dn_rules=[
+ {
+ 'user.id': 'SSL_CLIENT_SUBJECT_DN_UID',
+ 'user.email': 'SSL_CLIENT_SUBJECT_DN_EMAILADDRESS',
+ 'user.domain.id': 'SSL_CLIENT_SUBJECT_DN_DC',
+ 'user.domain.name': 'SSL_CLIENT_SUBJECT_DN_O',
+ 'SSL_CLIENT_ISSUER_DN_CN': ['root']
+ }])
+ user, user_domain, user_project = self._create_project_user()
+ *_, client_cert, _ = self._create_certificates(
+ client_dn=unit.create_dn(
+ country_name='jp',
+ state_or_province_name='kanagawa',
+ locality_name='kawasaki',
+ organizational_unit_name='test',
+ user_id=user.get('id'),
+ email_address=user.get('email'),
+ domain_component=user_domain.get('id'),
+ organization_name=user_domain.get('name')
+ )
+ )
+
+ cert_content = self._get_cert_content(client_cert)
+ resp = self._get_access_token(
+ client_id=user.get('id'),
+ client_cert_content=cert_content)
+ LOG.debug(resp)
+ json_resp = jsonutils.loads(resp.body)
+ self.assertIn('access_token', json_resp)
+ self.assertEqual('Bearer', json_resp['token_type'])
+ self.assertEqual(3600, json_resp['expires_in'])
+
+ verify_resp = self.get(
+ '/auth/tokens',
+ headers={
+ 'X-Subject-Token': json_resp['access_token'],
+ 'X-Auth-Token': json_resp['access_token']
+ }
+ )
+ self.assertIn('token', verify_resp.result)
+ self.assertIn('oauth2_credential', verify_resp.result['token'])
+ self.assertIn('roles', verify_resp.result['token'])
+ self.assertIn('project', verify_resp.result['token'])
+ self.assertIn('catalog', verify_resp.result['token'])
+ self.assertEqual(user_project.get('id'),
+ verify_resp.result['token']['project']['id'])
+ check_oauth2 = verify_resp.result['token']['oauth2_credential']
+ self.assertEqual(utils.get_certificate_thumbprint(cert_content),
+ check_oauth2['x5t#S256'])
+
+ def test_get_access_token_ignore_email(self):
+ """Token is issued without an email address in the DN; mapping omits user.email."""
+ self._create_mapping(dn_rules=[
+ {
+ 'user.name': 'SSL_CLIENT_SUBJECT_DN_CN',
+ 'user.id': 'SSL_CLIENT_SUBJECT_DN_UID',
+ 'user.domain.id': 'SSL_CLIENT_SUBJECT_DN_DC',
+ 'user.domain.name': 'SSL_CLIENT_SUBJECT_DN_O',
+ 'SSL_CLIENT_ISSUER_DN_CN': ['root']
+ }])
+ user, user_domain, user_project = self._create_project_user()
+ *_, client_cert, _ = self._create_certificates(
+ client_dn=unit.create_dn(
+ country_name='jp',
+ state_or_province_name='kanagawa',
+ locality_name='kawasaki',
+ organizational_unit_name='test',
+ user_id=user.get('id'),
+ common_name=user.get('name'),
+ domain_component=user_domain.get('id'),
+ organization_name=user_domain.get('name')
+ )
+ )
+
+ cert_content = self._get_cert_content(client_cert)
+ resp = self._get_access_token(
+ client_id=user.get('id'),
+ client_cert_content=cert_content)
+ LOG.debug(resp)
+ json_resp = jsonutils.loads(resp.body)
+ self.assertIn('access_token', json_resp)
+ self.assertEqual('Bearer', json_resp['token_type'])
+ self.assertEqual(3600, json_resp['expires_in'])
+
+ verify_resp = self.get(
+ '/auth/tokens',
+ headers={
+ 'X-Subject-Token': json_resp['access_token'],
+ 'X-Auth-Token': json_resp['access_token']
+ }
+ )
+ self.assertIn('token', verify_resp.result)
+ self.assertIn('oauth2_credential', verify_resp.result['token'])
+ self.assertIn('roles', verify_resp.result['token'])
+ self.assertIn('project', verify_resp.result['token'])
+ self.assertIn('catalog', verify_resp.result['token'])
+ self.assertEqual(user_project.get('id'),
+ verify_resp.result['token']['project']['id'])
+ check_oauth2 = verify_resp.result['token']['oauth2_credential']
+ self.assertEqual(utils.get_certificate_thumbprint(cert_content),
+ check_oauth2['x5t#S256'])
+
+ def test_get_access_token_ignore_domain_id(self):
+ """Token is issued even though the DN domain id differs; mapping omits user.domain.id."""
+ self._create_mapping(dn_rules=[
+ {
+ 'user.name': 'SSL_CLIENT_SUBJECT_DN_CN',
+ 'user.id': 'SSL_CLIENT_SUBJECT_DN_UID',
+ 'user.email': 'SSL_CLIENT_SUBJECT_DN_EMAILADDRESS',
+ 'user.domain.name': 'SSL_CLIENT_SUBJECT_DN_O',
+ 'SSL_CLIENT_ISSUER_DN_CN': ['root']
+ }])
+ user, user_domain, user_project = self._create_project_user()
+ *_, client_cert, _ = self._create_certificates(
+ client_dn=unit.create_dn(
+ country_name='jp',
+ state_or_province_name='kanagawa',
+ locality_name='kawasaki',
+ organizational_unit_name='test',
+ user_id=user.get('id'),
+ common_name=user.get('name'),
+ email_address=user.get('email'),
+ domain_component=user_domain.get('id') + "_diff",
+ organization_name=user_domain.get('name')
+ )
+ )
+
+ cert_content = self._get_cert_content(client_cert)
+ resp = self._get_access_token(
+ client_id=user.get('id'),
+ client_cert_content=cert_content)
+ LOG.debug(resp)
+ json_resp = jsonutils.loads(resp.body)
+ self.assertIn('access_token', json_resp)
+ self.assertEqual('Bearer', json_resp['token_type'])
+ self.assertEqual(3600, json_resp['expires_in'])
+
+ verify_resp = self.get(
+ '/auth/tokens',
+ headers={
+ 'X-Subject-Token': json_resp['access_token'],
+ 'X-Auth-Token': json_resp['access_token']
+ }
+ )
+ self.assertIn('token', verify_resp.result)
+ self.assertIn('oauth2_credential', verify_resp.result['token'])
+ self.assertIn('roles', verify_resp.result['token'])
+ self.assertIn('project', verify_resp.result['token'])
+ self.assertIn('catalog', verify_resp.result['token'])
+ self.assertEqual(user_project.get('id'),
+ verify_resp.result['token']['project']['id'])
+ check_oauth2 = verify_resp.result['token']['oauth2_credential']
+ self.assertEqual(utils.get_certificate_thumbprint(cert_content),
+ check_oauth2['x5t#S256'])
+
+ def test_get_access_token_ignore_domain_name(self):
+ """Token is issued without an O (organization) in the DN; mapping omits user.domain.name."""
+ self._create_mapping(dn_rules=[
+ {
+ 'user.name': 'SSL_CLIENT_SUBJECT_DN_CN',
+ 'user.id': 'SSL_CLIENT_SUBJECT_DN_UID',
+ 'user.email': 'SSL_CLIENT_SUBJECT_DN_EMAILADDRESS',
+ 'user.domain.id': 'SSL_CLIENT_SUBJECT_DN_DC',
+ 'SSL_CLIENT_ISSUER_DN_CN': ['root']
+ }])
+ user, user_domain, user_project = self._create_project_user()
+ *_, client_cert, _ = self._create_certificates(
+ client_dn=unit.create_dn(
+ country_name='jp',
+ state_or_province_name='kanagawa',
+ locality_name='kawasaki',
+ organizational_unit_name='test',
+ user_id=user.get('id'),
+ common_name=user.get('name'),
+ email_address=user.get('email'),
+ domain_component=user_domain.get('id')
+ )
+ )
+
+ cert_content = self._get_cert_content(client_cert)
+ resp = self._get_access_token(
+ client_id=user.get('id'),
+ client_cert_content=cert_content)
+ LOG.debug(resp)
+ json_resp = jsonutils.loads(resp.body)
+ self.assertIn('access_token', json_resp)
+ self.assertEqual('Bearer', json_resp['token_type'])
+ self.assertEqual(3600, json_resp['expires_in'])
+
+ verify_resp = self.get(
+ '/auth/tokens',
+ headers={
+ 'X-Subject-Token': json_resp['access_token'],
+ 'X-Auth-Token': json_resp['access_token']
+ }
+ )
+ self.assertIn('token', verify_resp.result)
+ self.assertIn('oauth2_credential', verify_resp.result['token'])
+ self.assertIn('roles', verify_resp.result['token'])
+ self.assertIn('project', verify_resp.result['token'])
+ self.assertIn('catalog', verify_resp.result['token'])
+ self.assertEqual(user_project.get('id'),
+ verify_resp.result['token']['project']['id'])
+ check_oauth2 = verify_resp.result['token']['oauth2_credential']
+ self.assertEqual(utils.get_certificate_thumbprint(cert_content),
+ check_oauth2['x5t#S256'])
+
+ def test_get_access_token_ignore_all(self):
+ """Token is issued when the mapping checks only the issuer CN, ignoring all subject DN fields."""
+ self._create_mapping(dn_rules=[
+ {
+ 'SSL_CLIENT_ISSUER_DN_CN': ['root']
+ }])
+ user, user_domain, user_project = self._create_project_user()
+ *_, client_cert, _ = self._create_certificates(
+ client_dn=unit.create_dn(
+ country_name='jp',
+ state_or_province_name='kanagawa',
+ locality_name='kawasaki',
+ organizational_unit_name='test',
+ user_id=user.get('id') + "_diff",
+ common_name=user.get('name') + "_diff",
+ email_address=user.get('email') + "_diff",
+ domain_component=user_domain.get('id') + "_diff"
+ )
+ )
+
+ cert_content = self._get_cert_content(client_cert)
+ resp = self._get_access_token(
+ client_id=user.get('id'),
+ client_cert_content=cert_content)
+ LOG.debug(resp)
+ json_resp = jsonutils.loads(resp.body)
+ self.assertIn('access_token', json_resp)
+ self.assertEqual('Bearer', json_resp['token_type'])
+ self.assertEqual(3600, json_resp['expires_in'])
+
+ verify_resp = self.get(
+ '/auth/tokens',
+ headers={
+ 'X-Subject-Token': json_resp['access_token'],
+ 'X-Auth-Token': json_resp['access_token']
+ }
+ )
+ self.assertIn('token', verify_resp.result)
+ self.assertIn('oauth2_credential', verify_resp.result['token'])
+ self.assertIn('roles', verify_resp.result['token'])
+ self.assertIn('project', verify_resp.result['token'])
+ self.assertIn('catalog', verify_resp.result['token'])
+ self.assertEqual(user_project.get('id'),
+ verify_resp.result['token']['project']['id'])
+ check_oauth2 = verify_resp.result['token']['oauth2_credential']
+ self.assertEqual(utils.get_certificate_thumbprint(cert_content),
+ check_oauth2['x5t#S256'])
+
+ def test_get_access_token_no_roles_project_scope(self):
+ self._create_mapping()
+ user, user_domain, _ = self._create_project_user(no_roles=True)
+ *_, client_cert, _ = self._create_certificates(
+ client_dn=unit.create_dn(
+ country_name='jp',
+ state_or_province_name='kanagawa',
+ locality_name='kawasaki',
+ organizational_unit_name='test',
+ user_id=user.get('id'),
+ common_name=user.get('name'),
+ email_address=user.get('email'),
+ domain_component=user_domain.get('id'),
+ organization_name=user_domain.get('name')
+ )
+ )
+
+ cert_content = self._get_cert_content(client_cert)
+ resp = self._get_access_token(
+ client_id=user.get('id'),
+ client_cert_content=cert_content,
+ expected_status=http.client.UNAUTHORIZED)
+ LOG.debug(resp)
+
+ def test_get_access_token_no_default_project_id(self):
+ self._create_mapping()
+ user, user_domain, _ = self._create_project_user(no_roles=True)
+ user['default_project_id'] = None
+ *_, client_cert, _ = self._create_certificates(
+ client_dn=unit.create_dn(
+ country_name='jp',
+ state_or_province_name='kanagawa',
+ locality_name='kawasaki',
+ organizational_unit_name='test',
+ user_id=user.get('id'),
+ common_name=user.get('name'),
+ email_address=user.get('email'),
+ domain_component=user_domain.get('id'),
+ organization_name=user_domain.get('name')
+ )
+ )
+
+ cert_content = self._get_cert_content(client_cert)
+ _ = self._get_access_token(
+ client_id=user.get('id'),
+ client_cert_content=cert_content,
+ expected_status=http.client.UNAUTHORIZED)
+
+ def test_get_access_token_without_client_id(self):
+ self._create_mapping()
+ cert_content = self._get_cert_content(self.client_cert)
+ resp = self._get_access_token(
+ client_cert_content=cert_content,
+ expected_status=http.client.UNAUTHORIZED
+ )
+ self.assertUnauthorizedResp(resp)
+ self.assertIn('Get OAuth2.0 Access Token API: '
+ 'failed to get a client_id from the request.',
+ self.log_fix.output)
+
+ def test_get_access_token_without_client_cert(self):
+ self._create_mapping()
+ resp = self._get_access_token(
+ client_id=self.oauth2_user.get('id'),
+ expected_status=http.client.UNAUTHORIZED
+ )
+ self.assertUnauthorizedResp(resp)
+ self.assertIn('Get OAuth2.0 Access Token API: '
+ 'failed to get client credentials from the request.',
+ self.log_fix.output)
+
+ @mock.patch.object(utils, 'get_certificate_subject_dn')
+ def test_get_access_token_failed_to_get_cert_subject_dn(
+ self, mock_get_certificate_subject_dn):
+ self._create_mapping()
+ mock_get_certificate_subject_dn.side_effect = \
+ exception.ValidationError('Boom!')
+ cert_content = self._get_cert_content(self.client_cert)
+ resp = self._get_access_token(
+ client_id=self.oauth2_user.get('id'),
+ client_cert_content=cert_content,
+ expected_status=http.client.UNAUTHORIZED
+ )
+ self.assertUnauthorizedResp(resp)
+ self.assertIn('Get OAuth2.0 Access Token API: '
+ 'failed to get the subject DN from the certificate.',
+ self.log_fix.output)
+
+ @mock.patch.object(utils, 'get_certificate_issuer_dn')
+ def test_get_access_token_failed_to_get_cert_issuer_dn(
+ self, mock_get_certificate_issuer_dn):
+ self._create_mapping()
+ mock_get_certificate_issuer_dn.side_effect = \
+ exception.ValidationError('Boom!')
+ cert_content = self._get_cert_content(self.client_cert)
+ resp = self._get_access_token(
+ client_id=self.oauth2_user.get('id'),
+ client_cert_content=cert_content,
+ expected_status=http.client.UNAUTHORIZED
+ )
+ self.assertUnauthorizedResp(resp)
+ self.assertIn('Get OAuth2.0 Access Token API: '
+ 'failed to get the issuer DN from the certificate.',
+ self.log_fix.output)
+
+ def test_get_access_token_user_not_exist(self):
+ self._create_mapping()
+ cert_content = self._get_cert_content(self.client_cert)
+ user_id_not_exist = 'user_id_not_exist'
+ resp = self._get_access_token(
+ client_id=user_id_not_exist,
+ client_cert_content=cert_content,
+ expected_status=http.client.UNAUTHORIZED
+ )
+ self.assertUnauthorizedResp(resp)
+ self.assertIn(
+ 'Get OAuth2.0 Access Token API: '
+ 'the user does not exist. user id: %s'
+ % user_id_not_exist,
+ self.log_fix.output)
+
+ def test_get_access_token_cert_dn_not_match_user_id(self):
+ self._create_mapping()
+ user, user_domain, _ = self._create_project_user()
+ *_, client_cert, _ = self._create_certificates(
+ client_dn=unit.create_dn(
+ country_name='jp',
+ state_or_province_name='kanagawa',
+ locality_name='kawasaki',
+ organizational_unit_name='test',
+ user_id=user.get('id') + "_diff",
+ common_name=user.get('name'),
+ email_address=user.get('email'),
+ domain_component=user_domain.get('id'),
+ organization_name=user_domain.get('name')
+ )
+ )
+
+ cert_content = self._get_cert_content(client_cert)
+ resp = self._get_access_token(
+ client_id=user.get('id'),
+ client_cert_content=cert_content,
+ expected_status=http.client.UNAUTHORIZED
+ )
+ self.assertUnauthorizedResp(resp)
+ self.assertIn(
+ 'Get OAuth2.0 Access Token API: %s check failed. '
+ 'DN value: %s, DB value: %s.' % (
+ 'user id',
+ user.get('id') + '_diff',
+ user.get('id')),
+ self.log_fix.output)
+
+ def test_get_access_token_cert_dn_not_match_user_name(self):
+ self._create_mapping()
+ user, user_domain, _ = self._create_project_user()
+ *_, client_cert, _ = self._create_certificates(
+ client_dn=unit.create_dn(
+ country_name='jp',
+ state_or_province_name='kanagawa',
+ locality_name='kawasaki',
+ organizational_unit_name='test',
+ user_id=user.get('id'),
+ common_name=user.get('name') + "_diff",
+ email_address=user.get('email'),
+ domain_component=user_domain.get('id'),
+ organization_name=user_domain.get('name')
+ )
+ )
+
+ cert_content = self._get_cert_content(client_cert)
+ resp = self._get_access_token(
+ client_id=user.get('id'),
+ client_cert_content=cert_content,
+ expected_status=http.client.UNAUTHORIZED
+ )
+ self.assertUnauthorizedResp(resp)
+ self.assertIn(
+ 'Get OAuth2.0 Access Token API: %s check failed. '
+ 'DN value: %s, DB value: %s.' % (
+ 'user name',
+ user.get('name') + '_diff',
+ user.get('name')),
+ self.log_fix.output)
+
+ def test_get_access_token_cert_dn_not_match_email(self):
+ self._create_mapping()
+ user, user_domain, _ = self._create_project_user()
+ *_, client_cert, _ = self._create_certificates(
+ client_dn=unit.create_dn(
+ country_name='jp',
+ state_or_province_name='kanagawa',
+ locality_name='kawasaki',
+ organizational_unit_name='test',
+ user_id=user.get('id'),
+ common_name=user.get('name'),
+ email_address=user.get('email') + "_diff",
+ domain_component=user_domain.get('id'),
+ organization_name=user_domain.get('name')
+ )
+ )
+
+ cert_content = self._get_cert_content(client_cert)
+ resp = self._get_access_token(
+ client_id=user.get('id'),
+ client_cert_content=cert_content,
+ expected_status=http.client.UNAUTHORIZED
+ )
+ self.assertUnauthorizedResp(resp)
+ self.assertIn(
+ 'Get OAuth2.0 Access Token API: %s check failed. '
+ 'DN value: %s, DB value: %s.' % (
+ 'user email',
+ user.get('email') + '_diff',
+ user.get('email')),
+ self.log_fix.output)
+
+ def test_get_access_token_cert_dn_not_match_domain_id(self):
+ self._create_mapping()
+ user, user_domain, _ = self._create_project_user()
+ *_, client_cert, _ = self._create_certificates(
+ client_dn=unit.create_dn(
+ country_name='jp',
+ state_or_province_name='kanagawa',
+ locality_name='kawasaki',
+ organizational_unit_name='test',
+ user_id=user.get('id'),
+ common_name=user.get('name'),
+ email_address=user.get('email'),
+ domain_component=user_domain.get('id') + "_diff",
+ organization_name=user_domain.get('name')
+ )
+ )
+
+ cert_content = self._get_cert_content(client_cert)
+ resp = self._get_access_token(
+ client_id=user.get('id'),
+ client_cert_content=cert_content,
+ expected_status=http.client.UNAUTHORIZED
+ )
+ self.assertUnauthorizedResp(resp)
+ self.assertIn(
+ 'Get OAuth2.0 Access Token API: %s check failed. '
+ 'DN value: %s, DB value: %s.' % (
+ 'user domain id',
+ user_domain.get('id') + '_diff',
+ user_domain.get('id')),
+ self.log_fix.output)
+
+ def test_get_access_token_cert_dn_not_match_domain_name(self):
+ self._create_mapping()
+ user, user_domain, _ = self._create_project_user()
+ *_, client_cert, _ = self._create_certificates(
+ client_dn=unit.create_dn(
+ country_name='jp',
+ state_or_province_name='kanagawa',
+ locality_name='kawasaki',
+ organizational_unit_name='test',
+ user_id=user.get('id'),
+ common_name=user.get('name'),
+ email_address=user.get('email'),
+ domain_component=user_domain.get('id'),
+ organization_name=user_domain.get('name') + "_diff"
+ )
+ )
+
+ cert_content = self._get_cert_content(client_cert)
+ resp = self._get_access_token(
+ client_id=user.get('id'),
+ client_cert_content=cert_content,
+ expected_status=http.client.UNAUTHORIZED
+ )
+ self.assertUnauthorizedResp(resp)
+ self.assertIn(
+ 'Get OAuth2.0 Access Token API: %s check failed. '
+ 'DN value: %s, DB value: %s.' % (
+ 'user domain name',
+ user_domain.get('name') + '_diff',
+ user_domain.get('name')),
+ self.log_fix.output)
+
+ def test_get_access_token_cert_dn_missing_user_id(self):
+ self._create_mapping()
+ user, user_domain, _ = self._create_project_user()
+ *_, client_cert, _ = self._create_certificates(
+ client_dn=unit.create_dn(
+ country_name='jp',
+ state_or_province_name='kanagawa',
+ locality_name='kawasaki',
+ organizational_unit_name='test',
+ common_name=user.get('name'),
+ email_address=user.get('email'),
+ domain_component=user_domain.get('id'),
+ organization_name=user_domain.get('name')
+ )
+ )
+
+ cert_content = self._get_cert_content(client_cert)
+ resp = self._get_access_token(
+ client_id=user.get('id'),
+ client_cert_content=cert_content,
+ expected_status=http.client.UNAUTHORIZED
+ )
+ self.assertUnauthorizedResp(resp)
+ self.assertIn(
+ 'Get OAuth2.0 Access Token API: '
+ 'mapping rule process failed.',
+ self.log_fix.output)
+
+ def test_get_access_token_cert_dn_missing_user_name(self):
+ self._create_mapping()
+ user, user_domain, _ = self._create_project_user()
+ *_, client_cert, _ = self._create_certificates(
+ client_dn=unit.create_dn(
+ country_name='jp',
+ state_or_province_name='kanagawa',
+ locality_name='kawasaki',
+ organizational_unit_name='test',
+ user_id=user.get('id'),
+ email_address=user.get('email'),
+ domain_component=user_domain.get('id'),
+ organization_name=user_domain.get('name')
+ )
+ )
+
+ cert_content = self._get_cert_content(client_cert)
+ resp = self._get_access_token(
+ client_id=user.get('id'),
+ client_cert_content=cert_content,
+ expected_status=http.client.UNAUTHORIZED
+ )
+ self.assertUnauthorizedResp(resp)
+ self.assertIn(
+ 'Get OAuth2.0 Access Token API: '
+ 'mapping rule process failed.',
+ self.log_fix.output)
+
+ def test_get_access_token_cert_dn_missing_email(self):
+ self._create_mapping()
+ user, user_domain, _ = self._create_project_user()
+ *_, client_cert, _ = self._create_certificates(
+ client_dn=unit.create_dn(
+ country_name='jp',
+ state_or_province_name='kanagawa',
+ locality_name='kawasaki',
+ organizational_unit_name='test',
+ user_id=user.get('id'),
+ common_name=user.get('name'),
+ domain_component=user_domain.get('id'),
+ organization_name=user_domain.get('name')
+ )
+ )
+
+ cert_content = self._get_cert_content(client_cert)
+ resp = self._get_access_token(
+ client_id=user.get('id'),
+ client_cert_content=cert_content,
+ expected_status=http.client.UNAUTHORIZED
+ )
+ self.assertUnauthorizedResp(resp)
+ self.assertIn(
+ 'Get OAuth2.0 Access Token API: '
+ 'mapping rule process failed.',
+ self.log_fix.output)
+
+ def test_get_access_token_cert_dn_missing_domain_id(self):
+ self._create_mapping()
+ user, user_domain, _ = self._create_project_user()
+ *_, client_cert, _ = self._create_certificates(
+ client_dn=unit.create_dn(
+ country_name='jp',
+ state_or_province_name='kanagawa',
+ locality_name='kawasaki',
+ organizational_unit_name='test',
+ user_id=user.get('id'),
+ common_name=user.get('name'),
+ email_address=user.get('email'),
+ organization_name=user_domain.get('name')
+ )
+ )
+
+ cert_content = self._get_cert_content(client_cert)
+ resp = self._get_access_token(
+ client_id=user.get('id'),
+ client_cert_content=cert_content,
+ expected_status=http.client.UNAUTHORIZED
+ )
+ self.assertUnauthorizedResp(resp)
+ self.assertIn(
+ 'Get OAuth2.0 Access Token API: '
+ 'mapping rule process failed.',
+ self.log_fix.output)
+
+ def test_get_access_token_cert_dn_missing_domain_name(self):
+ self._create_mapping()
+ user, user_domain, _ = self._create_project_user()
+ *_, client_cert, _ = self._create_certificates(
+ client_dn=unit.create_dn(
+ country_name='jp',
+ state_or_province_name='kanagawa',
+ locality_name='kawasaki',
+ organizational_unit_name='test',
+ user_id=user.get('id'),
+ common_name=user.get('name'),
+ email_address=user.get('email'),
+ domain_component=user_domain.get('id'),
+ )
+ )
+
+ cert_content = self._get_cert_content(client_cert)
+ resp = self._get_access_token(
+ client_id=user.get('id'),
+ client_cert_content=cert_content,
+ expected_status=http.client.UNAUTHORIZED
+ )
+ self.assertUnauthorizedResp(resp)
+ self.assertIn(
+ 'Get OAuth2.0 Access Token API: '
+ 'mapping rule process failed.',
+ self.log_fix.output)
+
+ @mock.patch.object(Manager, 'issue_token')
+ def test_get_access_token_issue_token_ks_error_400(self, mock_issue_token):
+ self._create_mapping()
+ err_msg = 'Boom!'
+ mock_issue_token.side_effect = exception.ValidationError(err_msg)
+ cert_content = self._get_cert_content(self.client_cert)
+ resp = self._get_access_token(
+ client_id=self.oauth2_user.get('id'),
+ client_cert_content=cert_content,
+ expected_status=http.client.BAD_REQUEST
+ )
+ LOG.debug(resp)
+ json_resp = jsonutils.loads(resp.body)
+ self.assertEqual('invalid_request', json_resp['error'])
+ self.assertEqual(err_msg, json_resp['error_description'])
+ self.assertIn(err_msg, self.log_fix.output)
+
+ @mock.patch.object(Manager, 'issue_token')
+ def test_get_access_token_issue_token_ks_error_401(self, mock_issue_token):
+ self._create_mapping()
+ err_msg = 'Boom!'
+ mock_issue_token.side_effect = exception.Unauthorized(err_msg)
+ cert_content = self._get_cert_content(self.client_cert)
+ resp = self._get_access_token(
+ client_id=self.oauth2_user.get('id'),
+ client_cert_content=cert_content,
+ expected_status=http.client.UNAUTHORIZED
+ )
+ LOG.debug(resp)
+ json_resp = jsonutils.loads(resp.body)
+ self.assertEqual('invalid_client', json_resp['error'])
+ self.assertEqual(
+ 'The request you have made requires authentication.',
+ json_resp['error_description'])
+
+ @mock.patch.object(Manager, 'issue_token')
+ def test_get_access_token_issue_token_ks_error_other(
+ self, mock_issue_token):
+ self._create_mapping()
+ err_msg = 'Boom!'
+ mock_issue_token.side_effect = exception.NotImplemented(err_msg)
+ cert_content = self._get_cert_content(self.client_cert)
+ resp = self._get_access_token(
+ client_id=self.oauth2_user.get('id'),
+ client_cert_content=cert_content,
+ expected_status=exception.NotImplemented.code
+ )
+ LOG.debug(resp)
+ json_resp = jsonutils.loads(resp.body)
+ self.assertEqual('other_error', json_resp['error'])
+ self.assertEqual(
+ 'An unknown error occurred and failed to get an OAuth2.0 '
+ 'access token.',
+ json_resp['error_description'])
+
+ @mock.patch.object(Manager, 'issue_token')
+ def test_get_access_token_issue_token_other_exception(
+ self, mock_issue_token):
+ self._create_mapping()
+ err_msg = 'Boom!'
+ mock_issue_token.side_effect = Exception(err_msg)
+ cert_content = self._get_cert_content(self.client_cert)
+ resp = self._get_access_token(
+ client_id=self.oauth2_user.get('id'),
+ client_cert_content=cert_content,
+ expected_status=http.client.INTERNAL_SERVER_ERROR
+ )
+ LOG.debug(resp)
+ json_resp = jsonutils.loads(resp.body)
+ self.assertEqual('other_error', json_resp['error'])
+ self.assertEqual(err_msg, json_resp['error_description'])
+
+ @mock.patch.object(RuleProcessor, 'process')
+ def test_get_access_token_process_other_exception(
+ self, mock_process):
+ self._create_mapping()
+ err_msg = 'Boom!'
+ mock_process.side_effect = Exception(err_msg)
+ cert_content = self._get_cert_content(self.client_cert)
+ resp = self._get_access_token(
+ client_id=self.oauth2_user.get('id'),
+ client_cert_content=cert_content,
+ expected_status=http.client.INTERNAL_SERVER_ERROR
+ )
+ LOG.debug(resp)
+ json_resp = jsonutils.loads(resp.body)
+ self.assertEqual('other_error', json_resp['error'])
+ self.assertEqual(err_msg, json_resp['error_description'])
+ self.assertIn(
+ 'Get OAuth2.0 Access Token API: '
+ 'mapping rule process failed.',
+ self.log_fix.output)
diff --git a/keystone/tests/unit/test_versions.py b/keystone/tests/unit/test_versions.py
index b509d2446..490f19364 100644
--- a/keystone/tests/unit/test_versions.py
+++ b/keystone/tests/unit/test_versions.py
@@ -371,6 +371,9 @@ V3_JSON_HOME_RESOURCES = {
'href-template': '/users/{user_id}/projects',
'href-vars': {'user_id': json_home.Parameters.USER_ID, }},
json_home.build_v3_resource_relation('users'): {'href': '/users'},
+ json_home.build_v3_extension_resource_relation(
+ 'OS-OAUTH2', '1.0', 'token'): {
+ 'href': '/OS-OAUTH2/token'},
_build_federation_rel(resource_name='domains'): {
'href': '/auth/domains'},
_build_federation_rel(resource_name='websso'): {
diff --git a/keystone/tests/unit/token/test_fernet_provider.py b/keystone/tests/unit/token/test_fernet_provider.py
index cc2a49d0b..fbc4faf05 100644
--- a/keystone/tests/unit/token/test_fernet_provider.py
+++ b/keystone/tests/unit/token/test_fernet_provider.py
@@ -17,6 +17,8 @@ import os
from unittest import mock
import uuid
+import fixtures
+from oslo_log import log
from oslo_utils import timeutils
from keystone import auth
@@ -26,6 +28,7 @@ from keystone.common import utils
import keystone.conf
from keystone import exception
from keystone.federation import constants as federation_constants
+from keystone.models import token_model
from keystone.tests import unit
from keystone.tests.unit import default_fixtures
from keystone.tests.unit import ksfixtures
@@ -51,6 +54,59 @@ class TestFernetTokenProvider(unit.TestCase):
self.provider.validate_token,
token_id)
+ def test_log_warning_when_token_exceeds_max_token_size_default(self):
+ self.logging = self.useFixture(fixtures.FakeLogger(level=log.INFO))
+
+ token = token_model.TokenModel()
+ token.user_id = '0123456789abcdef0123456789abcdef0123456789abcdef'
+ token.project_id = '0123456789abcdef0123456789abcdef0123456789abcdef'
+ token.expires_at = utils.isotime(
+ provider.default_expire_time(), subsecond=True)
+ token.methods = ['password']
+ token.audit_id = provider.random_urlsafe_str()
+ token_id, issued_at = self.provider.generate_id_and_issued_at(token)
+ expected_output = (
+ f'Fernet token created with length of {len(token_id)} characters, '
+ 'which exceeds 255 characters'
+ )
+ self.assertIn(expected_output, self.logging.output)
+
+ def test_log_warning_when_token_exceeds_max_token_size_override(self):
+ self.logging = self.useFixture(fixtures.FakeLogger(level=log.INFO))
+ self.config_fixture.config(max_token_size=250)
+
+ token = token_model.TokenModel()
+ token.user_id = '0123456789abcdef0123456789abcdef0123456789abcdef'
+ token.project_id = '0123456789abcdef0123456789abcdef0123456789abcdef'
+ token.expires_at = utils.isotime(
+ provider.default_expire_time(), subsecond=True)
+ token.methods = ['password']
+ token.audit_id = provider.random_urlsafe_str()
+ token_id, issued_at = self.provider.generate_id_and_issued_at(token)
+ expected_output = (
+ f'Fernet token created with length of {len(token_id)} characters, '
+ 'which exceeds 250 characters'
+ )
+ self.assertIn(expected_output, self.logging.output)
+
+ def test_no_warning_when_token_does_not_exceed_max_token_size(self):
+ self.config_fixture.config(max_token_size=300)
+ self.logging = self.useFixture(fixtures.FakeLogger(level=log.INFO))
+
+ token = token_model.TokenModel()
+ token.user_id = '0123456789abcdef0123456789abcdef0123456789abcdef'
+ token.project_id = '0123456789abcdef0123456789abcdef0123456789abcdef'
+ token.expires_at = utils.isotime(
+ provider.default_expire_time(), subsecond=True)
+ token.methods = ['password']
+ token.audit_id = provider.random_urlsafe_str()
+ token_id, issued_at = self.provider.generate_id_and_issued_at(token)
+ expected_output = (
+ f'Fernet token created with length of {len(token_id)} characters, '
+ 'which exceeds 255 characters'
+ )
+ self.assertNotIn(expected_output, self.logging.output)
+
class TestValidate(unit.TestCase):
def setUp(self):
@@ -261,7 +317,7 @@ class TestTokenFormatter(unit.TestCase):
(user_id, methods, audit_ids, system, domain_id, project_id, trust_id,
federated_group_ids, identity_provider_id, protocol_id,
- access_token_id, app_cred_id, issued_at,
+ access_token_id, app_cred_id, thumbprint, issued_at,
expires_at) = token_formatter.validate_token(token)
self.assertEqual(exp_user_id, user_id)
@@ -296,7 +352,7 @@ class TestTokenFormatter(unit.TestCase):
(user_id, methods, audit_ids, system, domain_id, project_id, trust_id,
federated_group_ids, identity_provider_id, protocol_id,
- access_token_id, app_cred_id, issued_at,
+ access_token_id, app_cred_id, thumbprint, issued_at,
expires_at) = token_formatter.validate_token(token)
self.assertEqual(exp_user_id, user_id)
@@ -417,7 +473,7 @@ class TestPayloads(unit.TestCase):
exp_trust_id=None, exp_federated_group_ids=None,
exp_identity_provider_id=None, exp_protocol_id=None,
exp_access_token_id=None, exp_app_cred_id=None,
- encode_ids=False):
+ encode_ids=False, exp_thumbprint=None):
def _encode_id(value):
if value is not None and str(value) and encode_ids:
return value.encode('utf-8')
@@ -440,12 +496,14 @@ class TestPayloads(unit.TestCase):
_encode_id(exp_identity_provider_id),
exp_protocol_id,
_encode_id(exp_access_token_id),
- _encode_id(exp_app_cred_id))
+ _encode_id(exp_app_cred_id),
+ exp_thumbprint)
(user_id, methods, system, project_id,
domain_id, expires_at, audit_ids,
trust_id, federated_group_ids, identity_provider_id, protocol_id,
- access_token_id, app_cred_id) = payload_class.disassemble(payload)
+ access_token_id, app_cred_id,
+ thumbprint) = payload_class.disassemble(payload)
self.assertEqual(exp_user_id, user_id)
self.assertEqual(exp_methods, methods)
diff --git a/keystone/token/provider.py b/keystone/token/provider.py
index 2ea4d7e08..9d888fdbc 100644
--- a/keystone/token/provider.py
+++ b/keystone/token/provider.py
@@ -154,8 +154,8 @@ class Manager(manager.Manager):
def _validate_token(self, token_id):
(user_id, methods, audit_ids, system, domain_id,
project_id, trust_id, federated_group_ids, identity_provider_id,
- protocol_id, access_token_id, app_cred_id, issued_at,
- expires_at) = self.driver.validate_token(token_id)
+ protocol_id, access_token_id, app_cred_id, thumbprint,
+ issued_at, expires_at) = self.driver.validate_token(token_id)
token = token_model.TokenModel()
token.user_id = user_id
@@ -169,6 +169,7 @@ class Manager(manager.Manager):
token.trust_id = trust_id
token.access_token_id = access_token_id
token.application_credential_id = app_cred_id
+ token.oauth2_thumbprint = thumbprint
token.expires_at = expires_at
if federated_group_ids is not None:
token.is_federated = True
@@ -221,7 +222,7 @@ class Manager(manager.Manager):
def issue_token(self, user_id, method_names, expires_at=None,
system=None, project_id=None, domain_id=None,
auth_context=None, trust_id=None, app_cred_id=None,
- parent_audit_id=None):
+ thumbprint=None, parent_audit_id=None):
# NOTE(lbragstad): Grab a blank token object and use composition to
# build the token according to the authentication and authorization
@@ -235,6 +236,7 @@ class Manager(manager.Manager):
token.trust_id = trust_id
token.application_credential_id = app_cred_id
token.audit_id = random_urlsafe_str()
+ token.oauth2_thumbprint = thumbprint
token.parent_audit_id = parent_audit_id
if auth_context:
@@ -267,6 +269,23 @@ class Manager(manager.Manager):
default_expire_time(), subsecond=True
)
+ # NOTE(d34dh0r53): If this token is being issued with an application
+ # credential and the application credential expires before the token
+ # we need to set the token expiration to be the same as the application
+ # credential. See CVE-2022-2447 for more information.
+ if app_cred_id is not None:
+ app_cred_api = PROVIDERS.application_credential_api
+ app_cred = app_cred_api.get_application_credential(
+ token.application_credential_id)
+ token_time = timeutils.normalize_time(
+ timeutils.parse_isotime(token.expires_at))
+ if (app_cred['expires_at'] is not None) and (
+ token_time > app_cred['expires_at']):
+ token.expires_at = app_cred['expires_at'].isoformat()
+ LOG.debug('Resetting token expiration to the application'
+ ' credential expiration: %s',
+ app_cred['expires_at'].isoformat())
+
token_id, issued_at = self.driver.generate_id_and_issued_at(token)
token.mint(token_id, issued_at)
diff --git a/keystone/token/providers/base.py b/keystone/token/providers/base.py
index 34547c374..9de93ccfa 100644
--- a/keystone/token/providers/base.py
+++ b/keystone/token/providers/base.py
@@ -44,6 +44,7 @@ class Provider(object, metaclass=abc.ABCMeta):
``protocol_id`` unique ID of the protocol used to obtain the token
``access_token_id`` the unique ID of the access_token for OAuth1 tokens
``app_cred_id`` the unique ID of the application credential
+ ``thumbprint`` thumbprint of the certificate for OAuth2.0 mTLS
``issued_at`` a datetime object of when the token was minted
``expires_at`` a datetime object of when the token expires
diff --git a/keystone/token/providers/fernet/core.py b/keystone/token/providers/fernet/core.py
index 7c0fda342..9eef56727 100644
--- a/keystone/token/providers/fernet/core.py
+++ b/keystone/token/providers/fernet/core.py
@@ -58,6 +58,8 @@ class Provider(base.Provider):
return tf.FederatedUnscopedPayload
elif token.application_credential_id:
return tf.ApplicationCredentialScopedPayload
+ elif token.oauth2_thumbprint:
+ return tf.Oauth2CredentialsScopedPayload
elif token.project_scoped:
return tf.ProjectScopedPayload
elif token.domain_scoped:
@@ -83,7 +85,8 @@ class Provider(base.Provider):
identity_provider_id=token.identity_provider_id,
protocol_id=token.protocol_id,
access_token_id=token.access_token_id,
- app_cred_id=token.application_credential_id
+ app_cred_id=token.application_credential_id,
+ thumbprint=token.oauth2_thumbprint,
)
creation_datetime_obj = self.token_formatter.creation_time(token_id)
issued_at = ks_utils.isotime(
diff --git a/keystone/token/providers/jws/core.py b/keystone/token/providers/jws/core.py
index 7d14d313c..5dc70c870 100644
--- a/keystone/token/providers/jws/core.py
+++ b/keystone/token/providers/jws/core.py
@@ -70,7 +70,8 @@ class Provider(base.Provider):
identity_provider_id=token.identity_provider_id,
protocol_id=token.protocol_id,
access_token_id=token.access_token_id,
- app_cred_id=token.application_credential_id
+ app_cred_id=token.application_credential_id,
+ thumbprint=token.oauth2_thumbprint,
)
def validate_token(self, token_id):
@@ -106,7 +107,8 @@ class JWSFormatter(object):
system=None, domain_id=None, project_id=None,
trust_id=None, federated_group_ids=None,
identity_provider_id=None, protocol_id=None,
- access_token_id=None, app_cred_id=None):
+ access_token_id=None, app_cred_id=None,
+ thumbprint=None):
issued_at = utils.isotime(subsecond=True)
issued_at_int = self._convert_time_string_to_int(issued_at)
@@ -128,7 +130,8 @@ class JWSFormatter(object):
'openstack_idp_id': identity_provider_id,
'openstack_protocol_id': protocol_id,
'openstack_access_token_id': access_token_id,
- 'openstack_app_cred_id': app_cred_id
+ 'openstack_app_cred_id': app_cred_id,
+ 'openstack_thumbprint': thumbprint,
}
# NOTE(lbragstad): Calling .items() on a dictionary in python 2 returns
@@ -164,6 +167,7 @@ class JWSFormatter(object):
protocol_id = payload.get('openstack_protocol_id', None)
access_token_id = payload.get('openstack_access_token_id', None)
app_cred_id = payload.get('openstack_app_cred_id', None)
+ thumbprint = payload.get('openstack_thumbprint', None)
issued_at = self._convert_time_int_to_string(issued_at_int)
expires_at = self._convert_time_int_to_string(expires_at_int)
@@ -171,7 +175,7 @@ class JWSFormatter(object):
return (
user_id, methods, audit_ids, system, domain_id, project_id,
trust_id, federated_group_ids, identity_provider_id, protocol_id,
- access_token_id, app_cred_id, issued_at, expires_at
+ access_token_id, app_cred_id, thumbprint, issued_at, expires_at,
)
def _decode_token_from_id(self, token_id):
diff --git a/keystone/token/token_formatters.py b/keystone/token/token_formatters.py
index bb407ab09..b1971ca52 100644
--- a/keystone/token/token_formatters.py
+++ b/keystone/token/token_formatters.py
@@ -137,14 +137,14 @@ class TokenFormatter(object):
methods=None, system=None, domain_id=None,
project_id=None, trust_id=None, federated_group_ids=None,
identity_provider_id=None, protocol_id=None,
- access_token_id=None, app_cred_id=None):
+ access_token_id=None, app_cred_id=None,
+ thumbprint=None):
"""Given a set of payload attributes, generate a Fernet token."""
version = payload_class.version
payload = payload_class.assemble(
user_id, methods, system, project_id, domain_id, expires_at,
audit_ids, trust_id, federated_group_ids, identity_provider_id,
- protocol_id, access_token_id, app_cred_id
- )
+ protocol_id, access_token_id, app_cred_id, thumbprint)
versioned_payload = (version,) + payload
serialized_payload = msgpack.packb(versioned_payload)
@@ -156,10 +156,11 @@ class TokenFormatter(object):
# characters. Even though Keystone isn't storing a Fernet token
# anywhere, we can't say it isn't being stored somewhere else with
# those kind of backend constraints.
- if len(token) > 255:
- LOG.info('Fernet token created with length of %d '
- 'characters, which exceeds 255 characters',
- len(token))
+ if len(token) > CONF.max_token_size:
+ LOG.info(
+ f'Fernet token created with length of {len(token)} '
+ f'characters, which exceeds {CONF.max_token_size} characters',
+ )
return token
@@ -186,7 +187,8 @@ class TokenFormatter(object):
(user_id, methods, system, project_id, domain_id,
expires_at, audit_ids, trust_id, federated_group_ids,
identity_provider_id, protocol_id, access_token_id,
- app_cred_id) = payload_class.disassemble(payload)
+ app_cred_id, thumbprint) = (
+ payload_class.disassemble(payload))
break
else:
# If the token_format is not recognized, raise ValidationError.
@@ -210,8 +212,8 @@ class TokenFormatter(object):
return (user_id, methods, audit_ids, system, domain_id, project_id,
trust_id, federated_group_ids, identity_provider_id,
- protocol_id, access_token_id, app_cred_id, issued_at,
- expires_at)
+ protocol_id, access_token_id, app_cred_id, thumbprint,
+ issued_at, expires_at)
class BasePayload(object):
@@ -222,7 +224,7 @@ class BasePayload(object):
def assemble(cls, user_id, methods, system, project_id, domain_id,
expires_at, audit_ids, trust_id, federated_group_ids,
identity_provider_id, protocol_id, access_token_id,
- app_cred_id):
+ app_cred_id, thumbprint):
"""Assemble the payload of a token.
:param user_id: identifier of the user in the token request
@@ -238,6 +240,7 @@ class BasePayload(object):
:param protocol_id: federated protocol used for authentication
:param access_token_id: ID of the secret in OAuth1 authentication
:param app_cred_id: ID of the application credential in effect
+ :param thumbprint: thumbprint of the certificate in OAuth2 mTLS
:returns: the payload of a token
"""
@@ -376,7 +379,7 @@ class UnscopedPayload(BasePayload):
def assemble(cls, user_id, methods, system, project_id, domain_id,
expires_at, audit_ids, trust_id, federated_group_ids,
identity_provider_id, protocol_id, access_token_id,
- app_cred_id):
+ app_cred_id, thumbprint):
b_user_id = cls.attempt_convert_uuid_hex_to_bytes(user_id)
methods = auth_plugins.convert_method_list_to_integer(methods)
expires_at_int = cls._convert_time_string_to_float(expires_at)
@@ -400,10 +403,11 @@ class UnscopedPayload(BasePayload):
protocol_id = None
access_token_id = None
app_cred_id = None
+ thumbprint = None
return (user_id, methods, system, project_id, domain_id,
expires_at_str, audit_ids, trust_id, federated_group_ids,
identity_provider_id, protocol_id, access_token_id,
- app_cred_id)
+ app_cred_id, thumbprint)
class DomainScopedPayload(BasePayload):
@@ -413,7 +417,7 @@ class DomainScopedPayload(BasePayload):
def assemble(cls, user_id, methods, system, project_id, domain_id,
expires_at, audit_ids, trust_id, federated_group_ids,
identity_provider_id, protocol_id, access_token_id,
- app_cred_id):
+ app_cred_id, thumbprint):
b_user_id = cls.attempt_convert_uuid_hex_to_bytes(user_id)
methods = auth_plugins.convert_method_list_to_integer(methods)
try:
@@ -454,10 +458,11 @@ class DomainScopedPayload(BasePayload):
protocol_id = None
access_token_id = None
app_cred_id = None
+ thumbprint = None
return (user_id, methods, system, project_id, domain_id,
expires_at_str, audit_ids, trust_id, federated_group_ids,
identity_provider_id, protocol_id, access_token_id,
- app_cred_id)
+ app_cred_id, thumbprint)
class ProjectScopedPayload(BasePayload):
@@ -467,7 +472,7 @@ class ProjectScopedPayload(BasePayload):
def assemble(cls, user_id, methods, system, project_id, domain_id,
expires_at, audit_ids, trust_id, federated_group_ids,
identity_provider_id, protocol_id, access_token_id,
- app_cred_id):
+ app_cred_id, thumbprint):
b_user_id = cls.attempt_convert_uuid_hex_to_bytes(user_id)
methods = auth_plugins.convert_method_list_to_integer(methods)
b_project_id = cls.attempt_convert_uuid_hex_to_bytes(project_id)
@@ -493,10 +498,11 @@ class ProjectScopedPayload(BasePayload):
protocol_id = None
access_token_id = None
app_cred_id = None
+ thumbprint = None
return (user_id, methods, system, project_id, domain_id,
expires_at_str, audit_ids, trust_id, federated_group_ids,
identity_provider_id, protocol_id, access_token_id,
- app_cred_id)
+ app_cred_id, thumbprint)
class TrustScopedPayload(BasePayload):
@@ -506,7 +512,7 @@ class TrustScopedPayload(BasePayload):
def assemble(cls, user_id, methods, system, project_id, domain_id,
expires_at, audit_ids, trust_id, federated_group_ids,
identity_provider_id, protocol_id, access_token_id,
- app_cred_id):
+ app_cred_id, thumbprint):
b_user_id = cls.attempt_convert_uuid_hex_to_bytes(user_id)
methods = auth_plugins.convert_method_list_to_integer(methods)
b_project_id = cls.attempt_convert_uuid_hex_to_bytes(project_id)
@@ -535,10 +541,11 @@ class TrustScopedPayload(BasePayload):
protocol_id = None
access_token_id = None
app_cred_id = None
+ thumbprint = None
return (user_id, methods, system, project_id, domain_id,
expires_at_str, audit_ids, trust_id, federated_group_ids,
identity_provider_id, protocol_id, access_token_id,
- app_cred_id)
+ app_cred_id, thumbprint)
class FederatedUnscopedPayload(BasePayload):
@@ -558,7 +565,7 @@ class FederatedUnscopedPayload(BasePayload):
def assemble(cls, user_id, methods, system, project_id, domain_id,
expires_at, audit_ids, trust_id, federated_group_ids,
identity_provider_id, protocol_id, access_token_id,
- app_cred_id):
+ app_cred_id, thumbprint):
b_user_id = cls.attempt_convert_uuid_hex_to_bytes(user_id)
methods = auth_plugins.convert_method_list_to_integer(methods)
b_group_ids = list(map(cls.pack_group_id, federated_group_ids))
@@ -589,9 +596,10 @@ class FederatedUnscopedPayload(BasePayload):
trust_id = None
access_token_id = None
app_cred_id = None
+ thumbprint = None
return (user_id, methods, system, project_id, domain_id,
expires_at_str, audit_ids, trust_id, group_ids, idp_id,
- protocol_id, access_token_id, app_cred_id)
+ protocol_id, access_token_id, app_cred_id, thumbprint)
class FederatedScopedPayload(FederatedUnscopedPayload):
@@ -601,7 +609,7 @@ class FederatedScopedPayload(FederatedUnscopedPayload):
def assemble(cls, user_id, methods, system, project_id, domain_id,
expires_at, audit_ids, trust_id, federated_group_ids,
identity_provider_id, protocol_id, access_token_id,
- app_cred_id):
+ app_cred_id, thumbprint):
b_user_id = cls.attempt_convert_uuid_hex_to_bytes(user_id)
methods = auth_plugins.convert_method_list_to_integer(methods)
b_scope_id = cls.attempt_convert_uuid_hex_to_bytes(
@@ -640,9 +648,10 @@ class FederatedScopedPayload(FederatedUnscopedPayload):
trust_id = None
access_token_id = None
app_cred_id = None
+ thumbprint = None
return (user_id, methods, system, project_id, domain_id,
expires_at_str, audit_ids, trust_id, group_ids, idp_id,
- protocol_id, access_token_id, app_cred_id)
+ protocol_id, access_token_id, app_cred_id, thumbprint)
class FederatedProjectScopedPayload(FederatedScopedPayload):
@@ -660,7 +669,7 @@ class OauthScopedPayload(BasePayload):
def assemble(cls, user_id, methods, system, project_id, domain_id,
expires_at, audit_ids, trust_id, federated_group_ids,
identity_provider_id, protocol_id, access_token_id,
- app_cred_id):
+ app_cred_id, thumbprint):
b_user_id = cls.attempt_convert_uuid_hex_to_bytes(user_id)
methods = auth_plugins.convert_method_list_to_integer(methods)
b_project_id = cls.attempt_convert_uuid_hex_to_bytes(project_id)
@@ -691,11 +700,12 @@ class OauthScopedPayload(BasePayload):
identity_provider_id = None
protocol_id = None
app_cred_id = None
+ thumbprint = None
return (user_id, methods, system, project_id, domain_id,
expires_at_str, audit_ids, trust_id, federated_group_ids,
identity_provider_id, protocol_id, access_token_id,
- app_cred_id)
+ app_cred_id, thumbprint)
class SystemScopedPayload(BasePayload):
@@ -705,7 +715,7 @@ class SystemScopedPayload(BasePayload):
def assemble(cls, user_id, methods, system, project_id, domain_id,
expires_at, audit_ids, trust_id, federated_group_ids,
identity_provider_id, protocol_id, access_token_id,
- app_cred_id):
+ app_cred_id, thumbprint):
b_user_id = cls.attempt_convert_uuid_hex_to_bytes(user_id)
methods = auth_plugins.convert_method_list_to_integer(methods)
expires_at_int = cls._convert_time_string_to_float(expires_at)
@@ -729,10 +739,11 @@ class SystemScopedPayload(BasePayload):
protocol_id = None
access_token_id = None
app_cred_id = None
+ thumbprint = None
return (user_id, methods, system, project_id, domain_id,
expires_at_str, audit_ids, trust_id, federated_group_ids,
identity_provider_id, protocol_id, access_token_id,
- app_cred_id)
+ app_cred_id, thumbprint)
class ApplicationCredentialScopedPayload(BasePayload):
@@ -742,7 +753,7 @@ class ApplicationCredentialScopedPayload(BasePayload):
def assemble(cls, user_id, methods, system, project_id, domain_id,
expires_at, audit_ids, trust_id, federated_group_ids,
identity_provider_id, protocol_id, access_token_id,
- app_cred_id):
+ app_cred_id, thumbprint):
b_user_id = cls.attempt_convert_uuid_hex_to_bytes(user_id)
methods = auth_plugins.convert_method_list_to_integer(methods)
b_project_id = cls.attempt_convert_uuid_hex_to_bytes(project_id)
@@ -771,10 +782,55 @@ class ApplicationCredentialScopedPayload(BasePayload):
access_token_id = None
(is_stored_as_bytes, app_cred_id) = payload[5]
app_cred_id = cls._convert_or_decode(is_stored_as_bytes, app_cred_id)
+ thumbprint = None
+ return (user_id, methods, system, project_id, domain_id,
+ expires_at_str, audit_ids, trust_id, federated_group_ids,
+ identity_provider_id, protocol_id, access_token_id,
+ app_cred_id, thumbprint)
+
+
+class Oauth2CredentialsScopedPayload(BasePayload):
+ version = 10
+
+ @classmethod
+ def assemble(cls, user_id, methods, system, project_id, domain_id,
+ expires_at, audit_ids, trust_id, federated_group_ids,
+ identity_provider_id, protocol_id, access_token_id,
+ app_cred_id, thumbprint):
+ b_user_id = cls.attempt_convert_uuid_hex_to_bytes(user_id)
+ methods = auth_plugins.convert_method_list_to_integer(methods)
+ b_project_id = cls.attempt_convert_uuid_hex_to_bytes(project_id)
+ b_domain_id = cls.attempt_convert_uuid_hex_to_bytes(domain_id)
+ expires_at_int = cls._convert_time_string_to_float(expires_at)
+ b_audit_ids = list(map(cls.random_urlsafe_str_to_bytes, audit_ids))
+ b_thumbprint = (False, thumbprint)
+ return (b_user_id, methods, b_project_id, b_domain_id, expires_at_int,
+ b_audit_ids, b_thumbprint)
+
+ @classmethod
+ def disassemble(cls, payload):
+ (is_stored_as_bytes, user_id) = payload[0]
+ user_id = cls._convert_or_decode(is_stored_as_bytes, user_id)
+ methods = auth_plugins.convert_integer_to_method_list(payload[1])
+ (is_stored_as_bytes, project_id) = payload[2]
+ project_id = cls._convert_or_decode(is_stored_as_bytes, project_id)
+ (is_stored_as_bytes, domain_id) = payload[3]
+ domain_id = cls._convert_or_decode(is_stored_as_bytes, domain_id)
+ expires_at_str = cls._convert_float_to_time_string(payload[4])
+ audit_ids = list(map(cls.base64_encode, payload[5]))
+ (is_stored_as_bytes, thumbprint) = payload[6]
+ thumbprint = cls._convert_or_decode(is_stored_as_bytes, thumbprint)
+ system = None
+ trust_id = None
+ federated_group_ids = None
+ identity_provider_id = None
+ protocol_id = None
+ access_token_id = None
+ app_cred_id = None
return (user_id, methods, system, project_id, domain_id,
expires_at_str, audit_ids, trust_id, federated_group_ids,
identity_provider_id, protocol_id, access_token_id,
- app_cred_id)
+ app_cred_id, thumbprint)
_PAYLOAD_CLASSES = [
@@ -788,4 +844,5 @@ _PAYLOAD_CLASSES = [
OauthScopedPayload,
SystemScopedPayload,
ApplicationCredentialScopedPayload,
+ Oauth2CredentialsScopedPayload,
]
diff --git a/lower-constraints.txt b/lower-constraints.txt
deleted file mode 100644
index 71f497fbd..000000000
--- a/lower-constraints.txt
+++ /dev/null
@@ -1,68 +0,0 @@
-amqp==5.0.0
-Babel==2.3.4
-bashate==0.5.1
-bcrypt==3.1.3
-coverage==4.0
-cryptography==2.7
-docutils==0.14
-dogpile.cache==1.0.2
-fixtures==3.0.0
-flake8-docstrings==0.2.1.post1
-flake8==2.6.0
-Flask===1.0.2
-Flask-RESTful===0.3.5
-freezegun==0.3.6
-hacking==1.1.0
-iso8601==0.1.12
-jsonschema==3.2.0
-keystoneauth1==3.4.0
-keystonemiddleware==7.0.0
-ldappool===2.3.1
-lxml==4.5.0
-mock==2.0.0
-msgpack==0.5.0
-oauthlib==0.6.2
-os-api-ref==1.4.0
-oslo.cache==1.26.0
-oslo.concurrency==3.26.0
-oslo.config==6.8.0
-oslo.context==2.22.0
-oslo.db==6.0.0
-oslo.i18n==3.15.3
-oslo.log==3.44.0
-oslo.messaging==5.29.0
-oslo.middleware==3.31.0
-oslo.policy==3.10.0
-oslo.serialization==2.18.0
-oslo.upgradecheck==1.3.0
-oslo.utils==3.33.0
-oslotest==3.2.0
-osprofiler==1.4.0
-passlib==1.7.0
-pbr==2.0.0
-pep257==0.7.0
-pika==0.10.0
-pycadf==1.1.0
-pycodestyle==2.0.0
-python-ldap===3.0.0
-pymongo===3.0.2
-pysaml2==5.0.0
-PyJWT==1.6.1
-PyMySQL==0.8.0
-python-keystoneclient==3.8.0
-python-memcached===1.56
-pytz==2013.6
-requests==2.14.2
-scrypt==0.8.0
-six==1.10.0
-sqlalchemy-migrate==0.13.0
-SQLAlchemy==1.3.0
-stestr==1.0.0
-stevedore==1.20.0
-tempest==17.1.0
-testtools==2.2.0
-urllib3==1.22
-vine==1.3.0
-WebOb==1.7.1
-WebTest==2.0.27
-Werkzeug==0.14.1
diff --git a/releasenotes/notes/bp-oauth2-client-credentials-ext-c8933f00a7b45be8.yaml b/releasenotes/notes/bp-oauth2-client-credentials-ext-c8933f00a7b45be8.yaml
new file mode 100644
index 000000000..d475b6743
--- /dev/null
+++ b/releasenotes/notes/bp-oauth2-client-credentials-ext-c8933f00a7b45be8.yaml
@@ -0,0 +1,9 @@
+---
+features:
+ - |
+ [`blueprint oauth2-client-credentials-ext <https://blueprints.launchpad.net/keystone/+spec/oauth2-client-credentials-ext>`_]
+ Users can now use the OAuth2.0 Access Token API to get an access token
+ from the keystone identity server with application credentials. Then the
+ users can use the access token to access the OpenStack APIs that use the
+ keystone middleware to support OAuth2.0 client credentials authentication
+ through the keystone identity server.
diff --git a/releasenotes/notes/bp-support-oauth2-mtls-8552892a8e0c72d2.yaml b/releasenotes/notes/bp-support-oauth2-mtls-8552892a8e0c72d2.yaml
new file mode 100644
index 000000000..19b6ccb11
--- /dev/null
+++ b/releasenotes/notes/bp-support-oauth2-mtls-8552892a8e0c72d2.yaml
@@ -0,0 +1,13 @@
+---
+features:
+ - |
+ [`blueprint support-oauth2-mtls <https://blueprints.launchpad.net/keystone/+spec/support-oauth2-mtls>`_]
+ Provide the option for users to prove possession of an OAuth 2.0 access
+ token based on `RFC8705 OAuth 2.0 Mutual-TLS Client Authentication and
+ Certificate-Bound Access Tokens`. Users can now use the OAuth 2.0 Access
+ Token API to get an OAuth 2.0 certificate-bound access token from the
+ keystone identity server with OAuth 2.0 credentials and Mutual-TLS
+ certificates. Then users can use the OAuth 2.0 certificate-bound access
+ token and the Mutual-TLS certificates to access the OpenStack APIs that use
+ the keystone middleware to support OAuth 2.0 Mutual-TLS client
+ authentication.
diff --git a/releasenotes/notes/bug-1926483-a77ab887e0e7f5c9.yaml b/releasenotes/notes/bug-1926483-a77ab887e0e7f5c9.yaml
new file mode 100644
index 000000000..040811b79
--- /dev/null
+++ b/releasenotes/notes/bug-1926483-a77ab887e0e7f5c9.yaml
@@ -0,0 +1,7 @@
+---
+fixes:
+ - |
+ [`bug 1926483 <https://bugs.launchpad.net/keystone/+bug/1926483>`_]
+ Keystone will only log warnings about token length for Fernet tokens when
+ the token length exceeds the value of `keystone.conf [DEFAULT]
+ max_token_size`.
diff --git a/releasenotes/notes/drop-python-3-6-and-3-7-dc90b86cedced92b.yaml b/releasenotes/notes/drop-python-3-6-and-3-7-dc90b86cedced92b.yaml
new file mode 100644
index 000000000..db420d739
--- /dev/null
+++ b/releasenotes/notes/drop-python-3-6-and-3-7-dc90b86cedced92b.yaml
@@ -0,0 +1,5 @@
+---
+upgrade:
+ - |
+ Python 3.6 & 3.7 support has been dropped. The minimum version of Python now
+ supported is Python 3.8.
diff --git a/releasenotes/notes/max-password-length-truncation-and-warning-bd69090315ec18a7.yaml b/releasenotes/notes/max-password-length-truncation-and-warning-bd69090315ec18a7.yaml
new file mode 100644
index 000000000..003dc47df
--- /dev/null
+++ b/releasenotes/notes/max-password-length-truncation-and-warning-bd69090315ec18a7.yaml
@@ -0,0 +1,9 @@
+---
+security:
+ - |
+ Passwords will now be automatically truncated if the max_password_length is
+ greater than the allowed length for the selected password hashing
+ algorithm. Currently only bcrypt has a fixed allowed length defined, which is
+ 54 characters. A warning will be generated in the log if a password is
+ truncated. This will not affect existing passwords; however, only the first
+ 54 characters of existing bcrypt passwords will be validated.
diff --git a/releasenotes/notes/switch-to-alembic-1fa5248f0ce824ae.yaml b/releasenotes/notes/switch-to-alembic-1fa5248f0ce824ae.yaml
new file mode 100644
index 000000000..833837dcb
--- /dev/null
+++ b/releasenotes/notes/switch-to-alembic-1fa5248f0ce824ae.yaml
@@ -0,0 +1,23 @@
+---
+upgrade:
+ - |
+ The database migration engine has changed from `sqlalchemy-migrate`__ to
+ `alembic`__. For most deployments, this should have minimal to no impact
+ and the switch should be mostly transparent. The main user-facing impact is
+ the change in schema versioning. While sqlalchemy-migrate used a linear,
+ integer-based versioning scheme, which required placeholder migrations to
+ allow for potential migration backports, alembic uses a distributed version
+ control-like schema where a migration's ancestor is encoded in the file and
+ branches are possible. The alembic migration files therefore use an
+ arbitrary UUID-like naming scheme and the ``keystone-manage db_version``
+ command returns such a version.
+
+ When the ``keystone-manage db_sync`` command is run without options or
+ with the ``--expand`` or ``--contract`` options, all remaining
+ sqlalchemy-migrate-based migrations will be automatically applied.
+
+ Data migrations are now included in the expand phase and the ``--migrate``
+ option is now a no-op. It may be removed in a future release.
+
+ .. __: https://sqlalchemy-migrate.readthedocs.io/en/latest/
+ .. __: https://alembic.sqlalchemy.org/en/latest/
diff --git a/releasenotes/notes/token_expiration_to_match_application_credential-56d058355a9f240d.yaml b/releasenotes/notes/token_expiration_to_match_application_credential-56d058355a9f240d.yaml
new file mode 100644
index 000000000..d37073a9d
--- /dev/null
+++ b/releasenotes/notes/token_expiration_to_match_application_credential-56d058355a9f240d.yaml
@@ -0,0 +1,10 @@
+---
+security:
+ - |
+ [`bug 1992183 <https://bugs.launchpad.net/keystone/+bug/1992183>`_]
+ [`CVE-2022-2447 <http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-2447>`_]
+ Tokens issued with application credentials will now have their expiration
+ validated against that of the application credential. If the application
+ credential expires before the token, the token's expiration will be set to
+ the same expiration as the application credential. Otherwise the token
+ will use the configured value.
diff --git a/releasenotes/source/2023.1.rst b/releasenotes/source/2023.1.rst
new file mode 100644
index 000000000..d1238479b
--- /dev/null
+++ b/releasenotes/source/2023.1.rst
@@ -0,0 +1,6 @@
+===========================
+2023.1 Series Release Notes
+===========================
+
+.. release-notes::
+ :branch: stable/2023.1
diff --git a/releasenotes/source/conf.py b/releasenotes/source/conf.py
index 0b5d76c1b..c16798107 100644
--- a/releasenotes/source/conf.py
+++ b/releasenotes/source/conf.py
@@ -54,8 +54,8 @@ source_suffix = '.rst'
master_doc = 'index'
# General information about the project.
-project = u'Keystone Release Notes'
-copyright = u'2015, Keystone Developers'
+project = 'Keystone Release Notes'
+copyright = '2015, Keystone Developers'
# Release notes are version independent
@@ -197,8 +197,8 @@ htmlhelp_basename = 'KeystoneReleaseNotesdoc'
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'KeystoneReleaseNotes.tex',
- u'Keystone Release Notes Documentation',
- u'Keystone Developers', 'manual'),
+ 'Keystone Release Notes Documentation',
+ 'Keystone Developers', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
@@ -227,8 +227,8 @@ latex_documents = [
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
- ('index', 'keystonereleasenotes', u'Keystone Release Notes Documentation',
- [u'Keystone Developers'], 1)
+ ('index', 'keystonereleasenotes', 'Keystone Release Notes Documentation',
+ ['Keystone Developers'], 1)
]
# If true, show URL addresses after external links.
@@ -241,8 +241,8 @@ man_pages = [
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
- ('index', 'KeystoneReleaseNotes', u'Keystone Release Notes Documentation',
- u'Keystone Developers', 'KeystoneReleaseNotes',
+ ('index', 'KeystoneReleaseNotes', 'Keystone Release Notes Documentation',
+ 'Keystone Developers', 'KeystoneReleaseNotes',
'Identity, Authentication and Access Management for OpenStack.',
'Miscellaneous'),
]
diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst
index 620554687..5f3317b2d 100644
--- a/releasenotes/source/index.rst
+++ b/releasenotes/source/index.rst
@@ -19,6 +19,8 @@
:maxdepth: 1
unreleased
+ 2023.1
+ zed
yoga
xena
wallaby
diff --git a/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po b/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po
index 600d9e0b0..794225331 100644
--- a/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po
+++ b/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po
@@ -1,15 +1,16 @@
# Andi Chandler <andi@gowling.com>, 2017. #zanata
# Andi Chandler <andi@gowling.com>, 2018. #zanata
# Andi Chandler <andi@gowling.com>, 2020. #zanata
+# Andi Chandler <andi@gowling.com>, 2022. #zanata
msgid ""
msgstr ""
"Project-Id-Version: Keystone Release Notes\n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2021-01-08 19:54+0000\n"
+"POT-Creation-Date: 2022-09-07 16:12+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"PO-Revision-Date: 2020-12-19 01:35+0000\n"
+"PO-Revision-Date: 2022-09-05 10:32+0000\n"
"Last-Translator: Andi Chandler <andi@gowling.com>\n"
"Language-Team: English (United Kingdom)\n"
"Language: en_GB\n"
@@ -81,6 +82,9 @@ msgstr "13.0.2"
msgid "13.0.3"
msgstr "13.0.3"
+msgid "13.0.4-9"
+msgstr "13.0.4-9"
+
msgid "14.0.0"
msgstr "14.0.0"
@@ -93,8 +97,8 @@ msgstr "14.1.0"
msgid "14.2.0"
msgstr "14.2.0"
-msgid "14.2.0-4"
-msgstr "14.2.0-4"
+msgid "14.2.0-7"
+msgstr "14.2.0-7"
msgid "15.0.0"
msgstr "15.0.0"
@@ -102,17 +106,50 @@ msgstr "15.0.0"
msgid "15.0.1"
msgstr "15.0.1"
+msgid "15.0.1-9"
+msgstr "15.0.1-9"
+
msgid "16.0.0"
msgstr "16.0.0"
msgid "16.0.1"
msgstr "16.0.1"
+msgid "16.0.2"
+msgstr "16.0.2"
+
+msgid "16.0.2-6"
+msgstr "16.0.2-6"
+
msgid "17.0.0"
msgstr "17.0.0"
-msgid "17.0.0-6"
-msgstr "17.0.0-6"
+msgid "17.0.1"
+msgstr "17.0.1"
+
+msgid "17.0.1-7"
+msgstr "17.0.1-7"
+
+msgid "18.0.0"
+msgstr "18.0.0"
+
+msgid "18.1.0"
+msgstr "18.1.0"
+
+msgid "18.1.0-3"
+msgstr "18.1.0-3"
+
+msgid "19.0.0"
+msgstr "19.0.0"
+
+msgid "19.0.1"
+msgstr "19.0.1"
+
+msgid "20.0.0"
+msgstr "20.0.0"
+
+msgid "21.0.0"
+msgstr "21.0.0"
msgid "8.0.1"
msgstr "8.0.1"
@@ -297,6 +334,15 @@ msgstr ""
"Certain variables in ``keystone.conf`` now have options, which determine if "
"the user's setting is valid."
+msgid ""
+"Change the min value of pool_retry_max to 1. Setting this value to 0 caused "
+"the pool to fail before connecting to ldap, always raising "
+"MaxConnectionReachedError."
+msgstr ""
+"Change the min value of pool_retry_max to 1. Setting this value to 0 caused "
+"the pool to fail before connecting to ldap, always raising "
+"MaxConnectionReachedError."
+
msgid "Configuring per-Identity Provider WebSSO is now supported."
msgstr "Configuring per-Identity Provider WebSSO is now supported."
@@ -306,6 +352,13 @@ msgstr "Critical Issues"
msgid "Current Series Release Notes"
msgstr "Current Series Release Notes"
+msgid ""
+"Data migrations are now included in the expand phase and the ``--migrate`` "
+"option is now a no-op. It may be removed in a future release."
+msgstr ""
+"Data migrations are now included in the expand phase and the ``--migrate`` "
+"option is now a no-op. It may be removed in a future release."
+
msgid "Deprecation Notes"
msgstr "Deprecation Notes"
@@ -463,6 +516,24 @@ msgstr ""
"this option is set back to `False`."
msgid ""
+"If you are affected by this bug, a fix in the keystone database will be "
+"needed so we recommend to dump the users' tables before doing this process:"
+msgstr ""
+"If you are affected by this bug, a fix in the keystone database will be "
+"needed so we recommend to dump the users' tables before doing this process:"
+
+msgid ""
+"If you are affected by this bug, you must remove stale role assignments "
+"manually. The following is an example SQL statement you can use to fix the "
+"issue, but you should verify it's applicability to your deployment's SQL "
+"implementation and version."
+msgstr ""
+"If you are affected by this bug, you must remove stale role assignments "
+"manually. The following is an example SQL statement you can use to fix the "
+"issue, but you should verify it's applicability to your deployment's SQL "
+"implementation and version."
+
+msgid ""
"In ``keystone-paste.ini``, using ``paste.filter_factory`` is deprecated in "
"favor of the \"use\" directive, specifying an entrypoint."
msgstr ""
@@ -681,6 +752,9 @@ msgstr ""
msgid "Queens Series Release Notes"
msgstr "Queens Series Release Notes"
+msgid "Rocky Series Release Notes"
+msgstr "Rocky Series Release Notes"
+
msgid ""
"Routes and SQL backends for the contrib extensions have been removed, they "
"have been incorporated into keystone and are no longer optional. This "
@@ -772,6 +846,9 @@ msgstr ""
"``validate_token(self, token_ref)``. If using a custom token provider, "
"update the custom provider accordingly."
+msgid "Stein Series Release Notes"
+msgstr "Stein Series Release Notes"
+
msgid ""
"Support for writing to LDAP has been removed. See ``Other Notes`` for more "
"details."
@@ -1398,6 +1475,9 @@ msgstr ""
msgid "Tokens can now be cached when issued."
msgstr "Tokens can now be cached when issued."
+msgid "Train Series Release Notes"
+msgstr "Train Series Release Notes"
+
msgid ""
"UUID token provider ``[token] provider=uuid`` has been deprecated in favor "
"of Fernet tokens ``[token] provider=fernet``. With Fernet tokens becoming "
@@ -1433,6 +1513,15 @@ msgstr ""
"Using the full path to the driver class is deprecated in favour of using the "
"entrypoint. In the Mitaka release, the entrypoint must be used."
+msgid "Ussuri Series Release Notes"
+msgstr "Ussuri Series Release Notes"
+
+msgid "Victoria Series Release Notes"
+msgstr "Victoria Series Release Notes"
+
+msgid "Wallaby Series Release Notes"
+msgstr "Wallaby Series Release Notes"
+
msgid ""
"We have added the ``password_expires_at`` attribute to the user response "
"object."
@@ -1454,6 +1543,12 @@ msgstr ""
"Write support for the LDAP has been removed in favour of read-only support. "
"The following operations are no longer supported for LDAP:"
+msgid "Xena Series Release Notes"
+msgstr "Xena Series Release Notes"
+
+msgid "Yoga Series Release Notes"
+msgstr "Yoga Series Release Notes"
+
msgid ""
"[`Bug 1645487 <https://bugs.launchpad.net/keystone/+bug/1645487>`_] Added a "
"new PCI-DSS feature that will require users to immediately change their "
@@ -1792,6 +1887,13 @@ msgstr "lt - password expires before the timestamp"
msgid "lte - password expires at or before timestamp"
msgstr "lte - password expires at or before timestamp"
+msgid ""
+"mysqldump -h <mysql host> -p -P <mysql port> -u keystone keystone "
+"federated_user local_user user > user_tables.sql"
+msgstr ""
+"mysqldump -h <mysql host> -p -P <mysql port> -u keystone keystone "
+"federated_user local_user user > user_tables.sql"
+
msgid "neq - password expires not at the timestamp"
msgstr "neq - password expires not at the timestamp"
diff --git a/releasenotes/source/locale/fr/LC_MESSAGES/releasenotes.po b/releasenotes/source/locale/fr/LC_MESSAGES/releasenotes.po
deleted file mode 100644
index ab596b94f..000000000
--- a/releasenotes/source/locale/fr/LC_MESSAGES/releasenotes.po
+++ /dev/null
@@ -1,120 +0,0 @@
-# Gérald LONLAS <g.lonlas@gmail.com>, 2016. #zanata
-msgid ""
-msgstr ""
-"Project-Id-Version: Keystone Release Notes\n"
-"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2021-03-03 21:34+0000\n"
-"MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=UTF-8\n"
-"Content-Transfer-Encoding: 8bit\n"
-"PO-Revision-Date: 2016-10-22 05:03+0000\n"
-"Last-Translator: Gérald LONLAS <g.lonlas@gmail.com>\n"
-"Language-Team: French\n"
-"Language: fr\n"
-"X-Generator: Zanata 4.3.3\n"
-"Plural-Forms: nplurals=2; plural=(n > 1)\n"
-
-msgid "10.0.0"
-msgstr "10.0.0"
-
-msgid "8.0.1"
-msgstr "8.0.1"
-
-msgid "8.1.0"
-msgstr "8.1.0"
-
-msgid "9.0.0"
-msgstr "9.0.0"
-
-msgid "9.2.0"
-msgstr "9.2.0"
-
-msgid "Bug Fixes"
-msgstr "Corrections de bugs"
-
-msgid "Critical Issues"
-msgstr "Erreurs critiques"
-
-msgid "Current Series Release Notes"
-msgstr "Note de la release actuelle"
-
-msgid "Deprecation Notes"
-msgstr "Notes dépréciées "
-
-msgid "Keystone Release Notes"
-msgstr "Note de release de Keystone"
-
-msgid "Liberty Series Release Notes"
-msgstr "Note de release pour Liberty"
-
-msgid "Mitaka Series Release Notes"
-msgstr "Note de release pour Mitaka"
-
-msgid "New Features"
-msgstr "Nouvelles fonctionnalités"
-
-msgid "Newton Series Release Notes"
-msgstr "Note de release pour Newton"
-
-msgid "Other Notes"
-msgstr "Autres notes"
-
-msgid "Security Issues"
-msgstr "Problèmes de sécurités"
-
-msgid "Upgrade Notes"
-msgstr "Notes de mises à jours"
-
-msgid "``add user to group``"
-msgstr "``add user to group``"
-
-msgid "``create group``"
-msgstr "``create group``"
-
-msgid "``create user``"
-msgstr "``create user``"
-
-msgid "``delete group``"
-msgstr "``delete group``"
-
-msgid "``delete user``"
-msgstr "``delete user``"
-
-msgid "``keystone/common/cache/backends/memcache_pool``"
-msgstr "``keystone/common/cache/backends/memcache_pool``"
-
-msgid "``keystone/common/cache/backends/mongo``"
-msgstr "``keystone/common/cache/backends/mongo``"
-
-msgid "``keystone/common/cache/backends/noop``"
-msgstr "``keystone/common/cache/backends/noop``"
-
-msgid "``keystone/contrib/admin_crud``"
-msgstr "``keystone/contrib/admin_crud``"
-
-msgid "``keystone/contrib/endpoint_filter``"
-msgstr "``keystone/contrib/endpoint_filter``"
-
-msgid "``keystone/contrib/federation``"
-msgstr "``keystone/contrib/federation``"
-
-msgid "``keystone/contrib/oauth1``"
-msgstr "``keystone/contrib/oauth1``"
-
-msgid "``keystone/contrib/revoke``"
-msgstr "``keystone/contrib/revoke``"
-
-msgid "``keystone/contrib/simple_cert``"
-msgstr "``keystone/contrib/simple_cert``"
-
-msgid "``keystone/contrib/user_crud``"
-msgstr "``keystone/contrib/user_crud``"
-
-msgid "``remove user from group``"
-msgstr "``remove user from group``"
-
-msgid "``update group``"
-msgstr "``update group``"
-
-msgid "``update user``"
-msgstr "``update user``"
diff --git a/releasenotes/source/locale/ko_KR/LC_MESSAGES/releasenotes.po b/releasenotes/source/locale/ko_KR/LC_MESSAGES/releasenotes.po
deleted file mode 100644
index 8c91511ea..000000000
--- a/releasenotes/source/locale/ko_KR/LC_MESSAGES/releasenotes.po
+++ /dev/null
@@ -1,202 +0,0 @@
-# Sungjin Kang <gang.sungjin@gmail.com>, 2017. #zanata
-# Ian Y. Choi <ianyrchoi@gmail.com>, 2018. #zanata
-msgid ""
-msgstr ""
-"Project-Id-Version: Keystone Release Notes\n"
-"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2021-03-03 21:34+0000\n"
-"MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=UTF-8\n"
-"Content-Transfer-Encoding: 8bit\n"
-"PO-Revision-Date: 2018-02-14 07:39+0000\n"
-"Last-Translator: Ian Y. Choi <ianyrchoi@gmail.com>\n"
-"Language-Team: Korean (South Korea)\n"
-"Language: ko_KR\n"
-"X-Generator: Zanata 4.3.3\n"
-"Plural-Forms: nplurals=1; plural=0\n"
-
-msgid "'/' and ',' are not allowed to be in a tag"
-msgstr "'/' 및 ',' 는 태그 내에서 허용하지 않습니다"
-
-msgid ""
-"**Experimental** - Domain specific configuration options can be stored in "
-"SQL instead of configuration files, using the new REST APIs."
-msgstr ""
-"**실험적 기능** - 도메인에 특화된 구성 옵션을 구성 파일 대신 SQL에 새로운 "
-"REST API를 사용하여 저장 가능합니다."
-
-msgid ""
-"**Experimental** - Keystone now supports tokenless authorization with X.509 "
-"SSL client certificate."
-msgstr ""
-"**실험적 기능** - Keystone이 이제 X.509 SSL 클라이언트 인증서를 사용한 토큰"
-"이 없는 인증 방식 (tokenless authorization)을 지원합니다."
-
-msgid "10.0.0"
-msgstr "10.0.0"
-
-msgid "10.0.1"
-msgstr "10.0.1"
-
-msgid "10.0.3"
-msgstr "10.0.3"
-
-msgid "11.0.0"
-msgstr "11.0.0"
-
-msgid "11.0.1"
-msgstr "11.0.1"
-
-msgid "11.0.3"
-msgstr "11.0.3"
-
-msgid "12.0.0"
-msgstr "12.0.0"
-
-msgid "8.0.1"
-msgstr "8.0.1"
-
-msgid "8.1.0"
-msgstr "8.1.0"
-
-msgid "9.0.0"
-msgstr "9.0.0"
-
-msgid "9.2.0"
-msgstr "9.2.0"
-
-msgid "Bug Fixes"
-msgstr "버그 수정"
-
-msgid "Critical Issues"
-msgstr "치명적인 이슈"
-
-msgid "Current Series Release Notes"
-msgstr "현재 시리즈 릴리즈 노트"
-
-msgid "Deprecation Notes"
-msgstr "지원 종료된 기능 노트"
-
-msgid ""
-"For additional details see: `event notifications <See https://docs.openstack."
-"org/developer/keystone/event_notifications.html>`_"
-msgstr ""
-"추가적으로 자세한 사항을 확인하려면 : `event notifications <See https://docs."
-"openstack.org/developer/keystone/event_notifications.html>`_"
-
-msgid "Keystone Release Notes"
-msgstr "Keystone 릴리즈 노트"
-
-msgid "Liberty Series Release Notes"
-msgstr "Liberty 시리즈 릴리즈 노트"
-
-msgid "Mitaka Series Release Notes"
-msgstr "Mitaka 시리즈 릴리즈 노트"
-
-msgid "New Features"
-msgstr "새로운 기능"
-
-msgid "Newton Series Release Notes"
-msgstr "Newton 시리즈 릴리즈 노트"
-
-msgid "Ocata Series Release Notes"
-msgstr "Ocata 시리즈 릴리즈 노트"
-
-msgid "Other Notes"
-msgstr "기타 기능"
-
-msgid "Pike Series Release Notes"
-msgstr "Pike 시리즈 릴리즈 노트"
-
-msgid "Queens Series Release Notes"
-msgstr "Queens 시리즈 릴리즈 노트"
-
-msgid "Security Issues"
-msgstr "보안 이슈"
-
-msgid "To::"
-msgstr "To::"
-
-msgid "Upgrade Notes"
-msgstr "업그레이드 노트"
-
-msgid "``add user to group``"
-msgstr "``add user to group``"
-
-msgid "``create group``"
-msgstr "``create group``"
-
-msgid "``create user``"
-msgstr "``create user``"
-
-msgid "``delete group``"
-msgstr "``delete group``"
-
-msgid "``delete user``"
-msgstr "``delete user``"
-
-msgid "``issue_v2_token``"
-msgstr "``issue_v2_token``"
-
-msgid "``issue_v3_token``"
-msgstr "``issue_v3_token``"
-
-msgid "``keystone.common.kvs.backends.inmemdb.MemoryBackend``"
-msgstr "``keystone.common.kvs.backends.inmemdb.MemoryBackend``"
-
-msgid "``keystone.common.kvs.backends.memcached.MemcachedBackend``"
-msgstr "``keystone.common.kvs.backends.memcached.MemcachedBackend``"
-
-msgid "``keystone.token.persistence.backends.kvs.Token``"
-msgstr "``keystone.token.persistence.backends.kvs.Token``"
-
-msgid "``keystone/common/cache/backends/memcache_pool``"
-msgstr "``keystone/common/cache/backends/memcache_pool``"
-
-msgid "``keystone/common/cache/backends/mongo``"
-msgstr "``keystone/common/cache/backends/mongo``"
-
-msgid "``keystone/common/cache/backends/noop``"
-msgstr "``keystone/common/cache/backends/noop``"
-
-msgid "``keystone/contrib/admin_crud``"
-msgstr "``keystone/contrib/admin_crud``"
-
-msgid "``keystone/contrib/endpoint_filter``"
-msgstr "``keystone/contrib/endpoint_filter``"
-
-msgid "``keystone/contrib/federation``"
-msgstr "``keystone/contrib/federation``"
-
-msgid "``keystone/contrib/oauth1``"
-msgstr "``keystone/contrib/oauth1``"
-
-msgid "``keystone/contrib/revoke``"
-msgstr "``keystone/contrib/revoke``"
-
-msgid "``keystone/contrib/simple_cert``"
-msgstr "``keystone/contrib/simple_cert``"
-
-msgid "``keystone/contrib/user_crud``"
-msgstr "``keystone/contrib/user_crud``"
-
-msgid "``remove user from group``"
-msgstr "``remove user from group``"
-
-msgid "``update group``"
-msgstr "``update group``"
-
-msgid "``update user``"
-msgstr "``update user``"
-
-msgid "``validate_non_persistent_token``"
-msgstr "``validate_non_persistent_token``"
-
-msgid "``validate_v2_token``"
-msgstr "``validate_v2_token``"
-
-msgid "``validate_v3_token``"
-msgstr "``validate_v3_token``"
-
-msgid "to::"
-msgstr "to::"
diff --git a/releasenotes/source/zed.rst b/releasenotes/source/zed.rst
new file mode 100644
index 000000000..9608c05e4
--- /dev/null
+++ b/releasenotes/source/zed.rst
@@ -0,0 +1,6 @@
+========================
+Zed Series Release Notes
+========================
+
+.. release-notes::
+ :branch: stable/zed
diff --git a/requirements.txt b/requirements.txt
index c7e4605f3..5688af2ff 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,3 +1,7 @@
+# Requirements lower bounds listed here are our best effort to keep them up to
+# date but we do not test them so no guarantee of having them all correct. If
+# you find any incorrect lower bounds, let us know or propose a fix.
+
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
diff --git a/setup.cfg b/setup.cfg
index c5d1f2a18..be6b602f7 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -6,7 +6,7 @@ description_file =
author = OpenStack
author_email = openstack-discuss@lists.openstack.org
home_page = https://docs.openstack.org/keystone/latest
-python_requires = >=3.6
+python_requires = >=3.8
classifier =
Environment :: OpenStack
Intended Audience :: Information Technology
@@ -17,8 +17,8 @@ classifier =
Programming Language :: Python :: Implementation :: CPython
Programming Language :: Python :: 3 :: Only
Programming Language :: Python :: 3
- Programming Language :: Python :: 3.6
- Programming Language :: Python :: 3.7
+ Programming Language :: Python :: 3.8
+ Programming Language :: Python :: 3.9
[files]
data_files =
diff --git a/test-requirements.txt b/test-requirements.txt
index 0213085b8..1fca35803 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -1,11 +1,6 @@
-# The order of packages is significant, because pip processes them in the order
-# of appearance. Changing the order has an impact on the overall integration
-# process, which may cause wedges in the gate later.
-
-hacking>=3.0.1,<3.1.0 # Apache-2.0
-pep257==0.7.0 # MIT License
-flake8-docstrings==0.2.1.post1 # MIT
-bashate>=0.5.1 # Apache-2.0
+hacking~=4.1.0 # Apache-2.0
+flake8-docstrings~=1.6.0 # MIT
+bashate~=2.1.0 # Apache-2.0
stestr>=1.0.0 # Apache-2.0
freezegun>=0.3.6 # Apache-2.0
pytz>=2013.6 # MIT
diff --git a/tox.ini b/tox.ini
index 402cea905..4a168a9e1 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,30 +1,31 @@
[tox]
-minversion = 3.2.0
-envlist = py37,pep8,api-ref,docs,genconfig,genpolicy,releasenotes,protection
+minversion = 3.18.0
+envlist = py39,pep8,api-ref,docs,genconfig,genpolicy,releasenotes,protection
ignore_basepython_conflict = true
[testenv]
-usedevelop = True
basepython = python3
-setenv = VIRTUAL_ENV={envdir}
+usedevelop = True
+setenv =
+ PYTHONDONTWRITEBYTECODE=1
deps =
- -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
+ -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
-r{toxinidir}/test-requirements.txt
.[ldap,memcache,mongodb]
commands =
find keystone -type f -name "*.pyc" -delete
stestr run {posargs}
-whitelist_externals =
+allowlist_externals =
bash
find
-passenv = http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY PBR_VERSION
+passenv = http_proxy,HTTP_PROXY,https_proxy,HTTPS_PROXY,no_proxy,NO_PROXY,PBR_VERSION
[testenv:pep8]
deps =
.[bandit]
{[testenv]deps}
commands =
- flake8 --ignore=D100,D101,D102,D103,D104,E305,E402,W503,W504,W605
+ flake8
# Run bash8 during pep8 runs to ensure violations are caught by
# the check and gate queues
bashate devstack/plugin.sh
@@ -42,7 +43,7 @@ passenv = FAST8_NUM_COMMITS
# NOTE(browne): This is required for the integration test job of the bandit
# project. Please do not remove.
deps =
- -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
+ -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
-r{toxinidir}/requirements.txt
.[bandit]
commands = bandit -r keystone -x 'keystone/tests/*'
@@ -71,36 +72,18 @@ commands = {posargs}
commands =
find keystone -type f -name "*.pyc" -delete
oslo_debug_helper {posargs}
-passenv =
- KSTEST_ADMIN_URL
- KSTEST_ADMIN_USERNAME
- KSTEST_ADMIN_PASSWORD
- KSTEST_ADMIN_DOMAIN_ID
- KSTEST_PUBLIC_URL
- KSTEST_USER_USERNAME
- KSTEST_USER_PASSWORD
- KSTEST_USER_DOMAIN_ID
- KSTEST_PROJECT_ID
+passenv = KSTEST_*
[testenv:functional]
deps =
- -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
+ -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
-r{toxinidir}/test-requirements.txt
setenv = OS_TEST_PATH=./keystone/tests/functional
commands =
find keystone -type f -name "*.pyc" -delete
stestr run {posargs}
stestr slowest
-passenv =
- KSTEST_ADMIN_URL
- KSTEST_ADMIN_USERNAME
- KSTEST_ADMIN_PASSWORD
- KSTEST_ADMIN_DOMAIN_ID
- KSTEST_PUBLIC_URL
- KSTEST_USER_USERNAME
- KSTEST_USER_PASSWORD
- KSTEST_USER_DOMAIN_ID
- KSTEST_PROJECT_ID
+passenv = KSTEST_*
[flake8]
filename= *.py,keystone-manage
@@ -111,18 +94,28 @@ enable-extensions = H203,H904
# D102: Missing docstring in public method
# D103: Missing docstring in public function
# D104: Missing docstring in public package
+# D106: Missing docstring in public nested class
+# D107: Missing docstring in __init__
# D203: 1 blank line required before class docstring (deprecated in pep257)
+# D401: First line should be in imperative mood; try rephrasing
# TODO(wxy): Fix the pep8 issue.
+# E305:
# E402: module level import not at top of file
+# H211: Use assert{Is,IsNot}instance
+# H214: Use assertIn/NotIn(A, B) rather than assertTrue/False(A in/not in B) when checking collection contents.
# W503: line break before binary operator
-# W504 line break after binary operator
-ignore = D100,D101,D102,D103,D104,D203,E402,W503,W504
-exclude=.venv,.git,.tox,build,dist,*lib/python*,*egg,tools,vendor,.update-venv,*.ini,*.po,*.pot
-max-complexity=24
+# W504: line break after binary operator
+# W605:
+ignore = D100,D101,D102,D103,D104,D106,D107,D203,D401,E305,E402,H211,H214,W503,W504,W605
+exclude = .venv,.git,.tox,build,dist,*lib/python*,*egg,tools,vendor,.update-venv,*.ini,*.po,*.pot
+max-complexity = 24
+per-file-ignores =
+# URL lines too long
+ keystone/common/password_hashing.py: E501
[testenv:docs]
deps =
- -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
+ -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
-r{toxinidir}/doc/requirements.txt
.[ldap,memcache,mongodb]
commands=
@@ -139,7 +132,7 @@ commands=
[testenv:pdf-docs]
envdir = {toxworkdir}/docs
deps = {[testenv:docs]deps}
-whitelist_externals =
+allowlist_externals =
make
mkdir
rm
@@ -170,7 +163,6 @@ commands = oslopolicy-sample-generator --config-file config-generator/keystone-p
[hacking]
import_exceptions =
keystone.i18n
- six.moves
[flake8:local-plugins]
extension =
@@ -188,12 +180,6 @@ paths = ./keystone/tests/hacking
deps = bindep
commands = bindep test
-[testenv:lower-constraints]
-deps =
- -c{toxinidir}/lower-constraints.txt
- -r{toxinidir}/test-requirements.txt
- .[ldap,memcache,mongodb]
-
[testenv:protection]
commands =
find keystone -type f -name "*.pyc" -delete