-rw-r--r--.zuul.yaml19
-rw-r--r--README.rst2
-rw-r--r--api-ref/source/v3/authenticate-v3.inc2
-rw-r--r--api-ref/source/v3/roles.inc2
-rw-r--r--api-ref/source/v3/unified_limits.inc4
-rw-r--r--bindep.txt4
-rw-r--r--doc/source/admin/auth-totp.rst4
-rw-r--r--doc/source/admin/bootstrap.rst10
-rw-r--r--doc/source/admin/domain-specific-config.inc6
-rw-r--r--doc/source/admin/service-api-protection.rst2
-rw-r--r--doc/source/admin/unified-limits.rst2
-rw-r--r--doc/source/conf.py32
-rw-r--r--doc/source/contributor/api_change_tutorial.rst13
-rw-r--r--doc/source/contributor/database-migrations.rst21
-rw-r--r--doc/source/contributor/how-can-i-help.rst2
-rw-r--r--doc/source/contributor/programming-exercises.rst6
-rw-r--r--doc/source/contributor/services.rst2
-rw-r--r--doc/source/contributor/testing-keystone.rst12
-rw-r--r--doc/source/getting-started/community.rst6
-rw-r--r--doc/source/install/index-obs.rst8
-rw-r--r--doc/source/install/index-rdo.rst8
-rw-r--r--doc/source/install/index-ubuntu.rst8
-rw-r--r--doc/source/user/application_credentials.rst43
-rw-r--r--keystone/api/s3tokens.py5
-rw-r--r--keystone/api/users.py4
-rw-r--r--keystone/cmd/cli.py178
-rw-r--r--keystone/common/cache/core.py4
-rw-r--r--keystone/common/fernet_utils.py4
-rw-r--r--keystone/common/policies/application_credential.py37
-rw-r--r--keystone/common/policies/consumer.py47
-rw-r--r--keystone/common/policies/credential.py30
-rw-r--r--keystone/common/policies/domain.py40
-rw-r--r--keystone/common/policies/domain_config.py34
-rw-r--r--keystone/common/policies/ec2_credential.py45
-rw-r--r--keystone/common/policies/endpoint.py38
-rw-r--r--keystone/common/policies/endpoint_group.py75
-rw-r--r--keystone/common/policies/grant.py87
-rw-r--r--keystone/common/policies/group.py80
-rw-r--r--keystone/common/policies/identity_provider.py51
-rw-r--r--keystone/common/policies/implied_role.py46
-rw-r--r--keystone/common/policies/mapping.py45
-rw-r--r--keystone/common/policies/policy.py38
-rw-r--r--keystone/common/policies/policy_association.py75
-rw-r--r--keystone/common/policies/project.py104
-rw-r--r--keystone/common/policies/project_endpoint.py46
-rw-r--r--keystone/common/policies/protocol.py49
-rw-r--r--keystone/common/policies/region.py31
-rw-r--r--keystone/common/policies/role.py87
-rw-r--r--keystone/common/policies/role_assignment.py23
-rw-r--r--keystone/common/policies/service.py47
-rw-r--r--keystone/common/policies/service_provider.py47
-rw-r--r--keystone/common/policies/token.py24
-rw-r--r--keystone/common/policies/trust.py48
-rw-r--r--keystone/common/policies/user.py40
-rw-r--r--keystone/common/sql/alembic.ini100
-rw-r--r--keystone/common/sql/contract_repo/README4
-rw-r--r--keystone/common/sql/contract_repo/versions/002_password_created_at_not_nullable.py39
-rw-r--r--keystone/common/sql/contract_repo/versions/003_remove_unencrypted_blob_column_from_credential.py60
-rw-r--r--keystone/common/sql/contract_repo/versions/004_reset_password_created_at.py37
-rw-r--r--keystone/common/sql/contract_repo/versions/005_placeholder.py18
-rw-r--r--keystone/common/sql/contract_repo/versions/006_placeholder.py18
-rw-r--r--keystone/common/sql/contract_repo/versions/007_placeholder.py18
-rw-r--r--keystone/common/sql/contract_repo/versions/008_placeholder.py18
-rw-r--r--keystone/common/sql/contract_repo/versions/009_placeholder.py18
-rw-r--r--keystone/common/sql/contract_repo/versions/010_contract_add_revocation_event_index.py15
-rw-r--r--keystone/common/sql/contract_repo/versions/011_contract_user_id_unique_for_nonlocal_user.py23
-rw-r--r--keystone/common/sql/contract_repo/versions/012_contract_add_domain_id_to_idp.py38
-rw-r--r--keystone/common/sql/contract_repo/versions/013_contract_protocol_cascade_delete_for_federated_user.py31
-rw-r--r--keystone/common/sql/contract_repo/versions/014_contract_add_domain_id_to_user_table.py94
-rw-r--r--keystone/common/sql/contract_repo/versions/015_contract_update_federated_user_domain.py34
-rw-r--r--keystone/common/sql/contract_repo/versions/016_contract_add_user_options.py16
-rw-r--r--keystone/common/sql/contract_repo/versions/017_placeholder.py18
-rw-r--r--keystone/common/sql/contract_repo/versions/018_placeholder.py18
-rw-r--r--keystone/common/sql/contract_repo/versions/019_placeholder.py18
-rw-r--r--keystone/common/sql/contract_repo/versions/020_placeholder.py18
-rw-r--r--keystone/common/sql/contract_repo/versions/021_placeholder.py18
-rw-r--r--keystone/common/sql/contract_repo/versions/022_contract_add_default_project_id_index.py15
-rw-r--r--keystone/common/sql/contract_repo/versions/023_contract_add_second_password_column_for_expanded_hash_sizes.py15
-rw-r--r--keystone/common/sql/contract_repo/versions/024_contract_create_created_at_int_columns.py61
-rw-r--r--keystone/common/sql/contract_repo/versions/025_placeholder.py18
-rw-r--r--keystone/common/sql/contract_repo/versions/026_placeholder.py18
-rw-r--r--keystone/common/sql/contract_repo/versions/027_placeholder.py18
-rw-r--r--keystone/common/sql/contract_repo/versions/028_placeholder.py18
-rw-r--r--keystone/common/sql/contract_repo/versions/029_placeholder.py18
-rw-r--r--keystone/common/sql/contract_repo/versions/030_contract_add_project_tags_table.py15
-rw-r--r--keystone/common/sql/contract_repo/versions/031_contract_system_assignment_table.py16
-rw-r--r--keystone/common/sql/contract_repo/versions/032_contract_add_expired_at_int_to_trust.py51
-rw-r--r--keystone/common/sql/contract_repo/versions/033_contract_add_limits_tables.py15
-rw-r--r--keystone/common/sql/contract_repo/versions/034_contract_add_application_credentials_table.py15
-rw-r--r--keystone/common/sql/contract_repo/versions/035_contract_add_system_column_to_application_credential_table.py23
-rw-r--r--keystone/common/sql/contract_repo/versions/036_contract_rename_application_credential_restriction_column.py40
-rw-r--r--keystone/common/sql/contract_repo/versions/037_contract_remove_service_and_region_fk_for_registered_limit.py36
-rw-r--r--keystone/common/sql/contract_repo/versions/038_placeholder.py18
-rw-r--r--keystone/common/sql/contract_repo/versions/039_placeholder.py18
-rw-r--r--keystone/common/sql/contract_repo/versions/040_placeholder.py18
-rw-r--r--keystone/common/sql/contract_repo/versions/041_placeholder.py18
-rw-r--r--keystone/common/sql/contract_repo/versions/042_placeholder.py18
-rw-r--r--keystone/common/sql/contract_repo/versions/043_placeholder.py18
-rw-r--r--keystone/common/sql/contract_repo/versions/044_placeholder.py18
-rw-r--r--keystone/common/sql/contract_repo/versions/045_contract_add_description_to_limit.py15
-rw-r--r--keystone/common/sql/contract_repo/versions/046_contract_old_password_data_to_password_hash_column.py15
-rw-r--r--keystone/common/sql/contract_repo/versions/047_contract_expand_update_pk_for_unified_limit.py63
-rw-r--r--keystone/common/sql/contract_repo/versions/048_contract_add_registered_limit_id_column_for_limit.py15
-rw-r--r--keystone/common/sql/contract_repo/versions/049_placeholder.py18
-rw-r--r--keystone/common/sql/contract_repo/versions/050_placeholder.py18
-rw-r--r--keystone/common/sql/contract_repo/versions/051_placeholder.py18
-rw-r--r--keystone/common/sql/contract_repo/versions/052_placeholder.py18
-rw-r--r--keystone/common/sql/contract_repo/versions/053_contract_add_role_description_to_role_table.py15
-rw-r--r--keystone/common/sql/contract_repo/versions/055_contract_add_domain_to_limit.py21
-rw-r--r--keystone/common/sql/contract_repo/versions/056_contract_add_application_credential_access_rules.py17
-rw-r--r--keystone/common/sql/contract_repo/versions/057_placeholder.py18
-rw-r--r--keystone/common/sql/contract_repo/versions/058_placeholder.py18
-rw-r--r--keystone/common/sql/contract_repo/versions/059_placeholder.py18
-rw-r--r--keystone/common/sql/contract_repo/versions/060_placeholder.py18
-rw-r--r--keystone/common/sql/contract_repo/versions/061_placeholder.py18
-rw-r--r--keystone/common/sql/contract_repo/versions/062_contract_extract_redelegation_data_from_extras.py15
-rw-r--r--keystone/common/sql/contract_repo/versions/063_contract_drop_limit_columns.py23
-rw-r--r--keystone/common/sql/contract_repo/versions/064_contract_add_remote_id_attribute_to_federation_protocol_table.py15
-rw-r--r--keystone/common/sql/contract_repo/versions/065_contract_add_user_external_id_to_access_rule.py15
-rw-r--r--keystone/common/sql/contract_repo/versions/066_contract_add_resource_options_table.py18
-rw-r--r--keystone/common/sql/contract_repo/versions/067_placeholder.py18
-rw-r--r--keystone/common/sql/contract_repo/versions/068_placeholder.py18
-rw-r--r--keystone/common/sql/contract_repo/versions/069_placeholder.py18
-rw-r--r--keystone/common/sql/contract_repo/versions/070_placeholder.py18
-rw-r--r--keystone/common/sql/contract_repo/versions/071_placeholder.py18
-rw-r--r--keystone/common/sql/contract_repo/versions/072_contract_drop_domain_id_fk.py47
-rw-r--r--keystone/common/sql/contract_repo/versions/073_contract_expiring_group_membership.py15
-rw-r--r--keystone/common/sql/core.py10
-rw-r--r--keystone/common/sql/data_migration_repo/README4
-rw-r--r--keystone/common/sql/data_migration_repo/versions/002_password_created_at_not_nullable.py15
-rw-r--r--keystone/common/sql/data_migration_repo/versions/003_migrate_unencrypted_credentials.py39
-rw-r--r--keystone/common/sql/data_migration_repo/versions/004_reset_password_created_at.py15
-rw-r--r--keystone/common/sql/data_migration_repo/versions/005_placeholder.py18
-rw-r--r--keystone/common/sql/data_migration_repo/versions/006_placeholder.py18
-rw-r--r--keystone/common/sql/data_migration_repo/versions/007_placeholder.py18
-rw-r--r--keystone/common/sql/data_migration_repo/versions/008_placeholder.py18
-rw-r--r--keystone/common/sql/data_migration_repo/versions/009_placeholder.py18
-rw-r--r--keystone/common/sql/data_migration_repo/versions/010_migrate_add_revocation_event_index.py15
-rw-r--r--keystone/common/sql/data_migration_repo/versions/011_expand_user_id_unique_for_nonlocal_user.py15
-rw-r--r--keystone/common/sql/data_migration_repo/versions/012_migrate_add_domain_id_to_idp.py55
-rw-r--r--keystone/common/sql/data_migration_repo/versions/013_migrate_protocol_cascade_delete_for_federated_user.py15
-rw-r--r--keystone/common/sql/data_migration_repo/versions/014_migrate_add_domain_id_to_user_table.py45
-rw-r--r--keystone/common/sql/data_migration_repo/versions/015_migrate_update_federated_user_domain.py36
-rw-r--r--keystone/common/sql/data_migration_repo/versions/016_migrate_add_user_options.py16
-rw-r--r--keystone/common/sql/data_migration_repo/versions/017_placeholder.py18
-rw-r--r--keystone/common/sql/data_migration_repo/versions/018_placeholder.py18
-rw-r--r--keystone/common/sql/data_migration_repo/versions/019_placeholder.py18
-rw-r--r--keystone/common/sql/data_migration_repo/versions/020_placeholder.py18
-rw-r--r--keystone/common/sql/data_migration_repo/versions/021_placeholder.py18
-rw-r--r--keystone/common/sql/data_migration_repo/versions/022_migrate_add_default_project_id_index.py15
-rw-r--r--keystone/common/sql/data_migration_repo/versions/023_migrate_add_second_password_column_for_expanded_hash_sizes.py15
-rw-r--r--keystone/common/sql/data_migration_repo/versions/024_migrate_create_created_at_int_columns.py22
-rw-r--r--keystone/common/sql/data_migration_repo/versions/025_placeholder.py18
-rw-r--r--keystone/common/sql/data_migration_repo/versions/026_placeholder.py18
-rw-r--r--keystone/common/sql/data_migration_repo/versions/027_placeholder.py18
-rw-r--r--keystone/common/sql/data_migration_repo/versions/028_placeholder.py18
-rw-r--r--keystone/common/sql/data_migration_repo/versions/029_placeholder.py18
-rw-r--r--keystone/common/sql/data_migration_repo/versions/030_migrate_add_project_tags_table.py15
-rw-r--r--keystone/common/sql/data_migration_repo/versions/031_migrate_system_assignment_table.py17
-rw-r--r--keystone/common/sql/data_migration_repo/versions/032_migrate_add_expired_at_int_to_trust.py22
-rw-r--r--keystone/common/sql/data_migration_repo/versions/033_migrate_add_limits_tables.py15
-rw-r--r--keystone/common/sql/data_migration_repo/versions/034_migrate_add_application_credentials_table.py15
-rw-r--r--keystone/common/sql/data_migration_repo/versions/035_migrate_add_system_column_to_application_credential_table.py15
-rw-r--r--keystone/common/sql/data_migration_repo/versions/036_migrate_rename_application_credential_restriction_column.py15
-rw-r--r--keystone/common/sql/data_migration_repo/versions/037_migrate_remove_service_and_region_fk_for_registered_limit.py15
-rw-r--r--keystone/common/sql/data_migration_repo/versions/038_placeholder.py18
-rw-r--r--keystone/common/sql/data_migration_repo/versions/039_placeholder.py18
-rw-r--r--keystone/common/sql/data_migration_repo/versions/040_placeholder.py18
-rw-r--r--keystone/common/sql/data_migration_repo/versions/041_placeholder.py18
-rw-r--r--keystone/common/sql/data_migration_repo/versions/042_placeholder.py18
-rw-r--r--keystone/common/sql/data_migration_repo/versions/043_placeholder.py18
-rw-r--r--keystone/common/sql/data_migration_repo/versions/044_placeholder.py18
-rw-r--r--keystone/common/sql/data_migration_repo/versions/045_migrate_add_description_to_limit.py15
-rw-r--r--keystone/common/sql/data_migration_repo/versions/046_migrate_old_password_data_to_password_hash_column.py26
-rw-r--r--keystone/common/sql/data_migration_repo/versions/047_migrate_update_pk_for_unified_limit.py37
-rw-r--r--keystone/common/sql/data_migration_repo/versions/048_migrate_add_registered_limit_id_column_for_limit.py15
-rw-r--r--keystone/common/sql/data_migration_repo/versions/049_placeholder.py18
-rw-r--r--keystone/common/sql/data_migration_repo/versions/050_placeholder.py18
-rw-r--r--keystone/common/sql/data_migration_repo/versions/051_placeholder.py18
-rw-r--r--keystone/common/sql/data_migration_repo/versions/052_placeholder.py18
-rw-r--r--keystone/common/sql/data_migration_repo/versions/053_migrate_add_role_description_to_role_table.py15
-rw-r--r--keystone/common/sql/data_migration_repo/versions/054_migrate_drop_old_passoword_column.py15
-rw-r--r--keystone/common/sql/data_migration_repo/versions/055_migrate_add_domain_to_limit.py15
-rw-r--r--keystone/common/sql/data_migration_repo/versions/056_migrate_add_application_credential_access_rules.py17
-rw-r--r--keystone/common/sql/data_migration_repo/versions/057_placeholder.py18
-rw-r--r--keystone/common/sql/data_migration_repo/versions/058_placeholder.py18
-rw-r--r--keystone/common/sql/data_migration_repo/versions/059_placeholder.py18
-rw-r--r--keystone/common/sql/data_migration_repo/versions/060_placeholder.py18
-rw-r--r--keystone/common/sql/data_migration_repo/versions/061_placeholder.py18
-rw-r--r--keystone/common/sql/data_migration_repo/versions/062_migrate_extract_redelegation_data_from_extras.py43
-rw-r--r--keystone/common/sql/data_migration_repo/versions/063_migrate_drop_limit_columns.py15
-rw-r--r--keystone/common/sql/data_migration_repo/versions/064_migrate_add_remote_id_attribute_to_federation_protocol_table.py15
-rw-r--r--keystone/common/sql/data_migration_repo/versions/065_migrate_add_user_external_id_to_access_rule.py15
-rw-r--r--keystone/common/sql/data_migration_repo/versions/066_migrate_add_resource_options_table.py18
-rw-r--r--keystone/common/sql/data_migration_repo/versions/067_placeholder.py18
-rw-r--r--keystone/common/sql/data_migration_repo/versions/068_placeholder.py18
-rw-r--r--keystone/common/sql/data_migration_repo/versions/069_placeholder.py18
-rw-r--r--keystone/common/sql/data_migration_repo/versions/070_placeholder.py18
-rw-r--r--keystone/common/sql/data_migration_repo/versions/071_placeholder.py18
-rw-r--r--keystone/common/sql/data_migration_repo/versions/072_migrate_drop_domain_id_fk.py20
-rw-r--r--keystone/common/sql/data_migration_repo/versions/073_migrate_expiring_group_membership.py15
-rw-r--r--keystone/common/sql/expand_repo/README4
-rw-r--r--keystone/common/sql/expand_repo/versions/002_password_created_at_not_nullable.py18
-rw-r--r--keystone/common/sql/expand_repo/versions/003_add_key_hash_and_encrypted_blob_to_credential.py129
-rw-r--r--keystone/common/sql/expand_repo/versions/004_reset_password_created_at.py15
-rw-r--r--keystone/common/sql/expand_repo/versions/005_placeholder.py18
-rw-r--r--keystone/common/sql/expand_repo/versions/006_placeholder.py18
-rw-r--r--keystone/common/sql/expand_repo/versions/007_placeholder.py18
-rw-r--r--keystone/common/sql/expand_repo/versions/008_placeholder.py18
-rw-r--r--keystone/common/sql/expand_repo/versions/009_placeholder.py18
-rw-r--r--keystone/common/sql/expand_repo/versions/010_expand_add_revocation_event_index.py31
-rw-r--r--keystone/common/sql/expand_repo/versions/011_expand_user_id_unique_for_nonlocal_user.py15
-rw-r--r--keystone/common/sql/expand_repo/versions/012_expand_add_domain_id_to_idp.py73
-rw-r--r--keystone/common/sql/expand_repo/versions/013_expand_protocol_cascade_delete_for_federated_user.py15
-rw-r--r--keystone/common/sql/expand_repo/versions/014_expand_add_domain_id_to_user_table.py165
-rw-r--r--keystone/common/sql/expand_repo/versions/015_expand_update_federated_user_domain.py69
-rw-r--r--keystone/common/sql/expand_repo/versions/016_expand_add_user_options.py34
-rw-r--r--keystone/common/sql/expand_repo/versions/017_placeholder.py18
-rw-r--r--keystone/common/sql/expand_repo/versions/018_placeholder.py18
-rw-r--r--keystone/common/sql/expand_repo/versions/019_placeholder.py18
-rw-r--r--keystone/common/sql/expand_repo/versions/020_placeholder.py18
-rw-r--r--keystone/common/sql/expand_repo/versions/021_placeholder.py18
-rw-r--r--keystone/common/sql/expand_repo/versions/022_expand_add_default_project_id_index.py21
-rw-r--r--keystone/common/sql/expand_repo/versions/023_expand_add_second_password_column_for_expanded_hash_sizes.py25
-rw-r--r--keystone/common/sql/expand_repo/versions/024_expand_create_created_at_int_columns.py33
-rw-r--r--keystone/common/sql/expand_repo/versions/025_placeholder.py18
-rw-r--r--keystone/common/sql/expand_repo/versions/026_placeholder.py18
-rw-r--r--keystone/common/sql/expand_repo/versions/027_placeholder.py18
-rw-r--r--keystone/common/sql/expand_repo/versions/028_placeholder.py18
-rw-r--r--keystone/common/sql/expand_repo/versions/029_placeholder.py18
-rw-r--r--keystone/common/sql/expand_repo/versions/030_expand_add_project_tags_table.py44
-rw-r--r--keystone/common/sql/expand_repo/versions/031_expand_system_assignment_table.py33
-rw-r--r--keystone/common/sql/expand_repo/versions/032_expand_add_expired_at_int_to_trust.py35
-rw-r--r--keystone/common/sql/expand_repo/versions/033_expand_add_limits_tables.py68
-rw-r--r--keystone/common/sql/expand_repo/versions/034_expand_add_application_credential_table.py52
-rw-r--r--keystone/common/sql/expand_repo/versions/036_expand_rename_application_credential_restriction_column.py44
-rw-r--r--keystone/common/sql/expand_repo/versions/037_expand_remove_service_and_region_fk_for_registered_limit.py15
-rw-r--r--keystone/common/sql/expand_repo/versions/038_placeholder.py18
-rw-r--r--keystone/common/sql/expand_repo/versions/039_placeholder.py18
-rw-r--r--keystone/common/sql/expand_repo/versions/040_placeholder.py18
-rw-r--r--keystone/common/sql/expand_repo/versions/041_placeholder.py18
-rw-r--r--keystone/common/sql/expand_repo/versions/042_placeholder.py18
-rw-r--r--keystone/common/sql/expand_repo/versions/043_placeholder.py18
-rw-r--r--keystone/common/sql/expand_repo/versions/044_placeholder.py18
-rw-r--r--keystone/common/sql/expand_repo/versions/045_expand_add_description_to_limit.py29
-rw-r--r--keystone/common/sql/expand_repo/versions/046_expand_old_password_data_to_password_hash_column.py15
-rw-r--r--keystone/common/sql/expand_repo/versions/047_expand_update_pk_for_unified_limit.py103
-rw-r--r--keystone/common/sql/expand_repo/versions/048_expand_add_registered_limit_id_column_for_limit.py40
-rw-r--r--keystone/common/sql/expand_repo/versions/049_placeholder.py18
-rw-r--r--keystone/common/sql/expand_repo/versions/050_placeholder.py18
-rw-r--r--keystone/common/sql/expand_repo/versions/051_placeholder.py18
-rw-r--r--keystone/common/sql/expand_repo/versions/052_placeholder.py18
-rw-r--r--keystone/common/sql/expand_repo/versions/053_expand_add_role_description_to_role_table.py23
-rw-r--r--keystone/common/sql/expand_repo/versions/054_expand_drop_old_passoword_column.py15
-rw-r--r--keystone/common/sql/expand_repo/versions/055_expand_add_domain_to_limit.py34
-rw-r--r--keystone/common/sql/expand_repo/versions/056_expand_add_application_credential_access_rules.py45
-rw-r--r--keystone/common/sql/expand_repo/versions/057_placeholder.py18
-rw-r--r--keystone/common/sql/expand_repo/versions/058_placeholder.py18
-rw-r--r--keystone/common/sql/expand_repo/versions/059_placeholder.py18
-rw-r--r--keystone/common/sql/expand_repo/versions/060_placeholder.py18
-rw-r--r--keystone/common/sql/expand_repo/versions/061_placeholder.py18
-rw-r--r--keystone/common/sql/expand_repo/versions/062_expand_extract_redelegation_data_from_extras.py31
-rw-r--r--keystone/common/sql/expand_repo/versions/063_expand_drop_limit_columns.py15
-rw-r--r--keystone/common/sql/expand_repo/versions/064_expand_add_remote_id_attribute_to_federation_protocol_table.py23
-rw-r--r--keystone/common/sql/expand_repo/versions/065_expand_add_user_external_id_to_access_rule.py39
-rw-r--r--keystone/common/sql/expand_repo/versions/066_expand_add_role_and_project_option_tables.py51
-rw-r--r--keystone/common/sql/expand_repo/versions/067_placeholder.py18
-rw-r--r--keystone/common/sql/expand_repo/versions/068_placeholder.py18
-rw-r--r--keystone/common/sql/expand_repo/versions/069_placeholder.py18
-rw-r--r--keystone/common/sql/expand_repo/versions/070_placeholder.py18
-rw-r--r--keystone/common/sql/expand_repo/versions/071_placeholder.py18
-rw-r--r--keystone/common/sql/expand_repo/versions/072_expand_drop_domain_id_fk.py20
-rw-r--r--keystone/common/sql/expand_repo/versions/073_expand_expiring_group_membership.py47
-rw-r--r--keystone/common/sql/legacy_migrations/__init__.py (renamed from keystone/common/sql/contract_repo/__init__.py)0
-rw-r--r--keystone/common/sql/legacy_migrations/contract_repo/README.rst13
-rw-r--r--keystone/common/sql/legacy_migrations/contract_repo/__init__.py (renamed from keystone/common/sql/contract_repo/versions/__init__.py)0
-rw-r--r--keystone/common/sql/legacy_migrations/contract_repo/manage.py (renamed from keystone/common/sql/contract_repo/manage.py)0
-rw-r--r--keystone/common/sql/legacy_migrations/contract_repo/migrate.cfg (renamed from keystone/common/sql/contract_repo/migrate.cfg)0
-rw-r--r--keystone/common/sql/legacy_migrations/contract_repo/versions/073_contract_initial_migration.py (renamed from keystone/common/sql/contract_repo/versions/001_contract_initial_null_migration.py)0
-rw-r--r--keystone/common/sql/legacy_migrations/contract_repo/versions/074_placeholder.py (renamed from keystone/common/sql/contract_repo/versions/074_placeholder.py)0
-rw-r--r--keystone/common/sql/legacy_migrations/contract_repo/versions/075_placeholder.py (renamed from keystone/common/sql/contract_repo/versions/075_placeholder.py)0
-rw-r--r--keystone/common/sql/legacy_migrations/contract_repo/versions/076_placeholder.py (renamed from keystone/common/sql/contract_repo/versions/076_placeholder.py)0
-rw-r--r--keystone/common/sql/legacy_migrations/contract_repo/versions/077_placeholder.py (renamed from keystone/common/sql/contract_repo/versions/077_placeholder.py)0
-rw-r--r--keystone/common/sql/legacy_migrations/contract_repo/versions/078_placeholder.py (renamed from keystone/common/sql/contract_repo/versions/078_placeholder.py)0
-rw-r--r--keystone/common/sql/legacy_migrations/contract_repo/versions/079_contract_update_local_id_limit.py (renamed from keystone/common/sql/data_migration_repo/versions/074_placeholder.py)0
-rw-r--r--keystone/common/sql/legacy_migrations/contract_repo/versions/__init__.py (renamed from keystone/common/sql/data_migration_repo/__init__.py)0
-rw-r--r--keystone/common/sql/legacy_migrations/data_migration_repo/README.rst13
-rw-r--r--keystone/common/sql/legacy_migrations/data_migration_repo/__init__.py (renamed from keystone/common/sql/data_migration_repo/versions/__init__.py)0
-rw-r--r--keystone/common/sql/legacy_migrations/data_migration_repo/manage.py (renamed from keystone/common/sql/data_migration_repo/manage.py)0
-rw-r--r--keystone/common/sql/legacy_migrations/data_migration_repo/migrate.cfg (renamed from keystone/common/sql/data_migration_repo/migrate.cfg)0
-rw-r--r--keystone/common/sql/legacy_migrations/data_migration_repo/versions/073_migrate_initial_migration.py (renamed from keystone/common/sql/migrate_repo/versions/089_add_root_of_all_domains.py)34
-rw-r--r--keystone/common/sql/legacy_migrations/data_migration_repo/versions/074_placeholder.py (renamed from keystone/common/sql/expand_repo/versions/074_placeholder.py)0
-rw-r--r--keystone/common/sql/legacy_migrations/data_migration_repo/versions/075_placeholder.py (renamed from keystone/common/sql/data_migration_repo/versions/075_placeholder.py)0
-rw-r--r--keystone/common/sql/legacy_migrations/data_migration_repo/versions/076_placeholder.py (renamed from keystone/common/sql/data_migration_repo/versions/076_placeholder.py)0
-rw-r--r--keystone/common/sql/legacy_migrations/data_migration_repo/versions/077_placeholder.py (renamed from keystone/common/sql/data_migration_repo/versions/077_placeholder.py)0
-rw-r--r--keystone/common/sql/legacy_migrations/data_migration_repo/versions/078_placeholder.py (renamed from keystone/common/sql/data_migration_repo/versions/078_placeholder.py)0
-rw-r--r--keystone/common/sql/legacy_migrations/data_migration_repo/versions/079_migrate_update_local_id_limit.py (renamed from keystone/common/sql/expand_repo/versions/075_placeholder.py)0
-rw-r--r--keystone/common/sql/legacy_migrations/data_migration_repo/versions/__init__.py (renamed from keystone/common/sql/migrate_repo/__init__.py)0
-rw-r--r--keystone/common/sql/legacy_migrations/expand_repo/README.rst13
-rw-r--r--keystone/common/sql/legacy_migrations/expand_repo/__init__.py (renamed from keystone/common/sql/expand_repo/__init__.py)0
-rw-r--r--keystone/common/sql/legacy_migrations/expand_repo/manage.py (renamed from keystone/common/sql/expand_repo/manage.py)0
-rw-r--r--keystone/common/sql/legacy_migrations/expand_repo/migrate.cfg (renamed from keystone/common/sql/expand_repo/migrate.cfg)0
-rw-r--r--keystone/common/sql/legacy_migrations/expand_repo/versions/073_expand_initial_migration.py1183
-rw-r--r--keystone/common/sql/legacy_migrations/expand_repo/versions/074_placeholder.py (renamed from keystone/common/sql/expand_repo/versions/076_placeholder.py)0
-rw-r--r--keystone/common/sql/legacy_migrations/expand_repo/versions/075_placeholder.py (renamed from keystone/common/sql/expand_repo/versions/077_placeholder.py)0
-rw-r--r--keystone/common/sql/legacy_migrations/expand_repo/versions/076_placeholder.py (renamed from keystone/common/sql/expand_repo/versions/078_placeholder.py)0
-rw-r--r--keystone/common/sql/legacy_migrations/expand_repo/versions/077_placeholder.py18
-rw-r--r--keystone/common/sql/legacy_migrations/expand_repo/versions/078_placeholder.py18
-rw-r--r--keystone/common/sql/legacy_migrations/expand_repo/versions/079_expand_update_local_id_limit.py (renamed from keystone/common/sql/contract_repo/versions/054_contract_drop_old_passoword_column.py)7
-rw-r--r--keystone/common/sql/legacy_migrations/expand_repo/versions/__init__.py (renamed from keystone/common/sql/expand_repo/versions/__init__.py)0
-rw-r--r--keystone/common/sql/migrate_repo/README4
-rw-r--r--keystone/common/sql/migrate_repo/manage.py18
-rw-r--r--keystone/common/sql/migrate_repo/migrate.cfg25
-rw-r--r--keystone/common/sql/migrate_repo/versions/067_kilo.py317
-rw-r--r--keystone/common/sql/migrate_repo/versions/068_placeholder.py18
-rw-r--r--keystone/common/sql/migrate_repo/versions/069_placeholder.py18
-rw-r--r--keystone/common/sql/migrate_repo/versions/070_placeholder.py18
-rw-r--r--keystone/common/sql/migrate_repo/versions/071_placeholder.py18
-rw-r--r--keystone/common/sql/migrate_repo/versions/072_placeholder.py18
-rw-r--r--keystone/common/sql/migrate_repo/versions/073_insert_assignment_inherited_pk.py113
-rw-r--r--keystone/common/sql/migrate_repo/versions/074_add_is_domain_project.py27
-rw-r--r--keystone/common/sql/migrate_repo/versions/075_confirm_config_registration.py29
-rw-r--r--keystone/common/sql/migrate_repo/versions/076_placeholder.py18
-rw-r--r--keystone/common/sql/migrate_repo/versions/077_placeholder.py18
-rw-r--r--keystone/common/sql/migrate_repo/versions/078_placeholder.py18
-rw-r--r--keystone/common/sql/migrate_repo/versions/079_placeholder.py18
-rw-r--r--keystone/common/sql/migrate_repo/versions/080_placeholder.py18
-rw-r--r--keystone/common/sql/migrate_repo/versions/081_add_endpoint_policy_table.py54
-rw-r--r--keystone/common/sql/migrate_repo/versions/082_add_federation_tables.py97
-rw-r--r--keystone/common/sql/migrate_repo/versions/083_add_oauth1_tables.py75
-rw-r--r--keystone/common/sql/migrate_repo/versions/084_add_revoke_tables.py55
-rw-r--r--keystone/common/sql/migrate_repo/versions/085_add_endpoint_filtering_table.py70
-rw-r--r--keystone/common/sql/migrate_repo/versions/086_add_duplicate_constraint_trusts.py26
-rw-r--r--keystone/common/sql/migrate_repo/versions/087_implied_roles.py43
-rw-r--r--keystone/common/sql/migrate_repo/versions/088_domain_specific_roles.py60
-rw-r--r--keystone/common/sql/migrate_repo/versions/090_add_local_user_and_password_tables.py42
-rw-r--r--keystone/common/sql/migrate_repo/versions/091_migrate_data_to_local_user_and_password_tables.py84
-rw-r--r--keystone/common/sql/migrate_repo/versions/092_make_implied_roles_fks_cascaded.py46
-rw-r--r--keystone/common/sql/migrate_repo/versions/093_migrate_domains_to_projects.py125
-rw-r--r--keystone/common/sql/migrate_repo/versions/094_add_federated_user_table.py45
-rw-r--r--keystone/common/sql/migrate_repo/versions/095_add_integer_pkey_to_revocation_event_table.py62
-rw-r--r--keystone/common/sql/migrate_repo/versions/096_drop_role_name_constraint.py50
-rw-r--r--keystone/common/sql/migrate_repo/versions/097_drop_user_name_domainid_constraint.py67
-rw-r--r--keystone/common/sql/migrate_repo/versions/098_placeholder.py18
-rw-r--r--keystone/common/sql/migrate_repo/versions/099_placeholder.py18
-rw-r--r--keystone/common/sql/migrate_repo/versions/100_placeholder.py18
-rw-r--r--keystone/common/sql/migrate_repo/versions/101_drop_role_name_constraint.py53
-rw-r--r--keystone/common/sql/migrate_repo/versions/102_drop_domain_table.py21
-rw-r--r--keystone/common/sql/migrate_repo/versions/103_add_nonlocal_user_table.py32
-rw-r--r--keystone/common/sql/migrate_repo/versions/104_drop_user_name_domainid_constraint.py71
-rw-r--r--keystone/common/sql/migrate_repo/versions/105_add_password_date_columns.py30
-rw-r--r--keystone/common/sql/migrate_repo/versions/106_allow_password_column_to_be_nullable.py21
-rw-r--r--keystone/common/sql/migrate_repo/versions/107_add_user_date_columns.py30
-rw-r--r--keystone/common/sql/migrate_repo/versions/108_add_failed_auth_columns.py26
-rw-r--r--keystone/common/sql/migrate_repo/versions/109_add_password_self_service_column.py24
-rw-r--r--keystone/common/sql/migrate_repo/versions/__init__.py0
-rw-r--r--keystone/common/sql/migrations/README.rst15
-rw-r--r--keystone/common/sql/migrations/env.py80
-rw-r--r--keystone/common/sql/migrations/script.py.mako (renamed from keystone/common/sql/expand_repo/versions/035_expand_add_system_column_to_application_credential_table.py)27
-rw-r--r--keystone/common/sql/migrations/versions/27e647c0fad4_initial_version.py1106
-rw-r--r--keystone/common/sql/migrations/versions/CONTRACT_HEAD1
-rw-r--r--keystone/common/sql/migrations/versions/EXPAND_HEAD1
-rw-r--r--keystone/common/sql/migrations/versions/yoga/contract/e25ffa003242_initial.py (renamed from keystone/common/sql/expand_repo/versions/001_expand_initial_null_migration.py)17
-rw-r--r--keystone/common/sql/migrations/versions/yoga/expand/29e87d24a316_initial.py (renamed from keystone/common/sql/data_migration_repo/versions/001_data_initial_null_migration.py)17
-rw-r--r--keystone/common/sql/upgrades.py372
-rw-r--r--keystone/common/utils.py11
-rw-r--r--keystone/conf/ldap.py8
-rw-r--r--keystone/conf/memcache.py26
-rw-r--r--keystone/credential/providers/fernet/core.py2
-rw-r--r--keystone/federation/idp.py12
-rw-r--r--keystone/identity/backends/ldap/common.py19
-rw-r--r--keystone/identity/mapping_backends/sql.py2
-rw-r--r--keystone/identity/shadow_backends/sql.py3
-rw-r--r--keystone/locale/de/LC_MESSAGES/keystone.po5
-rw-r--r--keystone/locale/en_GB/LC_MESSAGES/keystone.po27
-rw-r--r--keystone/locale/es/LC_MESSAGES/keystone.po5
-rw-r--r--keystone/locale/fr/LC_MESSAGES/keystone.po5
-rw-r--r--keystone/locale/it/LC_MESSAGES/keystone.po5
-rw-r--r--keystone/locale/ja/LC_MESSAGES/keystone.po5
-rw-r--r--keystone/locale/ko_KR/LC_MESSAGES/keystone.po5
-rw-r--r--keystone/locale/pt_BR/LC_MESSAGES/keystone.po5
-rw-r--r--keystone/locale/ru/LC_MESSAGES/keystone.po5
-rw-r--r--keystone/locale/zh_CN/LC_MESSAGES/keystone.po21
-rw-r--r--keystone/locale/zh_TW/LC_MESSAGES/keystone.po5
-rw-r--r--keystone/notifications.py2
-rw-r--r--keystone/server/flask/application.py4
-rw-r--r--keystone/tests/unit/assignment/test_backends.py6
-rw-r--r--keystone/tests/unit/base_classes.py9
-rw-r--r--keystone/tests/unit/catalog/test_backends.py42
-rw-r--r--keystone/tests/unit/common/sql/test_upgrades.py252
-rw-r--r--keystone/tests/unit/common/test_notifications.py4
-rw-r--r--keystone/tests/unit/config_files/backend_ldap_sql.conf2
-rw-r--r--keystone/tests/unit/config_files/backend_multi_ldap_sql.conf2
-rw-r--r--keystone/tests/unit/config_files/backend_sql.conf2
-rw-r--r--keystone/tests/unit/config_files/deprecated.conf8
-rw-r--r--keystone/tests/unit/config_files/deprecated_override.conf15
-rw-r--r--keystone/tests/unit/contrib/federation/test_utils.py6
-rw-r--r--keystone/tests/unit/core.py25
-rw-r--r--keystone/tests/unit/endpoint_policy/backends/test_base.py2
-rw-r--r--keystone/tests/unit/identity/shadow_users/test_backend.py4
-rw-r--r--keystone/tests/unit/identity/test_backend_sql.py10
-rw-r--r--keystone/tests/unit/identity/test_backends.py36
-rw-r--r--keystone/tests/unit/ksfixtures/__init__.py2
-rw-r--r--keystone/tests/unit/ksfixtures/logging.py114
-rw-r--r--keystone/tests/unit/ksfixtures/warnings.py79
-rw-r--r--keystone/tests/unit/policy/backends/test_base.py2
-rw-r--r--keystone/tests/unit/resource/test_backends.py45
-rw-r--r--keystone/tests/unit/test_associate_project_endpoint_extension.py8
-rw-r--r--keystone/tests/unit/test_backend_id_mapping_sql.py28
-rw-r--r--keystone/tests/unit/test_backend_ldap.py52
-rw-r--r--keystone/tests/unit/test_backend_sql.py4
-rw-r--r--keystone/tests/unit/test_backend_templated.py4
-rw-r--r--keystone/tests/unit/test_cli.py32
-rw-r--r--keystone/tests/unit/test_config.py32
-rw-r--r--keystone/tests/unit/test_contrib_s3_core.py82
-rw-r--r--keystone/tests/unit/test_hacking_checks.py2
-rw-r--r--keystone/tests/unit/test_policy.py4
-rw-r--r--keystone/tests/unit/test_sql_banned_operations.py103
-rw-r--r--keystone/tests/unit/test_sql_upgrade.py3421
-rw-r--r--keystone/tests/unit/test_v3.py2
-rw-r--r--keystone/tests/unit/test_v3_assignment.py2
-rw-r--r--keystone/tests/unit/test_v3_federation.py12
-rw-r--r--keystone/trust/backends/base.py2
-rw-r--r--keystone/trust/backends/sql.py6
-rw-r--r--keystone/trust/core.py18
-rw-r--r--lower-constraints.txt68
-rw-r--r--playbooks/enable-fips.yaml4
-rw-r--r--releasenotes/notes/bug-1688137-e4203c9a728690a7.yaml8
-rw-r--r--releasenotes/notes/bug-1897280-e7065c4368a325ad.yaml7
-rw-r--r--releasenotes/notes/bug-1929066-6e741c9182620a37.yaml7
-rw-r--r--releasenotes/notes/bug-1941020-f694395a9bcea72f.yaml11
-rw-r--r--releasenotes/notes/change_min_pool_retry_max-f5e7c8d315401426.yaml6
-rw-r--r--releasenotes/notes/drop-python-3-6-and-3-7-dc90b86cedced92b.yaml5
-rw-r--r--releasenotes/notes/remove-db_sync-extension-opt-2ab1f29340281215.yaml6
-rw-r--r--releasenotes/notes/remove-legacy-migrations-647f60019c8dd9e8.yaml7
-rw-r--r--releasenotes/source/index.rst4
-rw-r--r--releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po98
-rw-r--r--releasenotes/source/victoria.rst6
-rw-r--r--releasenotes/source/wallaby.rst6
-rw-r--r--releasenotes/source/xena.rst6
-rw-r--r--releasenotes/source/yoga.rst6
-rw-r--r--requirements.txt6
-rw-r--r--setup.cfg12
-rwxr-xr-xtools/generate-schemas134
-rw-r--r--tox.ini63
445 files changed, 5175 insertions, 11967 deletions
diff --git a/.zuul.yaml b/.zuul.yaml
index 9e39b771e..ef9782f4c 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -33,6 +33,16 @@
USE_PYTHON3: True
- job:
+ name: keystone-dsvm-py3-functional-fips
+ parent: keystone-dsvm-py3-functional
+ nodeset: devstack-single-node-centos-9-stream
+ description: |
+ Functional testing for a FIPS-enabled CentOS 9 Stream system
+ pre-run: playbooks/enable-fips.yaml
+ vars:
+ nslookup_target: 'opendev.org'
+
+- job:
name: keystone-dsvm-functional-federation-opensuse15
parent: keystone-dsvm-functional
nodeset: devstack-single-node-opensuse-15
@@ -195,13 +205,13 @@
- project:
templates:
- openstack-cover-jobs
- - openstack-python3-victoria-jobs
+ - openstack-python3-zed-jobs
- publish-openstack-docs-pti
- periodic-stable-jobs
- check-requirements
- integrated-gate-py3
- release-notes-jobs-python3
- - openstack-python3-wallaby-jobs-arm64
+ - openstack-python3-xena-jobs-arm64
check:
jobs:
- keystone-dsvm-py3-functional:
@@ -212,6 +222,9 @@
- ^etc/.*$
- ^keystone/tests/unit/.*$
- ^releasenotes/.*$
+ - keystone-dsvm-py3-functional-fips:
+ voting: false
+ irrelevant-files: *irrelevant-files
- keystone-dsvm-py3-functional-federation-ubuntu-focal:
voting: false
irrelevant-files: *irrelevant-files
@@ -260,8 +273,6 @@
irrelevant-files: *irrelevant-files
- tempest-pg-full:
irrelevant-files: *tempest-irrelevant-files
- - tempest-full-py3-opensuse15:
- irrelevant-files: *tempest-irrelevant-files
- keystone-dsvm-functional-federation-centos7:
irrelevant-files: *irrelevant-files
- keystone-dsvm-functional-federation-ubuntu-xenial:
diff --git a/README.rst b/README.rst
index 2a19ff51c..520a71e4e 100644
--- a/README.rst
+++ b/README.rst
@@ -49,7 +49,7 @@ Future design work is tracked at:
https://specs.openstack.org/openstack/keystone-specs
-Contributors are encouraged to join IRC (``#openstack-keystone`` on freenode):
+Contributors are encouraged to join IRC (``#openstack-keystone`` on OFTC):
https://wiki.openstack.org/wiki/IRC
diff --git a/api-ref/source/v3/authenticate-v3.inc b/api-ref/source/v3/authenticate-v3.inc
index 11f19cbb4..d69972aa9 100644
--- a/api-ref/source/v3/authenticate-v3.inc
+++ b/api-ref/source/v3/authenticate-v3.inc
@@ -965,7 +965,7 @@ Status Codes
.. rest_status_code:: success status.yaml
- - 201
+ - 204
.. rest_status_code:: error status.yaml
diff --git a/api-ref/source/v3/roles.inc b/api-ref/source/v3/roles.inc
index 3073e241d..80092ec82 100644
--- a/api-ref/source/v3/roles.inc
+++ b/api-ref/source/v3/roles.inc
@@ -1002,7 +1002,7 @@ Status Codes
.. rest_status_code:: success status.yaml
- - 201
+ - 204
.. rest_status_code:: error status.yaml
diff --git a/api-ref/source/v3/unified_limits.inc b/api-ref/source/v3/unified_limits.inc
index ce32a0f1c..bdb1d1959 100644
--- a/api-ref/source/v3/unified_limits.inc
+++ b/api-ref/source/v3/unified_limits.inc
@@ -614,8 +614,8 @@ Example
:language: javascript
-Delete Registered Limit
-=======================
+Delete Limit
+============
.. rest_method:: DELETE /v3/limits/{limit_id}
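
The renamed section documents deleting a project limit rather than a registered limit. A minimal sketch of calling this endpoint directly, assuming the ``requests`` library; the keystone URL, token, and limit ID are placeholder values:

.. code-block:: python

    import requests

    KEYSTONE_URL = 'http://keystone.example.org'  # placeholder endpoint
    TOKEN = 'gAAAAA...'                           # a valid X-Auth-Token (placeholder)
    LIMIT_ID = 'LIMIT_ID'                         # ID of the limit to delete (placeholder)

    # DELETE /v3/limits/{limit_id} removes a single project limit; a
    # successful call returns an empty 204 No Content response.
    resp = requests.delete(
        f'{KEYSTONE_URL}/v3/limits/{LIMIT_ID}',
        headers={'X-Auth-Token': TOKEN},
    )
    print(resp.status_code)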
diff --git a/bindep.txt b/bindep.txt
index 9ed75e0e0..efa6c067b 100644
--- a/bindep.txt
+++ b/bindep.txt
@@ -29,9 +29,7 @@ openldap-devel [platform:redhat]
openssl-devel [platform:rpm]
postgresql-devel [platform:rpm]
postgresql-server [platform:rpm]
-python2-devel [platform:rpm]
-python3-devel [platform:fedora]
-python34-devel [platform:centos]
+python3-devel [platform:rpm]
libmariadb-devel [platform:suse]
openldap2-devel [platform:suse]
diff --git a/doc/source/admin/auth-totp.rst b/doc/source/admin/auth-totp.rst
index 3c331be96..c77ca64a0 100644
--- a/doc/source/admin/auth-totp.rst
+++ b/doc/source/admin/auth-totp.rst
@@ -40,8 +40,8 @@ secret:
.. code-block:: python
import base64
- message = '1234567890123456'
- print base64.b32encode(message).rstrip('=')
+ message = b'1234567890123456'
+ print(base64.b32encode(message).rstrip(b'='))
Example output::
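
For reference, the patched snippet can be run standalone under Python 3; a minimal sketch, where the 16-character passphrase is only an example value:

.. code-block:: python

    import base64

    # Any sufficiently random byte string can serve as the shared passphrase;
    # 16 bytes is used here purely as an example.
    message = b'1234567890123456'

    # base32-encode it and strip the '=' padding to obtain the value used as
    # the TOTP secret in the documentation above.
    secret = base64.b32encode(message).rstrip(b'=')
    print(secret.decode('utf-8'))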
diff --git a/doc/source/admin/bootstrap.rst b/doc/source/admin/bootstrap.rst
index 51142b370..888ab6112 100644
--- a/doc/source/admin/bootstrap.rst
+++ b/doc/source/admin/bootstrap.rst
@@ -73,10 +73,12 @@ Verbosely, keystone can be bootstrapped with:
--bootstrap-internal-url http://localhost:5000
This will create an ``admin`` user with the ``admin`` role on the ``admin``
-project. The user will have the password specified in the command. Note that
-both the user and the project will be created in the ``default`` domain. By not
-creating an endpoint in the catalog users will need to provide endpoint
-overrides to perform additional identity operations.
+project and the system. This allows the user to generate project-scoped and
+system-scoped tokens, which ensures they have full RBAC authorization. The user
+will have the password specified in the command. Note that both the user and
+the project will be created in the ``default`` domain. If no endpoint is
+created in the catalog, users will need to provide endpoint overrides to
+perform additional identity operations.
This command will also create ``member`` and ``reader`` roles. The ``admin``
role implies the ``member`` role and ``member`` role implies the ``reader``
diff --git a/doc/source/admin/domain-specific-config.inc b/doc/source/admin/domain-specific-config.inc
index 3797e3078..2d8f9936a 100644
--- a/doc/source/admin/domain-specific-config.inc
+++ b/doc/source/admin/domain-specific-config.inc
@@ -146,6 +146,12 @@ then the same public ID will be created. This is useful if you are running
multiple keystones and want to ensure the same ID would be generated whichever
server you hit.
+.. NOTE::
+
+ In the case of the LDAP backend, the names of users and groups are not hashed.
+ As a result, they are limited to 255 characters in length. Longer names
+ will result in an error.
+
While keystone will dynamically maintain the identity mapping, including
removing entries when entities are deleted via the keystone, for those entities
in backends that are managed outside of keystone (e.g. a read-only LDAP),
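
The behaviour described above, where every server derives the same public ID, comes from hashing the domain-scoped local identifier. The snippet below illustrates that idea only and is not keystone's actual generator; the function name and the way the inputs are combined are assumptions made for the example:

.. code-block:: python

    import hashlib

    def illustrative_public_id(domain_id: str, local_id: str) -> str:
        # Hashing the (domain, local ID) pair is deterministic, so any number
        # of keystone servers derive the same fixed-length public ID without
        # coordinating with each other.
        data = f'{domain_id}:{local_id}'.encode('utf-8')
        return hashlib.sha256(data).hexdigest()

    print(illustrative_public_id('default', 'cn=alice,ou=users,dc=example,dc=org'))

LDAP user and group names bypass this hashing, which is why they are stored verbatim and run into the 255-character limit mentioned in the note above.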
diff --git a/doc/source/admin/service-api-protection.rst b/doc/source/admin/service-api-protection.rst
index 47886aeb0..249944354 100644
--- a/doc/source/admin/service-api-protection.rst
+++ b/doc/source/admin/service-api-protection.rst
@@ -31,7 +31,7 @@ custom policies.
Roles Definitions
-----------------
-The default roles provided by keystone, via ``keystone-manage boostrap``, are
+The default roles provided by keystone, via ``keystone-manage bootstrap``, are
related through role implications. The ``admin`` role implies the ``member``
role, and the ``member`` role implies the ``reader`` role. These implications
mean users with the ``admin`` role automatically have the ``member`` and
diff --git a/doc/source/admin/unified-limits.rst b/doc/source/admin/unified-limits.rst
index 1a37a4c5a..1d4498647 100644
--- a/doc/source/admin/unified-limits.rst
+++ b/doc/source/admin/unified-limits.rst
@@ -154,7 +154,7 @@ recommend extremely careful planning and understanding of various enforcement
models if you're planning on switching from one model to another in a
deployment.
-Keystone exposes a ``GET /limits-model`` endpoint that returns the enforcement
+Keystone exposes a ``GET /limits/model`` endpoint that returns the enforcement
model selected by the deployment. This allows limit information to be
discoverable and preserves interoperability between OpenStack deployments with
different enforcement models.
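
The corrected path can be exercised directly to check which enforcement model a deployment uses. A sketch assuming the ``requests`` library; the host and token are placeholders and the exact response body may differ between releases:

.. code-block:: python

    import requests

    KEYSTONE_URL = 'http://keystone.example.org'  # placeholder endpoint
    TOKEN = 'gAAAAA...'                           # a valid X-Auth-Token (placeholder)

    # GET /v3/limits/model returns the enforcement model configured for the
    # deployment, e.g. {'model': {'name': 'flat', 'description': '...'}}.
    resp = requests.get(
        f'{KEYSTONE_URL}/v3/limits/model',
        headers={'X-Auth-Token': TOKEN},
    )
    print(resp.json())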
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 819c1d9e9..41a245632 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -32,18 +32,20 @@
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
-extensions = ['sphinx.ext.coverage',
- 'sphinx.ext.viewcode',
- 'oslo_config.sphinxconfiggen',
- 'oslo_config.sphinxext',
- 'oslo_policy.sphinxpolicygen',
- 'openstackdocstheme',
- 'oslo_policy.sphinxext',
- 'sphinxcontrib.apidoc',
- 'sphinxcontrib.seqdiag',
- 'sphinx_feature_classification.support_matrix',
- 'sphinxcontrib.blockdiag'
- ]
+extensions = [
+ 'sphinx.ext.coverage',
+ 'sphinx.ext.viewcode',
+ 'sphinx.ext.todo',
+ 'oslo_config.sphinxconfiggen',
+ 'oslo_config.sphinxext',
+ 'oslo_policy.sphinxpolicygen',
+ 'openstackdocstheme',
+ 'oslo_policy.sphinxext',
+ 'sphinxcontrib.apidoc',
+ 'sphinxcontrib.seqdiag',
+ 'sphinx_feature_classification.support_matrix',
+ 'sphinxcontrib.blockdiag'
+]
blockdiag_html_image_format = 'SVG'
@@ -55,7 +57,11 @@ apidoc_output_dir = 'api'
apidoc_excluded_paths = [
'tests/*',
'tests',
- 'test']
+ 'test',
+ # TODO(gmann): with the new release of SQLAlchemy (1.4.27), the TypeDecorator
+ # used in common/sql/core.py started failing. Remove this once the issue with
+ # TypeDecorator is fixed.
+ 'common/sql/core.py']
apidoc_separate_modules = True
# sphinxcontrib.seqdiag options
diff --git a/doc/source/contributor/api_change_tutorial.rst b/doc/source/contributor/api_change_tutorial.rst
index 90ef8a33b..dc63de433 100644
--- a/doc/source/contributor/api_change_tutorial.rst
+++ b/doc/source/contributor/api_change_tutorial.rst
@@ -87,6 +87,19 @@ files, respectively (currently only the SQL driver is supported).
Changing the SQL Model and Driver
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+.. note::
+
+ The guidance below is out of date and refers to the legacy ``migrate_repo``
+ migration repository, which was removed in 21.0.0 (Yoga). Nowadays, for a
+ change like this, you would create an additive or "expand" migration in the
+ ``expand_repo`` repository along with null migrations in the
+ ``contract_repo`` and ``data_migration_repo`` repositories. For more
+ information, refer to :doc:`/contributor/database-migrations`.
+
+.. todo::
+
+ Update this section to reflect the new migration model.
+
First, you need to change the role model to include the description attribute.
Go to `keystone/assignment/role_backends/sql.py` and update it like::
diff --git a/doc/source/contributor/database-migrations.rst b/doc/source/contributor/database-migrations.rst
index 09b1e8e3d..3827ea8e6 100644
--- a/doc/source/contributor/database-migrations.rst
+++ b/doc/source/contributor/database-migrations.rst
@@ -17,10 +17,17 @@
Database Migrations
===================
+.. note::
+
+ The migration framework is currently being switched from
+ SQLAlchemy-Migrate to Alembic, meaning this information will change in the
+ near term.
+
Starting with Newton, keystone supports upgrading both with and without
downtime. In order to support this, there are three separate migration
-repositories (all under ``keystone/common/sql/``) that match the three phases
-of an upgrade (schema expansion, data migration, and schema contraction):
+repositories (all under ``keystone/common/sql/legacy_migrations``) that match
+the three phases of an upgrade (schema expansion, data migration, and schema
+contraction):
``expand_repo``
For additive schema modifications and triggers to ensure data is kept in
@@ -43,14 +50,6 @@ do in a specific phase, then include a no-op migration to simply ``pass`` (in
fact the ``001`` migration in each of these repositories is a no-op migration,
so that can be used as a template).
-.. NOTE::
-
- Since rolling upgrade support was added part way through the Newton cycle,
- some migrations had already been added to the legacy repository
- (``keystone/common/sql/migrate_repo``). This repository is now closed and
- no new migrations should be added (except for backporting of previous
- placeholders).
-
In order to support rolling upgrades, where two releases of keystone briefly
operate side-by-side using the same database without downtime, each phase of
the migration must adhere to following constraints:
@@ -79,7 +78,7 @@ Data Migration phase:
No schema changes are allowed.
Contract phase:
- Only contractive schema changes are allowed, such as dropping or altering
+ Only destructive schema changes are allowed, such as dropping or altering
columns, tables, indices, and triggers.
Data insertion, modification, and removal is not allowed.
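
As noted above, a release that has nothing to do in a given phase still ships a placeholder migration whose body is simply ``pass``. A sketch in the legacy SQLAlchemy-Migrate style used by these repositories, modelled on the placeholder files listed in the diffstat:

.. code-block:: python

    def upgrade(migrate_engine):
        # No-op placeholder: this phase has no schema or data changes for the
        # release, but the version number must still advance in lockstep with
        # the expand, data migration, and contract repositories.
        pass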
diff --git a/doc/source/contributor/how-can-i-help.rst b/doc/source/contributor/how-can-i-help.rst
index 4e37af0a1..47c2f4ad7 100644
--- a/doc/source/contributor/how-can-i-help.rst
+++ b/doc/source/contributor/how-can-i-help.rst
@@ -50,7 +50,7 @@ become part of the team:
You can also subscribe to email notifications for new bugs.
* Subscribe to the openstack-discuss@lists.openstack.org mailing list (filter on
subject tag ``[keystone]``) and join the #openstack-keystone IRC channel on
- freenode. Help answer user support questions if you or your organization has
+ OFTC. Help answer user support questions if you or your organization has
faced and solved a similar problem, or chime in on design discussions that
will affect you and your organization.
* Check out the low hanging fruit bugs, submit patches to fix them:
diff --git a/doc/source/contributor/programming-exercises.rst b/doc/source/contributor/programming-exercises.rst
index 5af344467..b51725d08 100644
--- a/doc/source/contributor/programming-exercises.rst
+++ b/doc/source/contributor/programming-exercises.rst
@@ -53,9 +53,9 @@ Refer to the :doc:`API Change tutorial <api_change_tutorial>`. In short, you wil
steps:
#. Create a SQL migration to add the parameter to the database table
- (:py:mod:`keystone.common.sql.expand_repo.versions`,
- :py:mod:`keystone.common.sql.data_migration_repo.versions`,
- :py:mod:`keystone.common.sql.contract_repo.versions`)
+ (:py:mod:`keystone.common.sql.legacy_migrations.expand_repo.versions`,
+ :py:mod:`keystone.common.sql.legacy_migrations.data_migration_repo.versions`,
+ :py:mod:`keystone.common.sql.legacy_migrations.contract_repo.versions`)
#. Add a SQL migration unit test (`keystone/tests/unit/test_sql_upgrade.py`)
diff --git a/doc/source/contributor/services.rst b/doc/source/contributor/services.rst
index bdca28b15..c1c397e30 100644
--- a/doc/source/contributor/services.rst
+++ b/doc/source/contributor/services.rst
@@ -99,7 +99,7 @@ The "default" domain
The v2.0 API has been removed as of the Queens release. While this section
references the v2.0 API, it is purely for historical reasons that clarify
- the existance of the *default* domain.
+ the existence of the *default* domain.
Domains were introduced as a v3-only feature. As a result, the v2.0 API didn't
understand the concept of domains. To allow for both versions of the Identity
diff --git a/doc/source/contributor/testing-keystone.rst b/doc/source/contributor/testing-keystone.rst
index 4f4bbd226..72575fbcb 100644
--- a/doc/source/contributor/testing-keystone.rst
+++ b/doc/source/contributor/testing-keystone.rst
@@ -138,6 +138,12 @@ Identity module.
Testing Schema Migrations
-------------------------
+.. note::
+
+ The migration framework is currently being switched from
+ SQLAlchemy-Migrate to Alembic, meaning this information will change in the
+ near term.
+
The application of schema migrations can be tested using SQLAlchemy Migrate's
built-in test runner, one migration at a time.
@@ -151,9 +157,9 @@ version control:
.. code-block:: bash
- $ python keystone/common/sql/migrate_repo/manage.py test \
- --url=sqlite:///test.db \
- --repository=keystone/common/sql/migrate_repo/
+ $ python keystone/common/sql/legacy_migrations/expand_repo/manage.py test \
+ --url=sqlite:///test.db \
+ --repository=keystone/common/sql/legacy_migrations/expand_repo/
This command references to a SQLite database (test.db) to be used. Depending on
the migration, this command alone does not make assertions as to the integrity
diff --git a/doc/source/getting-started/community.rst b/doc/source/getting-started/community.rst
index 47145adbc..4598cd8e6 100644
--- a/doc/source/getting-started/community.rst
+++ b/doc/source/getting-started/community.rst
@@ -34,10 +34,10 @@ from feature designs to documentation to testing to deployment scripts.
.. _Launchpad: https://launchpad.net/keystone
.. _wiki: https://wiki.openstack.org/
-#openstack-keystone on Freenode IRC Network
--------------------------------------------
+#openstack-keystone on OFTC IRC Network
+---------------------------------------
-You can find Keystone folks in `<irc://freenode.net/#openstack-keystone>`_.
+You can find Keystone folks in `<irc://oftc.net/#openstack-keystone>`_.
This is usually the best place to ask questions and find your way around. IRC
stands for Internet Relay Chat and it is a way to chat online in real time.
You can also ask a question and come back to the log files to read the answer
diff --git a/doc/source/install/index-obs.rst b/doc/source/install/index-obs.rst
index c67974d74..46129285a 100644
--- a/doc/source/install/index-obs.rst
+++ b/doc/source/install/index-obs.rst
@@ -12,14 +12,6 @@ both SP1 and SP2 - through the Open Build Service Cloud repository.
Explanations of configuration options and sample configuration files
are included.
-.. note::
- The Training Labs scripts provide an automated way of deploying the
- cluster described in this Installation Guide into VirtualBox or KVM
- VMs. You will need a desktop computer or a laptop with at least 8
- GB memory and 20 GB free storage running Linux, MacOS, or Windows.
- Please see the
- `OpenStack Training Labs <https://docs.openstack.org/training_labs/>`_.
-
.. warning::
This guide is a work-in-progress and is subject to updates frequently.
diff --git a/doc/source/install/index-rdo.rst b/doc/source/install/index-rdo.rst
index 6e0e3984f..dc48e890f 100644
--- a/doc/source/install/index-rdo.rst
+++ b/doc/source/install/index-rdo.rst
@@ -12,14 +12,6 @@ the RDO repository.
Explanations of configuration options and sample configuration files
are included.
-.. note::
- The Training Labs scripts provide an automated way of deploying the
- cluster described in this Installation Guide into VirtualBox or KVM
- VMs. You will need a desktop computer or a laptop with at least 8
- GB memory and 20 GB free storage running Linux, MacOS, or Windows.
- Please see the
- `OpenStack Training Labs <https://docs.openstack.org/training_labs/>`_.
-
.. warning::
This guide is a work-in-progress and is subject to updates frequently.
diff --git a/doc/source/install/index-ubuntu.rst b/doc/source/install/index-ubuntu.rst
index b3e5cb064..d1c7fe138 100644
--- a/doc/source/install/index-ubuntu.rst
+++ b/doc/source/install/index-ubuntu.rst
@@ -12,14 +12,6 @@ Ubuntu 16.04 (LTS).
Explanations of configuration options and sample configuration files
are included.
-.. note::
- The Training Labs scripts provide an automated way of deploying the
- cluster described in this Installation Guide into VirtualBox or KVM
- VMs. You will need a desktop computer or a laptop with at least 8
- GB memory and 20 GB free storage running Linux, MacOS, or Windows.
- Please see the
- `OpenStack Training Labs <https://docs.openstack.org/training_labs/>`_.
-
.. warning::
This guide is a work-in-progress and is subject to updates frequently.
diff --git a/doc/source/user/application_credentials.rst b/doc/source/user/application_credentials.rst
index eff86f7b3..5455a04e7 100644
--- a/doc/source/user/application_credentials.rst
+++ b/doc/source/user/application_credentials.rst
@@ -174,8 +174,47 @@ Access Rules
============
In addition to delegating a subset of roles to an application credential, you
-may also delegate more fine-grained access control by using access rules. For
-example, to create an application credential that is constricted to creating
+may also delegate more fine-grained access control by using access rules.
+
+.. note::
+
+ Application credentials with access rules require additional configuration
+ of each service that will use them. See below for details.
+
+If application credentials with access rules are required, an OpenStack
+service using keystonemiddleware to authenticate with keystone needs to
+define ``service_type`` in its configuration file. The following is an example
+for the cinder v3 service:
+
+.. code-block:: ini
+
+ [keystone_authtoken]
+ service_type = volumev3
+
+For other OpenStack services, their types can be obtained using the OpenStack
+client. For example:
+
+.. code-block:: console
+
+ $ openstack service list -c Name -c Type
+ +-----------+-----------+
+ | Name | Type |
+ +-----------+-----------+
+ | glance | image |
+ | cinderv3 | volumev3 |
+ | cinderv2 | volumev2 |
+ | keystone | identity |
+ | nova | compute |
+ | neutron | network |
+ | placement | placement |
+ +-----------+-----------+
+
+.. note::
+
+ Updates to a service's configuration files require a restart of that
+ service for the changes to take effect.
+
+For example, to create an application credential that is restricted to creating
servers in nova, the user can add the following access rules:
.. code-block:: console
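As a side note on how such an access rule is interpreted: each rule names a
service type, an HTTP method, and a URL path, and a request is permitted only
if some rule matches all three. The following is a minimal, illustrative
sketch of that matching; it is not keystone or keystonemiddleware code, and
the rule fields and helper name are assumptions for this example.

.. code-block:: python

    # Illustrative only: match a request against access rules of the form
    # {"service": ..., "method": ..., "path": ...}. Real enforcement is done
    # by keystonemiddleware once service_type is configured as shown above.
    import fnmatch

    def request_allowed(access_rules, service_type, method, path):
        """Return True if any access rule permits this request."""
        for rule in access_rules:
            if (rule["service"] == service_type
                    and rule["method"] == method.upper()
                    # Paths may use wildcards such as /v2.1/servers/*
                    and fnmatch.fnmatch(path, rule["path"])):
                return True
        return False

    rules = [{"service": "compute", "method": "POST", "path": "/v2.1/servers"}]
    print(request_allowed(rules, "compute", "post", "/v2.1/servers"))  # True
    print(request_allowed(rules, "image", "GET", "/v2/images"))        # False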
diff --git a/keystone/api/s3tokens.py b/keystone/api/s3tokens.py
index 73d0b399e..4a8439d69 100644
--- a/keystone/api/s3tokens.py
+++ b/keystone/api/s3tokens.py
@@ -56,7 +56,10 @@ def _calculate_signature_v4(string_to_sign, secret_key):
if len(parts) != 4 or parts[0] != b'AWS4-HMAC-SHA256':
raise exception.Unauthorized(message=_('Invalid EC2 signature.'))
scope = parts[2].split(b'/')
- if len(scope) != 4 or scope[2] != b's3' or scope[3] != b'aws4_request':
+ if len(scope) != 4 or scope[3] != b'aws4_request':
+ raise exception.Unauthorized(message=_('Invalid EC2 signature.'))
+ allowed_services = [b's3', b'iam', b'sts']
+ if scope[2] not in allowed_services:
raise exception.Unauthorized(message=_('Invalid EC2 signature.'))
def _sign(key, msg):
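For reference, the credential scope checked above is the
``<date>/<region>/<service>/aws4_request`` portion of an AWS Signature
Version 4 request, and the signing key is derived by chaining HMAC-SHA256
over those same components. A standalone sketch of that derivation follows;
it is not the keystone implementation, and the helper names are assumptions.

.. code-block:: python

    # Sketch of AWS SigV4 signing-key derivation. The third scope element is
    # the service name, which the patched check now allows to be s3, iam,
    # or sts instead of s3 only.
    import hashlib
    import hmac

    def _sign(key, msg):
        return hmac.new(key, msg.encode('utf-8'), hashlib.sha256).digest()

    def signing_key(secret_key, date, region, service):
        k_date = _sign(('AWS4' + secret_key).encode('utf-8'), date)
        k_region = _sign(k_date, region)
        k_service = _sign(k_region, service)
        return _sign(k_service, 'aws4_request')

    key = signing_key('secret', '20240101', 'us-east-1', 's3')
    print(hmac.new(key, b'string-to-sign', hashlib.sha256).hexdigest())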
diff --git a/keystone/api/users.py b/keystone/api/users.py
index 10f26bd42..3fd4e4190 100644
--- a/keystone/api/users.py
+++ b/keystone/api/users.py
@@ -13,7 +13,7 @@
# This file handles all flask-restful resources for /v3/users
import base64
-import os
+import secrets
import uuid
import flask
@@ -577,7 +577,7 @@ class UserAppCredListCreateResource(ks_flask.ResourceBase):
@staticmethod
def _generate_secret():
length = 64
- secret = os.urandom(length)
+ secret = secrets.token_bytes(length)
secret = base64.urlsafe_b64encode(secret)
secret = secret.rstrip(b'=')
secret = secret.decode('utf-8')
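The switch from ``os.urandom`` to ``secrets.token_bytes`` does not change the
output format; both return cryptographically strong random bytes, with
``secrets`` being the more explicit, purpose-built API. A standalone
equivalent of the helper above, for illustration only:

.. code-block:: python

    # Mirror of the _generate_secret() logic shown in the diff: 64 random
    # bytes, URL-safe base64, padding stripped.
    import base64
    import secrets

    def generate_secret(length=64):
        raw = secrets.token_bytes(length)
        encoded = base64.urlsafe_b64encode(raw)
        return encoded.rstrip(b'=').decode('utf-8')

    print(len(generate_secret()))  # 86 characters for 64 input bytes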
diff --git a/keystone/cmd/cli.py b/keystone/cmd/cli.py
index 6cd10cfb6..1e866d76a 100644
--- a/keystone/cmd/cli.py
+++ b/keystone/cmd/cli.py
@@ -18,9 +18,8 @@ import os
import sys
import uuid
-import migrate
from oslo_config import cfg
-from oslo_db.sqlalchemy import migration
+from oslo_db import exception as db_exception
from oslo_log import log
from oslo_serialization import jsonutils
import pbr.version
@@ -211,15 +210,6 @@ class Doctor(BaseApp):
raise SystemExit(doctor.diagnose())
-def assert_not_extension(extension):
- if extension:
- print(_("All extensions have been moved into keystone core and as "
- "such its migrations are maintained by the main keystone "
- "database control. Use the command: keystone-manage "
- "db_sync"))
- raise RuntimeError
-
-
class DbSync(BaseApp):
"""Sync the database."""
@@ -228,73 +218,91 @@ class DbSync(BaseApp):
@classmethod
def add_argument_parser(cls, subparsers):
parser = super(DbSync, cls).add_argument_parser(subparsers)
- parser.add_argument('version', default=None, nargs='?',
- help=('Migrate the database up to a specified '
- 'version. If not provided, db_sync will '
- 'migrate the database to the latest known '
- 'version. Schema downgrades are not '
- 'supported.'))
- parser.add_argument('--extension', default=None,
- help=('This is a deprecated option to migrate a '
- 'specified extension. Since extensions are '
- 'now part of the main repository, '
- 'specifying db_sync without this option '
- 'will cause all extensions to be migrated.'))
+ parser.add_argument(
+ 'version',
+ default=None,
+ nargs='?',
+ help=(
+ 'Migrate the database up to a specified version. '
+ 'If not provided, db_sync will migrate the database to the '
+ 'latest known version. '
+ 'Schema downgrades are not supported.'
+ ),
+ )
group = parser.add_mutually_exclusive_group()
- group.add_argument('--expand', default=False, action='store_true',
- help=('Expand the database schema in preparation '
- 'for data migration.'))
- group.add_argument('--migrate', default=False,
- action='store_true',
- help=('Copy all data that needs to be migrated '
- 'within the database ahead of starting the '
- 'first keystone node upgraded to the new '
- 'release. This command should be run '
- 'after the --expand command. Once the '
- '--migrate command has completed, you can '
- 'upgrade all your keystone nodes to the new '
- 'release and restart them.'))
-
- group.add_argument('--contract', default=False, action='store_true',
- help=('Remove any database tables and columns '
- 'that are no longer required. This command '
- 'should be run after all keystone nodes are '
- 'running the new release.'))
-
- group.add_argument('--check', default=False, action='store_true',
- help=('Check for outstanding database actions that '
- 'still need to be executed. This command can '
- 'be used to verify the condition of the '
- 'current database state.'))
+ group.add_argument(
+ '--expand',
+ default=False,
+ action='store_true',
+ help=(
+ 'Expand the database schema in preparation for data migration.'
+ ),
+ )
+ group.add_argument(
+ '--migrate',
+ default=False,
+ action='store_true',
+ help=(
+ 'Copy all data that needs to be migrated within the database '
+ 'ahead of starting the first keystone node upgraded to the '
+ 'new release. '
+ 'This command should be run after the --expand command. '
+ 'Once the --migrate command has completed, you can upgrade '
+ 'all your keystone nodes to the new release and restart them.'
+ ),
+ )
+ group.add_argument(
+ '--contract',
+ default=False,
+ action='store_true',
+ help=(
+ 'Remove any database tables and columns that are no longer '
+ 'required. This command should be run after all keystone '
+ 'nodes are running the new release.'
+ ),
+ )
+ group.add_argument(
+ '--check',
+ default=False,
+ action='store_true',
+ help=(
+ 'Check for outstanding database actions that still need to be '
+ 'executed. This command can be used to verify the condition '
+ 'of the current database state.'
+ ),
+ )
return parser
@classmethod
def check_db_sync_status(cls):
status = 0
try:
- expand_version = upgrades.get_db_version(repo='expand_repo')
- except migration.exception.DBMigrationError:
- LOG.info('Your database is not currently under version '
- 'control or the database is already controlled. Your '
- 'first step is to run `keystone-manage db_sync '
- '--expand`.')
+ expand_version = upgrades.get_db_version(branch='expand')
+ except db_exception.DBMigrationError:
+ LOG.info(
+ 'Your database is not currently under version '
+ 'control or the database is already controlled. Your '
+ 'first step is to run `keystone-manage db_sync --expand`.'
+ )
return 2
+
try:
migrate_version = upgrades.get_db_version(
- repo='data_migration_repo')
- except migration.exception.DBMigrationError:
+ branch='data_migration')
+ except db_exception.DBMigrationError:
migrate_version = 0
+
try:
- contract_version = upgrades.get_db_version(repo='contract_repo')
- except migration.exception.DBMigrationError:
+ contract_version = upgrades.get_db_version(branch='contract')
+ except db_exception.DBMigrationError:
contract_version = 0
- repo = migrate.versioning.repository.Repository(
- upgrades.find_repo('expand_repo'))
- migration_script_version = int(max(repo.versions.versions))
+ migration_script_version = upgrades.LATEST_VERSION
- if (contract_version > migrate_version or migrate_version >
- expand_version):
+ if (
+ contract_version > migrate_version or
+ migrate_version > expand_version
+ ):
LOG.info('Your database is out of sync. For more information '
'refer to https://docs.openstack.org/keystone/'
'latest/admin/identity-upgrading.html')
@@ -311,29 +319,31 @@ class DbSync(BaseApp):
LOG.info('Migrate version is ahead of contract. Your next '
'step is to run `keystone-manage db_sync --contract`.')
status = 4
- elif (migration_script_version == expand_version == migrate_version ==
- contract_version):
+ elif (
+ migration_script_version == expand_version == migrate_version ==
+ contract_version
+ ):
LOG.info('All db_sync commands are upgraded to the same '
'version and up-to-date.')
- LOG.info('The latest installed migration script version is: '
- '%(script)d.\nCurrent repository versions:\nExpand: '
- '%(expand)d \nMigrate: %(migrate)d\nContract: '
- '%(contract)d', {'script': migration_script_version,
- 'expand': expand_version,
- 'migrate': migrate_version,
- 'contract': contract_version})
+ LOG.info(
+ 'The latest installed migration script version is: %(script)d.\n'
+ 'Current repository versions:\n'
+ 'Expand: %(expand)d\n'
+ 'Migrate: %(migrate)d\n'
+ 'Contract: %(contract)d',
+ {
+ 'script': migration_script_version,
+ 'expand': expand_version,
+ 'migrate': migrate_version,
+ 'contract': contract_version,
+ },
+ )
return status
@staticmethod
def main():
- assert_not_extension(CONF.command.extension)
- # It is possible to run expand and migrate at the same time,
- # expand needs to run first however.
if CONF.command.check:
sys.exit(DbSync.check_db_sync_status())
- elif CONF.command.expand and CONF.command.migrate:
- upgrades.expand_schema()
- upgrades.migrate_data()
elif CONF.command.expand:
upgrades.expand_schema()
elif CONF.command.migrate:
@@ -350,20 +360,8 @@ class DbVersion(BaseApp):
name = 'db_version'
- @classmethod
- def add_argument_parser(cls, subparsers):
- parser = super(DbVersion, cls).add_argument_parser(subparsers)
- parser.add_argument('--extension', default=None,
- help=('This is a deprecated option to print the '
- 'version of a specified extension. Since '
- 'extensions are now part of the main '
- 'repository, the version of an extension is '
- 'implicit in the version of the main '
- 'repository.'))
-
@staticmethod
def main():
- assert_not_extension(CONF.command.extension)
print(upgrades.get_db_version())
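The check above enforces an ordering invariant between the three migration
branches: contract may never be ahead of migrate, and migrate may never be
ahead of expand. A small sketch of that invariant follows; beyond what is
visible in the diff, the messages and the order of the later checks are
illustrative assumptions, not keystone's exact behaviour.

.. code-block:: python

    # Sketch of the expand >= migrate >= contract invariant that
    # check_db_sync_status() verifies during a rolling upgrade.
    def db_sync_next_step(expand, migrate, contract, latest):
        if contract > migrate or migrate > expand:
            return 'database out of sync'
        if expand < latest:
            return 'run `keystone-manage db_sync --expand`'
        if migrate < expand:
            return 'run `keystone-manage db_sync --migrate`'
        if contract < migrate:
            return 'run `keystone-manage db_sync --contract`'
        return 'all db_sync commands are up to date'

    print(db_sync_next_step(expand=5, migrate=4, contract=4, latest=5))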
diff --git a/keystone/common/cache/core.py b/keystone/common/cache/core.py
index de0d8a023..fb9fc1ca8 100644
--- a/keystone/common/cache/core.py
+++ b/keystone/common/cache/core.py
@@ -14,7 +14,7 @@
"""Keystone Caching Layer Implementation."""
-import os
+import secrets
from dogpile.cache import region
from dogpile.cache import util
@@ -36,7 +36,7 @@ class RegionInvalidationManager(object):
self._region_key = self.REGION_KEY_PREFIX + region_name
def _generate_new_id(self):
- return os.urandom(10)
+ return secrets.token_bytes(10)
@property
def region_id(self):
diff --git a/keystone/common/fernet_utils.py b/keystone/common/fernet_utils.py
index 9188dfbfc..928c2488d 100644
--- a/keystone/common/fernet_utils.py
+++ b/keystone/common/fernet_utils.py
@@ -36,8 +36,8 @@ NULL_KEY = base64.urlsafe_b64encode(b'\x00' * 32)
class FernetUtils(object):
- def __init__(self, key_repository=None, max_active_keys=None,
- config_group=None):
+ def __init__(self, key_repository, max_active_keys,
+ config_group):
self.key_repository = key_repository
self.max_active_keys = max_active_keys
self.config_group = config_group
diff --git a/keystone/common/policies/application_credential.py b/keystone/common/policies/application_credential.py
index cebb85b02..bae998a39 100644
--- a/keystone/common/policies/application_credential.py
+++ b/keystone/common/policies/application_credential.py
@@ -18,23 +18,30 @@ from keystone.common.policies import base
collection_path = '/v3/users/{user_id}/application_credentials'
resource_path = collection_path + '/{application_credential_id}'
+DEPRECATED_REASON = (
+ "The application credential API is now aware of system scope and default "
+ "roles."
+)
+
deprecated_list_application_credentials_for_user = policy.DeprecatedRule(
name=base.IDENTITY % 'list_application_credentials',
- check_str=base.RULE_ADMIN_OR_OWNER
+ check_str=base.RULE_ADMIN_OR_OWNER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.TRAIN
)
deprecated_get_application_credentials_for_user = policy.DeprecatedRule(
- name=base.IDENTITY % 'get_application_credentials',
- check_str=base.RULE_ADMIN_OR_OWNER
+ name=base.IDENTITY % 'get_application_credential',
+ check_str=base.RULE_ADMIN_OR_OWNER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.TRAIN
)
deprecated_delete_application_credentials_for_user = policy.DeprecatedRule(
- name=base.IDENTITY % 'delete_application_credentials',
- check_str=base.RULE_ADMIN_OR_OWNER
+ name=base.IDENTITY % 'delete_application_credential',
+ check_str=base.RULE_ADMIN_OR_OWNER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.TRAIN
)
-DEPRECATED_REASON = (
- "The application credential API is now aware of system scope and default "
- "roles."
-)
application_credential_policies = [
policy.DocumentedRuleDefault(
@@ -46,9 +53,7 @@ application_credential_policies = [
'method': 'GET'},
{'path': resource_path,
'method': 'HEAD'}],
- deprecated_rule=deprecated_get_application_credentials_for_user,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.TRAIN),
+ deprecated_rule=deprecated_get_application_credentials_for_user),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'list_application_credentials',
check_str=base.RULE_SYSTEM_READER_OR_OWNER,
@@ -58,9 +63,7 @@ application_credential_policies = [
'method': 'GET'},
{'path': collection_path,
'method': 'HEAD'}],
- deprecated_rule=deprecated_list_application_credentials_for_user,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.TRAIN),
+ deprecated_rule=deprecated_list_application_credentials_for_user),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'create_application_credential',
check_str=base.RULE_OWNER,
@@ -75,9 +78,7 @@ application_credential_policies = [
description='Delete an application credential.',
operations=[{'path': resource_path,
'method': 'DELETE'}],
- deprecated_rule=deprecated_delete_application_credentials_for_user,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.TRAIN)
+ deprecated_rule=deprecated_delete_application_credentials_for_user)
]
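The same refactoring repeats across the policy modules that follow: the
``deprecated_reason`` and ``deprecated_since`` metadata move onto each
``DeprecatedRule`` so the ``DocumentedRuleDefault`` only needs
``deprecated_rule=``. A minimal, self-contained sketch of that oslo.policy
pattern (the rule name and check strings here are placeholders, not
keystone's):

.. code-block:: python

    # Deprecation metadata lives on the DeprecatedRule; the current default
    # only references it. Requires oslo.policy and oslo.log.
    from oslo_log import versionutils
    from oslo_policy import policy

    DEPRECATED_REASON = (
        "The example API is now aware of system scope and default roles."
    )

    deprecated_get_example = policy.DeprecatedRule(
        name='identity:get_example',            # placeholder rule name
        check_str='rule:admin_required',
        deprecated_reason=DEPRECATED_REASON,
        deprecated_since=versionutils.deprecated.TRAIN,
    )

    example_policies = [
        policy.DocumentedRuleDefault(
            name='identity:get_example',
            check_str='role:reader and system_scope:all',
            scope_types=['system'],
            description='Show example resource details.',
            operations=[{'path': '/v3/examples/{example_id}',
                         'method': 'GET'}],
            deprecated_rule=deprecated_get_example,
        ),
    ]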
diff --git a/keystone/common/policies/consumer.py b/keystone/common/policies/consumer.py
index bf9a6bdd7..7931bf05b 100644
--- a/keystone/common/policies/consumer.py
+++ b/keystone/common/policies/consumer.py
@@ -15,30 +15,41 @@ from oslo_policy import policy
from keystone.common.policies import base
+DEPRECATED_REASON = (
+ "The OAUTH1 consumer API is now aware of system scope and default roles."
+)
+
deprecated_get_consumer = policy.DeprecatedRule(
name=base.IDENTITY % 'get_consumer',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.TRAIN
)
deprecated_list_consumers = policy.DeprecatedRule(
name=base.IDENTITY % 'list_consumers',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.TRAIN
)
deprecated_create_consumer = policy.DeprecatedRule(
name=base.IDENTITY % 'create_consumer',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.TRAIN
)
deprecated_update_consumer = policy.DeprecatedRule(
name=base.IDENTITY % 'update_consumer',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.TRAIN
)
deprecated_delete_consumer = policy.DeprecatedRule(
name=base.IDENTITY % 'delete_consumer',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.TRAIN
)
-DEPRECATED_REASON = (
- "The OAUTH1 consumer API is now aware of system scope and default roles."
-)
consumer_policies = [
policy.DocumentedRuleDefault(
@@ -48,9 +59,7 @@ consumer_policies = [
description='Show OAUTH1 consumer details.',
operations=[{'path': '/v3/OS-OAUTH1/consumers/{consumer_id}',
'method': 'GET'}],
- deprecated_rule=deprecated_get_consumer,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.TRAIN),
+ deprecated_rule=deprecated_get_consumer),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'list_consumers',
check_str=base.SYSTEM_READER,
@@ -58,9 +67,7 @@ consumer_policies = [
description='List OAUTH1 consumers.',
operations=[{'path': '/v3/OS-OAUTH1/consumers',
'method': 'GET'}],
- deprecated_rule=deprecated_list_consumers,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.TRAIN),
+ deprecated_rule=deprecated_list_consumers),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'create_consumer',
check_str=base.SYSTEM_ADMIN,
@@ -68,9 +75,7 @@ consumer_policies = [
description='Create OAUTH1 consumer.',
operations=[{'path': '/v3/OS-OAUTH1/consumers',
'method': 'POST'}],
- deprecated_rule=deprecated_create_consumer,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.TRAIN),
+ deprecated_rule=deprecated_create_consumer),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'update_consumer',
check_str=base.SYSTEM_ADMIN,
@@ -78,9 +83,7 @@ consumer_policies = [
description='Update OAUTH1 consumer.',
operations=[{'path': '/v3/OS-OAUTH1/consumers/{consumer_id}',
'method': 'PATCH'}],
- deprecated_rule=deprecated_update_consumer,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.TRAIN),
+ deprecated_rule=deprecated_update_consumer),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'delete_consumer',
check_str=base.SYSTEM_ADMIN,
@@ -88,9 +91,7 @@ consumer_policies = [
description='Delete OAUTH1 consumer.',
operations=[{'path': '/v3/OS-OAUTH1/consumers/{consumer_id}',
'method': 'DELETE'}],
- deprecated_rule=deprecated_delete_consumer,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.TRAIN),
+ deprecated_rule=deprecated_delete_consumer),
]
diff --git a/keystone/common/policies/credential.py b/keystone/common/policies/credential.py
index 52a9fa808..675e31875 100644
--- a/keystone/common/policies/credential.py
+++ b/keystone/common/policies/credential.py
@@ -21,23 +21,33 @@ DEPRECATED_REASON = (
deprecated_get_credential = policy.DeprecatedRule(
name=base.IDENTITY % 'get_credential',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
deprecated_list_credentials = policy.DeprecatedRule(
name=base.IDENTITY % 'list_credentials',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
deprecated_create_credential = policy.DeprecatedRule(
name=base.IDENTITY % 'create_credential',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
deprecated_update_credential = policy.DeprecatedRule(
name=base.IDENTITY % 'update_credential',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
deprecated_delete_credential = policy.DeprecatedRule(
name=base.IDENTITY % 'delete_credential',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
@@ -50,8 +60,6 @@ credential_policies = [
operations=[{'path': '/v3/credentials/{credential_id}',
'method': 'GET'}],
deprecated_rule=deprecated_get_credential,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN
),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'list_credentials',
@@ -61,8 +69,6 @@ credential_policies = [
operations=[{'path': '/v3/credentials',
'method': 'GET'}],
deprecated_rule=deprecated_list_credentials,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN
),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'create_credential',
@@ -72,8 +78,6 @@ credential_policies = [
operations=[{'path': '/v3/credentials',
'method': 'POST'}],
deprecated_rule=deprecated_create_credential,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN
),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'update_credential',
@@ -83,8 +87,6 @@ credential_policies = [
operations=[{'path': '/v3/credentials/{credential_id}',
'method': 'PATCH'}],
deprecated_rule=deprecated_update_credential,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN
),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'delete_credential',
@@ -94,8 +96,6 @@ credential_policies = [
operations=[{'path': '/v3/credentials/{credential_id}',
'method': 'DELETE'}],
deprecated_rule=deprecated_delete_credential,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN
)
]
diff --git a/keystone/common/policies/domain.py b/keystone/common/policies/domain.py
index 7d3e3d788..cd743ee90 100644
--- a/keystone/common/policies/domain.py
+++ b/keystone/common/policies/domain.py
@@ -21,23 +21,33 @@ DEPRECATED_REASON = (
deprecated_list_domains = policy.DeprecatedRule(
name=base.IDENTITY % 'list_domains',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
deprecated_get_domain = policy.DeprecatedRule(
name=base.IDENTITY % 'get_domain',
- check_str=base.RULE_ADMIN_OR_TARGET_DOMAIN
+ check_str=base.RULE_ADMIN_OR_TARGET_DOMAIN,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
deprecated_update_domain = policy.DeprecatedRule(
name=base.IDENTITY % 'update_domain',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
deprecated_create_domain = policy.DeprecatedRule(
name=base.IDENTITY % 'create_domain',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
deprecated_delete_domain = policy.DeprecatedRule(
name=base.IDENTITY % 'delete_domain',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
SYSTEM_USER_OR_DOMAIN_USER_OR_PROJECT_USER = (
'(role:reader and system_scope:all) or '
@@ -56,9 +66,7 @@ domain_policies = [
description='Show domain details.',
operations=[{'path': '/v3/domains/{domain_id}',
'method': 'GET'}],
- deprecated_rule=deprecated_get_domain,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN),
+ deprecated_rule=deprecated_get_domain),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'list_domains',
check_str=base.SYSTEM_READER,
@@ -66,9 +74,7 @@ domain_policies = [
description='List domains.',
operations=[{'path': '/v3/domains',
'method': 'GET'}],
- deprecated_rule=deprecated_list_domains,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN),
+ deprecated_rule=deprecated_list_domains),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'create_domain',
check_str=base.SYSTEM_ADMIN,
@@ -76,9 +82,7 @@ domain_policies = [
description='Create domain.',
operations=[{'path': '/v3/domains',
'method': 'POST'}],
- deprecated_rule=deprecated_create_domain,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN),
+ deprecated_rule=deprecated_create_domain),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'update_domain',
check_str=base.SYSTEM_ADMIN,
@@ -86,9 +90,7 @@ domain_policies = [
description='Update domain.',
operations=[{'path': '/v3/domains/{domain_id}',
'method': 'PATCH'}],
- deprecated_rule=deprecated_update_domain,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN),
+ deprecated_rule=deprecated_update_domain),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'delete_domain',
check_str=base.SYSTEM_ADMIN,
@@ -96,9 +98,7 @@ domain_policies = [
description='Delete domain.',
operations=[{'path': '/v3/domains/{domain_id}',
'method': 'DELETE'}],
- deprecated_rule=deprecated_delete_domain,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN),
+ deprecated_rule=deprecated_delete_domain),
]
diff --git a/keystone/common/policies/domain_config.py b/keystone/common/policies/domain_config.py
index a157f0d5c..b1c8fdab5 100644
--- a/keystone/common/policies/domain_config.py
+++ b/keystone/common/policies/domain_config.py
@@ -15,36 +15,46 @@ from oslo_policy import policy
from keystone.common.policies import base
+DEPRECATED_REASON = (
+ "The domain config API is now aware of system scope and default roles."
+)
+
deprecated_get_domain_config = policy.DeprecatedRule(
name=base.IDENTITY % 'get_domain_config',
check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.TRAIN
)
deprecated_get_domain_config_default = policy.DeprecatedRule(
name=base.IDENTITY % 'get_domain_config_default',
check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.TRAIN
)
deprecated_create_domain_config = policy.DeprecatedRule(
name=base.IDENTITY % 'create_domain_config',
check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.TRAIN
)
deprecated_update_domain_config = policy.DeprecatedRule(
name=base.IDENTITY % 'update_domain_config',
check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.TRAIN
)
deprecated_delete_domain_config = policy.DeprecatedRule(
name=base.IDENTITY % 'delete_domain_config',
check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.TRAIN
)
-DEPRECATED_REASON = (
- "The domain config API is now aware of system scope and default roles."
-)
-
domain_config_policies = [
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'create_domain_config',
@@ -65,9 +75,7 @@ domain_config_policies = [
'method': 'PUT'
}
],
- deprecated_rule=deprecated_create_domain_config,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.TRAIN
+ deprecated_rule=deprecated_create_domain_config
),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'get_domain_config',
@@ -103,8 +111,6 @@ domain_config_policies = [
}
],
deprecated_rule=deprecated_get_domain_config,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.TRAIN
),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'get_security_compliance_domain_config',
@@ -124,12 +130,12 @@ domain_config_policies = [
'method': 'HEAD'
},
{
- 'path': ('v3/domains/{domain_id}/config/'
+ 'path': ('/v3/domains/{domain_id}/config/'
'security_compliance/{option}'),
'method': 'GET'
},
{
- 'path': ('v3/domains/{domain_id}/config/'
+ 'path': ('/v3/domains/{domain_id}/config/'
'security_compliance/{option}'),
'method': 'HEAD'
}
@@ -156,8 +162,6 @@ domain_config_policies = [
}
],
deprecated_rule=deprecated_update_domain_config,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.TRAIN
),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'delete_domain_config',
@@ -180,8 +184,6 @@ domain_config_policies = [
}
],
deprecated_rule=deprecated_delete_domain_config,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.TRAIN
),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'get_domain_config_default',
@@ -216,8 +218,6 @@ domain_config_policies = [
}
],
deprecated_rule=deprecated_get_domain_config_default,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.TRAIN
)
]
diff --git a/keystone/common/policies/ec2_credential.py b/keystone/common/policies/ec2_credential.py
index 25e65b532..9e5270922 100644
--- a/keystone/common/policies/ec2_credential.py
+++ b/keystone/common/policies/ec2_credential.py
@@ -15,26 +15,35 @@ from oslo_policy import policy
from keystone.common.policies import base
+DEPRECATED_REASON = (
+ "The EC2 credential API is now aware of system scope and default roles."
+)
+
deprecated_ec2_get_credential = policy.DeprecatedRule(
name=base.IDENTITY % 'ec2_get_credential',
- check_str=base.RULE_ADMIN_OR_CREDENTIAL_OWNER
+ check_str=base.RULE_ADMIN_OR_CREDENTIAL_OWNER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.TRAIN
)
deprecated_ec2_list_credentials = policy.DeprecatedRule(
name=base.IDENTITY % 'ec2_list_credentials',
- check_str=base.RULE_ADMIN_OR_OWNER
+ check_str=base.RULE_ADMIN_OR_OWNER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.TRAIN
)
-deprecated_ec2_create_credentials = policy.DeprecatedRule(
- name=base.IDENTITY % 'ec2_create_credentials',
- check_str=base.RULE_ADMIN_OR_OWNER
+deprecated_ec2_create_credential = policy.DeprecatedRule(
+ name=base.IDENTITY % 'ec2_create_credential',
+ check_str=base.RULE_ADMIN_OR_OWNER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.TRAIN
)
-deprecated_ec2_delete_credentials = policy.DeprecatedRule(
- name=base.IDENTITY % 'ec2_delete_credentials',
- check_str=base.RULE_ADMIN_OR_CREDENTIAL_OWNER
+deprecated_ec2_delete_credential = policy.DeprecatedRule(
+ name=base.IDENTITY % 'ec2_delete_credential',
+ check_str=base.RULE_ADMIN_OR_CREDENTIAL_OWNER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.TRAIN
)
-DEPRECATED_REASON = (
- "The EC2 credential API is now aware of system scope and default roles."
-)
ec2_credential_policies = [
policy.DocumentedRuleDefault(
@@ -45,9 +54,7 @@ ec2_credential_policies = [
operations=[{'path': ('/v3/users/{user_id}/credentials/OS-EC2/'
'{credential_id}'),
'method': 'GET'}],
- deprecated_rule=deprecated_ec2_get_credential,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.TRAIN
+ deprecated_rule=deprecated_ec2_get_credential
),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'ec2_list_credentials',
@@ -57,8 +64,6 @@ ec2_credential_policies = [
operations=[{'path': '/v3/users/{user_id}/credentials/OS-EC2',
'method': 'GET'}],
deprecated_rule=deprecated_ec2_list_credentials,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.TRAIN
),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'ec2_create_credential',
@@ -67,9 +72,7 @@ ec2_credential_policies = [
description='Create ec2 credential.',
operations=[{'path': '/v3/users/{user_id}/credentials/OS-EC2',
'method': 'POST'}],
- deprecated_rule=deprecated_ec2_create_credentials,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.TRAIN
+ deprecated_rule=deprecated_ec2_create_credential,
),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'ec2_delete_credential',
@@ -79,9 +82,7 @@ ec2_credential_policies = [
operations=[{'path': ('/v3/users/{user_id}/credentials/OS-EC2/'
'{credential_id}'),
'method': 'DELETE'}],
- deprecated_rule=deprecated_ec2_delete_credentials,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.TRAIN
+ deprecated_rule=deprecated_ec2_delete_credential,
)
]
diff --git a/keystone/common/policies/endpoint.py b/keystone/common/policies/endpoint.py
index b99a40e24..78582496f 100644
--- a/keystone/common/policies/endpoint.py
+++ b/keystone/common/policies/endpoint.py
@@ -15,24 +15,34 @@ from oslo_policy import policy
from keystone.common.policies import base
+DEPRECATED_REASON = (
+ "The endpoint API is now aware of system scope and default roles."
+)
+
deprecated_get_endpoint = policy.DeprecatedRule(
name=base.IDENTITY % 'get_endpoint', check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
deprecated_list_endpoints = policy.DeprecatedRule(
name=base.IDENTITY % 'list_endpoints', check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
deprecated_update_endpoint = policy.DeprecatedRule(
name=base.IDENTITY % 'update_endpoint', check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
deprecated_create_endpoint = policy.DeprecatedRule(
name=base.IDENTITY % 'create_endpoint', check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
deprecated_delete_endpoint = policy.DeprecatedRule(
name=base.IDENTITY % 'delete_endpoint', check_str=base.RULE_ADMIN_REQUIRED,
-)
-
-DEPRECATED_REASON = (
- "The endpoint API is now aware of system scope and default roles."
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
@@ -44,9 +54,7 @@ endpoint_policies = [
description='Show endpoint details.',
operations=[{'path': '/v3/endpoints/{endpoint_id}',
'method': 'GET'}],
- deprecated_rule=deprecated_get_endpoint,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN),
+ deprecated_rule=deprecated_get_endpoint),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'list_endpoints',
check_str=base.SYSTEM_READER,
@@ -54,9 +62,7 @@ endpoint_policies = [
description='List endpoints.',
operations=[{'path': '/v3/endpoints',
'method': 'GET'}],
- deprecated_rule=deprecated_list_endpoints,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN),
+ deprecated_rule=deprecated_list_endpoints),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'create_endpoint',
check_str=base.SYSTEM_ADMIN,
@@ -64,9 +70,7 @@ endpoint_policies = [
description='Create endpoint.',
operations=[{'path': '/v3/endpoints',
'method': 'POST'}],
- deprecated_rule=deprecated_create_endpoint,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN),
+ deprecated_rule=deprecated_create_endpoint),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'update_endpoint',
check_str=base.SYSTEM_ADMIN,
@@ -74,9 +78,7 @@ endpoint_policies = [
description='Update endpoint.',
operations=[{'path': '/v3/endpoints/{endpoint_id}',
'method': 'PATCH'}],
- deprecated_rule=deprecated_update_endpoint,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN),
+ deprecated_rule=deprecated_update_endpoint),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'delete_endpoint',
check_str=base.SYSTEM_ADMIN,
@@ -84,9 +86,7 @@ endpoint_policies = [
description='Delete endpoint.',
operations=[{'path': '/v3/endpoints/{endpoint_id}',
'method': 'DELETE'}],
- deprecated_rule=deprecated_delete_endpoint,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN)
+ deprecated_rule=deprecated_delete_endpoint)
]
diff --git a/keystone/common/policies/endpoint_group.py b/keystone/common/policies/endpoint_group.py
index 691a6fe28..741e0b7ca 100644
--- a/keystone/common/policies/endpoint_group.py
+++ b/keystone/common/policies/endpoint_group.py
@@ -15,64 +15,85 @@ from oslo_policy import policy
from keystone.common.policies import base
+DEPRECATED_REASON = (
+ "The endpoint groups API is now aware of system scope and default roles."
+)
+
deprecated_list_endpoint_groups = policy.DeprecatedRule(
name=base.IDENTITY % 'list_endpoint_groups',
check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.TRAIN
)
deprecated_get_endpoint_group = policy.DeprecatedRule(
name=base.IDENTITY % 'get_endpoint_group',
check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.TRAIN
)
deprecated_list_projects_assoc_with_endpoint_group = policy.DeprecatedRule(
name=base.IDENTITY % 'list_projects_associated_with_endpoint_group',
check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.TRAIN
)
deprecated_list_endpoints_assoc_with_endpoint_group = policy.DeprecatedRule(
name=base.IDENTITY % 'list_endpoints_associated_with_endpoint_group',
check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.TRAIN
)
deprecated_get_endpoint_group_in_project = policy.DeprecatedRule(
name=base.IDENTITY % 'get_endpoint_group_in_project',
check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.TRAIN
)
deprecated_list_endpoint_groups_for_project = policy.DeprecatedRule(
name=base.IDENTITY % 'list_endpoint_groups_for_project',
check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.TRAIN
)
deprecated_create_endpoint_group = policy.DeprecatedRule(
name=base.IDENTITY % 'create_endpoint_group',
check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.TRAIN
)
deprecated_update_endpoint_group = policy.DeprecatedRule(
name=base.IDENTITY % 'update_endpoint_group',
check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.TRAIN
)
deprecated_delete_endpoint_group = policy.DeprecatedRule(
name=base.IDENTITY % 'delete_endpoint_group',
check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.TRAIN
)
deprecated_add_endpoint_group_to_project = policy.DeprecatedRule(
name=base.IDENTITY % 'add_endpoint_group_to_project',
check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.TRAIN
)
deprecated_remove_endpoint_group_from_project = policy.DeprecatedRule(
name=base.IDENTITY % 'remove_endpoint_group_from_project',
check_str=base.RULE_ADMIN_REQUIRED,
-)
-
-
-DEPRECATED_REASON = (
- "The endpoint groups API is now aware of system scope and default roles."
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.TRAIN
)
@@ -84,9 +105,7 @@ group_endpoint_policies = [
description='Create endpoint group.',
operations=[{'path': '/v3/OS-EP-FILTER/endpoint_groups',
'method': 'POST'}],
- deprecated_rule=deprecated_create_endpoint_group,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.TRAIN),
+ deprecated_rule=deprecated_create_endpoint_group),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'list_endpoint_groups',
check_str=base.SYSTEM_READER,
@@ -94,9 +113,7 @@ group_endpoint_policies = [
description='List endpoint groups.',
operations=[{'path': '/v3/OS-EP-FILTER/endpoint_groups',
'method': 'GET'}],
- deprecated_rule=deprecated_list_endpoint_groups,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.TRAIN),
+ deprecated_rule=deprecated_list_endpoint_groups),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'get_endpoint_group',
check_str=base.SYSTEM_READER,
@@ -108,9 +125,7 @@ group_endpoint_policies = [
{'path': ('/v3/OS-EP-FILTER/endpoint_groups/'
'{endpoint_group_id}'),
'method': 'HEAD'}],
- deprecated_rule=deprecated_get_endpoint_group,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.TRAIN),
+ deprecated_rule=deprecated_get_endpoint_group),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'update_endpoint_group',
check_str=base.SYSTEM_ADMIN,
@@ -119,9 +134,7 @@ group_endpoint_policies = [
operations=[{'path': ('/v3/OS-EP-FILTER/endpoint_groups/'
'{endpoint_group_id}'),
'method': 'PATCH'}],
- deprecated_rule=deprecated_update_endpoint_group,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.TRAIN),
+ deprecated_rule=deprecated_update_endpoint_group),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'delete_endpoint_group',
check_str=base.SYSTEM_ADMIN,
@@ -130,9 +143,7 @@ group_endpoint_policies = [
operations=[{'path': ('/v3/OS-EP-FILTER/endpoint_groups/'
'{endpoint_group_id}'),
'method': 'DELETE'}],
- deprecated_rule=deprecated_delete_endpoint_group,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.TRAIN),
+ deprecated_rule=deprecated_delete_endpoint_group),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'list_projects_associated_with_endpoint_group',
check_str=base.SYSTEM_READER,
@@ -142,9 +153,7 @@ group_endpoint_policies = [
operations=[{'path': ('/v3/OS-EP-FILTER/endpoint_groups/'
'{endpoint_group_id}/projects'),
'method': 'GET'}],
- deprecated_rule=deprecated_list_projects_assoc_with_endpoint_group,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.TRAIN),
+ deprecated_rule=deprecated_list_projects_assoc_with_endpoint_group),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'list_endpoints_associated_with_endpoint_group',
check_str=base.SYSTEM_READER,
@@ -153,9 +162,7 @@ group_endpoint_policies = [
operations=[{'path': ('/v3/OS-EP-FILTER/endpoint_groups/'
'{endpoint_group_id}/endpoints'),
'method': 'GET'}],
- deprecated_rule=deprecated_list_endpoints_assoc_with_endpoint_group,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.TRAIN),
+ deprecated_rule=deprecated_list_endpoints_assoc_with_endpoint_group),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'get_endpoint_group_in_project',
check_str=base.SYSTEM_READER,
@@ -168,9 +175,7 @@ group_endpoint_policies = [
{'path': ('/v3/OS-EP-FILTER/endpoint_groups/'
'{endpoint_group_id}/projects/{project_id}'),
'method': 'HEAD'}],
- deprecated_rule=deprecated_get_endpoint_group_in_project,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.TRAIN),
+ deprecated_rule=deprecated_get_endpoint_group_in_project),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'list_endpoint_groups_for_project',
check_str=base.SYSTEM_READER,
@@ -179,9 +184,7 @@ group_endpoint_policies = [
operations=[{'path': ('/v3/OS-EP-FILTER/projects/{project_id}/'
'endpoint_groups'),
'method': 'GET'}],
- deprecated_rule=deprecated_list_endpoint_groups_for_project,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.TRAIN),
+ deprecated_rule=deprecated_list_endpoint_groups_for_project),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'add_endpoint_group_to_project',
check_str=base.SYSTEM_ADMIN,
@@ -190,9 +193,7 @@ group_endpoint_policies = [
operations=[{'path': ('/v3/OS-EP-FILTER/endpoint_groups/'
'{endpoint_group_id}/projects/{project_id}'),
'method': 'PUT'}],
- deprecated_rule=deprecated_add_endpoint_group_to_project,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.TRAIN),
+ deprecated_rule=deprecated_add_endpoint_group_to_project),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'remove_endpoint_group_from_project',
check_str=base.SYSTEM_ADMIN,
@@ -201,9 +202,7 @@ group_endpoint_policies = [
operations=[{'path': ('/v3/OS-EP-FILTER/endpoint_groups/'
'{endpoint_group_id}/projects/{project_id}'),
'method': 'DELETE'}],
- deprecated_rule=deprecated_remove_endpoint_group_from_project,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.TRAIN)
+ deprecated_rule=deprecated_remove_endpoint_group_from_project)
]
diff --git a/keystone/common/policies/grant.py b/keystone/common/policies/grant.py
index 09ef1c983..0e1b92876 100644
--- a/keystone/common/policies/grant.py
+++ b/keystone/common/policies/grant.py
@@ -66,54 +66,79 @@ SYSTEM_ADMIN_OR_DOMAIN_ADMIN = (
'(' + DOMAIN_MATCHES_ROLE + ')'
)
+DEPRECATED_REASON = (
+ "The assignment API is now aware of system scope and default roles."
+)
+
deprecated_check_system_grant_for_user = policy.DeprecatedRule(
name=base.IDENTITY % 'check_system_grant_for_user',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
deprecated_list_system_grants_for_user = policy.DeprecatedRule(
name=base.IDENTITY % 'list_system_grants_for_user',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
deprecated_create_system_grant_for_user = policy.DeprecatedRule(
name=base.IDENTITY % 'create_system_grant_for_user',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
deprecated_revoke_system_grant_for_user = policy.DeprecatedRule(
name=base.IDENTITY % 'revoke_system_grant_for_user',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
deprecated_check_system_grant_for_group = policy.DeprecatedRule(
name=base.IDENTITY % 'check_system_grant_for_group',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
deprecated_list_system_grants_for_group = policy.DeprecatedRule(
name=base.IDENTITY % 'list_system_grants_for_group',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
deprecated_create_system_grant_for_group = policy.DeprecatedRule(
name=base.IDENTITY % 'create_system_grant_for_group',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
deprecated_revoke_system_grant_for_group = policy.DeprecatedRule(
name=base.IDENTITY % 'revoke_system_grant_for_group',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
deprecated_list_grants = policy.DeprecatedRule(
- name=base.IDENTITY % 'list_grants', check_str=base.RULE_ADMIN_REQUIRED
+ name=base.IDENTITY % 'list_grants', check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
deprecated_check_grant = policy.DeprecatedRule(
- name=base.IDENTITY % 'check_grant', check_str=base.RULE_ADMIN_REQUIRED
+ name=base.IDENTITY % 'check_grant', check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
deprecated_create_grant = policy.DeprecatedRule(
- name=base.IDENTITY % 'create_grant', check_str=base.RULE_ADMIN_REQUIRED
+ name=base.IDENTITY % 'create_grant', check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
deprecated_revoke_grant = policy.DeprecatedRule(
- name=base.IDENTITY % 'revoke_grant', check_str=base.RULE_ADMIN_REQUIRED
+ name=base.IDENTITY % 'revoke_grant', check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
-DEPRECATED_REASON = (
- "The assignment API is now aware of system scope and default roles."
-)
resource_paths = [
'/projects/{project_id}/users/{user_id}/roles/{role_id}',
@@ -167,9 +192,7 @@ grant_policies = [
'are inherited to all projects in the subtree, if '
'applicable.'),
operations=list_operations(resource_paths, ['HEAD', 'GET']),
- deprecated_rule=deprecated_check_grant,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN),
+ deprecated_rule=deprecated_check_grant),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'list_grants',
check_str=SYSTEM_READER_OR_DOMAIN_READER_LIST,
@@ -181,9 +204,7 @@ grant_policies = [
'domains, where grants are inherited to all projects '
'in the specified domain.'),
operations=list_grants_operations,
- deprecated_rule=deprecated_list_grants,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN),
+ deprecated_rule=deprecated_list_grants),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'create_grant',
check_str=SYSTEM_ADMIN_OR_DOMAIN_ADMIN,
@@ -195,9 +216,7 @@ grant_policies = [
'are inherited to all projects in the subtree, if '
'applicable.'),
operations=list_operations(resource_paths, ['PUT']),
- deprecated_rule=deprecated_create_grant,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN),
+ deprecated_rule=deprecated_create_grant),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'revoke_grant',
check_str=SYSTEM_ADMIN_OR_DOMAIN_ADMIN,
@@ -211,9 +230,7 @@ grant_policies = [
'the target would remove the logical effect of '
'inheriting it to the target\'s projects subtree.'),
operations=list_operations(resource_paths, ['DELETE']),
- deprecated_rule=deprecated_revoke_grant,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN),
+ deprecated_rule=deprecated_revoke_grant),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'list_system_grants_for_user',
check_str=base.SYSTEM_READER,
@@ -226,8 +243,6 @@ grant_policies = [
}
],
deprecated_rule=deprecated_list_system_grants_for_user,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN
),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'check_system_grant_for_user',
@@ -241,8 +256,6 @@ grant_policies = [
}
],
deprecated_rule=deprecated_check_system_grant_for_user,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN
),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'create_system_grant_for_user',
@@ -256,8 +269,6 @@ grant_policies = [
}
],
deprecated_rule=deprecated_create_system_grant_for_user,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN
),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'revoke_system_grant_for_user',
@@ -271,8 +282,6 @@ grant_policies = [
}
],
deprecated_rule=deprecated_revoke_system_grant_for_user,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN
),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'list_system_grants_for_group',
@@ -286,8 +295,6 @@ grant_policies = [
}
],
deprecated_rule=deprecated_list_system_grants_for_group,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN
),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'check_system_grant_for_group',
@@ -301,8 +308,6 @@ grant_policies = [
}
],
deprecated_rule=deprecated_check_system_grant_for_group,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN
),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'create_system_grant_for_group',
@@ -316,8 +321,6 @@ grant_policies = [
}
],
deprecated_rule=deprecated_create_system_grant_for_group,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN
),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'revoke_system_grant_for_group',
@@ -331,8 +334,6 @@ grant_policies = [
}
],
deprecated_rule=deprecated_revoke_system_grant_for_group,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN
)
]
diff --git a/keystone/common/policies/group.py b/keystone/common/policies/group.py
index d33da9289..0106bad6f 100644
--- a/keystone/common/policies/group.py
+++ b/keystone/common/policies/group.py
@@ -51,43 +51,63 @@ DEPRECATED_REASON = (
deprecated_get_group = policy.DeprecatedRule(
name=base.IDENTITY % 'get_group',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
deprecated_list_groups = policy.DeprecatedRule(
name=base.IDENTITY % 'list_groups',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
deprecated_list_groups_for_user = policy.DeprecatedRule(
name=base.IDENTITY % 'list_groups_for_user',
- check_str=base.RULE_ADMIN_OR_OWNER
+ check_str=base.RULE_ADMIN_OR_OWNER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
deprecated_list_users_in_group = policy.DeprecatedRule(
name=base.IDENTITY % 'list_users_in_group',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
deprecated_check_user_in_group = policy.DeprecatedRule(
name=base.IDENTITY % 'check_user_in_group',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
deprecated_create_group = policy.DeprecatedRule(
name=base.IDENTITY % 'create_group',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
deprecated_update_group = policy.DeprecatedRule(
name=base.IDENTITY % 'update_group',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
deprecated_delete_group = policy.DeprecatedRule(
name=base.IDENTITY % 'delete_group',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
deprecated_remove_user_from_group = policy.DeprecatedRule(
name=base.IDENTITY % 'remove_user_from_group',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
deprecated_add_user_to_group = policy.DeprecatedRule(
name=base.IDENTITY % 'add_user_to_group',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
group_policies = [
@@ -100,9 +120,7 @@ group_policies = [
'method': 'GET'},
{'path': '/v3/groups/{group_id}',
'method': 'HEAD'}],
- deprecated_rule=deprecated_get_group,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN),
+ deprecated_rule=deprecated_get_group),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'list_groups',
check_str=SYSTEM_READER_OR_DOMAIN_READER,
@@ -112,9 +130,7 @@ group_policies = [
'method': 'GET'},
{'path': '/v3/groups',
'method': 'HEAD'}],
- deprecated_rule=deprecated_list_groups,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN),
+ deprecated_rule=deprecated_list_groups),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'list_groups_for_user',
check_str=SYSTEM_READER_OR_DOMAIN_READER_FOR_TARGET_USER_OR_OWNER,
@@ -124,9 +140,7 @@ group_policies = [
'method': 'GET'},
{'path': '/v3/users/{user_id}/groups',
'method': 'HEAD'}],
- deprecated_rule=deprecated_list_groups_for_user,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN),
+ deprecated_rule=deprecated_list_groups_for_user),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'create_group',
check_str=SYSTEM_ADMIN_OR_DOMAIN_ADMIN,
@@ -134,9 +148,7 @@ group_policies = [
description='Create group.',
operations=[{'path': '/v3/groups',
'method': 'POST'}],
- deprecated_rule=deprecated_create_group,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN),
+ deprecated_rule=deprecated_create_group),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'update_group',
check_str=SYSTEM_ADMIN_OR_DOMAIN_ADMIN,
@@ -144,9 +156,7 @@ group_policies = [
description='Update group.',
operations=[{'path': '/v3/groups/{group_id}',
'method': 'PATCH'}],
- deprecated_rule=deprecated_update_group,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN),
+ deprecated_rule=deprecated_update_group),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'delete_group',
check_str=SYSTEM_ADMIN_OR_DOMAIN_ADMIN,
@@ -154,9 +164,7 @@ group_policies = [
description='Delete group.',
operations=[{'path': '/v3/groups/{group_id}',
'method': 'DELETE'}],
- deprecated_rule=deprecated_delete_group,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN),
+ deprecated_rule=deprecated_delete_group),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'list_users_in_group',
check_str=SYSTEM_READER_OR_DOMAIN_READER,
@@ -166,9 +174,7 @@ group_policies = [
'method': 'GET'},
{'path': '/v3/groups/{group_id}/users',
'method': 'HEAD'}],
- deprecated_rule=deprecated_list_users_in_group,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN),
+ deprecated_rule=deprecated_list_users_in_group),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'remove_user_from_group',
check_str=SYSTEM_ADMIN_OR_DOMAIN_ADMIN_FOR_TARGET_GROUP_USER,
@@ -176,9 +182,7 @@ group_policies = [
description='Remove user from group.',
operations=[{'path': '/v3/groups/{group_id}/users/{user_id}',
'method': 'DELETE'}],
- deprecated_rule=deprecated_remove_user_from_group,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN),
+ deprecated_rule=deprecated_remove_user_from_group),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'check_user_in_group',
check_str=SYSTEM_READER_OR_DOMAIN_READER_FOR_TARGET_GROUP_USER,
@@ -188,9 +192,7 @@ group_policies = [
'method': 'HEAD'},
{'path': '/v3/groups/{group_id}/users/{user_id}',
'method': 'GET'}],
- deprecated_rule=deprecated_check_user_in_group,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN),
+ deprecated_rule=deprecated_check_user_in_group),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'add_user_to_group',
check_str=SYSTEM_ADMIN_OR_DOMAIN_ADMIN_FOR_TARGET_GROUP_USER,
@@ -198,9 +200,7 @@ group_policies = [
description='Add user to group.',
operations=[{'path': '/v3/groups/{group_id}/users/{user_id}',
'method': 'PUT'}],
- deprecated_rule=deprecated_add_user_to_group,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN)
+ deprecated_rule=deprecated_add_user_to_group)
]
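
The hunks above (and every file that follows) apply one pattern per policy module: the deprecation metadata (deprecated_reason and deprecated_since) moves off each policy.DocumentedRuleDefault and onto the policy.DeprecatedRule it references, which is also why the DEPRECATED_REASON constant is moved above the DeprecatedRule definitions that now use it. A minimal sketch of the resulting shape, using a hypothetical 'example' rule rather than any rule from this patch:

    from oslo_log import versionutils
    from oslo_policy import policy

    from keystone.common.policies import base

    DEPRECATED_REASON = (
        "The example API is now aware of system scope and default roles."
    )

    # The DeprecatedRule now carries its own reason and release marker...
    deprecated_get_example = policy.DeprecatedRule(
        name=base.IDENTITY % 'get_example',
        check_str=base.RULE_ADMIN_REQUIRED,
        deprecated_reason=DEPRECATED_REASON,
        deprecated_since=versionutils.deprecated.STEIN,
    )

    example_policies = [
        policy.DocumentedRuleDefault(
            name=base.IDENTITY % 'get_example',
            check_str=base.SYSTEM_READER,
            scope_types=['system'],
            description='Show example details.',
            operations=[{'path': '/v3/examples/{example_id}',
                         'method': 'GET'}],
            # ...so the documented default only needs to reference it.
            deprecated_rule=deprecated_get_example,
        ),
    ]
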
diff --git a/keystone/common/policies/identity_provider.py b/keystone/common/policies/identity_provider.py
index 2236d2aea..c1b4d5a1e 100644
--- a/keystone/common/policies/identity_provider.py
+++ b/keystone/common/policies/identity_provider.py
@@ -15,30 +15,41 @@ from oslo_policy import policy
from keystone.common.policies import base
+DEPRECATED_REASON = (
+ "The identity provider API is now aware of system scope and default roles."
+)
+
deprecated_get_idp = policy.DeprecatedRule(
- name=base.IDENTITY % 'get_identity_providers',
- check_str=base.RULE_ADMIN_REQUIRED
+ name=base.IDENTITY % 'get_identity_provider',
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
deprecated_list_idp = policy.DeprecatedRule(
name=base.IDENTITY % 'list_identity_providers',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
deprecated_update_idp = policy.DeprecatedRule(
- name=base.IDENTITY % 'update_identity_providers',
- check_str=base.RULE_ADMIN_REQUIRED
+ name=base.IDENTITY % 'update_identity_provider',
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
deprecated_create_idp = policy.DeprecatedRule(
- name=base.IDENTITY % 'create_identity_providers',
- check_str=base.RULE_ADMIN_REQUIRED
+ name=base.IDENTITY % 'create_identity_provider',
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
deprecated_delete_idp = policy.DeprecatedRule(
- name=base.IDENTITY % 'delete_identity_providers',
- check_str=base.RULE_ADMIN_REQUIRED
+ name=base.IDENTITY % 'delete_identity_provider',
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
-DEPRECATED_REASON = (
- "The identity provider API is now aware of system scope and default roles."
-)
identity_provider_policies = [
policy.DocumentedRuleDefault(
@@ -54,9 +65,7 @@ identity_provider_policies = [
description='Create identity provider.',
operations=[{'path': '/v3/OS-FEDERATION/identity_providers/{idp_id}',
'method': 'PUT'}],
- deprecated_rule=deprecated_create_idp,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN),
+ deprecated_rule=deprecated_create_idp),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'list_identity_providers',
check_str=base.SYSTEM_READER,
@@ -73,8 +82,6 @@ identity_provider_policies = [
}
],
deprecated_rule=deprecated_list_idp,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN
),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'get_identity_provider',
@@ -92,8 +99,6 @@ identity_provider_policies = [
}
],
deprecated_rule=deprecated_get_idp,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN
),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'update_identity_provider',
@@ -102,9 +107,7 @@ identity_provider_policies = [
description='Update identity provider.',
operations=[{'path': '/v3/OS-FEDERATION/identity_providers/{idp_id}',
'method': 'PATCH'}],
- deprecated_rule=deprecated_update_idp,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN),
+ deprecated_rule=deprecated_update_idp),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'delete_identity_provider',
check_str=base.SYSTEM_ADMIN,
@@ -112,9 +115,7 @@ identity_provider_policies = [
description='Delete identity provider.',
operations=[{'path': '/v3/OS-FEDERATION/identity_providers/{idp_id}',
'method': 'DELETE'}],
- deprecated_rule=deprecated_delete_idp,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN),
+ deprecated_rule=deprecated_delete_idp),
]
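
Note that this module does slightly more than relocate the metadata: the names inside the DeprecatedRule definitions are corrected from the plural form to the singular names used by the current policies (get_identity_providers becomes get_identity_provider, and likewise for the create, update, and delete rules), presumably so each deprecated rule maps onto the same policy name as its replacement. Only list_identity_providers keeps its plural name, matching its documented rule.
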
diff --git a/keystone/common/policies/implied_role.py b/keystone/common/policies/implied_role.py
index 6d164b035..01bcc009b 100644
--- a/keystone/common/policies/implied_role.py
+++ b/keystone/common/policies/implied_role.py
@@ -15,33 +15,45 @@ from oslo_policy import policy
from keystone.common.policies import base
+DEPRECATED_REASON = (
+ "The implied role API is now aware of system scope and default roles."
+)
+
deprecated_get_implied_role = policy.DeprecatedRule(
name=base.IDENTITY % 'get_implied_role',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.TRAIN
)
deprecated_list_implied_roles = policy.DeprecatedRule(
name=base.IDENTITY % 'list_implied_roles',
check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.TRAIN
)
deprecated_list_role_inference_rules = policy.DeprecatedRule(
name=base.IDENTITY % 'list_role_inference_rules',
check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.TRAIN
)
deprecated_check_implied_role = policy.DeprecatedRule(
name=base.IDENTITY % 'check_implied_role',
check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.TRAIN
)
deprecated_create_implied_role = policy.DeprecatedRule(
name=base.IDENTITY % 'create_implied_role',
check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.TRAIN
)
deprecated_delete_implied_role = policy.DeprecatedRule(
name=base.IDENTITY % 'delete_implied_role',
check_str=base.RULE_ADMIN_REQUIRED,
-)
-
-DEPRECATED_REASON = (
- "The implied role API is now aware of system scope and default roles."
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.TRAIN
)
@@ -61,9 +73,7 @@ implied_role_policies = [
operations=[
{'path': '/v3/roles/{prior_role_id}/implies/{implied_role_id}',
'method': 'GET'}],
- deprecated_rule=deprecated_get_implied_role,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.TRAIN),
+ deprecated_rule=deprecated_get_implied_role),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'list_implied_roles',
check_str=base.SYSTEM_READER,
@@ -77,9 +87,7 @@ implied_role_policies = [
operations=[
{'path': '/v3/roles/{prior_role_id}/implies', 'method': 'GET'},
{'path': '/v3/roles/{prior_role_id}/implies', 'method': 'HEAD'}],
- deprecated_rule=deprecated_list_implied_roles,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.TRAIN),
+ deprecated_rule=deprecated_list_implied_roles),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'create_implied_role',
check_str=base.SYSTEM_ADMIN,
@@ -91,9 +99,7 @@ implied_role_policies = [
operations=[
{'path': '/v3/roles/{prior_role_id}/implies/{implied_role_id}',
'method': 'PUT'}],
- deprecated_rule=deprecated_create_implied_role,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.TRAIN),
+ deprecated_rule=deprecated_create_implied_role),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'delete_implied_role',
check_str=base.SYSTEM_ADMIN,
@@ -106,9 +112,7 @@ implied_role_policies = [
operations=[
{'path': '/v3/roles/{prior_role_id}/implies/{implied_role_id}',
'method': 'DELETE'}],
- deprecated_rule=deprecated_delete_implied_role,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.TRAIN),
+ deprecated_rule=deprecated_delete_implied_role),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'list_role_inference_rules',
check_str=base.SYSTEM_READER,
@@ -120,9 +124,7 @@ implied_role_policies = [
operations=[
{'path': '/v3/role_inferences', 'method': 'GET'},
{'path': '/v3/role_inferences', 'method': 'HEAD'}],
- deprecated_rule=deprecated_list_role_inference_rules,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.TRAIN),
+ deprecated_rule=deprecated_list_role_inference_rules),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'check_implied_role',
check_str=base.SYSTEM_READER,
@@ -134,9 +136,7 @@ implied_role_policies = [
operations=[
{'path': '/v3/roles/{prior_role_id}/implies/{implied_role_id}',
'method': 'HEAD'}],
- deprecated_rule=deprecated_check_implied_role,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.TRAIN),
+ deprecated_rule=deprecated_check_implied_role),
]
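
For context, the metadata can live on the DeprecatedRule because oslo.policy reads it from there when the default is registered with an enforcer. A rough, self-contained sketch of that consumption path (not keystone code; the 'widget' rule and its strings are hypothetical):

    from oslo_config import cfg
    from oslo_log import versionutils
    from oslo_policy import policy

    CONF = cfg.CONF

    deprecated_get_widget = policy.DeprecatedRule(
        name='identity:get_widget',            # hypothetical rule name
        check_str='rule:admin_required',
        deprecated_reason='The widget API is now aware of default roles.',
        deprecated_since=versionutils.deprecated.TRAIN,
    )

    get_widget = policy.DocumentedRuleDefault(
        name='identity:get_widget',
        check_str='role:reader and system_scope:all',
        scope_types=['system'],
        description='Show widget details.',
        operations=[{'path': '/v3/widgets/{widget_id}', 'method': 'GET'}],
        deprecated_rule=deprecated_get_widget,
    )

    enforcer = policy.Enforcer(CONF)
    enforcer.register_defaults([get_widget])
    # While the deprecation is in effect, oslo.policy logs a warning built
    # from deprecated_reason/deprecated_since and (unless operators opt in
    # to enforcing only the new defaults) still honors the old check string
    # as a fallback when the rule has not been overridden.
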
diff --git a/keystone/common/policies/mapping.py b/keystone/common/policies/mapping.py
index 498bc7c84..6c4f0de67 100644
--- a/keystone/common/policies/mapping.py
+++ b/keystone/common/policies/mapping.py
@@ -15,30 +15,41 @@ from oslo_policy import policy
from keystone.common.policies import base
+DEPRECATED_REASON = (
+ "The federated mapping API is now aware of system scope and default roles."
+)
+
deprecated_get_mapping = policy.DeprecatedRule(
name=base.IDENTITY % 'get_mapping',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
deprecated_list_mappings = policy.DeprecatedRule(
name=base.IDENTITY % 'list_mappings',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
deprecated_update_mapping = policy.DeprecatedRule(
name=base.IDENTITY % 'update_mapping',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
deprecated_create_mapping = policy.DeprecatedRule(
name=base.IDENTITY % 'create_mapping',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
deprecated_delete_mapping = policy.DeprecatedRule(
name=base.IDENTITY % 'delete_mapping',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
-DEPRECATED_REASON = (
- "The federated mapping API is now aware of system scope and default roles."
-)
mapping_policies = [
policy.DocumentedRuleDefault(
@@ -55,9 +66,7 @@ mapping_policies = [
'more sets of rules.'),
operations=[{'path': '/v3/OS-FEDERATION/mappings/{mapping_id}',
'method': 'PUT'}],
- deprecated_rule=deprecated_create_mapping,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN),
+ deprecated_rule=deprecated_create_mapping),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'get_mapping',
check_str=base.SYSTEM_READER,
@@ -73,9 +82,7 @@ mapping_policies = [
'method': 'HEAD'
}
],
- deprecated_rule=deprecated_get_mapping,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN
+ deprecated_rule=deprecated_get_mapping
),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'list_mappings',
@@ -93,8 +100,6 @@ mapping_policies = [
}
],
deprecated_rule=deprecated_list_mappings,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN
),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'delete_mapping',
@@ -103,9 +108,7 @@ mapping_policies = [
description='Delete a federated mapping.',
operations=[{'path': '/v3/OS-FEDERATION/mappings/{mapping_id}',
'method': 'DELETE'}],
- deprecated_rule=deprecated_delete_mapping,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN),
+ deprecated_rule=deprecated_delete_mapping),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'update_mapping',
check_str=base.SYSTEM_ADMIN,
@@ -113,9 +116,7 @@ mapping_policies = [
description='Update a federated mapping.',
operations=[{'path': '/v3/OS-FEDERATION/mappings/{mapping_id}',
'method': 'PATCH'}],
- deprecated_rule=deprecated_update_mapping,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN)
+ deprecated_rule=deprecated_update_mapping)
]
diff --git a/keystone/common/policies/policy.py b/keystone/common/policies/policy.py
index 4c912f33c..502fa9de0 100644
--- a/keystone/common/policies/policy.py
+++ b/keystone/common/policies/policy.py
@@ -15,33 +15,43 @@ from oslo_policy import policy
from keystone.common.policies import base
+DEPRECATED_REASON = (
+ "The policy API is now aware of system scope and default roles."
+)
+
deprecated_get_policy = policy.DeprecatedRule(
name=base.IDENTITY % 'get_policy',
check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.TRAIN
)
deprecated_list_policies = policy.DeprecatedRule(
name=base.IDENTITY % 'list_policies',
check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.TRAIN
)
deprecated_update_policy = policy.DeprecatedRule(
name=base.IDENTITY % 'update_policy',
check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.TRAIN
)
deprecated_create_policy = policy.DeprecatedRule(
name=base.IDENTITY % 'create_policy',
check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.TRAIN
)
deprecated_delete_policy = policy.DeprecatedRule(
name=base.IDENTITY % 'delete_policy',
check_str=base.RULE_ADMIN_REQUIRED,
-)
-
-DEPRECATED_REASON = (
- "The policy API is now aware of system scope and default roles."
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.TRAIN
)
@@ -55,9 +65,7 @@ policy_policies = [
description='Show policy details.',
operations=[{'path': '/v3/policies/{policy_id}',
'method': 'GET'}],
- deprecated_rule=deprecated_get_policy,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.TRAIN),
+ deprecated_rule=deprecated_get_policy),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'list_policies',
check_str=base.SYSTEM_READER,
@@ -65,9 +73,7 @@ policy_policies = [
description='List policies.',
operations=[{'path': '/v3/policies',
'method': 'GET'}],
- deprecated_rule=deprecated_list_policies,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.TRAIN),
+ deprecated_rule=deprecated_list_policies),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'create_policy',
check_str=base.SYSTEM_ADMIN,
@@ -75,9 +81,7 @@ policy_policies = [
description='Create policy.',
operations=[{'path': '/v3/policies',
'method': 'POST'}],
- deprecated_rule=deprecated_create_policy,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.TRAIN),
+ deprecated_rule=deprecated_create_policy),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'update_policy',
check_str=base.SYSTEM_ADMIN,
@@ -85,9 +89,7 @@ policy_policies = [
description='Update policy.',
operations=[{'path': '/v3/policies/{policy_id}',
'method': 'PATCH'}],
- deprecated_rule=deprecated_update_policy,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.TRAIN),
+ deprecated_rule=deprecated_update_policy),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'delete_policy',
check_str=base.SYSTEM_ADMIN,
@@ -95,9 +97,7 @@ policy_policies = [
description='Delete policy.',
operations=[{'path': '/v3/policies/{policy_id}',
'method': 'DELETE'}],
- deprecated_rule=deprecated_delete_policy,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.TRAIN)
+ deprecated_rule=deprecated_delete_policy)
]
diff --git a/keystone/common/policies/policy_association.py b/keystone/common/policies/policy_association.py
index af5790058..1cf6f86ec 100644
--- a/keystone/common/policies/policy_association.py
+++ b/keystone/common/policies/policy_association.py
@@ -19,65 +19,88 @@ from keystone.common.policies import base
# System-scoped tokens should be required to manage policy associations to
# existing system-level resources.
+DEPRECATED_REASON = (
+ "The policy association API is now aware of system scope and default "
+ "roles."
+)
+
deprecated_check_policy_assoc_for_endpoint = policy.DeprecatedRule(
name=base.IDENTITY % 'check_policy_association_for_endpoint',
check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.TRAIN
)
deprecated_check_policy_assoc_for_service = policy.DeprecatedRule(
name=base.IDENTITY % 'check_policy_association_for_service',
check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.TRAIN
)
deprecated_check_policy_assoc_for_region_and_service = policy.DeprecatedRule(
name=base.IDENTITY % 'check_policy_association_for_region_and_service',
check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.TRAIN
)
deprecated_get_policy_for_endpoint = policy.DeprecatedRule(
name=base.IDENTITY % 'get_policy_for_endpoint',
check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.TRAIN
)
deprecated_list_endpoints_for_policy = policy.DeprecatedRule(
name=base.IDENTITY % 'list_endpoints_for_policy',
check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.TRAIN
)
deprecated_create_policy_assoc_for_endpoint = policy.DeprecatedRule(
name=base.IDENTITY % 'create_policy_association_for_endpoint',
check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.TRAIN
)
deprecated_delete_policy_assoc_for_endpoint = policy.DeprecatedRule(
name=base.IDENTITY % 'delete_policy_association_for_endpoint',
check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.TRAIN
)
deprecated_create_policy_assoc_for_service = policy.DeprecatedRule(
name=base.IDENTITY % 'create_policy_association_for_service',
check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.TRAIN
)
deprecated_delete_policy_assoc_for_service = policy.DeprecatedRule(
name=base.IDENTITY % 'delete_policy_association_for_service',
check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.TRAIN
)
deprecated_create_policy_assoc_for_region_and_service = policy.DeprecatedRule(
name=base.IDENTITY % 'create_policy_association_for_region_and_service',
check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.TRAIN
)
deprecated_delete_policy_assoc_for_region_and_service = policy.DeprecatedRule(
name=base.IDENTITY % 'delete_policy_association_for_region_and_service',
check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.TRAIN
)
-DEPRECATED_REASON = (
- "The policy association API is now aware of system scope and default "
- "roles."
-)
policy_association_policies = [
policy.DocumentedRuleDefault(
@@ -88,9 +111,7 @@ policy_association_policies = [
operations=[{'path': ('/v3/policies/{policy_id}/OS-ENDPOINT-POLICY/'
'endpoints/{endpoint_id}'),
'method': 'PUT'}],
- deprecated_rule=deprecated_create_policy_assoc_for_endpoint,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.TRAIN),
+ deprecated_rule=deprecated_create_policy_assoc_for_endpoint),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'check_policy_association_for_endpoint',
check_str=base.SYSTEM_READER,
@@ -102,9 +123,7 @@ policy_association_policies = [
{'path': ('/v3/policies/{policy_id}/OS-ENDPOINT-POLICY/'
'endpoints/{endpoint_id}'),
'method': 'HEAD'}],
- deprecated_rule=deprecated_check_policy_assoc_for_endpoint,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.TRAIN),
+ deprecated_rule=deprecated_check_policy_assoc_for_endpoint),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'delete_policy_association_for_endpoint',
check_str=base.SYSTEM_ADMIN,
@@ -113,9 +132,7 @@ policy_association_policies = [
operations=[{'path': ('/v3/policies/{policy_id}/OS-ENDPOINT-POLICY/'
'endpoints/{endpoint_id}'),
'method': 'DELETE'}],
- deprecated_rule=deprecated_delete_policy_assoc_for_endpoint,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.TRAIN),
+ deprecated_rule=deprecated_delete_policy_assoc_for_endpoint),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'create_policy_association_for_service',
check_str=base.SYSTEM_ADMIN,
@@ -124,9 +141,7 @@ policy_association_policies = [
operations=[{'path': ('/v3/policies/{policy_id}/OS-ENDPOINT-POLICY/'
'services/{service_id}'),
'method': 'PUT'}],
- deprecated_rule=deprecated_create_policy_assoc_for_service,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.TRAIN),
+ deprecated_rule=deprecated_create_policy_assoc_for_service),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'check_policy_association_for_service',
check_str=base.SYSTEM_READER,
@@ -138,9 +153,7 @@ policy_association_policies = [
{'path': ('/v3/policies/{policy_id}/OS-ENDPOINT-POLICY/'
'services/{service_id}'),
'method': 'HEAD'}],
- deprecated_rule=deprecated_check_policy_assoc_for_service,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.TRAIN),
+ deprecated_rule=deprecated_check_policy_assoc_for_service),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'delete_policy_association_for_service',
check_str=base.SYSTEM_ADMIN,
@@ -149,9 +162,7 @@ policy_association_policies = [
operations=[{'path': ('/v3/policies/{policy_id}/OS-ENDPOINT-POLICY/'
'services/{service_id}'),
'method': 'DELETE'}],
- deprecated_rule=deprecated_delete_policy_assoc_for_service,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.TRAIN),
+ deprecated_rule=deprecated_delete_policy_assoc_for_service),
policy.DocumentedRuleDefault(
name=base.IDENTITY % (
'create_policy_association_for_region_and_service'),
@@ -162,9 +173,7 @@ policy_association_policies = [
operations=[{'path': ('/v3/policies/{policy_id}/OS-ENDPOINT-POLICY/'
'services/{service_id}/regions/{region_id}'),
'method': 'PUT'}],
- deprecated_rule=deprecated_create_policy_assoc_for_region_and_service,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.TRAIN),
+ deprecated_rule=deprecated_create_policy_assoc_for_region_and_service),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'check_policy_association_for_region_and_service',
check_str=base.SYSTEM_READER,
@@ -176,9 +185,7 @@ policy_association_policies = [
{'path': ('/v3/policies/{policy_id}/OS-ENDPOINT-POLICY/'
'services/{service_id}/regions/{region_id}'),
'method': 'HEAD'}],
- deprecated_rule=deprecated_check_policy_assoc_for_region_and_service,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.TRAIN),
+ deprecated_rule=deprecated_check_policy_assoc_for_region_and_service),
policy.DocumentedRuleDefault(
name=base.IDENTITY % (
'delete_policy_association_for_region_and_service'),
@@ -188,9 +195,7 @@ policy_association_policies = [
operations=[{'path': ('/v3/policies/{policy_id}/OS-ENDPOINT-POLICY/'
'services/{service_id}/regions/{region_id}'),
'method': 'DELETE'}],
- deprecated_rule=deprecated_delete_policy_assoc_for_region_and_service,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.TRAIN),
+ deprecated_rule=deprecated_delete_policy_assoc_for_region_and_service),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'get_policy_for_endpoint',
check_str=base.SYSTEM_READER,
@@ -202,9 +207,7 @@ policy_association_policies = [
{'path': ('/v3/endpoints/{endpoint_id}/OS-ENDPOINT-POLICY/'
'policy'),
'method': 'HEAD'}],
- deprecated_rule=deprecated_get_policy_for_endpoint,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.TRAIN),
+ deprecated_rule=deprecated_get_policy_for_endpoint),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'list_endpoints_for_policy',
check_str=base.SYSTEM_READER,
@@ -213,9 +216,7 @@ policy_association_policies = [
operations=[{'path': ('/v3/policies/{policy_id}/OS-ENDPOINT-POLICY/'
'endpoints'),
'method': 'GET'}],
- deprecated_rule=deprecated_list_endpoints_for_policy,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.TRAIN)
+ deprecated_rule=deprecated_list_endpoints_for_policy)
]
diff --git a/keystone/common/policies/project.py b/keystone/common/policies/project.py
index c7b7c0a9d..db7cdee9f 100644
--- a/keystone/common/policies/project.py
+++ b/keystone/common/policies/project.py
@@ -52,60 +52,84 @@ SYSTEM_ADMIN_OR_DOMAIN_ADMIN = (
'(role:admin and domain_id:%(target.project.domain_id)s)'
)
+DEPRECATED_REASON = (
+ "The project API is now aware of system scope and default roles."
+)
+
deprecated_list_projects = policy.DeprecatedRule(
name=base.IDENTITY % 'list_projects',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
deprecated_get_project = policy.DeprecatedRule(
name=base.IDENTITY % 'get_project',
- check_str=base.RULE_ADMIN_OR_TARGET_PROJECT
+ check_str=base.RULE_ADMIN_OR_TARGET_PROJECT,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
deprecated_list_user_projects = policy.DeprecatedRule(
name=base.IDENTITY % 'list_user_projects',
- check_str=base.RULE_ADMIN_OR_OWNER
+ check_str=base.RULE_ADMIN_OR_OWNER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
deprecated_create_project = policy.DeprecatedRule(
name=base.IDENTITY % 'create_project',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
deprecated_update_project = policy.DeprecatedRule(
name=base.IDENTITY % 'update_project',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
deprecated_delete_project = policy.DeprecatedRule(
name=base.IDENTITY % 'delete_project',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
deprecated_list_project_tags = policy.DeprecatedRule(
name=base.IDENTITY % 'list_project_tags',
- check_str=base.RULE_ADMIN_OR_TARGET_PROJECT
+ check_str=base.RULE_ADMIN_OR_TARGET_PROJECT,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.TRAIN
)
deprecated_get_project_tag = policy.DeprecatedRule(
name=base.IDENTITY % 'get_project_tag',
- check_str=base.RULE_ADMIN_OR_TARGET_PROJECT
+ check_str=base.RULE_ADMIN_OR_TARGET_PROJECT,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.TRAIN
)
deprecated_update_project_tag = policy.DeprecatedRule(
name=base.IDENTITY % 'update_project_tags',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.TRAIN
)
deprecated_create_project_tag = policy.DeprecatedRule(
name=base.IDENTITY % 'create_project_tag',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.TRAIN
)
deprecated_delete_project_tag = policy.DeprecatedRule(
name=base.IDENTITY % 'delete_project_tag',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.TRAIN
)
deprecated_delete_project_tags = policy.DeprecatedRule(
name=base.IDENTITY % 'delete_project_tags',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.TRAIN
)
-DEPRECATED_REASON = (
- "The project API is now aware of system scope and default roles."
-)
-
TAGS_DEPRECATED_REASON = """
As of the Train release, the project tags API understands how to handle
system-scoped tokens in addition to project and domain tokens, making the API
@@ -122,9 +146,7 @@ project_policies = [
description='Show project details.',
operations=[{'path': '/v3/projects/{project_id}',
'method': 'GET'}],
- deprecated_rule=deprecated_get_project,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN),
+ deprecated_rule=deprecated_get_project),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'list_projects',
check_str=SYSTEM_READER_OR_DOMAIN_READER,
@@ -136,9 +158,7 @@ project_policies = [
description='List projects.',
operations=[{'path': '/v3/projects',
'method': 'GET'}],
- deprecated_rule=deprecated_list_projects,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN),
+ deprecated_rule=deprecated_list_projects),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'list_user_projects',
check_str=SYSTEM_READER_OR_DOMAIN_READER_OR_OWNER,
@@ -146,9 +166,7 @@ project_policies = [
description='List projects for user.',
operations=[{'path': '/v3/users/{user_id}/projects',
'method': 'GET'}],
- deprecated_rule=deprecated_list_user_projects,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN),
+ deprecated_rule=deprecated_list_user_projects),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'create_project',
check_str=SYSTEM_ADMIN_OR_DOMAIN_ADMIN,
@@ -156,9 +174,7 @@ project_policies = [
description='Create project.',
operations=[{'path': '/v3/projects',
'method': 'POST'}],
- deprecated_rule=deprecated_create_project,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN),
+ deprecated_rule=deprecated_create_project),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'update_project',
check_str=SYSTEM_ADMIN_OR_DOMAIN_ADMIN,
@@ -166,9 +182,7 @@ project_policies = [
description='Update project.',
operations=[{'path': '/v3/projects/{project_id}',
'method': 'PATCH'}],
- deprecated_rule=deprecated_update_project,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN),
+ deprecated_rule=deprecated_update_project),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'delete_project',
check_str=SYSTEM_ADMIN_OR_DOMAIN_ADMIN,
@@ -176,9 +190,7 @@ project_policies = [
description='Delete project.',
operations=[{'path': '/v3/projects/{project_id}',
'method': 'DELETE'}],
- deprecated_rule=deprecated_delete_project,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN),
+ deprecated_rule=deprecated_delete_project),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'list_project_tags',
check_str=SYSTEM_READER_OR_DOMAIN_READER_OR_PROJECT_USER,
@@ -188,9 +200,7 @@ project_policies = [
'method': 'GET'},
{'path': '/v3/projects/{project_id}/tags',
'method': 'HEAD'}],
- deprecated_rule=deprecated_list_project_tags,
- deprecated_reason=TAGS_DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.TRAIN),
+ deprecated_rule=deprecated_list_project_tags),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'get_project_tag',
check_str=SYSTEM_READER_OR_DOMAIN_READER_OR_PROJECT_USER,
@@ -200,9 +210,7 @@ project_policies = [
'method': 'GET'},
{'path': '/v3/projects/{project_id}/tags/{value}',
'method': 'HEAD'}],
- deprecated_rule=deprecated_get_project_tag,
- deprecated_reason=TAGS_DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.TRAIN),
+ deprecated_rule=deprecated_get_project_tag),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'update_project_tags',
check_str=SYSTEM_ADMIN_OR_DOMAIN_ADMIN_OR_PROJECT_ADMIN,
@@ -210,9 +218,7 @@ project_policies = [
description='Replace all tags on a project with the new set of tags.',
operations=[{'path': '/v3/projects/{project_id}/tags',
'method': 'PUT'}],
- deprecated_rule=deprecated_update_project_tag,
- deprecated_reason=TAGS_DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.TRAIN),
+ deprecated_rule=deprecated_update_project_tag),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'create_project_tag',
check_str=SYSTEM_ADMIN_OR_DOMAIN_ADMIN_OR_PROJECT_ADMIN,
@@ -220,9 +226,7 @@ project_policies = [
description='Add a single tag to a project.',
operations=[{'path': '/v3/projects/{project_id}/tags/{value}',
'method': 'PUT'}],
- deprecated_rule=deprecated_create_project_tag,
- deprecated_reason=TAGS_DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.TRAIN),
+ deprecated_rule=deprecated_create_project_tag),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'delete_project_tags',
check_str=SYSTEM_ADMIN_OR_DOMAIN_ADMIN_OR_PROJECT_ADMIN,
@@ -230,9 +234,7 @@ project_policies = [
description='Remove all tags from a project.',
operations=[{'path': '/v3/projects/{project_id}/tags',
'method': 'DELETE'}],
- deprecated_rule=deprecated_delete_project_tags,
- deprecated_reason=TAGS_DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.TRAIN),
+ deprecated_rule=deprecated_delete_project_tags),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'delete_project_tag',
check_str=SYSTEM_ADMIN_OR_DOMAIN_ADMIN_OR_PROJECT_ADMIN,
@@ -240,9 +242,7 @@ project_policies = [
description='Delete a specified tag from project.',
operations=[{'path': '/v3/projects/{project_id}/tags/{value}',
'method': 'DELETE'}],
- deprecated_rule=deprecated_delete_project_tag,
- deprecated_reason=TAGS_DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.TRAIN)
+ deprecated_rule=deprecated_delete_project_tag)
]
diff --git a/keystone/common/policies/project_endpoint.py b/keystone/common/policies/project_endpoint.py
index c04cddd4d..86a020e02 100644
--- a/keystone/common/policies/project_endpoint.py
+++ b/keystone/common/policies/project_endpoint.py
@@ -15,39 +15,49 @@ from oslo_policy import policy
from keystone.common.policies import base
+DEPRECATED_REASON = """
+As of the Train release, the project endpoint API now understands default
+roles and system-scoped tokens, making the API more granular by default without
+compromising security. The new policy defaults account for these changes
+automatically. Be sure to take these new defaults into consideration if you are
+relying on overrides in your deployment for the project endpoint API.
+"""
+
deprecated_list_projects_for_endpoint = policy.DeprecatedRule(
name=base.IDENTITY % 'list_projects_for_endpoint',
check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.TRAIN
)
deprecated_add_endpoint_to_project = policy.DeprecatedRule(
name=base.IDENTITY % 'add_endpoint_to_project',
check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.TRAIN
)
deprecated_check_endpoint_in_project = policy.DeprecatedRule(
name=base.IDENTITY % 'check_endpoint_in_project',
check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.TRAIN
)
deprecated_list_endpoints_for_project = policy.DeprecatedRule(
name=base.IDENTITY % 'list_endpoints_for_project',
check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.TRAIN
)
deprecated_remove_endpoint_from_project = policy.DeprecatedRule(
name=base.IDENTITY % 'remove_endpoint_from_project',
check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.TRAIN
)
-DEPRECATED_REASON = """
-As of the Train release, the project endpoint API now understands default
-roles and system-scoped tokens, making the API more granular by default without
-compromising security. The new policy defaults account for these changes
-automatically. Be sure to take these new defaults into consideration if you are
-relying on overrides in your deployment for the project endpoint API.
-"""
-
project_endpoint_policies = [
@@ -63,9 +73,7 @@ project_endpoint_policies = [
operations=[{'path': ('/v3/OS-EP-FILTER/endpoints/{endpoint_id}/'
'projects'),
'method': 'GET'}],
- deprecated_rule=deprecated_list_projects_for_endpoint,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.TRAIN),
+ deprecated_rule=deprecated_list_projects_for_endpoint),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'add_endpoint_to_project',
check_str=base.SYSTEM_ADMIN,
@@ -74,9 +82,7 @@ project_endpoint_policies = [
operations=[{'path': ('/v3/OS-EP-FILTER/projects/{project_id}/'
'endpoints/{endpoint_id}'),
'method': 'PUT'}],
- deprecated_rule=deprecated_add_endpoint_to_project,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.TRAIN),
+ deprecated_rule=deprecated_add_endpoint_to_project),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'check_endpoint_in_project',
check_str=base.SYSTEM_READER,
@@ -88,9 +94,7 @@ project_endpoint_policies = [
{'path': ('/v3/OS-EP-FILTER/projects/{project_id}/'
'endpoints/{endpoint_id}'),
'method': 'HEAD'}],
- deprecated_rule=deprecated_check_endpoint_in_project,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.TRAIN),
+ deprecated_rule=deprecated_check_endpoint_in_project),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'list_endpoints_for_project',
check_str=base.SYSTEM_READER,
@@ -99,9 +103,7 @@ project_endpoint_policies = [
operations=[{'path': ('/v3/OS-EP-FILTER/projects/{project_id}/'
'endpoints'),
'method': 'GET'}],
- deprecated_rule=deprecated_list_endpoints_for_project,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.TRAIN),
+ deprecated_rule=deprecated_list_endpoints_for_project),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'remove_endpoint_from_project',
check_str=base.SYSTEM_ADMIN,
@@ -111,9 +113,7 @@ project_endpoint_policies = [
operations=[{'path': ('/v3/OS-EP-FILTER/projects/{project_id}/'
'endpoints/{endpoint_id}'),
'method': 'DELETE'}],
- deprecated_rule=deprecated_remove_endpoint_from_project,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.TRAIN),
+ deprecated_rule=deprecated_remove_endpoint_from_project),
]
diff --git a/keystone/common/policies/protocol.py b/keystone/common/policies/protocol.py
index de2a7299e..887fc70df 100644
--- a/keystone/common/policies/protocol.py
+++ b/keystone/common/policies/protocol.py
@@ -15,31 +15,42 @@ from oslo_policy import policy
from keystone.common.policies import base
+DEPRECATED_REASON = (
+ "The federated protocol API is now aware of system scope and default "
+ "roles."
+)
+
deprecated_get_protocol = policy.DeprecatedRule(
name=base.IDENTITY % 'get_protocol',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
deprecated_list_protocols = policy.DeprecatedRule(
name=base.IDENTITY % 'list_protocols',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
deprecated_update_protocol = policy.DeprecatedRule(
name=base.IDENTITY % 'update_protocol',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
deprecated_create_protocol = policy.DeprecatedRule(
name=base.IDENTITY % 'create_protocol',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
deprecated_delete_protocol = policy.DeprecatedRule(
name=base.IDENTITY % 'delete_protocol',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
-DEPRECATED_REASON = (
- "The federated protocol API is now aware of system scope and default "
- "roles."
-)
protocol_policies = [
policy.DocumentedRuleDefault(
@@ -53,9 +64,7 @@ protocol_policies = [
operations=[{'path': ('/v3/OS-FEDERATION/identity_providers/{idp_id}/'
'protocols/{protocol_id}'),
'method': 'PUT'}],
- deprecated_rule=deprecated_create_protocol,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN),
+ deprecated_rule=deprecated_create_protocol),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'update_protocol',
check_str=base.SYSTEM_ADMIN,
@@ -64,9 +73,7 @@ protocol_policies = [
operations=[{'path': ('/v3/OS-FEDERATION/identity_providers/{idp_id}/'
'protocols/{protocol_id}'),
'method': 'PATCH'}],
- deprecated_rule=deprecated_update_protocol,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN),
+ deprecated_rule=deprecated_update_protocol),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'get_protocol',
check_str=base.SYSTEM_READER,
@@ -75,9 +82,7 @@ protocol_policies = [
operations=[{'path': ('/v3/OS-FEDERATION/identity_providers/{idp_id}/'
'protocols/{protocol_id}'),
'method': 'GET'}],
- deprecated_rule=deprecated_get_protocol,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN),
+ deprecated_rule=deprecated_get_protocol),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'list_protocols',
check_str=base.SYSTEM_READER,
@@ -86,9 +91,7 @@ protocol_policies = [
operations=[{'path': ('/v3/OS-FEDERATION/identity_providers/{idp_id}/'
'protocols'),
'method': 'GET'}],
- deprecated_rule=deprecated_list_protocols,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN),
+ deprecated_rule=deprecated_list_protocols),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'delete_protocol',
check_str=base.SYSTEM_ADMIN,
@@ -97,9 +100,7 @@ protocol_policies = [
operations=[{'path': ('/v3/OS-FEDERATION/identity_providers/{idp_id}/'
'protocols/{protocol_id}'),
'method': 'DELETE'}],
- deprecated_rule=deprecated_delete_protocol,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN)
+ deprecated_rule=deprecated_delete_protocol)
]
diff --git a/keystone/common/policies/region.py b/keystone/common/policies/region.py
index bf60f8ff9..f13299dd2 100644
--- a/keystone/common/policies/region.py
+++ b/keystone/common/policies/region.py
@@ -15,22 +15,29 @@ from oslo_policy import policy
from keystone.common.policies import base
+DEPRECATED_REASON = (
+ "The region API is now aware of system scope and default roles."
+)
+
deprecated_create_region = policy.DeprecatedRule(
name=base.IDENTITY % 'create_region',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
deprecated_update_region = policy.DeprecatedRule(
name=base.IDENTITY % 'update_region',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
deprecated_delete_region = policy.DeprecatedRule(
name=base.IDENTITY % 'delete_region',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
-DEPRECATED_REASON = (
- "The region API is now aware of system scope and default roles."
-)
region_policies = [
policy.DocumentedRuleDefault(
@@ -66,9 +73,7 @@ region_policies = [
'method': 'POST'},
{'path': '/v3/regions/{region_id}',
'method': 'PUT'}],
- deprecated_rule=deprecated_create_region,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN),
+ deprecated_rule=deprecated_create_region),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'update_region',
check_str=base.SYSTEM_ADMIN,
@@ -76,9 +81,7 @@ region_policies = [
description='Update region.',
operations=[{'path': '/v3/regions/{region_id}',
'method': 'PATCH'}],
- deprecated_rule=deprecated_update_region,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN),
+ deprecated_rule=deprecated_update_region),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'delete_region',
check_str=base.SYSTEM_ADMIN,
@@ -86,9 +89,7 @@ region_policies = [
description='Delete region.',
operations=[{'path': '/v3/regions/{region_id}',
'method': 'DELETE'}],
- deprecated_rule=deprecated_delete_region,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN),
+ deprecated_rule=deprecated_delete_region),
]
diff --git a/keystone/common/policies/role.py b/keystone/common/policies/role.py
index 7d6a38e46..b372efbba 100644
--- a/keystone/common/policies/role.py
+++ b/keystone/common/policies/role.py
@@ -15,50 +15,71 @@ from oslo_policy import policy
from keystone.common.policies import base
+DEPRECATED_REASON = (
+ "The role API is now aware of system scope and default roles."
+)
+
deprecated_get_role = policy.DeprecatedRule(
name=base.IDENTITY % 'get_role',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
deprecated_list_role = policy.DeprecatedRule(
name=base.IDENTITY % 'list_roles',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
deprecated_update_role = policy.DeprecatedRule(
name=base.IDENTITY % 'update_role',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
deprecated_create_role = policy.DeprecatedRule(
name=base.IDENTITY % 'create_role',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
deprecated_delete_role = policy.DeprecatedRule(
name=base.IDENTITY % 'delete_role',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
deprecated_get_domain_role = policy.DeprecatedRule(
name=base.IDENTITY % 'get_domain_role',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.TRAIN
)
deprecated_list_domain_roles = policy.DeprecatedRule(
name=base.IDENTITY % 'list_domain_roles',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.TRAIN
)
deprecated_update_domain_role = policy.DeprecatedRule(
name=base.IDENTITY % 'update_domain_role',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.TRAIN
)
deprecated_create_domain_role = policy.DeprecatedRule(
name=base.IDENTITY % 'create_domain_role',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.TRAIN
)
deprecated_delete_domain_role = policy.DeprecatedRule(
name=base.IDENTITY % 'delete_domain_role',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.TRAIN
)
-DEPRECATED_REASON = (
- "The role API is now aware of system scope and default roles."
-)
role_policies = [
policy.DocumentedRuleDefault(
@@ -75,9 +96,7 @@ role_policies = [
'method': 'GET'},
{'path': '/v3/roles/{role_id}',
'method': 'HEAD'}],
- deprecated_rule=deprecated_get_role,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN),
+ deprecated_rule=deprecated_get_role),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'list_roles',
check_str=base.SYSTEM_READER,
@@ -87,9 +106,7 @@ role_policies = [
'method': 'GET'},
{'path': '/v3/roles',
'method': 'HEAD'}],
- deprecated_rule=deprecated_list_role,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN),
+ deprecated_rule=deprecated_list_role),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'create_role',
check_str=base.SYSTEM_ADMIN,
@@ -97,9 +114,7 @@ role_policies = [
description='Create role.',
operations=[{'path': '/v3/roles',
'method': 'POST'}],
- deprecated_rule=deprecated_create_role,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN),
+ deprecated_rule=deprecated_create_role),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'update_role',
check_str=base.SYSTEM_ADMIN,
@@ -107,9 +122,7 @@ role_policies = [
description='Update role.',
operations=[{'path': '/v3/roles/{role_id}',
'method': 'PATCH'}],
- deprecated_rule=deprecated_update_role,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN),
+ deprecated_rule=deprecated_update_role),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'delete_role',
check_str=base.SYSTEM_ADMIN,
@@ -117,9 +130,7 @@ role_policies = [
description='Delete role.',
operations=[{'path': '/v3/roles/{role_id}',
'method': 'DELETE'}],
- deprecated_rule=deprecated_delete_role,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN),
+ deprecated_rule=deprecated_delete_role),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'get_domain_role',
check_str=base.SYSTEM_READER,
@@ -134,9 +145,7 @@ role_policies = [
'method': 'GET'},
{'path': '/v3/roles/{role_id}',
'method': 'HEAD'}],
- deprecated_rule=deprecated_get_domain_role,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.TRAIN),
+ deprecated_rule=deprecated_get_domain_role),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'list_domain_roles',
check_str=base.SYSTEM_READER,
@@ -146,9 +155,7 @@ role_policies = [
'method': 'GET'},
{'path': '/v3/roles?domain_id={domain_id}',
'method': 'HEAD'}],
- deprecated_rule=deprecated_list_domain_roles,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.TRAIN),
+ deprecated_rule=deprecated_list_domain_roles),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'create_domain_role',
check_str=base.SYSTEM_ADMIN,
@@ -156,9 +163,7 @@ role_policies = [
scope_types=['system'],
operations=[{'path': '/v3/roles',
'method': 'POST'}],
- deprecated_rule=deprecated_create_domain_role,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.TRAIN),
+ deprecated_rule=deprecated_create_domain_role),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'update_domain_role',
check_str=base.SYSTEM_ADMIN,
@@ -166,9 +171,7 @@ role_policies = [
scope_types=['system'],
operations=[{'path': '/v3/roles/{role_id}',
'method': 'PATCH'}],
- deprecated_rule=deprecated_update_domain_role,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.TRAIN),
+ deprecated_rule=deprecated_update_domain_role),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'delete_domain_role',
check_str=base.SYSTEM_ADMIN,
@@ -176,9 +179,7 @@ role_policies = [
scope_types=['system'],
operations=[{'path': '/v3/roles/{role_id}',
'method': 'DELETE'}],
- deprecated_rule=deprecated_delete_domain_role,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.TRAIN)
+ deprecated_rule=deprecated_delete_domain_role)
]
diff --git a/keystone/common/policies/role_assignment.py b/keystone/common/policies/role_assignment.py
index c70f292f3..5dea3dc2f 100644
--- a/keystone/common/policies/role_assignment.py
+++ b/keystone/common/policies/role_assignment.py
@@ -25,18 +25,23 @@ SYSTEM_READER_OR_PROJECT_DOMAIN_READER_OR_PROJECT_ADMIN = (
'(role:admin and project_id:%(target.project.id)s)'
)
+DEPRECATED_REASON = (
+ "The assignment API is now aware of system scope and default roles."
+)
+
deprecated_list_role_assignments = policy.DeprecatedRule(
name=base.IDENTITY % 'list_role_assignments',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
deprecated_list_role_assignments_for_tree = policy.DeprecatedRule(
name=base.IDENTITY % 'list_role_assignments_for_tree',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.TRAIN
)
-DEPRECATED_REASON = (
- "The assignment API is now aware of system scope and default roles."
-)
role_assignment_policies = [
policy.DocumentedRuleDefault(
@@ -48,9 +53,7 @@ role_assignment_policies = [
'method': 'GET'},
{'path': '/v3/role_assignments',
'method': 'HEAD'}],
- deprecated_rule=deprecated_list_role_assignments,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN),
+ deprecated_rule=deprecated_list_role_assignments),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'list_role_assignments_for_tree',
check_str=SYSTEM_READER_OR_PROJECT_DOMAIN_READER_OR_PROJECT_ADMIN,
@@ -61,9 +64,7 @@ role_assignment_policies = [
'method': 'GET'},
{'path': '/v3/role_assignments?include_subtree',
'method': 'HEAD'}],
- deprecated_rule=deprecated_list_role_assignments_for_tree,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.TRAIN),
+ deprecated_rule=deprecated_list_role_assignments_for_tree),
]
diff --git a/keystone/common/policies/service.py b/keystone/common/policies/service.py
index 66d3aaa72..028707631 100644
--- a/keystone/common/policies/service.py
+++ b/keystone/common/policies/service.py
@@ -15,30 +15,41 @@ from oslo_policy import policy
from keystone.common.policies import base
+DEPRECATED_REASON = (
+ "The service API is now aware of system scope and default roles."
+)
+
deprecated_get_service = policy.DeprecatedRule(
name=base.IDENTITY % 'get_service',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
deprecated_list_service = policy.DeprecatedRule(
name=base.IDENTITY % 'list_services',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
deprecated_update_service = policy.DeprecatedRule(
name=base.IDENTITY % 'update_service',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
deprecated_create_service = policy.DeprecatedRule(
name=base.IDENTITY % 'create_service',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
deprecated_delete_service = policy.DeprecatedRule(
name=base.IDENTITY % 'delete_service',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
-DEPRECATED_REASON = (
- "The service API is now aware of system scope and default roles."
-)
service_policies = [
policy.DocumentedRuleDefault(
@@ -48,9 +59,7 @@ service_policies = [
description='Show service details.',
operations=[{'path': '/v3/services/{service_id}',
'method': 'GET'}],
- deprecated_rule=deprecated_get_service,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN),
+ deprecated_rule=deprecated_get_service),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'list_services',
check_str=base.SYSTEM_READER,
@@ -58,9 +67,7 @@ service_policies = [
description='List services.',
operations=[{'path': '/v3/services',
'method': 'GET'}],
- deprecated_rule=deprecated_list_service,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN),
+ deprecated_rule=deprecated_list_service),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'create_service',
check_str=base.SYSTEM_ADMIN,
@@ -68,9 +75,7 @@ service_policies = [
description='Create service.',
operations=[{'path': '/v3/services',
'method': 'POST'}],
- deprecated_rule=deprecated_create_service,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN),
+ deprecated_rule=deprecated_create_service),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'update_service',
check_str=base.SYSTEM_ADMIN,
@@ -78,9 +83,7 @@ service_policies = [
description='Update service.',
operations=[{'path': '/v3/services/{service_id}',
'method': 'PATCH'}],
- deprecated_rule=deprecated_update_service,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN),
+ deprecated_rule=deprecated_update_service),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'delete_service',
check_str=base.SYSTEM_ADMIN,
@@ -88,9 +91,7 @@ service_policies = [
description='Delete service.',
operations=[{'path': '/v3/services/{service_id}',
'method': 'DELETE'}],
- deprecated_rule=deprecated_delete_service,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN)
+ deprecated_rule=deprecated_delete_service)
]
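Consumers of these lists are unaffected: keystone still registers every *_policies list with its oslo.policy enforcer through its own helpers. A hedged sketch of what that registration amounts to (the enforcer setup here is illustrative and not keystone's actual wiring):

from oslo_config import cfg
from oslo_policy import policy

CONF = cfg.CONF
enforcer = policy.Enforcer(CONF)

# Register the defaults defined above; during the deprecation window
# oslo.policy evaluates the new default together with the deprecated
# check string and emits a warning when they differ.
enforcer.register_defaults(service_policies)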
diff --git a/keystone/common/policies/service_provider.py b/keystone/common/policies/service_provider.py
index 4d0e3cb90..657368aea 100644
--- a/keystone/common/policies/service_provider.py
+++ b/keystone/common/policies/service_provider.py
@@ -15,30 +15,41 @@ from oslo_policy import policy
from keystone.common.policies import base
+DEPRECATED_REASON = (
+ "The service provider API is now aware of system scope and default roles."
+)
+
deprecated_get_sp = policy.DeprecatedRule(
name=base.IDENTITY % 'get_service_provider',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
deprecated_list_sp = policy.DeprecatedRule(
name=base.IDENTITY % 'list_service_providers',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
deprecated_update_sp = policy.DeprecatedRule(
name=base.IDENTITY % 'update_service_provider',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
deprecated_create_sp = policy.DeprecatedRule(
name=base.IDENTITY % 'create_service_provider',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
deprecated_delete_sp = policy.DeprecatedRule(
name=base.IDENTITY % 'delete_service_provider',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
-DEPRECATED_REASON = (
- "The service provider API is now aware of system scope and default roles."
-)
service_provider_policies = [
policy.DocumentedRuleDefault(
@@ -55,9 +66,7 @@ service_provider_policies = [
operations=[{'path': ('/v3/OS-FEDERATION/service_providers/'
'{service_provider_id}'),
'method': 'PUT'}],
- deprecated_rule=deprecated_create_sp,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN),
+ deprecated_rule=deprecated_create_sp),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'list_service_providers',
check_str=base.SYSTEM_READER,
@@ -73,9 +82,7 @@ service_provider_policies = [
'method': 'HEAD'
}
],
- deprecated_rule=deprecated_list_sp,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN
+ deprecated_rule=deprecated_list_sp
),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'get_service_provider',
@@ -94,9 +101,7 @@ service_provider_policies = [
'method': 'HEAD'
}
],
- deprecated_rule=deprecated_get_sp,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN
+ deprecated_rule=deprecated_get_sp
),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'update_service_provider',
@@ -106,9 +111,7 @@ service_provider_policies = [
operations=[{'path': ('/v3/OS-FEDERATION/service_providers/'
'{service_provider_id}'),
'method': 'PATCH'}],
- deprecated_rule=deprecated_update_sp,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN),
+ deprecated_rule=deprecated_update_sp),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'delete_service_provider',
check_str=base.SYSTEM_ADMIN,
@@ -117,9 +120,7 @@ service_provider_policies = [
operations=[{'path': ('/v3/OS-FEDERATION/service_providers/'
'{service_provider_id}'),
'method': 'DELETE'}],
- deprecated_rule=deprecated_delete_sp,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN)
+ deprecated_rule=deprecated_delete_sp)
]
diff --git a/keystone/common/policies/token.py b/keystone/common/policies/token.py
index 9fa3c52f1..cb321b059 100644
--- a/keystone/common/policies/token.py
+++ b/keystone/common/policies/token.py
@@ -21,15 +21,21 @@ DEPRECATED_REASON = (
deprecated_check_token = policy.DeprecatedRule(
name=base.IDENTITY % 'check_token',
- check_str=base.RULE_ADMIN_OR_TOKEN_SUBJECT
+ check_str=base.RULE_ADMIN_OR_TOKEN_SUBJECT,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.TRAIN
)
deprecated_validate_token = policy.DeprecatedRule(
name=base.IDENTITY % 'validate_token',
- check_str=base.RULE_SERVICE_ADMIN_OR_TOKEN_SUBJECT
+ check_str=base.RULE_SERVICE_ADMIN_OR_TOKEN_SUBJECT,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.TRAIN
)
deprecated_revoke_token = policy.DeprecatedRule(
name=base.IDENTITY % 'revoke_token',
- check_str=base.RULE_ADMIN_OR_TOKEN_SUBJECT
+ check_str=base.RULE_ADMIN_OR_TOKEN_SUBJECT,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.TRAIN
)
SYSTEM_ADMIN_OR_TOKEN_SUBJECT = (
@@ -52,9 +58,7 @@ token_policies = [
description='Check a token.',
operations=[{'path': '/v3/auth/tokens',
'method': 'HEAD'}],
- deprecated_rule=deprecated_check_token,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.TRAIN),
+ deprecated_rule=deprecated_check_token),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'validate_token',
check_str=SYSTEM_USER_OR_SERVICE_OR_TOKEN_SUBJECT,
@@ -62,9 +66,7 @@ token_policies = [
description='Validate a token.',
operations=[{'path': '/v3/auth/tokens',
'method': 'GET'}],
- deprecated_rule=deprecated_validate_token,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.TRAIN),
+ deprecated_rule=deprecated_validate_token),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'revoke_token',
check_str=SYSTEM_ADMIN_OR_TOKEN_SUBJECT,
@@ -72,9 +74,7 @@ token_policies = [
description='Revoke a token.',
operations=[{'path': '/v3/auth/tokens',
'method': 'DELETE'}],
- deprecated_rule=deprecated_revoke_token,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.TRAIN)
+ deprecated_rule=deprecated_revoke_token)
]
diff --git a/keystone/common/policies/trust.py b/keystone/common/policies/trust.py
index 82acb0a93..7678106a8 100644
--- a/keystone/common/policies/trust.py
+++ b/keystone/common/policies/trust.py
@@ -24,29 +24,39 @@ SYSTEM_READER_OR_TRUSTOR = base.SYSTEM_READER + ' or ' + RULE_TRUSTOR
SYSTEM_READER_OR_TRUSTEE = base.SYSTEM_READER + ' or ' + RULE_TRUSTEE
SYSTEM_ADMIN_OR_TRUSTOR = base.SYSTEM_ADMIN + ' or ' + RULE_TRUSTOR
+DEPRECATED_REASON = (
+ "The trust API is now aware of system scope and default roles."
+)
+
deprecated_list_trusts = policy.DeprecatedRule(
name=base.IDENTITY % 'list_trusts',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.TRAIN
)
deprecated_list_roles_for_trust = policy.DeprecatedRule(
name=base.IDENTITY % 'list_roles_for_trust',
- check_str=RULE_TRUSTOR + ' or ' + RULE_TRUSTEE
+ check_str=RULE_TRUSTOR + ' or ' + RULE_TRUSTEE,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.TRAIN
)
deprecated_get_role_for_trust = policy.DeprecatedRule(
name=base.IDENTITY % 'get_role_for_trust',
- check_str=RULE_TRUSTOR + ' or ' + RULE_TRUSTEE
+ check_str=RULE_TRUSTOR + ' or ' + RULE_TRUSTEE,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.TRAIN
)
deprecated_delete_trust = policy.DeprecatedRule(
name=base.IDENTITY % 'delete_trust',
- check_str=RULE_TRUSTOR
+ check_str=RULE_TRUSTOR,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.TRAIN
)
deprecated_get_trust = policy.DeprecatedRule(
name=base.IDENTITY % 'get_trust',
- check_str=RULE_TRUSTOR + ' or ' + RULE_TRUSTEE
-)
-
-DEPRECATED_REASON = (
- "The trust API is now aware of system scope and default roles."
+ check_str=RULE_TRUSTOR + ' or ' + RULE_TRUSTEE,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.TRAIN
)
trust_policies = [
@@ -69,9 +79,7 @@ trust_policies = [
'method': 'GET'},
{'path': '/v3/OS-TRUST/trusts',
'method': 'HEAD'}],
- deprecated_rule=deprecated_list_trusts,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.TRAIN),
+ deprecated_rule=deprecated_list_trusts),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'list_trusts_for_trustor',
check_str=SYSTEM_READER_OR_TRUSTOR,
@@ -103,9 +111,7 @@ trust_policies = [
'method': 'GET'},
{'path': '/v3/OS-TRUST/trusts/{trust_id}/roles',
'method': 'HEAD'}],
- deprecated_rule=deprecated_list_roles_for_trust,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.TRAIN),
+ deprecated_rule=deprecated_list_roles_for_trust),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'get_role_for_trust',
check_str=SYSTEM_READER_OR_TRUSTOR_OR_TRUSTEE,
@@ -115,9 +121,7 @@ trust_policies = [
'method': 'GET'},
{'path': '/v3/OS-TRUST/trusts/{trust_id}/roles/{role_id}',
'method': 'HEAD'}],
- deprecated_rule=deprecated_get_role_for_trust,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.TRAIN),
+ deprecated_rule=deprecated_get_role_for_trust),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'delete_trust',
check_str=SYSTEM_ADMIN_OR_TRUSTOR,
@@ -125,9 +129,7 @@ trust_policies = [
description='Revoke trust.',
operations=[{'path': '/v3/OS-TRUST/trusts/{trust_id}',
'method': 'DELETE'}],
- deprecated_rule=deprecated_delete_trust,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.TRAIN),
+ deprecated_rule=deprecated_delete_trust),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'get_trust',
check_str=SYSTEM_READER_OR_TRUSTOR_OR_TRUSTEE,
@@ -137,9 +139,7 @@ trust_policies = [
'method': 'GET'},
{'path': '/v3/OS-TRUST/trusts/{trust_id}',
'method': 'HEAD'}],
- deprecated_rule=deprecated_get_trust,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.TRAIN)
+ deprecated_rule=deprecated_get_trust)
]
diff --git a/keystone/common/policies/user.py b/keystone/common/policies/user.py
index 75a0062cf..0534f70f6 100644
--- a/keystone/common/policies/user.py
+++ b/keystone/common/policies/user.py
@@ -36,23 +36,33 @@ DEPRECATED_REASON = (
deprecated_get_user = policy.DeprecatedRule(
name=base.IDENTITY % 'get_user',
- check_str=base.RULE_ADMIN_OR_OWNER
+ check_str=base.RULE_ADMIN_OR_OWNER,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
deprecated_list_users = policy.DeprecatedRule(
name=base.IDENTITY % 'list_users',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
deprecated_create_user = policy.DeprecatedRule(
name=base.IDENTITY % 'create_user',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
deprecated_update_user = policy.DeprecatedRule(
name=base.IDENTITY % 'update_user',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
deprecated_delete_user = policy.DeprecatedRule(
name=base.IDENTITY % 'delete_user',
- check_str=base.RULE_ADMIN_REQUIRED
+ check_str=base.RULE_ADMIN_REQUIRED,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since=versionutils.deprecated.STEIN
)
user_policies = [
@@ -65,9 +75,7 @@ user_policies = [
'method': 'GET'},
{'path': '/v3/users/{user_id}',
'method': 'HEAD'}],
- deprecated_rule=deprecated_get_user,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN),
+ deprecated_rule=deprecated_get_user),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'list_users',
check_str=SYSTEM_READER_OR_DOMAIN_READER,
@@ -77,9 +85,7 @@ user_policies = [
'method': 'GET'},
{'path': '/v3/users',
'method': 'HEAD'}],
- deprecated_rule=deprecated_list_users,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN),
+ deprecated_rule=deprecated_list_users),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'list_projects_for_user',
check_str='',
@@ -111,9 +117,7 @@ user_policies = [
description='Create a user.',
operations=[{'path': '/v3/users',
'method': 'POST'}],
- deprecated_rule=deprecated_create_user,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN),
+ deprecated_rule=deprecated_create_user),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'update_user',
check_str=SYSTEM_ADMIN_OR_DOMAIN_ADMIN,
@@ -121,9 +125,7 @@ user_policies = [
description='Update a user, including administrative password resets.',
operations=[{'path': '/v3/users/{user_id}',
'method': 'PATCH'}],
- deprecated_rule=deprecated_update_user,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN),
+ deprecated_rule=deprecated_update_user),
policy.DocumentedRuleDefault(
name=base.IDENTITY % 'delete_user',
check_str=SYSTEM_ADMIN_OR_DOMAIN_ADMIN,
@@ -131,9 +133,7 @@ user_policies = [
description='Delete a user.',
operations=[{'path': '/v3/users/{user_id}',
'method': 'DELETE'}],
- deprecated_rule=deprecated_delete_user,
- deprecated_reason=DEPRECATED_REASON,
- deprecated_since=versionutils.deprecated.STEIN)
+ deprecated_rule=deprecated_delete_user)
]
diff --git a/keystone/common/sql/alembic.ini b/keystone/common/sql/alembic.ini
new file mode 100644
index 000000000..6818c4db7
--- /dev/null
+++ b/keystone/common/sql/alembic.ini
@@ -0,0 +1,100 @@
+# A generic, single database configuration.
+
+[alembic]
+# path to migration scripts
+script_location = %(here)s/migrations
+
+# template used to generate migration files
+# file_template = %%(rev)s_%%(slug)s
+
+# sys.path path, will be prepended to sys.path if present.
+# defaults to the current working directory.
+prepend_sys_path = .
+
+# timezone to use when rendering the date within the migration file
+# as well as the filename.
+# If specified, requires the python-dateutil library that can be
+# installed by adding `alembic[tz]` to the pip requirements
+# string value is passed to dateutil.tz.gettz()
+# leave blank for localtime
+# timezone =
+
+# max length of characters to apply to the
+# "slug" field
+# truncate_slug_length = 40
+
+# set to 'true' to run the environment during
+# the 'revision' command, regardless of autogenerate
+# revision_environment = false
+
+# set to 'true' to allow .pyc and .pyo files without
+# a source .py file to be detected as revisions in the
+# versions/ directory
+# sourceless = false
+
+# version location specification; This defaults
+# to keystone/common/sql/migrations/versions. When using multiple version
+# directories, initial revisions must be specified with --version-path.
+# The path separator used here should be the separator specified by "version_path_separator"
+# version_locations = %(here)s/bar:%(here)s/bat:keystone/common/sql/migrations/versions
+
+# version path separator; As mentioned above, this is the character used to split
+# version_locations. Valid values are:
+#
+# version_path_separator = :
+# version_path_separator = ;
+# version_path_separator = space
+# version_path_separator = os
+
+# the output encoding used when revision files
+# are written from script.py.mako
+# output_encoding = utf-8
+
+sqlalchemy.url = sqlite:///keystone.db
+
+
+[post_write_hooks]
+# post_write_hooks defines scripts or Python functions that are run
+# on newly generated revision scripts. See the documentation for further
+# detail and examples
+
+# format using "black" - use the console_scripts runner, against the "black" entrypoint
+# hooks = black
+# black.type = console_scripts
+# black.entrypoint = black
+# black.options = -l 79 REVISION_SCRIPT_FILENAME
+
+# Logging configuration
+[loggers]
+keys = root,sqlalchemy,alembic
+
+[handlers]
+keys = console
+
+[formatters]
+keys = generic
+
+[logger_root]
+level = WARN
+handlers = console
+qualname =
+
+[logger_sqlalchemy]
+level = WARN
+handlers =
+qualname = sqlalchemy.engine
+
+[logger_alembic]
+level = INFO
+handlers =
+qualname = alembic
+
+[handler_console]
+class = StreamHandler
+args = (sys.stderr,)
+level = NOTSET
+formatter = generic
+
+[formatter_generic]
+format = %(levelname)-5.5s [%(name)s] %(message)s
+datefmt = %H:%M:%S
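The new alembic.ini points script_location at %(here)s/migrations and ships only a placeholder sqlite URL; in a real deployment the connection string comes from keystone's configuration, not from this file. A minimal sketch of driving these migrations programmatically with Alembic's public API (the path and database URL below are illustrative assumptions):

import os

from alembic import command
from alembic.config import Config

# Load the ini file added in this commit; adjust the path to your checkout.
ini_path = os.path.join('keystone', 'common', 'sql', 'alembic.ini')
cfg = Config(ini_path)

# Override the placeholder sqlite URL with the real database (assumption:
# a local MySQL instance; use whatever your [database]/connection points at).
cfg.set_main_option('sqlalchemy.url',
                    'mysql+pymysql://keystone:secret@localhost/keystone')

# Apply every revision up to head, which is broadly what keystone-manage
# db_sync does once the migrations are managed by Alembic.
command.upgrade(cfg, 'head')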
diff --git a/keystone/common/sql/contract_repo/README b/keystone/common/sql/contract_repo/README
deleted file mode 100644
index 131117104..000000000
--- a/keystone/common/sql/contract_repo/README
+++ /dev/null
@@ -1,4 +0,0 @@
-This is a database migration repository.
-
-More information at
-https://opendev.org/openstack/sqlalchemy-migrate
diff --git a/keystone/common/sql/contract_repo/versions/002_password_created_at_not_nullable.py b/keystone/common/sql/contract_repo/versions/002_password_created_at_not_nullable.py
deleted file mode 100644
index da2981e0d..000000000
--- a/keystone/common/sql/contract_repo/versions/002_password_created_at_not_nullable.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import datetime
-
-import sqlalchemy as sql
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- password = sql.Table('password', meta, autoload=True)
- # Because it's difficult to get a timestamp server default working among
- # all of the supported databases and versions, I'm choosing to drop and
- # then recreate the column as I think this is a cleaner option. This
- # will only impact operators that have already deployed the 105 migration;
- # resetting the password created_at for security compliance features, if
- # enabled.
- password.c.created_at.drop()
- # sqlite doesn't support server_default=sql.func.now(), so skipping.
- if migrate_engine.name == 'sqlite':
- created_at = sql.Column('created_at', sql.TIMESTAMP, nullable=True)
- else:
- # Changing type to timestamp as mysql 5.5 and older doesn't support
- # datetime defaults.
- created_at = sql.Column('created_at', sql.TIMESTAMP, nullable=False,
- default=datetime.datetime.utcnow,
- server_default=sql.func.now())
- password.create_column(created_at)
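For comparison, the same drop-and-recreate of password.created_at under Alembic uses the op API instead of sqlalchemy-migrate column objects. A hedged sketch only; the revision identifiers are placeholders, not real keystone revisions:

"""Drop and recreate password.created_at with a server default (sketch)."""
import sqlalchemy as sa
from alembic import op

# Placeholder identifiers -- not actual keystone revisions.
revision = 'example0002'
down_revision = 'example0001'


def upgrade():
    bind = op.get_bind()
    op.drop_column('password', 'created_at')
    if bind.dialect.name == 'sqlite':
        # sqlite cannot take server_default=now() here, mirroring the
        # special case in the removed migration.
        op.add_column('password',
                      sa.Column('created_at', sa.TIMESTAMP(), nullable=True))
    else:
        op.add_column('password',
                      sa.Column('created_at', sa.TIMESTAMP(), nullable=False,
                                server_default=sa.func.now()))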
diff --git a/keystone/common/sql/contract_repo/versions/003_remove_unencrypted_blob_column_from_credential.py b/keystone/common/sql/contract_repo/versions/003_remove_unencrypted_blob_column_from_credential.py
deleted file mode 100644
index 3c169e644..000000000
--- a/keystone/common/sql/contract_repo/versions/003_remove_unencrypted_blob_column_from_credential.py
+++ /dev/null
@@ -1,60 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from keystone.common.sql import upgrades
-
-import sqlalchemy as sql
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- credential_table = sql.Table('credential', meta, autoload=True)
- credential_table.c.blob.drop()
-
- if upgrades.USE_TRIGGERS:
- if migrate_engine.name == 'postgresql':
- drop_credential_update_trigger = (
- 'DROP TRIGGER credential_update_read_only on credential;'
- )
- drop_credential_insert_trigger = (
- 'DROP TRIGGER credential_insert_read_only on credential;'
- )
- elif migrate_engine.name == 'mysql':
- drop_credential_update_trigger = (
- 'DROP TRIGGER credential_update_read_only;'
- )
- drop_credential_insert_trigger = (
- 'DROP TRIGGER credential_insert_read_only;'
- )
- else:
- # NOTE(lbragstad, henry-nash): Apparently sqlalchemy and sqlite
- # behave weirdly when using triggers, which is why we use the `IF
- # EXISTS` conditional here. I think what is happening is that the
- # credential_table.c.blob.drop() causes sqlalchemy to create a new
- # credential table - but it doesn't copy the triggers over, which
- # causes the DROP TRIGGER statement to fail without `IF EXISTS`
- # because the trigger doesn't exist in the new table(?!).
- drop_credential_update_trigger = (
- 'DROP TRIGGER IF EXISTS credential_update_read_only;'
- )
- drop_credential_insert_trigger = (
- 'DROP TRIGGER IF EXISTS credential_insert_read_only;'
- )
- migrate_engine.execute(drop_credential_update_trigger)
- migrate_engine.execute(drop_credential_insert_trigger)
-
- # NOTE(lbragstad): We make these columns not nullable because
- # Newton code (and anything after) would always populate these values.
- credential_table.c.encrypted_blob.alter(nullable=False)
- credential_table.c.key_hash.alter(nullable=False)
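The dialect-specific trigger handling above does not disappear with the move to Alembic; raw SQL is still required. A hedged sketch of the same branching with op.execute, reusing the trigger names from the removed code:

from alembic import op


def _drop_credential_triggers():
    # Mirrors the removed sqlalchemy-migrate logic: DROP TRIGGER syntax
    # differs per backend, so branch on the dialect name and issue raw SQL.
    dialect = op.get_bind().dialect.name
    if dialect == 'postgresql':
        op.execute('DROP TRIGGER credential_update_read_only on credential;')
        op.execute('DROP TRIGGER credential_insert_read_only on credential;')
    elif dialect == 'mysql':
        op.execute('DROP TRIGGER credential_update_read_only;')
        op.execute('DROP TRIGGER credential_insert_read_only;')
    else:
        op.execute('DROP TRIGGER IF EXISTS credential_update_read_only;')
        op.execute('DROP TRIGGER IF EXISTS credential_insert_read_only;')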
diff --git a/keystone/common/sql/contract_repo/versions/004_reset_password_created_at.py b/keystone/common/sql/contract_repo/versions/004_reset_password_created_at.py
deleted file mode 100644
index f453d135d..000000000
--- a/keystone/common/sql/contract_repo/versions/004_reset_password_created_at.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import datetime
-
-import sqlalchemy as sql
-import sqlalchemy.sql.expression as expression
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- password = sql.Table('password', meta, autoload=True)
- # reset created_at column
- password.c.created_at.drop()
- created_at = sql.Column('created_at', sql.DateTime(),
- nullable=True,
- default=datetime.datetime.utcnow)
- password.create_column(created_at)
- # update created_at value
- now = datetime.datetime.utcnow()
- values = {'created_at': now}
- stmt = password.update().where(
- password.c.created_at == expression.null()).values(values)
- stmt.execute()
- # set not nullable
- password.c.created_at.alter(nullable=False)
diff --git a/keystone/common/sql/contract_repo/versions/005_placeholder.py b/keystone/common/sql/contract_repo/versions/005_placeholder.py
deleted file mode 100644
index b259427e4..000000000
--- a/keystone/common/sql/contract_repo/versions/005_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Newton backports. Do not use this number for new
-# Ocata work. New Ocata work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/contract_repo/versions/006_placeholder.py b/keystone/common/sql/contract_repo/versions/006_placeholder.py
deleted file mode 100644
index b259427e4..000000000
--- a/keystone/common/sql/contract_repo/versions/006_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Newton backports. Do not use this number for new
-# Ocata work. New Ocata work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/contract_repo/versions/007_placeholder.py b/keystone/common/sql/contract_repo/versions/007_placeholder.py
deleted file mode 100644
index b259427e4..000000000
--- a/keystone/common/sql/contract_repo/versions/007_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Newton backports. Do not use this number for new
-# Ocata work. New Ocata work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/contract_repo/versions/008_placeholder.py b/keystone/common/sql/contract_repo/versions/008_placeholder.py
deleted file mode 100644
index b259427e4..000000000
--- a/keystone/common/sql/contract_repo/versions/008_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Newton backports. Do not use this number for new
-# Ocata work. New Ocata work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/contract_repo/versions/009_placeholder.py b/keystone/common/sql/contract_repo/versions/009_placeholder.py
deleted file mode 100644
index b259427e4..000000000
--- a/keystone/common/sql/contract_repo/versions/009_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Newton backports. Do not use this number for new
-# Ocata work. New Ocata work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/contract_repo/versions/010_contract_add_revocation_event_index.py b/keystone/common/sql/contract_repo/versions/010_contract_add_revocation_event_index.py
deleted file mode 100644
index 8aa15c1ef..000000000
--- a/keystone/common/sql/contract_repo/versions/010_contract_add_revocation_event_index.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/contract_repo/versions/011_contract_user_id_unique_for_nonlocal_user.py b/keystone/common/sql/contract_repo/versions/011_contract_user_id_unique_for_nonlocal_user.py
deleted file mode 100644
index 5c397c575..000000000
--- a/keystone/common/sql/contract_repo/versions/011_contract_user_id_unique_for_nonlocal_user.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import migrate
-import sqlalchemy as sql
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- nonlocal_user = sql.Table('nonlocal_user', meta, autoload=True)
- migrate.UniqueConstraint(nonlocal_user.c.user_id,
- name='ixu_nonlocal_user_user_id').create()
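Under Alembic the same constraint becomes a single op call. A hedged equivalent of the removed migration, using the constraint and table names shown above:

from alembic import op


def upgrade():
    # Equivalent of migrate.UniqueConstraint(...).create() in the removed file.
    op.create_unique_constraint('ixu_nonlocal_user_user_id',
                                'nonlocal_user', ['user_id'])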
diff --git a/keystone/common/sql/contract_repo/versions/012_contract_add_domain_id_to_idp.py b/keystone/common/sql/contract_repo/versions/012_contract_add_domain_id_to_idp.py
deleted file mode 100644
index b919f93dc..000000000
--- a/keystone/common/sql/contract_repo/versions/012_contract_add_domain_id_to_idp.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sql
-
-from keystone.common.sql import upgrades
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- idp_table = sql.Table('identity_provider', meta, autoload=True)
- idp_table.c.domain_id.alter(nullable=False, unique=True)
-
- if upgrades.USE_TRIGGERS:
- if migrate_engine.name == 'postgresql':
- drop_idp_insert_trigger = (
- 'DROP TRIGGER idp_insert_read_only on identity_provider;'
- )
- elif migrate_engine.name == 'mysql':
- drop_idp_insert_trigger = (
- 'DROP TRIGGER idp_insert_read_only;'
- )
- else:
- drop_idp_insert_trigger = (
- 'DROP TRIGGER IF EXISTS idp_insert_read_only;'
- )
- migrate_engine.execute(drop_idp_insert_trigger)
diff --git a/keystone/common/sql/contract_repo/versions/013_contract_protocol_cascade_delete_for_federated_user.py b/keystone/common/sql/contract_repo/versions/013_contract_protocol_cascade_delete_for_federated_user.py
deleted file mode 100644
index 9c6c9bc2d..000000000
--- a/keystone/common/sql/contract_repo/versions/013_contract_protocol_cascade_delete_for_federated_user.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import migrate
-import sqlalchemy as sql
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- federated_table = sql.Table('federated_user', meta, autoload=True)
- protocol_table = sql.Table('federation_protocol', meta, autoload=True)
-
- migrate.ForeignKeyConstraint(
- columns=[federated_table.c.protocol_id, federated_table.c.idp_id],
- refcolumns=[protocol_table.c.id, protocol_table.c.idp_id]).drop()
-
- migrate.ForeignKeyConstraint(
- columns=[federated_table.c.protocol_id, federated_table.c.idp_id],
- refcolumns=[protocol_table.c.id, protocol_table.c.idp_id],
- ondelete='CASCADE').create()
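A hedged Alembic equivalent of the composite foreign-key swap above. The existing constraint name is deployment-specific, so 'fk_federated_user_protocol' below is purely an illustrative placeholder:

from alembic import op


def upgrade():
    # Drop the old composite FK (name is a placeholder) and recreate it
    # with ON DELETE CASCADE, as the removed migration did.
    op.drop_constraint('fk_federated_user_protocol', 'federated_user',
                       type_='foreignkey')
    op.create_foreign_key('fk_federated_user_protocol', 'federated_user',
                          'federation_protocol',
                          ['protocol_id', 'idp_id'], ['id', 'idp_id'],
                          ondelete='CASCADE')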
diff --git a/keystone/common/sql/contract_repo/versions/014_contract_add_domain_id_to_user_table.py b/keystone/common/sql/contract_repo/versions/014_contract_add_domain_id_to_user_table.py
deleted file mode 100644
index 86eaeae3f..000000000
--- a/keystone/common/sql/contract_repo/versions/014_contract_add_domain_id_to_user_table.py
+++ /dev/null
@@ -1,94 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import migrate
-import sqlalchemy as sql
-
-from keystone.common.sql import upgrades
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
- inspector = sql.inspect(migrate_engine)
-
- user = sql.Table('user', meta, autoload=True)
- local_user = sql.Table('local_user', meta, autoload=True)
- nonlocal_user = sql.Table('nonlocal_user', meta, autoload=True)
-
- # drop previous fk constraints
- fk_name = _get_fk_name(inspector, 'local_user', 'user_id')
- if fk_name:
- migrate.ForeignKeyConstraint(columns=[local_user.c.user_id],
- refcolumns=[user.c.id],
- name=fk_name).drop()
-
- fk_name = _get_fk_name(inspector, 'nonlocal_user', 'user_id')
- if fk_name:
- migrate.ForeignKeyConstraint(columns=[nonlocal_user.c.user_id],
- refcolumns=[user.c.id],
- name=fk_name).drop()
-
- # create user unique constraint needed for the new composite fk constraint
- migrate.UniqueConstraint(user.c.id, user.c.domain_id,
- name='ixu_user_id_domain_id').create()
- # create new composite fk constraints
- migrate.ForeignKeyConstraint(
- columns=[local_user.c.user_id, local_user.c.domain_id],
- refcolumns=[user.c.id, user.c.domain_id],
- onupdate='CASCADE', ondelete='CASCADE').create()
- migrate.ForeignKeyConstraint(
- columns=[nonlocal_user.c.user_id, nonlocal_user.c.domain_id],
- refcolumns=[user.c.id, user.c.domain_id],
- onupdate='CASCADE', ondelete='CASCADE').create()
-
- # drop triggers
- if upgrades.USE_TRIGGERS:
- if migrate_engine.name == 'postgresql':
- drop_local_user_insert_trigger = (
- 'DROP TRIGGER local_user_after_insert_trigger on local_user;')
- drop_local_user_update_trigger = (
- 'DROP TRIGGER local_user_after_update_trigger on local_user;')
- drop_nonlocal_user_insert_trigger = (
- 'DROP TRIGGER nonlocal_user_after_insert_trigger '
- 'on nonlocal_user;')
- drop_nonlocal_user_update_trigger = (
- 'DROP TRIGGER nonlocal_user_after_update_trigger '
- 'on nonlocal_user;')
- elif migrate_engine.name == 'mysql':
- drop_local_user_insert_trigger = (
- 'DROP TRIGGER local_user_after_insert_trigger;')
- drop_local_user_update_trigger = (
- 'DROP TRIGGER local_user_after_update_trigger;')
- drop_nonlocal_user_insert_trigger = (
- 'DROP TRIGGER nonlocal_user_after_insert_trigger;')
- drop_nonlocal_user_update_trigger = (
- 'DROP TRIGGER nonlocal_user_after_update_trigger;')
- else:
- drop_local_user_insert_trigger = (
- 'DROP TRIGGER IF EXISTS local_user_after_insert_trigger;')
- drop_local_user_update_trigger = (
- 'DROP TRIGGER IF EXISTS local_user_after_update_trigger;')
- drop_nonlocal_user_insert_trigger = (
- 'DROP TRIGGER IF EXISTS nonlocal_user_after_insert_trigger;')
- drop_nonlocal_user_update_trigger = (
- 'DROP TRIGGER IF EXISTS nonlocal_user_after_update_trigger;')
- migrate_engine.execute(drop_local_user_insert_trigger)
- migrate_engine.execute(drop_local_user_update_trigger)
- migrate_engine.execute(drop_nonlocal_user_insert_trigger)
- migrate_engine.execute(drop_nonlocal_user_update_trigger)
-
-
-def _get_fk_name(inspector, table, fk_column):
- for fk in inspector.get_foreign_keys(table):
- if fk_column in fk['constrained_columns']:
- return fk['name']
diff --git a/keystone/common/sql/contract_repo/versions/015_contract_update_federated_user_domain.py b/keystone/common/sql/contract_repo/versions/015_contract_update_federated_user_domain.py
deleted file mode 100644
index 8dd77c60a..000000000
--- a/keystone/common/sql/contract_repo/versions/015_contract_update_federated_user_domain.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sql
-
-from keystone.common.sql import upgrades
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- user_table = sql.Table('user', meta, autoload=True)
- user_table.c.domain_id.alter(nullable=False)
-
- if upgrades.USE_TRIGGERS:
- if migrate_engine.name == 'postgresql':
- drop_trigger_stmt = 'DROP TRIGGER federated_user_insert_trigger '
- drop_trigger_stmt += 'on federated_user;'
- elif migrate_engine.name == 'mysql':
- drop_trigger_stmt = 'DROP TRIGGER federated_user_insert_trigger;'
- else:
- drop_trigger_stmt = (
- 'DROP TRIGGER IF EXISTS federated_user_insert_trigger;')
- migrate_engine.execute(drop_trigger_stmt)
diff --git a/keystone/common/sql/contract_repo/versions/016_contract_add_user_options.py b/keystone/common/sql/contract_repo/versions/016_contract_add_user_options.py
deleted file mode 100644
index 9b6593fe6..000000000
--- a/keystone/common/sql/contract_repo/versions/016_contract_add_user_options.py
+++ /dev/null
@@ -1,16 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-def upgrade(migrate_engine):
- # NOTE(notmorgan): This is a no-op, no data-migration needed.
- pass
diff --git a/keystone/common/sql/contract_repo/versions/017_placeholder.py b/keystone/common/sql/contract_repo/versions/017_placeholder.py
deleted file mode 100644
index cd0769c5e..000000000
--- a/keystone/common/sql/contract_repo/versions/017_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Ocata backports. Do not use this number for new
-# Pike work. New Pike work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/contract_repo/versions/018_placeholder.py b/keystone/common/sql/contract_repo/versions/018_placeholder.py
deleted file mode 100644
index cd0769c5e..000000000
--- a/keystone/common/sql/contract_repo/versions/018_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Ocata backports. Do not use this number for new
-# Pike work. New Pike work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/contract_repo/versions/019_placeholder.py b/keystone/common/sql/contract_repo/versions/019_placeholder.py
deleted file mode 100644
index cd0769c5e..000000000
--- a/keystone/common/sql/contract_repo/versions/019_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Ocata backports. Do not use this number for new
-# Pike work. New Pike work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/contract_repo/versions/020_placeholder.py b/keystone/common/sql/contract_repo/versions/020_placeholder.py
deleted file mode 100644
index cd0769c5e..000000000
--- a/keystone/common/sql/contract_repo/versions/020_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Ocata backports. Do not use this number for new
-# Pike work. New Pike work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/contract_repo/versions/021_placeholder.py b/keystone/common/sql/contract_repo/versions/021_placeholder.py
deleted file mode 100644
index cd0769c5e..000000000
--- a/keystone/common/sql/contract_repo/versions/021_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Ocata backports. Do not use this number for new
-# Pike work. New Pike work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/contract_repo/versions/022_contract_add_default_project_id_index.py b/keystone/common/sql/contract_repo/versions/022_contract_add_default_project_id_index.py
deleted file mode 100644
index 8aa15c1ef..000000000
--- a/keystone/common/sql/contract_repo/versions/022_contract_add_default_project_id_index.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/contract_repo/versions/023_contract_add_second_password_column_for_expanded_hash_sizes.py b/keystone/common/sql/contract_repo/versions/023_contract_add_second_password_column_for_expanded_hash_sizes.py
deleted file mode 100644
index 8aa15c1ef..000000000
--- a/keystone/common/sql/contract_repo/versions/023_contract_add_second_password_column_for_expanded_hash_sizes.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/contract_repo/versions/024_contract_create_created_at_int_columns.py b/keystone/common/sql/contract_repo/versions/024_contract_create_created_at_int_columns.py
deleted file mode 100644
index 986e19d0c..000000000
--- a/keystone/common/sql/contract_repo/versions/024_contract_create_created_at_int_columns.py
+++ /dev/null
@@ -1,61 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import datetime
-
-import pytz
-import sqlalchemy as sql
-from sqlalchemy.orm import sessionmaker
-
-
-_epoch = datetime.datetime.fromtimestamp(0, tz=pytz.UTC)
-
-
-def _convert_value_datetime_to_int(dt):
- dt = dt.replace(tzinfo=pytz.utc)
- return int((dt - _epoch).total_seconds() * 1000000)
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
- maker = sessionmaker(bind=migrate_engine)
- session = maker()
-
- password_table = sql.Table('password', meta, autoload=True)
- passwords = list(password_table.select().execute())
-
- for passwd in passwords:
- values = {
- 'created_at_int': _convert_value_datetime_to_int(passwd.created_at)
- }
-
- if passwd.expires_at is not None:
- values['expires_at_int'] = _convert_value_datetime_to_int(
- passwd.expires_at)
-
- update = password_table.update().where(
- password_table.c.id == passwd.id).values(values)
- session.execute(update)
- session.commit()
-
- password_table = sql.Table('password', meta, autoload=True)
- # The created_at_int data cannot really be nullable long term. This
- # corrects the data to be not nullable, but must be done in the contract
- # phase for two reasons. The first is due to "additive only" requirements.
- # The second is because we need to ensure all nodes in the deployment are
- # running the Pike code-base before we migrate all password entries. This
- # avoids locking the password table or having a partial outage while doing
- # the migration.
- password_table.c.created_at_int.alter(nullable=False, default=0,
- server_default='0')
- session.close()
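The conversion helper removed above stores timestamps as integer microseconds since the Unix epoch. A standalone sketch of the same arithmetic with only the standard library (no pytz), plus a quick sanity check:

import datetime

_EPOCH = datetime.datetime.fromtimestamp(0, tz=datetime.timezone.utc)


def datetime_to_int(dt):
    # Treat naive datetimes as UTC, then express them as integer
    # microseconds since the epoch -- the same arithmetic as the removed
    # _convert_value_datetime_to_int helper.
    dt = dt.replace(tzinfo=datetime.timezone.utc)
    return int((dt - _EPOCH).total_seconds() * 1000000)


# 1970-01-01 00:00:01 UTC is exactly one second, i.e. 1000000 microseconds.
assert datetime_to_int(datetime.datetime(1970, 1, 1, 0, 0, 1)) == 1000000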
diff --git a/keystone/common/sql/contract_repo/versions/025_placeholder.py b/keystone/common/sql/contract_repo/versions/025_placeholder.py
deleted file mode 100644
index a96cd6f36..000000000
--- a/keystone/common/sql/contract_repo/versions/025_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Pike backports. Do not use this number for new
-# Queens work. New Queens work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/contract_repo/versions/026_placeholder.py b/keystone/common/sql/contract_repo/versions/026_placeholder.py
deleted file mode 100644
index a96cd6f36..000000000
--- a/keystone/common/sql/contract_repo/versions/026_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Pike backports. Do not use this number for new
-# Queens work. New Queens work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/contract_repo/versions/027_placeholder.py b/keystone/common/sql/contract_repo/versions/027_placeholder.py
deleted file mode 100644
index a96cd6f36..000000000
--- a/keystone/common/sql/contract_repo/versions/027_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Pike backports. Do not use this number for new
-# Queens work. New Queens work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/contract_repo/versions/028_placeholder.py b/keystone/common/sql/contract_repo/versions/028_placeholder.py
deleted file mode 100644
index a96cd6f36..000000000
--- a/keystone/common/sql/contract_repo/versions/028_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Pike backports. Do not use this number for new
-# Queens work. New Queens work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/contract_repo/versions/029_placeholder.py b/keystone/common/sql/contract_repo/versions/029_placeholder.py
deleted file mode 100644
index a96cd6f36..000000000
--- a/keystone/common/sql/contract_repo/versions/029_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Pike backports. Do not use this number for new
-# Queens work. New Queens work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/contract_repo/versions/030_contract_add_project_tags_table.py b/keystone/common/sql/contract_repo/versions/030_contract_add_project_tags_table.py
deleted file mode 100644
index 8aa15c1ef..000000000
--- a/keystone/common/sql/contract_repo/versions/030_contract_add_project_tags_table.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/contract_repo/versions/031_contract_system_assignment_table.py b/keystone/common/sql/contract_repo/versions/031_contract_system_assignment_table.py
deleted file mode 100644
index 18a28170c..000000000
--- a/keystone/common/sql/contract_repo/versions/031_contract_system_assignment_table.py
+++ /dev/null
@@ -1,16 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-def upgrade(migrate_engine):
- # NOTE(lbragstad): System assignments only require additive changes.
- pass
diff --git a/keystone/common/sql/contract_repo/versions/032_contract_add_expired_at_int_to_trust.py b/keystone/common/sql/contract_repo/versions/032_contract_add_expired_at_int_to_trust.py
deleted file mode 100644
index 5839b8caa..000000000
--- a/keystone/common/sql/contract_repo/versions/032_contract_add_expired_at_int_to_trust.py
+++ /dev/null
@@ -1,51 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import datetime
-
-from migrate import UniqueConstraint
-import pytz
-import sqlalchemy as sql
-from sqlalchemy.orm import sessionmaker
-
-
-_epoch = datetime.datetime.fromtimestamp(0, tz=pytz.UTC)
-
-
-def _convert_value_datetime_to_int(dt):
- dt = dt.replace(tzinfo=pytz.utc)
- return int((dt - _epoch).total_seconds() * 1000000)
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
- maker = sessionmaker(bind=migrate_engine)
- session = maker()
-
- trust_table = sql.Table('trust', meta, autoload=True)
- trusts = list(trust_table.select().execute())
-
- for trust in trusts:
- values = {}
- if trust.expires_at is not None:
- values['expires_at_int'] = _convert_value_datetime_to_int(
- trust.expires_at)
-
- update = trust_table.update().where(
- trust_table.c.id == trust.id).values(values)
- session.execute(update)
- session.commit()
-
- UniqueConstraint(table=trust_table,
- name='duplicate_trust_constraint').drop()
- session.close()
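
For context, the deleted 032 contract migration backfills expires_at_int by converting each trust's expires_at datetime to whole microseconds since the Unix epoch before dropping the old unique constraint. A minimal standalone sketch of that conversion, with an illustrative helper name:

    # Standalone sketch of the datetime-to-integer conversion performed above.
    # The helper name is illustrative, not part of keystone.
    import datetime

    import pytz

    _EPOCH = datetime.datetime.fromtimestamp(0, tz=pytz.UTC)


    def to_expires_at_int(dt):
        # Treat the value as UTC and count whole microseconds since the epoch.
        dt = dt.replace(tzinfo=pytz.utc)
        return int((dt - _EPOCH).total_seconds() * 1000000)


    # One second past the epoch maps to 1,000,000 microseconds.
    assert to_expires_at_int(datetime.datetime(1970, 1, 1, 0, 0, 1)) == 1000000
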
diff --git a/keystone/common/sql/contract_repo/versions/033_contract_add_limits_tables.py b/keystone/common/sql/contract_repo/versions/033_contract_add_limits_tables.py
deleted file mode 100644
index 8aa15c1ef..000000000
--- a/keystone/common/sql/contract_repo/versions/033_contract_add_limits_tables.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/contract_repo/versions/034_contract_add_application_credentials_table.py b/keystone/common/sql/contract_repo/versions/034_contract_add_application_credentials_table.py
deleted file mode 100644
index 8aa15c1ef..000000000
--- a/keystone/common/sql/contract_repo/versions/034_contract_add_application_credentials_table.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/contract_repo/versions/035_contract_add_system_column_to_application_credential_table.py b/keystone/common/sql/contract_repo/versions/035_contract_add_system_column_to_application_credential_table.py
deleted file mode 100644
index 192391a54..000000000
--- a/keystone/common/sql/contract_repo/versions/035_contract_add_system_column_to_application_credential_table.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sql
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- application_credential_table = sql.Table(
- 'application_credential', meta, autoload=True
- )
- application_credential_table.c.project_id.alter(nullable=True)
diff --git a/keystone/common/sql/contract_repo/versions/036_contract_rename_application_credential_restriction_column.py b/keystone/common/sql/contract_repo/versions/036_contract_rename_application_credential_restriction_column.py
deleted file mode 100644
index f8ef7e1a7..000000000
--- a/keystone/common/sql/contract_repo/versions/036_contract_rename_application_credential_restriction_column.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sql
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- application_credential_table = sql.Table(
- 'application_credential', meta, autoload=True
- )
- if migrate_engine.name == 'sqlite':
- old_table = sql.Table('application_credential', meta, autoload=True)
- new_table = sql.Table('application_credential_temp', meta,
- autoload=True)
- old_table.drop()
- new_table.rename('application_credential')
- else:
- table = application_credential_table
- # NOTE(cmurphy) because of lp#1744948, some deployments could already
- # have made it past the expand step and be stuck on the contract step.
- # If necessary, do the expand step here.
- # At this point this API is not yet exposed and there should be no data
- # in this table.
- if 'unrestricted' not in table.columns:
- unrestricted = sql.Column('unrestricted', sql.Boolean())
- table.create_column(unrestricted)
- column = table.c.allow_application_credential_creation
- column.drop()
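
The guard in the deleted 036 migration checks for the unrestricted column before creating it, so deployments that hit the bug referenced in the NOTE and already ran the expand step are not broken by a duplicate-column error. A minimal sketch of the same check-then-add pattern in plain SQLAlchemy (engine URL, table and column names are illustrative, and the ALTER statement stands in for sqlalchemy-migrate's create_column):

    # Sketch: add a column only if it is not already present.
    import sqlalchemy as sql

    engine = sql.create_engine('sqlite://')
    with engine.begin() as conn:
        conn.execute(sql.text(
            'CREATE TABLE application_credential (id INTEGER PRIMARY KEY)'))
        existing = {
            c['name']
            for c in sql.inspect(conn).get_columns('application_credential')
        }
        if 'unrestricted' not in existing:
            conn.execute(sql.text(
                'ALTER TABLE application_credential'
                ' ADD COLUMN unrestricted BOOLEAN'))
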
diff --git a/keystone/common/sql/contract_repo/versions/037_contract_remove_service_and_region_fk_for_registered_limit.py b/keystone/common/sql/contract_repo/versions/037_contract_remove_service_and_region_fk_for_registered_limit.py
deleted file mode 100644
index 72a3f315f..000000000
--- a/keystone/common/sql/contract_repo/versions/037_contract_remove_service_and_region_fk_for_registered_limit.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from migrate import ForeignKeyConstraint
-import sqlalchemy as sql
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- registered_limit_table = sql.Table('registered_limit', meta, autoload=True)
- service_table = sql.Table('service', meta, autoload=True)
- region_table = sql.Table('region', meta, autoload=True)
-
- inspector = sql.inspect(migrate_engine)
- for fk in inspector.get_foreign_keys('registered_limit'):
- if fk['referred_table'] == 'service':
- fkey = ForeignKeyConstraint([registered_limit_table.c.service_id],
- [service_table.c.id],
- name=fk['name'])
- fkey.drop()
- else:
- fkey = ForeignKeyConstraint([registered_limit_table.c.region_id],
- [region_table.c.id],
- name=fk['name'])
- fkey.drop()
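
The deleted 037 migration looks up the registered_limit foreign keys through the inspector rather than hard-coding constraint names, presumably because the auto-generated names differ between MySQL and PostgreSQL deployments. A minimal sketch of that reflection step in plain SQLAlchemy (function and variable names are illustrative; the actual drop uses the legacy migrate.ForeignKeyConstraint API shown above):

    # Sketch: discover foreign keys by reflection instead of hard-coding
    # backend-specific constraint names.
    import sqlalchemy as sql


    def foreign_keys_by_target(engine, table_name):
        """Map referred table name to constraint name for ``table_name``."""
        inspector = sql.inspect(engine)
        return {
            fk['referred_table']: fk['name']
            for fk in inspector.get_foreign_keys(table_name)
        }

    # Illustrative usage against an existing engine:
    # fks = foreign_keys_by_target(engine, 'registered_limit')
    # fks.get('service') and fks.get('region') then name the constraints to drop.
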
diff --git a/keystone/common/sql/contract_repo/versions/038_placeholder.py b/keystone/common/sql/contract_repo/versions/038_placeholder.py
deleted file mode 100644
index 71faccf92..000000000
--- a/keystone/common/sql/contract_repo/versions/038_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Queens backports. Do not use this number for new
-# Rocky work. New Rocky work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/contract_repo/versions/039_placeholder.py b/keystone/common/sql/contract_repo/versions/039_placeholder.py
deleted file mode 100644
index 71faccf92..000000000
--- a/keystone/common/sql/contract_repo/versions/039_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Queens backports. Do not use this number for new
-# Rocky work. New Rocky work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/contract_repo/versions/040_placeholder.py b/keystone/common/sql/contract_repo/versions/040_placeholder.py
deleted file mode 100644
index 71faccf92..000000000
--- a/keystone/common/sql/contract_repo/versions/040_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Queens backports. Do not use this number for new
-# Rocky work. New Rocky work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/contract_repo/versions/041_placeholder.py b/keystone/common/sql/contract_repo/versions/041_placeholder.py
deleted file mode 100644
index 71faccf92..000000000
--- a/keystone/common/sql/contract_repo/versions/041_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Queens backports. Do not use this number for new
-# Rocky work. New Rocky work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/contract_repo/versions/042_placeholder.py b/keystone/common/sql/contract_repo/versions/042_placeholder.py
deleted file mode 100644
index 71faccf92..000000000
--- a/keystone/common/sql/contract_repo/versions/042_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Queens backports. Do not use this number for new
-# Rocky work. New Rocky work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/contract_repo/versions/043_placeholder.py b/keystone/common/sql/contract_repo/versions/043_placeholder.py
deleted file mode 100644
index 71faccf92..000000000
--- a/keystone/common/sql/contract_repo/versions/043_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Queens backports. Do not use this number for new
-# Rocky work. New Rocky work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/contract_repo/versions/044_placeholder.py b/keystone/common/sql/contract_repo/versions/044_placeholder.py
deleted file mode 100644
index 71faccf92..000000000
--- a/keystone/common/sql/contract_repo/versions/044_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Queens backports. Do not use this number for new
-# Rocky work. New Rocky work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/contract_repo/versions/045_contract_add_description_to_limit.py b/keystone/common/sql/contract_repo/versions/045_contract_add_description_to_limit.py
deleted file mode 100644
index 9cb40b454..000000000
--- a/keystone/common/sql/contract_repo/versions/045_contract_add_description_to_limit.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/contract_repo/versions/046_contract_old_password_data_to_password_hash_column.py b/keystone/common/sql/contract_repo/versions/046_contract_old_password_data_to_password_hash_column.py
deleted file mode 100644
index 8aa15c1ef..000000000
--- a/keystone/common/sql/contract_repo/versions/046_contract_old_password_data_to_password_hash_column.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/contract_repo/versions/047_contract_expand_update_pk_for_unified_limit.py b/keystone/common/sql/contract_repo/versions/047_contract_expand_update_pk_for_unified_limit.py
deleted file mode 100644
index d750bde53..000000000
--- a/keystone/common/sql/contract_repo/versions/047_contract_expand_update_pk_for_unified_limit.py
+++ /dev/null
@@ -1,63 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import migrate
-import sqlalchemy as sql
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
- # For Mysql and PostgreSQL, drop the FK in limit table, drop the unique
- # constraint in registered limit and limit tables.
- #
- # For SQLite, drop the old tables, then rename the new tables.
- limit_table = sql.Table('limit', meta, autoload=True)
- registered_limit_table = sql.Table('registered_limit', meta, autoload=True)
-
- if migrate_engine.name != 'sqlite':
- project_table = sql.Table('project', meta, autoload=True)
- inspector = sql.inspect(migrate_engine)
- for fk in inspector.get_foreign_keys('limit'):
- fkey = migrate.ForeignKeyConstraint(
- [limit_table.c.project_id],
- [project_table.c.id],
- name=fk['name'])
- fkey.drop()
- for uc in inspector.get_unique_constraints('limit'):
- if set(uc['column_names']) == set(['project_id', 'service_id',
- 'region_id', 'resource_name']):
- uc = migrate.UniqueConstraint(limit_table.c.project_id,
- limit_table.c.service_id,
- limit_table.c.region_id,
- limit_table.c.resource_name,
- name=uc['name'])
- uc.drop()
- for uc in inspector.get_unique_constraints('registered_limit'):
- if set(uc['column_names']) == set(['service_id', 'region_id',
- 'resource_name']):
- uc = migrate.UniqueConstraint(
- registered_limit_table.c.service_id,
- registered_limit_table.c.region_id,
- registered_limit_table.c.resource_name,
- name=uc['name'])
- uc.drop()
-
- else:
- registered_limit_table_new = sql.Table('registered_limit_new', meta,
- autoload=True)
- limit_table_new = sql.Table('limit_new', meta, autoload=True)
-
- limit_table.drop()
- limit_table_new.rename('limit')
- registered_limit_table.drop()
- registered_limit_table_new.rename('registered_limit')
diff --git a/keystone/common/sql/contract_repo/versions/048_contract_add_registered_limit_id_column_for_limit.py b/keystone/common/sql/contract_repo/versions/048_contract_add_registered_limit_id_column_for_limit.py
deleted file mode 100644
index 8aa15c1ef..000000000
--- a/keystone/common/sql/contract_repo/versions/048_contract_add_registered_limit_id_column_for_limit.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/contract_repo/versions/049_placeholder.py b/keystone/common/sql/contract_repo/versions/049_placeholder.py
deleted file mode 100644
index 8f51a8962..000000000
--- a/keystone/common/sql/contract_repo/versions/049_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Rocky backports. Do not use this number for new
-# Stein work. New Stein work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/contract_repo/versions/050_placeholder.py b/keystone/common/sql/contract_repo/versions/050_placeholder.py
deleted file mode 100644
index 8f51a8962..000000000
--- a/keystone/common/sql/contract_repo/versions/050_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Rocky backports. Do not use this number for new
-# Stein work. New Stein work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/contract_repo/versions/051_placeholder.py b/keystone/common/sql/contract_repo/versions/051_placeholder.py
deleted file mode 100644
index 8f51a8962..000000000
--- a/keystone/common/sql/contract_repo/versions/051_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Rocky backports. Do not use this number for new
-# Stein work. New Stein work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/contract_repo/versions/052_placeholder.py b/keystone/common/sql/contract_repo/versions/052_placeholder.py
deleted file mode 100644
index 8f51a8962..000000000
--- a/keystone/common/sql/contract_repo/versions/052_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Rocky backports. Do not use this number for new
-# Stein work. New Stein work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/contract_repo/versions/053_contract_add_role_description_to_role_table.py b/keystone/common/sql/contract_repo/versions/053_contract_add_role_description_to_role_table.py
deleted file mode 100644
index 8aa15c1ef..000000000
--- a/keystone/common/sql/contract_repo/versions/053_contract_add_role_description_to_role_table.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/contract_repo/versions/055_contract_add_domain_to_limit.py b/keystone/common/sql/contract_repo/versions/055_contract_add_domain_to_limit.py
deleted file mode 100644
index 36d641768..000000000
--- a/keystone/common/sql/contract_repo/versions/055_contract_add_domain_to_limit.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sql
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- limit_table = sql.Table('limit', meta, autoload=True)
- limit_table.c.project_id.alter(nullable=True)
diff --git a/keystone/common/sql/contract_repo/versions/056_contract_add_application_credential_access_rules.py b/keystone/common/sql/contract_repo/versions/056_contract_add_application_credential_access_rules.py
deleted file mode 100644
index 8066b50bb..000000000
--- a/keystone/common/sql/contract_repo/versions/056_contract_add_application_credential_access_rules.py
+++ /dev/null
@@ -1,17 +0,0 @@
-# Copyright 2019 SUSE Linux GmbH
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/contract_repo/versions/057_placeholder.py b/keystone/common/sql/contract_repo/versions/057_placeholder.py
deleted file mode 100644
index dff6bd138..000000000
--- a/keystone/common/sql/contract_repo/versions/057_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Stein backports. Do not use this number for new
-# Train work. New Train work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/contract_repo/versions/058_placeholder.py b/keystone/common/sql/contract_repo/versions/058_placeholder.py
deleted file mode 100644
index dff6bd138..000000000
--- a/keystone/common/sql/contract_repo/versions/058_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Stein backports. Do not use this number for new
-# Train work. New Train work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/contract_repo/versions/059_placeholder.py b/keystone/common/sql/contract_repo/versions/059_placeholder.py
deleted file mode 100644
index dff6bd138..000000000
--- a/keystone/common/sql/contract_repo/versions/059_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Stein backports. Do not use this number for new
-# Train work. New Train work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/contract_repo/versions/060_placeholder.py b/keystone/common/sql/contract_repo/versions/060_placeholder.py
deleted file mode 100644
index dff6bd138..000000000
--- a/keystone/common/sql/contract_repo/versions/060_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Stein backports. Do not use this number for new
-# Train work. New Train work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/contract_repo/versions/061_placeholder.py b/keystone/common/sql/contract_repo/versions/061_placeholder.py
deleted file mode 100644
index dff6bd138..000000000
--- a/keystone/common/sql/contract_repo/versions/061_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Stein backports. Do not use this number for new
-# Train work. New Train work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/contract_repo/versions/062_contract_extract_redelegation_data_from_extras.py b/keystone/common/sql/contract_repo/versions/062_contract_extract_redelegation_data_from_extras.py
deleted file mode 100644
index 9cb40b454..000000000
--- a/keystone/common/sql/contract_repo/versions/062_contract_extract_redelegation_data_from_extras.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/contract_repo/versions/063_contract_drop_limit_columns.py b/keystone/common/sql/contract_repo/versions/063_contract_drop_limit_columns.py
deleted file mode 100644
index 8858ba916..000000000
--- a/keystone/common/sql/contract_repo/versions/063_contract_drop_limit_columns.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sql
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- limit_table = sql.Table('limit', meta, autoload=True)
- limit_table.c.service_id.drop()
- limit_table.c.region_id.drop()
- limit_table.c.resource_name.drop()
diff --git a/keystone/common/sql/contract_repo/versions/064_contract_add_remote_id_attribute_to_federation_protocol_table.py b/keystone/common/sql/contract_repo/versions/064_contract_add_remote_id_attribute_to_federation_protocol_table.py
deleted file mode 100644
index 8aa15c1ef..000000000
--- a/keystone/common/sql/contract_repo/versions/064_contract_add_remote_id_attribute_to_federation_protocol_table.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/contract_repo/versions/065_contract_add_user_external_id_to_access_rule.py b/keystone/common/sql/contract_repo/versions/065_contract_add_user_external_id_to_access_rule.py
deleted file mode 100644
index 8aa15c1ef..000000000
--- a/keystone/common/sql/contract_repo/versions/065_contract_add_user_external_id_to_access_rule.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/contract_repo/versions/066_contract_add_resource_options_table.py b/keystone/common/sql/contract_repo/versions/066_contract_add_resource_options_table.py
deleted file mode 100644
index d1f20e252..000000000
--- a/keystone/common/sql/contract_repo/versions/066_contract_add_resource_options_table.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# NOTE(morgan): there is nothing to do here, no contract action to take
-# at this time
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/contract_repo/versions/067_placeholder.py b/keystone/common/sql/contract_repo/versions/067_placeholder.py
deleted file mode 100644
index 8522ef3ce..000000000
--- a/keystone/common/sql/contract_repo/versions/067_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Train backports. Do not use this number for new
-# Ussuri work. New Ussuri work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/contract_repo/versions/068_placeholder.py b/keystone/common/sql/contract_repo/versions/068_placeholder.py
deleted file mode 100644
index 8522ef3ce..000000000
--- a/keystone/common/sql/contract_repo/versions/068_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Train backports. Do not use this number for new
-# Ussuri work. New Ussuri work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/contract_repo/versions/069_placeholder.py b/keystone/common/sql/contract_repo/versions/069_placeholder.py
deleted file mode 100644
index 8522ef3ce..000000000
--- a/keystone/common/sql/contract_repo/versions/069_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Train backports. Do not use this number for new
-# Ussuri work. New Ussuri work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/contract_repo/versions/070_placeholder.py b/keystone/common/sql/contract_repo/versions/070_placeholder.py
deleted file mode 100644
index 8522ef3ce..000000000
--- a/keystone/common/sql/contract_repo/versions/070_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Train backports. Do not use this number for new
-# Ussuri work. New Ussuri work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/contract_repo/versions/071_placeholder.py b/keystone/common/sql/contract_repo/versions/071_placeholder.py
deleted file mode 100644
index 8522ef3ce..000000000
--- a/keystone/common/sql/contract_repo/versions/071_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Train backports. Do not use this number for new
-# Ussuri work. New Ussuri work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/contract_repo/versions/072_contract_drop_domain_id_fk.py b/keystone/common/sql/contract_repo/versions/072_contract_drop_domain_id_fk.py
deleted file mode 100644
index 7e00c1e9f..000000000
--- a/keystone/common/sql/contract_repo/versions/072_contract_drop_domain_id_fk.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# Copyright 2019 SUSE LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Train backports. Do not use this number for new
-# Ussuri work. New Ussuri work starts after all the placeholders.
-
-import migrate
-import sqlalchemy as sql
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
- user = sql.Table('user', meta, autoload=True)
- project = sql.Table('project', meta, autoload=True)
-
- fk_name = [
- c for c in user.constraints
- if isinstance(c, sql.ForeignKeyConstraint)
- and c.column_keys == ['domain_id']
- ][0].name
- fk_constraint = migrate.ForeignKeyConstraint(
- columns=[user.c.domain_id], refcolumns=[project.c.id])
- fk_constraint.name = fk_name
- fk_constraint.drop()
-
- identity_provider = sql.Table('identity_provider', meta, autoload=True)
- fk_name = [
- c for c in identity_provider.constraints
- if isinstance(c, sql.ForeignKeyConstraint)
- and c.column_keys == ['domain_id']
- ][0].name
- fk_constraint = migrate.ForeignKeyConstraint(
- columns=[identity_provider.c.domain_id], refcolumns=[project.c.id])
- fk_constraint.name = fk_name
- fk_constraint.drop()
diff --git a/keystone/common/sql/contract_repo/versions/073_contract_expiring_group_membership.py b/keystone/common/sql/contract_repo/versions/073_contract_expiring_group_membership.py
deleted file mode 100644
index 8aa15c1ef..000000000
--- a/keystone/common/sql/contract_repo/versions/073_contract_expiring_group_membership.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/core.py b/keystone/common/sql/core.py
index ed84e5893..7670c47aa 100644
--- a/keystone/common/sql/core.py
+++ b/keystone/common/sql/core.py
@@ -119,6 +119,11 @@ ModelBase.__init__ = initialize_decorator(ModelBase.__init__)
class JsonBlob(sql_types.TypeDecorator):
impl = sql.Text
+ # NOTE(ralonsoh): set to True as any other TypeDecorator in SQLAlchemy
+ # https://docs.sqlalchemy.org/en/14/core/custom_types.html# \
+ # sqlalchemy.types.TypeDecorator.cache_ok
+ cache_ok = True
+ """This type is safe to cache."""
def process_bind_param(self, value, dialect):
return jsonutils.dumps(value)
@@ -144,6 +149,11 @@ class DateTimeInt(sql_types.TypeDecorator):
impl = sql.BigInteger
epoch = datetime.datetime.fromtimestamp(0, tz=pytz.UTC)
+ # NOTE(ralonsoh): set to True as any other TypeDecorator in SQLAlchemy
+ # https://docs.sqlalchemy.org/en/14/core/custom_types.html# \
+ # sqlalchemy.types.TypeDecorator.cache_ok
+ cache_ok = True
+ """This type is safe to cache."""
def process_bind_param(self, value, dialect):
if value is None:
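
For context on the core.py hunk: SQLAlchemy 1.4 warns when a TypeDecorator subclass does not declare cache_ok, and setting it to True marks the type as safe to include in compiled-statement cache keys, in line with the NOTE(ralonsoh) comments and docstrings added above. A minimal standalone sketch of the pattern (the JsonText type below is illustrative, not keystone's JsonBlob):

    # Sketch of a cache-safe TypeDecorator, mirroring the cache_ok change above.
    import json

    import sqlalchemy as sql
    from sqlalchemy import types as sql_types


    class JsonText(sql_types.TypeDecorator):
        """Store a JSON-serializable value in a Text column."""

        impl = sql.Text
        # Safe to cache: behaviour does not depend on per-instance state.
        cache_ok = True

        def process_bind_param(self, value, dialect):
            return json.dumps(value)

        def process_result_value(self, value, dialect):
            return json.loads(value) if value is not None else None
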
diff --git a/keystone/common/sql/data_migration_repo/README b/keystone/common/sql/data_migration_repo/README
deleted file mode 100644
index 131117104..000000000
--- a/keystone/common/sql/data_migration_repo/README
+++ /dev/null
@@ -1,4 +0,0 @@
-This is a database migration repository.
-
-More information at
-https://opendev.org/openstack/sqlalchemy-migrate
diff --git a/keystone/common/sql/data_migration_repo/versions/002_password_created_at_not_nullable.py b/keystone/common/sql/data_migration_repo/versions/002_password_created_at_not_nullable.py
deleted file mode 100644
index 9cb40b454..000000000
--- a/keystone/common/sql/data_migration_repo/versions/002_password_created_at_not_nullable.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/data_migration_repo/versions/003_migrate_unencrypted_credentials.py b/keystone/common/sql/data_migration_repo/versions/003_migrate_unencrypted_credentials.py
deleted file mode 100644
index 7f51b75e1..000000000
--- a/keystone/common/sql/data_migration_repo/versions/003_migrate_unencrypted_credentials.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sql
-
-from keystone.credential.providers import fernet as credential_fernet
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
- session = sql.orm.sessionmaker(bind=migrate_engine)()
-
- credential_table = sql.Table('credential', meta, autoload=True)
- credentials = list(credential_table.select().execute())
-
- for credential in credentials:
- crypto, keys = credential_fernet.get_multi_fernet_keys()
- primary_key_hash = credential_fernet.primary_key_hash(keys)
- encrypted_blob = crypto.encrypt(credential['blob'].encode('utf-8'))
- values = {
- 'encrypted_blob': encrypted_blob,
- 'key_hash': primary_key_hash
- }
- update = credential_table.update().where(
- credential_table.c.id == credential.id
- ).values(values)
- session.execute(update)
- session.commit()
- session.close()
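
The deleted 003 data migration encrypts every plaintext credential blob with the provider's Fernet keys and stores a hash of the primary key alongside it, so keystone can later tell which key encrypted each row. A minimal standalone sketch of that encrypt-and-tag step using the cryptography library directly (key generation and the sha1 tag here are illustrative stand-ins for keystone's get_multi_fernet_keys and primary_key_hash helpers):

    # Sketch: encrypt a plaintext credential blob and record which key was used.
    # Key generation and the sha1 tag are illustrative only.
    import hashlib

    from cryptography.fernet import Fernet, MultiFernet

    primary_key = Fernet.generate_key()
    crypto = MultiFernet([Fernet(primary_key)])

    blob = '{"access": "example"}'
    encrypted_blob = crypto.encrypt(blob.encode('utf-8'))
    key_hash = hashlib.sha1(primary_key).hexdigest()

    # values = {'encrypted_blob': encrypted_blob, 'key_hash': key_hash}
    # would then be written back to the credential row, as in the UPDATE above.
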
diff --git a/keystone/common/sql/data_migration_repo/versions/004_reset_password_created_at.py b/keystone/common/sql/data_migration_repo/versions/004_reset_password_created_at.py
deleted file mode 100644
index 9cb40b454..000000000
--- a/keystone/common/sql/data_migration_repo/versions/004_reset_password_created_at.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/data_migration_repo/versions/005_placeholder.py b/keystone/common/sql/data_migration_repo/versions/005_placeholder.py
deleted file mode 100644
index b259427e4..000000000
--- a/keystone/common/sql/data_migration_repo/versions/005_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Newton backports. Do not use this number for new
-# Ocata work. New Ocata work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/data_migration_repo/versions/006_placeholder.py b/keystone/common/sql/data_migration_repo/versions/006_placeholder.py
deleted file mode 100644
index b259427e4..000000000
--- a/keystone/common/sql/data_migration_repo/versions/006_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Newton backports. Do not use this number for new
-# Ocata work. New Ocata work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/data_migration_repo/versions/007_placeholder.py b/keystone/common/sql/data_migration_repo/versions/007_placeholder.py
deleted file mode 100644
index b259427e4..000000000
--- a/keystone/common/sql/data_migration_repo/versions/007_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Newton backports. Do not use this number for new
-# Ocata work. New Ocata work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/data_migration_repo/versions/008_placeholder.py b/keystone/common/sql/data_migration_repo/versions/008_placeholder.py
deleted file mode 100644
index b259427e4..000000000
--- a/keystone/common/sql/data_migration_repo/versions/008_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Newton backports. Do not use this number for new
-# Ocata work. New Ocata work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/data_migration_repo/versions/009_placeholder.py b/keystone/common/sql/data_migration_repo/versions/009_placeholder.py
deleted file mode 100644
index b259427e4..000000000
--- a/keystone/common/sql/data_migration_repo/versions/009_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Newton backports. Do not use this number for new
-# Ocata work. New Ocata work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/data_migration_repo/versions/010_migrate_add_revocation_event_index.py b/keystone/common/sql/data_migration_repo/versions/010_migrate_add_revocation_event_index.py
deleted file mode 100644
index 8aa15c1ef..000000000
--- a/keystone/common/sql/data_migration_repo/versions/010_migrate_add_revocation_event_index.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/data_migration_repo/versions/011_expand_user_id_unique_for_nonlocal_user.py b/keystone/common/sql/data_migration_repo/versions/011_expand_user_id_unique_for_nonlocal_user.py
deleted file mode 100644
index 8aa15c1ef..000000000
--- a/keystone/common/sql/data_migration_repo/versions/011_expand_user_id_unique_for_nonlocal_user.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/data_migration_repo/versions/012_migrate_add_domain_id_to_idp.py b/keystone/common/sql/data_migration_repo/versions/012_migrate_add_domain_id_to_idp.py
deleted file mode 100644
index d8b931aed..000000000
--- a/keystone/common/sql/data_migration_repo/versions/012_migrate_add_domain_id_to_idp.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import uuid
-
-import sqlalchemy as sql
-from sqlalchemy.orm import sessionmaker
-
-from keystone.resource.backends import base
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
- maker = sessionmaker(bind=migrate_engine)
- session = maker()
-
- idp_table = sql.Table('identity_provider', meta, autoload=True)
-
- for idp_row in idp_table.select().execute():
- domain_id = _create_federated_domain(meta, session, idp_row['id'])
- # update idp with the new federated domain_id
- values = {'domain_id': domain_id}
- stmt = idp_table.update().where(
- idp_table.c.id == idp_row['id']).values(values)
- stmt.execute()
-
-
-def _create_federated_domain(meta, session, idp_id):
- domain_id = uuid.uuid4().hex
- desc = 'Auto generated federated domain for Identity Provider: ' + idp_id
- federated_domain = {
- 'id': domain_id,
- 'name': domain_id,
- 'enabled': True,
- 'description': desc,
- 'domain_id': base.NULL_DOMAIN_ID,
- 'is_domain': True,
- 'parent_id': None,
- 'extra': '{}'
- }
- project_table = sql.Table('project', meta, autoload=True)
- new_row = project_table.insert().values(**federated_domain)
- session.execute(new_row)
- session.commit()
- return domain_id
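
The 012 migration deleted above backfills a per-IdP federated domain and then stamps the new domain_id onto each identity_provider row. It is written against the sqlalchemy-migrate idioms (MetaData.bind, autoload=True, implicit stmt.execute()), all of which are gone in SQLAlchemy 1.4+. Purely as an illustration and not part of this commit, the same data migration in modern Core style might look like the sketch below; table and column names come from the deleted file, and NULL_DOMAIN_ID is assumed to mirror keystone.resource.backends.base.NULL_DOMAIN_ID.

    import uuid

    import sqlalchemy as sa

    # Assumed to match keystone.resource.backends.base.NULL_DOMAIN_ID.
    NULL_DOMAIN_ID = '<<keystone.domain.root>>'


    def add_domain_id_to_idps(engine):
        meta = sa.MetaData()
        idp = sa.Table('identity_provider', meta, autoload_with=engine)
        project = sa.Table('project', meta, autoload_with=engine)

        with engine.begin() as conn:
            for row in conn.execute(sa.select(idp.c.id)).all():
                domain_id = uuid.uuid4().hex
                # Each IdP gets an auto-generated federated domain, stored as
                # an is_domain=True row in the project table.
                conn.execute(project.insert().values(
                    id=domain_id,
                    name=domain_id,
                    enabled=True,
                    description=('Auto generated federated domain for '
                                 'Identity Provider: ' + row.id),
                    domain_id=NULL_DOMAIN_ID,
                    is_domain=True,
                    parent_id=None,
                    extra='{}',
                ))
                # Stamp the new domain onto the identity_provider row.
                conn.execute(idp.update()
                             .where(idp.c.id == row.id)
                             .values(domain_id=domain_id))

One difference worth noting: the sketch runs the whole backfill inside a single engine.begin() transaction, whereas the deleted code committed a session after each inserted domain.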
diff --git a/keystone/common/sql/data_migration_repo/versions/013_migrate_protocol_cascade_delete_for_federated_user.py b/keystone/common/sql/data_migration_repo/versions/013_migrate_protocol_cascade_delete_for_federated_user.py
deleted file mode 100644
index 9cb40b454..000000000
--- a/keystone/common/sql/data_migration_repo/versions/013_migrate_protocol_cascade_delete_for_federated_user.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/data_migration_repo/versions/014_migrate_add_domain_id_to_user_table.py b/keystone/common/sql/data_migration_repo/versions/014_migrate_add_domain_id_to_user_table.py
deleted file mode 100644
index b4437fe59..000000000
--- a/keystone/common/sql/data_migration_repo/versions/014_migrate_add_domain_id_to_user_table.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sql
-import sqlalchemy.sql.expression as expression
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- user_table = sql.Table('user', meta, autoload=True)
-
- # update user domain_id from local_user
- local_table = sql.Table('local_user', meta, autoload=True)
- _update_user_domain_id(migrate_engine, user_table, local_table)
-
- # update user domain_id from nonlocal_user
- nonlocal_table = sql.Table('nonlocal_user', meta, autoload=True)
- _update_user_domain_id(migrate_engine, user_table, nonlocal_table)
-
-
-def _update_user_domain_id(migrate_engine, user_table, child_user_table):
- join = sql.join(user_table, child_user_table,
- user_table.c.id == child_user_table.c.user_id)
- where = user_table.c.domain_id == expression.null()
- sel = (
- sql.select([user_table.c.id, child_user_table.c.domain_id])
- .select_from(join).where(where)
- )
- with migrate_engine.begin() as conn:
- for user in conn.execute(sel):
- values = {'domain_id': user['domain_id']}
- stmt = user_table.update().where(
- user_table.c.id == user['id']).values(values)
- conn.execute(stmt)
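
The 014 migration deleted above copies domain_id onto user rows from the owning local_user or nonlocal_user row. A minimal modern-SQLAlchemy sketch of the same per-row backfill, illustrative only, with names taken from the deleted file:

    import sqlalchemy as sa


    def backfill_user_domain_id(engine, child_table_name):
        meta = sa.MetaData()
        user = sa.Table('user', meta, autoload_with=engine)
        child = sa.Table(child_table_name, meta, autoload_with=engine)

        # Pick up domain_id from the child table for users that still lack one.
        sel = (
            sa.select(user.c.id, child.c.domain_id)
            .select_from(sa.join(user, child, user.c.id == child.c.user_id))
            .where(user.c.domain_id.is_(None))
        )
        with engine.begin() as conn:
            for user_id, domain_id in conn.execute(sel).all():
                conn.execute(user.update()
                             .where(user.c.id == user_id)
                             .values(domain_id=domain_id))

The deleted migration ran this logic twice, once for 'local_user' and once for 'nonlocal_user', which the child_table_name parameter mirrors.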
diff --git a/keystone/common/sql/data_migration_repo/versions/015_migrate_update_federated_user_domain.py b/keystone/common/sql/data_migration_repo/versions/015_migrate_update_federated_user_domain.py
deleted file mode 100644
index 83ac4d36e..000000000
--- a/keystone/common/sql/data_migration_repo/versions/015_migrate_update_federated_user_domain.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sql
-import sqlalchemy.sql.expression as expression
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- user_table = sql.Table('user', meta, autoload=True)
- federated_table = sql.Table('federated_user', meta, autoload=True)
- idp_table = sql.Table('identity_provider', meta, autoload=True)
-
- join = sql.join(federated_table, idp_table,
- federated_table.c.idp_id == idp_table.c.id)
- sel = sql.select(
- [federated_table.c.user_id, idp_table.c.domain_id]).select_from(join)
- with migrate_engine.begin() as conn:
- for user in conn.execute(sel):
- values = {'domain_id': user['domain_id']}
- stmt = user_table.update().where(
- sql.and_(
- user_table.c.domain_id == expression.null(),
- user_table.c.id == user['user_id'])).values(values)
- conn.execute(stmt)
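
The 015 migration deleted above does the analogous backfill for federated users, joining federated_user to identity_provider and updating row by row. The same effect can be expressed as a single correlated UPDATE; the sketch below is illustrative only and assumes each federated user maps to a single IdP domain (the deleted loop would simply apply whichever match it processed last):

    import sqlalchemy as sa


    def backfill_federated_user_domain(engine):
        meta = sa.MetaData()
        user = sa.Table('user', meta, autoload_with=engine)
        federated = sa.Table('federated_user', meta, autoload_with=engine)
        idp = sa.Table('identity_provider', meta, autoload_with=engine)

        # domain_id of the IdP that owns this user's federated_user row.
        domain_for_user = (
            sa.select(idp.c.domain_id)
            .select_from(sa.join(federated, idp,
                                 federated.c.idp_id == idp.c.id))
            .where(federated.c.user_id == user.c.id)
            .scalar_subquery()
        )

        with engine.begin() as conn:
            conn.execute(
                user.update()
                .where(sa.and_(user.c.domain_id.is_(None),
                               user.c.id.in_(sa.select(federated.c.user_id))))
                .values(domain_id=domain_for_user)
            )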
diff --git a/keystone/common/sql/data_migration_repo/versions/016_migrate_add_user_options.py b/keystone/common/sql/data_migration_repo/versions/016_migrate_add_user_options.py
deleted file mode 100644
index 9b6593fe6..000000000
--- a/keystone/common/sql/data_migration_repo/versions/016_migrate_add_user_options.py
+++ /dev/null
@@ -1,16 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-def upgrade(migrate_engine):
- # NOTE(notmorgan): This is a no-op, no data-migration needed.
- pass
diff --git a/keystone/common/sql/data_migration_repo/versions/017_placeholder.py b/keystone/common/sql/data_migration_repo/versions/017_placeholder.py
deleted file mode 100644
index cd0769c5e..000000000
--- a/keystone/common/sql/data_migration_repo/versions/017_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Ocata backports. Do not use this number for new
-# Pike work. New Pike work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/data_migration_repo/versions/018_placeholder.py b/keystone/common/sql/data_migration_repo/versions/018_placeholder.py
deleted file mode 100644
index cd0769c5e..000000000
--- a/keystone/common/sql/data_migration_repo/versions/018_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Ocata backports. Do not use this number for new
-# Pike work. New Pike work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/data_migration_repo/versions/019_placeholder.py b/keystone/common/sql/data_migration_repo/versions/019_placeholder.py
deleted file mode 100644
index cd0769c5e..000000000
--- a/keystone/common/sql/data_migration_repo/versions/019_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Ocata backports. Do not use this number for new
-# Pike work. New Pike work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/data_migration_repo/versions/020_placeholder.py b/keystone/common/sql/data_migration_repo/versions/020_placeholder.py
deleted file mode 100644
index cd0769c5e..000000000
--- a/keystone/common/sql/data_migration_repo/versions/020_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Ocata backports. Do not use this number for new
-# Pike work. New Pike work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/data_migration_repo/versions/021_placeholder.py b/keystone/common/sql/data_migration_repo/versions/021_placeholder.py
deleted file mode 100644
index cd0769c5e..000000000
--- a/keystone/common/sql/data_migration_repo/versions/021_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Ocata backports. Do not use this number for new
-# Pike work. New Pike work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/data_migration_repo/versions/022_migrate_add_default_project_id_index.py b/keystone/common/sql/data_migration_repo/versions/022_migrate_add_default_project_id_index.py
deleted file mode 100644
index 8aa15c1ef..000000000
--- a/keystone/common/sql/data_migration_repo/versions/022_migrate_add_default_project_id_index.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/data_migration_repo/versions/023_migrate_add_second_password_column_for_expanded_hash_sizes.py b/keystone/common/sql/data_migration_repo/versions/023_migrate_add_second_password_column_for_expanded_hash_sizes.py
deleted file mode 100644
index 8aa15c1ef..000000000
--- a/keystone/common/sql/data_migration_repo/versions/023_migrate_add_second_password_column_for_expanded_hash_sizes.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/data_migration_repo/versions/024_migrate_create_created_at_int_columns.py b/keystone/common/sql/data_migration_repo/versions/024_migrate_create_created_at_int_columns.py
deleted file mode 100644
index 5dcd05d82..000000000
--- a/keystone/common/sql/data_migration_repo/versions/024_migrate_create_created_at_int_columns.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-def upgrade(migrate_engine):
- # A migration here is not needed because the actual marshalling of data
- # from the old column to the new column is done in the contract phase. This
- # is because using triggers to convert datetime objects to integers is
- # complex and error-prone. Instead, we'll migrate the data once all
- # keystone nodes are on the Pike code-base. From an operator perspective,
- # this shouldn't affect operability of a rolling upgrade since all nodes
- # must be running Pike before the contract takes place.
- pass
diff --git a/keystone/common/sql/data_migration_repo/versions/025_placeholder.py b/keystone/common/sql/data_migration_repo/versions/025_placeholder.py
deleted file mode 100644
index a96cd6f36..000000000
--- a/keystone/common/sql/data_migration_repo/versions/025_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Pike backports. Do not use this number for new
-# Queens work. New Queens work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/data_migration_repo/versions/026_placeholder.py b/keystone/common/sql/data_migration_repo/versions/026_placeholder.py
deleted file mode 100644
index a96cd6f36..000000000
--- a/keystone/common/sql/data_migration_repo/versions/026_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Pike backports. Do not use this number for new
-# Queens work. New Queens work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/data_migration_repo/versions/027_placeholder.py b/keystone/common/sql/data_migration_repo/versions/027_placeholder.py
deleted file mode 100644
index a96cd6f36..000000000
--- a/keystone/common/sql/data_migration_repo/versions/027_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Pike backports. Do not use this number for new
-# Queens work. New Queens work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/data_migration_repo/versions/028_placeholder.py b/keystone/common/sql/data_migration_repo/versions/028_placeholder.py
deleted file mode 100644
index a96cd6f36..000000000
--- a/keystone/common/sql/data_migration_repo/versions/028_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Pike backports. Do not use this number for new
-# Queens work. New Queens work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/data_migration_repo/versions/029_placeholder.py b/keystone/common/sql/data_migration_repo/versions/029_placeholder.py
deleted file mode 100644
index a96cd6f36..000000000
--- a/keystone/common/sql/data_migration_repo/versions/029_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Pike backports. Do not use this number for new
-# Queens work. New Queens work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/data_migration_repo/versions/030_migrate_add_project_tags_table.py b/keystone/common/sql/data_migration_repo/versions/030_migrate_add_project_tags_table.py
deleted file mode 100644
index 8aa15c1ef..000000000
--- a/keystone/common/sql/data_migration_repo/versions/030_migrate_add_project_tags_table.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/data_migration_repo/versions/031_migrate_system_assignment_table.py b/keystone/common/sql/data_migration_repo/versions/031_migrate_system_assignment_table.py
deleted file mode 100644
index c02f78c4e..000000000
--- a/keystone/common/sql/data_migration_repo/versions/031_migrate_system_assignment_table.py
+++ /dev/null
@@ -1,17 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-def upgrade(migrate_engine):
- # NOTE(lbragstad): A migration isn't required here since system assignments
- # are a new feature in Queens.
- pass
diff --git a/keystone/common/sql/data_migration_repo/versions/032_migrate_add_expired_at_int_to_trust.py b/keystone/common/sql/data_migration_repo/versions/032_migrate_add_expired_at_int_to_trust.py
deleted file mode 100644
index ce4496ee0..000000000
--- a/keystone/common/sql/data_migration_repo/versions/032_migrate_add_expired_at_int_to_trust.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-def upgrade(migrate_engine):
- # A migration here is not needed because the actual marshalling of data
- # from the old column to the new column is done in the contract phase. This
- # is because using triggers to convert datetime objects to integers is
- # complex and error-prone. Instead, we'll migrate the data once all
- # keystone nodes are on the Queens code-base. From an operator perspective,
- # this shouldn't affect operability of a rolling upgrade since all nodes
- # must be running Queens before the contract takes place.
- pass
diff --git a/keystone/common/sql/data_migration_repo/versions/033_migrate_add_limits_tables.py b/keystone/common/sql/data_migration_repo/versions/033_migrate_add_limits_tables.py
deleted file mode 100644
index 8aa15c1ef..000000000
--- a/keystone/common/sql/data_migration_repo/versions/033_migrate_add_limits_tables.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/data_migration_repo/versions/034_migrate_add_application_credentials_table.py b/keystone/common/sql/data_migration_repo/versions/034_migrate_add_application_credentials_table.py
deleted file mode 100644
index 8aa15c1ef..000000000
--- a/keystone/common/sql/data_migration_repo/versions/034_migrate_add_application_credentials_table.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/data_migration_repo/versions/035_migrate_add_system_column_to_application_credential_table.py b/keystone/common/sql/data_migration_repo/versions/035_migrate_add_system_column_to_application_credential_table.py
deleted file mode 100644
index 8aa15c1ef..000000000
--- a/keystone/common/sql/data_migration_repo/versions/035_migrate_add_system_column_to_application_credential_table.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/data_migration_repo/versions/036_migrate_rename_application_credential_restriction_column.py b/keystone/common/sql/data_migration_repo/versions/036_migrate_rename_application_credential_restriction_column.py
deleted file mode 100644
index 8aa15c1ef..000000000
--- a/keystone/common/sql/data_migration_repo/versions/036_migrate_rename_application_credential_restriction_column.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/data_migration_repo/versions/037_migrate_remove_service_and_region_fk_for_registered_limit.py b/keystone/common/sql/data_migration_repo/versions/037_migrate_remove_service_and_region_fk_for_registered_limit.py
deleted file mode 100644
index 9cb40b454..000000000
--- a/keystone/common/sql/data_migration_repo/versions/037_migrate_remove_service_and_region_fk_for_registered_limit.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/data_migration_repo/versions/038_placeholder.py b/keystone/common/sql/data_migration_repo/versions/038_placeholder.py
deleted file mode 100644
index 71faccf92..000000000
--- a/keystone/common/sql/data_migration_repo/versions/038_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Queens backports. Do not use this number for new
-# Rocky work. New Rocky work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/data_migration_repo/versions/039_placeholder.py b/keystone/common/sql/data_migration_repo/versions/039_placeholder.py
deleted file mode 100644
index 71faccf92..000000000
--- a/keystone/common/sql/data_migration_repo/versions/039_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Queens backports. Do not use this number for new
-# Rocky work. New Rocky work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/data_migration_repo/versions/040_placeholder.py b/keystone/common/sql/data_migration_repo/versions/040_placeholder.py
deleted file mode 100644
index 71faccf92..000000000
--- a/keystone/common/sql/data_migration_repo/versions/040_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Queens backports. Do not use this number for new
-# Rocky work. New Rocky work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/data_migration_repo/versions/041_placeholder.py b/keystone/common/sql/data_migration_repo/versions/041_placeholder.py
deleted file mode 100644
index 71faccf92..000000000
--- a/keystone/common/sql/data_migration_repo/versions/041_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Queens backports. Do not use this number for new
-# Rocky work. New Rocky work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/data_migration_repo/versions/042_placeholder.py b/keystone/common/sql/data_migration_repo/versions/042_placeholder.py
deleted file mode 100644
index 71faccf92..000000000
--- a/keystone/common/sql/data_migration_repo/versions/042_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Queens backports. Do not use this number for new
-# Rocky work. New Rocky work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/data_migration_repo/versions/043_placeholder.py b/keystone/common/sql/data_migration_repo/versions/043_placeholder.py
deleted file mode 100644
index 71faccf92..000000000
--- a/keystone/common/sql/data_migration_repo/versions/043_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Queens backports. Do not use this number for new
-# Rocky work. New Rocky work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/data_migration_repo/versions/044_placeholder.py b/keystone/common/sql/data_migration_repo/versions/044_placeholder.py
deleted file mode 100644
index 71faccf92..000000000
--- a/keystone/common/sql/data_migration_repo/versions/044_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Queens backports. Do not use this number for new
-# Rocky work. New Rocky work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/data_migration_repo/versions/045_migrate_add_description_to_limit.py b/keystone/common/sql/data_migration_repo/versions/045_migrate_add_description_to_limit.py
deleted file mode 100644
index 9cb40b454..000000000
--- a/keystone/common/sql/data_migration_repo/versions/045_migrate_add_description_to_limit.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/data_migration_repo/versions/046_migrate_old_password_data_to_password_hash_column.py b/keystone/common/sql/data_migration_repo/versions/046_migrate_old_password_data_to_password_hash_column.py
deleted file mode 100644
index 0a69f32e5..000000000
--- a/keystone/common/sql/data_migration_repo/versions/046_migrate_old_password_data_to_password_hash_column.py
+++ /dev/null
@@ -1,26 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sql
-import sqlalchemy.sql.expression as expression
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- password_table = sql.Table('password', meta, autoload=True)
- with migrate_engine.begin() as conn:
- stmt = password_table.update().where(
- password_table.c.password_hash == expression.null()).values(
- {'password_hash': password_table.c.password})
- conn.execute(stmt)
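
The 046 migration deleted above is already a single set-based UPDATE: it copies the legacy 'password' column into 'password_hash' wherever the new column is still NULL. Restated without the sqlalchemy-migrate idioms, illustrative only:

    import sqlalchemy as sa


    def backfill_password_hash(engine):
        meta = sa.MetaData()
        password = sa.Table('password', meta, autoload_with=engine)

        with engine.begin() as conn:
            # Copy the legacy 'password' column into 'password_hash' for rows
            # that have not been migrated yet.
            conn.execute(
                password.update()
                .where(password.c.password_hash.is_(None))
                .values(password_hash=password.c.password)
            )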
diff --git a/keystone/common/sql/data_migration_repo/versions/047_migrate_update_pk_for_unified_limit.py b/keystone/common/sql/data_migration_repo/versions/047_migrate_update_pk_for_unified_limit.py
deleted file mode 100644
index e76298c15..000000000
--- a/keystone/common/sql/data_migration_repo/versions/047_migrate_update_pk_for_unified_limit.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sql
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
- # For SQLite, migrate the data from old tables to new ones.
- if migrate_engine == 'sqlite':
- registered_limit_table = sql.Table('registered_limit', meta,
- autoload=True)
- registered_limit_table_new = sql.Table('registered_limit_new', meta,
- autoload=True)
-
- limit_table = sql.Table('limit', meta, autoload=True)
- limit_table_new = sql.Table('limit_new', meta, autoload=True)
-
- registered_limit_table_new.insert().from_select(
- ['id', 'service_id', 'region_id', 'resource_name', 'default_limit',
- 'description'],
- registered_limit_table.select()).execute()
-
- limit_table_new.insert().from_select(
- ['id', 'project_id', 'service_id', 'region_id', 'resource_name',
- 'resource_limit', 'description'],
- limit_table.select()).execute()
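
The 047 migration deleted above copies unified-limit data from the old registered_limit and limit tables into their *_new replacements, on SQLite only. One detail worth flagging: the guard compares the engine object itself to the string 'sqlite', whereas the usual dialect check is migrate_engine.name == 'sqlite'. An illustrative restatement with an explicit dialect check and connection-bound execution, column lists taken from the deleted file:

    import sqlalchemy as sa


    def copy_unified_limit_tables(engine):
        # Only the SQLite path created replacement tables to copy into.
        if engine.name != 'sqlite':
            return

        meta = sa.MetaData()
        registered_limit = sa.Table('registered_limit', meta,
                                    autoload_with=engine)
        registered_limit_new = sa.Table('registered_limit_new', meta,
                                        autoload_with=engine)
        limit_table = sa.Table('limit', meta, autoload_with=engine)
        limit_table_new = sa.Table('limit_new', meta, autoload_with=engine)

        with engine.begin() as conn:
            conn.execute(registered_limit_new.insert().from_select(
                ['id', 'service_id', 'region_id', 'resource_name',
                 'default_limit', 'description'],
                sa.select(registered_limit.c.id,
                          registered_limit.c.service_id,
                          registered_limit.c.region_id,
                          registered_limit.c.resource_name,
                          registered_limit.c.default_limit,
                          registered_limit.c.description)))
            conn.execute(limit_table_new.insert().from_select(
                ['id', 'project_id', 'service_id', 'region_id',
                 'resource_name', 'resource_limit', 'description'],
                sa.select(limit_table.c.id, limit_table.c.project_id,
                          limit_table.c.service_id, limit_table.c.region_id,
                          limit_table.c.resource_name,
                          limit_table.c.resource_limit,
                          limit_table.c.description)))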
diff --git a/keystone/common/sql/data_migration_repo/versions/048_migrate_add_registered_limit_id_column_for_limit.py b/keystone/common/sql/data_migration_repo/versions/048_migrate_add_registered_limit_id_column_for_limit.py
deleted file mode 100644
index 8aa15c1ef..000000000
--- a/keystone/common/sql/data_migration_repo/versions/048_migrate_add_registered_limit_id_column_for_limit.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/data_migration_repo/versions/049_placeholder.py b/keystone/common/sql/data_migration_repo/versions/049_placeholder.py
deleted file mode 100644
index 8f51a8962..000000000
--- a/keystone/common/sql/data_migration_repo/versions/049_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Rocky backports. Do not use this number for new
-# Stein work. New Stein work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/data_migration_repo/versions/050_placeholder.py b/keystone/common/sql/data_migration_repo/versions/050_placeholder.py
deleted file mode 100644
index 8f51a8962..000000000
--- a/keystone/common/sql/data_migration_repo/versions/050_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Rocky backports. Do not use this number for new
-# Stein work. New Stein work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/data_migration_repo/versions/051_placeholder.py b/keystone/common/sql/data_migration_repo/versions/051_placeholder.py
deleted file mode 100644
index 8f51a8962..000000000
--- a/keystone/common/sql/data_migration_repo/versions/051_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Rocky backports. Do not use this number for new
-# Stein work. New Stein work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/data_migration_repo/versions/052_placeholder.py b/keystone/common/sql/data_migration_repo/versions/052_placeholder.py
deleted file mode 100644
index 8f51a8962..000000000
--- a/keystone/common/sql/data_migration_repo/versions/052_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Rocky backports. Do not use this number for new
-# Stein work. New Stein work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/data_migration_repo/versions/053_migrate_add_role_description_to_role_table.py b/keystone/common/sql/data_migration_repo/versions/053_migrate_add_role_description_to_role_table.py
deleted file mode 100644
index 8aa15c1ef..000000000
--- a/keystone/common/sql/data_migration_repo/versions/053_migrate_add_role_description_to_role_table.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/data_migration_repo/versions/054_migrate_drop_old_passoword_column.py b/keystone/common/sql/data_migration_repo/versions/054_migrate_drop_old_passoword_column.py
deleted file mode 100644
index 8aa15c1ef..000000000
--- a/keystone/common/sql/data_migration_repo/versions/054_migrate_drop_old_passoword_column.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/data_migration_repo/versions/055_migrate_add_domain_to_limit.py b/keystone/common/sql/data_migration_repo/versions/055_migrate_add_domain_to_limit.py
deleted file mode 100644
index 8aa15c1ef..000000000
--- a/keystone/common/sql/data_migration_repo/versions/055_migrate_add_domain_to_limit.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/data_migration_repo/versions/056_migrate_add_application_credential_access_rules.py b/keystone/common/sql/data_migration_repo/versions/056_migrate_add_application_credential_access_rules.py
deleted file mode 100644
index 8066b50bb..000000000
--- a/keystone/common/sql/data_migration_repo/versions/056_migrate_add_application_credential_access_rules.py
+++ /dev/null
@@ -1,17 +0,0 @@
-# Copyright 2019 SUSE Linux GmbH
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/data_migration_repo/versions/057_placeholder.py b/keystone/common/sql/data_migration_repo/versions/057_placeholder.py
deleted file mode 100644
index dff6bd138..000000000
--- a/keystone/common/sql/data_migration_repo/versions/057_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Stein backports. Do not use this number for new
-# Train work. New Train work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/data_migration_repo/versions/058_placeholder.py b/keystone/common/sql/data_migration_repo/versions/058_placeholder.py
deleted file mode 100644
index dff6bd138..000000000
--- a/keystone/common/sql/data_migration_repo/versions/058_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Stein backports. Do not use this number for new
-# Train work. New Train work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/data_migration_repo/versions/059_placeholder.py b/keystone/common/sql/data_migration_repo/versions/059_placeholder.py
deleted file mode 100644
index dff6bd138..000000000
--- a/keystone/common/sql/data_migration_repo/versions/059_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Stein backports. Do not use this number for new
-# Train work. New Train work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/data_migration_repo/versions/060_placeholder.py b/keystone/common/sql/data_migration_repo/versions/060_placeholder.py
deleted file mode 100644
index dff6bd138..000000000
--- a/keystone/common/sql/data_migration_repo/versions/060_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Stein backports. Do not use this number for new
-# Train work. New Train work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/data_migration_repo/versions/061_placeholder.py b/keystone/common/sql/data_migration_repo/versions/061_placeholder.py
deleted file mode 100644
index dff6bd138..000000000
--- a/keystone/common/sql/data_migration_repo/versions/061_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Stein backports. Do not use this number for new
-# Train work. New Train work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/data_migration_repo/versions/062_migrate_extract_redelegation_data_from_extras.py b/keystone/common/sql/data_migration_repo/versions/062_migrate_extract_redelegation_data_from_extras.py
deleted file mode 100644
index ddb30368d..000000000
--- a/keystone/common/sql/data_migration_repo/versions/062_migrate_extract_redelegation_data_from_extras.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_serialization import jsonutils
-import sqlalchemy as sql
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- trust_table = sql.Table('trust', meta, autoload=True)
- trust_list = list(trust_table.select().execute())
-
-    # Loop through all the trusts and move the redelegated trust id out of
-    # extras.
- for trust in trust_list:
- if trust.extra is not None:
- extra_dict = jsonutils.loads(trust.extra)
- else:
- extra_dict = {}
-
- new_values = {}
-
- new_values['redelegated_trust_id'] = extra_dict.pop(
- 'redelegated_trust_id', None)
- new_values['redelegation_count'] = extra_dict.pop(
- 'redelegation_count', None)
-
- new_values['extra'] = jsonutils.dumps(extra_dict)
-
- clause = trust_table.c.id == trust.id
- update = trust_table.update().where(clause).values(new_values)
- migrate_engine.execute(update)
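
The deleted 062 script above moves redelegated_trust_id and redelegation_count out of the trust table's JSON 'extra' blob with the legacy sqlalchemy-migrate API (meta.bind, autoload=True, migrate_engine.execute). Purely as a hedged sketch, assuming the replacement repository is Alembic-based and SQLAlchemy 1.4+ is available, the same data move might look as follows; the revision identifiers are hypothetical and this is not the actual replacement migration.

    import sqlalchemy as sa
    from alembic import op
    from oslo_serialization import jsonutils

    # Hypothetical revision identifiers, for illustration only.
    revision = 'extract_redelegation_data'
    down_revision = None


    def upgrade():
        bind = op.get_bind()
        trust = sa.Table('trust', sa.MetaData(), autoload_with=bind)

        # Materialize all trust rows first, then move the redelegation keys
        # out of the JSON 'extra' blob and into their dedicated columns.
        rows = bind.execute(sa.select(trust)).mappings().all()
        for row in rows:
            extra = jsonutils.loads(row['extra']) if row['extra'] else {}
            values = {
                'redelegated_trust_id': extra.pop('redelegated_trust_id', None),
                'redelegation_count': extra.pop('redelegation_count', None),
                'extra': jsonutils.dumps(extra),
            }
            bind.execute(
                trust.update().where(trust.c.id == row['id']).values(values)
            )
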
diff --git a/keystone/common/sql/data_migration_repo/versions/063_migrate_drop_limit_columns.py b/keystone/common/sql/data_migration_repo/versions/063_migrate_drop_limit_columns.py
deleted file mode 100644
index 8aa15c1ef..000000000
--- a/keystone/common/sql/data_migration_repo/versions/063_migrate_drop_limit_columns.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/data_migration_repo/versions/064_migrate_add_remote_id_attribute_to_federation_protocol_table.py b/keystone/common/sql/data_migration_repo/versions/064_migrate_add_remote_id_attribute_to_federation_protocol_table.py
deleted file mode 100644
index 8aa15c1ef..000000000
--- a/keystone/common/sql/data_migration_repo/versions/064_migrate_add_remote_id_attribute_to_federation_protocol_table.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/data_migration_repo/versions/065_migrate_add_user_external_id_to_access_rule.py b/keystone/common/sql/data_migration_repo/versions/065_migrate_add_user_external_id_to_access_rule.py
deleted file mode 100644
index 8aa15c1ef..000000000
--- a/keystone/common/sql/data_migration_repo/versions/065_migrate_add_user_external_id_to_access_rule.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/data_migration_repo/versions/066_migrate_add_resource_options_table.py b/keystone/common/sql/data_migration_repo/versions/066_migrate_add_resource_options_table.py
deleted file mode 100644
index b1e5fdddf..000000000
--- a/keystone/common/sql/data_migration_repo/versions/066_migrate_add_resource_options_table.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# NOTE(morgan): there is nothing to do here; data migration for user
-# resource options will occur in a future change.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/data_migration_repo/versions/067_placeholder.py b/keystone/common/sql/data_migration_repo/versions/067_placeholder.py
deleted file mode 100644
index 8522ef3ce..000000000
--- a/keystone/common/sql/data_migration_repo/versions/067_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Train backports. Do not use this number for new
-# Ussuri work. New Ussuri work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/data_migration_repo/versions/068_placeholder.py b/keystone/common/sql/data_migration_repo/versions/068_placeholder.py
deleted file mode 100644
index 8522ef3ce..000000000
--- a/keystone/common/sql/data_migration_repo/versions/068_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Train backports. Do not use this number for new
-# Ussuri work. New Ussuri work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/data_migration_repo/versions/069_placeholder.py b/keystone/common/sql/data_migration_repo/versions/069_placeholder.py
deleted file mode 100644
index 8522ef3ce..000000000
--- a/keystone/common/sql/data_migration_repo/versions/069_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Train backports. Do not use this number for new
-# Ussuri work. New Ussuri work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/data_migration_repo/versions/070_placeholder.py b/keystone/common/sql/data_migration_repo/versions/070_placeholder.py
deleted file mode 100644
index 8522ef3ce..000000000
--- a/keystone/common/sql/data_migration_repo/versions/070_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Train backports. Do not use this number for new
-# Ussuri work. New Ussuri work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/data_migration_repo/versions/071_placeholder.py b/keystone/common/sql/data_migration_repo/versions/071_placeholder.py
deleted file mode 100644
index 8522ef3ce..000000000
--- a/keystone/common/sql/data_migration_repo/versions/071_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Train backports. Do not use this number for new
-# Ussuri work. New Ussuri work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/data_migration_repo/versions/072_migrate_drop_domain_id_fk.py b/keystone/common/sql/data_migration_repo/versions/072_migrate_drop_domain_id_fk.py
deleted file mode 100644
index bb90c3de3..000000000
--- a/keystone/common/sql/data_migration_repo/versions/072_migrate_drop_domain_id_fk.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# Copyright 2019 SUSE LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Train backports. Do not use this number for new
-# Ussuri work. New Ussuri work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/data_migration_repo/versions/073_migrate_expiring_group_membership.py b/keystone/common/sql/data_migration_repo/versions/073_migrate_expiring_group_membership.py
deleted file mode 100644
index 8aa15c1ef..000000000
--- a/keystone/common/sql/data_migration_repo/versions/073_migrate_expiring_group_membership.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/expand_repo/README b/keystone/common/sql/expand_repo/README
deleted file mode 100644
index 131117104..000000000
--- a/keystone/common/sql/expand_repo/README
+++ /dev/null
@@ -1,4 +0,0 @@
-This is a database migration repository.
-
-More information at
-https://opendev.org/openstack/sqlalchemy-migrate
diff --git a/keystone/common/sql/expand_repo/versions/002_password_created_at_not_nullable.py b/keystone/common/sql/expand_repo/versions/002_password_created_at_not_nullable.py
deleted file mode 100644
index 1cd34e617..000000000
--- a/keystone/common/sql/expand_repo/versions/002_password_created_at_not_nullable.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# A null initial migration to open this repo. Do not re-use or replace this
-# with a real migration; add additional ones in subsequent version scripts.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/expand_repo/versions/003_add_key_hash_and_encrypted_blob_to_credential.py b/keystone/common/sql/expand_repo/versions/003_add_key_hash_and_encrypted_blob_to_credential.py
deleted file mode 100644
index 3e9f25b03..000000000
--- a/keystone/common/sql/expand_repo/versions/003_add_key_hash_and_encrypted_blob_to_credential.py
+++ /dev/null
@@ -1,129 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sql
-
-from keystone.common import sql as ks_sql
-from keystone.common.sql import upgrades
-
-
-# NOTE(lbragstad): MySQL error state of 45000 is a generic unhandled exception.
-# Keystone will return a 500 in this case.
-MYSQL_INSERT_TRIGGER = """
-CREATE TRIGGER credential_insert_read_only BEFORE INSERT ON credential
-FOR EACH ROW
-BEGIN
- SIGNAL SQLSTATE '45000'
- SET MESSAGE_TEXT = '%s';
-END;
-"""
-
-MYSQL_UPDATE_TRIGGER = """
-CREATE TRIGGER credential_update_read_only BEFORE UPDATE ON credential
-FOR EACH ROW
-BEGIN
- IF NEW.encrypted_blob IS NULL THEN
- SIGNAL SQLSTATE '45000' SET MESSAGE_TEXT = '%s';
- END IF;
- IF NEW.encrypted_blob IS NOT NULL AND OLD.blob IS NULL THEN
- SIGNAL SQLSTATE '45000' SET MESSAGE_TEXT = '%s';
- END IF;
-END;
-"""
-
-SQLITE_INSERT_TRIGGER = """
-CREATE TRIGGER credential_insert_read_only BEFORE INSERT ON credential
-BEGIN
- SELECT RAISE (ABORT, '%s');
-END;
-"""
-
-SQLITE_UPDATE_TRIGGER = """
-CREATE TRIGGER credential_update_read_only BEFORE UPDATE ON credential
-WHEN NEW.encrypted_blob IS NULL
-BEGIN
- SELECT RAISE (ABORT, '%s');
-END;
-"""
-
-POSTGRESQL_INSERT_TRIGGER = """
-CREATE OR REPLACE FUNCTION keystone_read_only_insert()
- RETURNS trigger AS
-$BODY$
-BEGIN
- RAISE EXCEPTION '%s';
-END
-$BODY$ LANGUAGE plpgsql;
-
-CREATE TRIGGER credential_insert_read_only BEFORE INSERT ON credential
-FOR EACH ROW
-EXECUTE PROCEDURE keystone_read_only_insert();
-"""
-
-POSTGRESQL_UPDATE_TRIGGER = """
-CREATE OR REPLACE FUNCTION keystone_read_only_update()
- RETURNS trigger AS
-$BODY$
-BEGIN
- IF NEW.encrypted_blob IS NULL THEN
- RAISE EXCEPTION '%s';
- END IF;
- IF NEW.encrypted_blob IS NOT NULL AND OLD.blob IS NULL THEN
- RAISE EXCEPTION '%s';
- END IF;
- RETURN NEW;
-END
-$BODY$ LANGUAGE plpgsql;
-
-CREATE TRIGGER credential_update_read_only BEFORE UPDATE ON credential
-FOR EACH ROW
-EXECUTE PROCEDURE keystone_read_only_update();
-"""
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- key_hash = sql.Column('key_hash', sql.String(64), nullable=True)
- encrypted_blob = sql.Column(
- 'encrypted_blob',
- ks_sql.Text,
- nullable=True
- )
- credential_table = sql.Table('credential', meta, autoload=True)
- credential_table.create_column(key_hash)
- credential_table.create_column(encrypted_blob)
- credential_table.c.blob.alter(nullable=True)
-
- if not upgrades.USE_TRIGGERS:
- # Skip managing triggers if we're doing an offline upgrade.
- return
-
- error_message = ('Credential migration in progress. Cannot perform '
- 'writes to credential table.')
- if migrate_engine.name == 'postgresql':
- credential_insert_trigger = POSTGRESQL_INSERT_TRIGGER % error_message
- credential_update_trigger = POSTGRESQL_UPDATE_TRIGGER % (
- error_message, error_message
- )
- elif migrate_engine.name == 'sqlite':
- credential_insert_trigger = SQLITE_INSERT_TRIGGER % error_message
- credential_update_trigger = SQLITE_UPDATE_TRIGGER % error_message
- else:
- credential_insert_trigger = MYSQL_INSERT_TRIGGER % error_message
- credential_update_trigger = MYSQL_UPDATE_TRIGGER % (
- error_message, error_message
- )
-
- migrate_engine.execute(credential_insert_trigger)
- migrate_engine.execute(credential_update_trigger)
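
The 003 expand script above installs backend-specific read-only triggers on the credential table while key_hash and encrypted_blob are introduced: MySQL uses SIGNAL SQLSTATE '45000', SQLite uses RAISE(ABORT, ...), and PostgreSQL wraps RAISE EXCEPTION in a plpgsql function. A minimal sketch of the same "add columns, then install a dialect-specific guard" pattern in an assumed Alembic revision follows; only the short SQLite guard is shown, the dispatch mirrors the deleted script's check on the engine name, and nothing here is the actual replacement migration.

    import sqlalchemy as sa
    from alembic import op

    # Deliberately shortened guard; the deleted script's MySQL and
    # PostgreSQL trigger bodies follow the same shape.
    SQLITE_INSERT_TRIGGER = """
    CREATE TRIGGER credential_insert_read_only BEFORE INSERT ON credential
    BEGIN
        SELECT RAISE (ABORT, 'Credential migration in progress.');
    END;
    """


    def upgrade():
        op.add_column('credential',
                      sa.Column('key_hash', sa.String(64), nullable=True))
        op.add_column('credential',
                      sa.Column('encrypted_blob', sa.Text(), nullable=True))

        # The deleted script also relaxed credential.blob to nullable and
        # branched on migrate_engine.name; op.get_bind().dialect.name plays
        # the same role here.
        if op.get_bind().dialect.name == 'sqlite':
            op.execute(SQLITE_INSERT_TRIGGER)
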
diff --git a/keystone/common/sql/expand_repo/versions/004_reset_password_created_at.py b/keystone/common/sql/expand_repo/versions/004_reset_password_created_at.py
deleted file mode 100644
index 9cb40b454..000000000
--- a/keystone/common/sql/expand_repo/versions/004_reset_password_created_at.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/expand_repo/versions/005_placeholder.py b/keystone/common/sql/expand_repo/versions/005_placeholder.py
deleted file mode 100644
index b259427e4..000000000
--- a/keystone/common/sql/expand_repo/versions/005_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Newton backports. Do not use this number for new
-# Ocata work. New Ocata work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/expand_repo/versions/006_placeholder.py b/keystone/common/sql/expand_repo/versions/006_placeholder.py
deleted file mode 100644
index b259427e4..000000000
--- a/keystone/common/sql/expand_repo/versions/006_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Newton backports. Do not use this number for new
-# Ocata work. New Ocata work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/expand_repo/versions/007_placeholder.py b/keystone/common/sql/expand_repo/versions/007_placeholder.py
deleted file mode 100644
index b259427e4..000000000
--- a/keystone/common/sql/expand_repo/versions/007_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Newton backports. Do not use this number for new
-# Ocata work. New Ocata work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/expand_repo/versions/008_placeholder.py b/keystone/common/sql/expand_repo/versions/008_placeholder.py
deleted file mode 100644
index b259427e4..000000000
--- a/keystone/common/sql/expand_repo/versions/008_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Newton backports. Do not use this number for new
-# Ocata work. New Ocata work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/expand_repo/versions/009_placeholder.py b/keystone/common/sql/expand_repo/versions/009_placeholder.py
deleted file mode 100644
index b259427e4..000000000
--- a/keystone/common/sql/expand_repo/versions/009_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Newton backports. Do not use this number for new
-# Ocata work. New Ocata work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/expand_repo/versions/010_expand_add_revocation_event_index.py b/keystone/common/sql/expand_repo/versions/010_expand_add_revocation_event_index.py
deleted file mode 100644
index 1eee406f8..000000000
--- a/keystone/common/sql/expand_repo/versions/010_expand_add_revocation_event_index.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sql
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- revocation_event = sql.Table('revocation_event', meta, autoload=True)
- sql.Index('ix_revocation_event_issued_before',
- revocation_event.c.issued_before).create()
- sql.Index('ix_revocation_event_project_id_issued_before',
- revocation_event.c.project_id,
- revocation_event.c.issued_before).create()
- sql.Index('ix_revocation_event_user_id_issued_before',
- revocation_event.c.user_id,
- revocation_event.c.issued_before).create()
- sql.Index('ix_revocation_event_audit_id_issued_before',
- revocation_event.c.audit_id,
- revocation_event.c.issued_before).create()
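
The 010 expand script above builds four issued_before indexes on revocation_event through reflected columns. For comparison, a hypothetical Alembic sketch of the same indexes using op.create_index; the index names and column orderings are copied from the deleted script, everything else is assumed.

    from alembic import op


    def upgrade():
        # Same index names and column orderings as the deleted 010 script.
        op.create_index('ix_revocation_event_issued_before',
                        'revocation_event', ['issued_before'])
        op.create_index('ix_revocation_event_project_id_issued_before',
                        'revocation_event', ['project_id', 'issued_before'])
        op.create_index('ix_revocation_event_user_id_issued_before',
                        'revocation_event', ['user_id', 'issued_before'])
        op.create_index('ix_revocation_event_audit_id_issued_before',
                        'revocation_event', ['audit_id', 'issued_before'])
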
diff --git a/keystone/common/sql/expand_repo/versions/011_expand_user_id_unique_for_nonlocal_user.py b/keystone/common/sql/expand_repo/versions/011_expand_user_id_unique_for_nonlocal_user.py
deleted file mode 100644
index 9cb40b454..000000000
--- a/keystone/common/sql/expand_repo/versions/011_expand_user_id_unique_for_nonlocal_user.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/expand_repo/versions/012_expand_add_domain_id_to_idp.py b/keystone/common/sql/expand_repo/versions/012_expand_add_domain_id_to_idp.py
deleted file mode 100644
index ef4522f59..000000000
--- a/keystone/common/sql/expand_repo/versions/012_expand_add_domain_id_to_idp.py
+++ /dev/null
@@ -1,73 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sql
-
-from keystone.common.sql import upgrades
-
-
-MYSQL_INSERT_TRIGGER = """
-CREATE TRIGGER idp_insert_read_only BEFORE INSERT ON identity_provider
-FOR EACH ROW
-BEGIN
- SIGNAL SQLSTATE '45000'
- SET MESSAGE_TEXT = '%s';
-END;
-"""
-
-SQLITE_INSERT_TRIGGER = """
-CREATE TRIGGER idp_insert_read_only BEFORE INSERT ON identity_provider
-BEGIN
- SELECT RAISE (ABORT, '%s');
-END;
-"""
-
-POSTGRESQL_INSERT_TRIGGER = """
-CREATE OR REPLACE FUNCTION keystone_read_only_insert()
- RETURNS trigger AS
-$BODY$
-BEGIN
- RAISE EXCEPTION '%s';
-END
-$BODY$ LANGUAGE plpgsql;
-
-CREATE TRIGGER idp_insert_read_only BEFORE INSERT ON identity_provider
-FOR EACH ROW
-EXECUTE PROCEDURE keystone_read_only_insert();
-"""
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- idp = sql.Table('identity_provider', meta, autoload=True)
- project = sql.Table('project', meta, autoload=True)
- domain_id = sql.Column('domain_id', sql.String(64),
- sql.ForeignKey(project.c.id), nullable=True)
- idp.create_column(domain_id)
-
- if upgrades.USE_TRIGGERS:
- # Setting idp to be read-only to prevent old code from creating an idp
- # without a domain_id during an upgrade. This should be okay as it is
- # highly unlikely that an idp would be created during the migration and
- # the impact from preventing creations is minor.
- error_message = ('Identity provider migration in progress. Cannot '
- 'insert new rows into the identity_provider table at '
- 'this time.')
- if migrate_engine.name == 'postgresql':
- idp_insert_trigger = POSTGRESQL_INSERT_TRIGGER % error_message
- elif migrate_engine.name == 'sqlite':
- idp_insert_trigger = SQLITE_INSERT_TRIGGER % error_message
- else:
- idp_insert_trigger = MYSQL_INSERT_TRIGGER % error_message
- migrate_engine.execute(idp_insert_trigger)
diff --git a/keystone/common/sql/expand_repo/versions/013_expand_protocol_cascade_delete_for_federated_user.py b/keystone/common/sql/expand_repo/versions/013_expand_protocol_cascade_delete_for_federated_user.py
deleted file mode 100644
index 9cb40b454..000000000
--- a/keystone/common/sql/expand_repo/versions/013_expand_protocol_cascade_delete_for_federated_user.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/expand_repo/versions/014_expand_add_domain_id_to_user_table.py b/keystone/common/sql/expand_repo/versions/014_expand_add_domain_id_to_user_table.py
deleted file mode 100644
index 27ae96487..000000000
--- a/keystone/common/sql/expand_repo/versions/014_expand_add_domain_id_to_user_table.py
+++ /dev/null
@@ -1,165 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sql
-
-from keystone.common.sql import upgrades
-
-# define the local_user triggers for insert and update
-MYSQL_LOCAL_USER_INSERT_TRIGGER = """
-CREATE TRIGGER local_user_after_insert_trigger
-AFTER INSERT
- ON local_user FOR EACH ROW
-BEGIN
- UPDATE user SET domain_id = NEW.domain_id
- WHERE id = NEW.user_id and domain_id IS NULL;
-END;
-"""
-
-MYSQL_LOCAL_USER_UPDATE_TRIGGER = """
-CREATE TRIGGER local_user_after_update_trigger
-AFTER UPDATE
- ON local_user FOR EACH ROW
-BEGIN
- UPDATE user SET domain_id = NEW.domain_id
- WHERE id = NEW.user_id and domain_id <> NEW.domain_id;
-END;
-"""
-
-SQLITE_LOCAL_USER_INSERT_TRIGGER = """
-CREATE TRIGGER local_user_after_insert_trigger
-AFTER INSERT
- ON local_user
-BEGIN
- UPDATE user SET domain_id = NEW.domain_id
- WHERE id = NEW.user_id and domain_id IS NULL;
-END;
-"""
-
-SQLITE_LOCAL_USER_UPDATE_TRIGGER = """
-CREATE TRIGGER local_user_after_update_trigger
-AFTER UPDATE
- ON local_user
-BEGIN
- UPDATE user SET domain_id = NEW.domain_id
- WHERE id = NEW.user_id and domain_id <> NEW.domain_id;
-END;
-"""
-
-POSTGRESQL_LOCAL_USER_INSERT_TRIGGER = """
-CREATE OR REPLACE FUNCTION update_user_domain_id()
- RETURNS trigger AS
-$BODY$
-BEGIN
- UPDATE "user" SET domain_id = NEW.domain_id
- WHERE id = NEW.user_id;
- RETURN NULL;
-END
-$BODY$ LANGUAGE plpgsql;
-
-CREATE TRIGGER local_user_after_insert_trigger AFTER INSERT ON local_user
-FOR EACH ROW
-EXECUTE PROCEDURE update_user_domain_id();
-"""
-
-POSTGRESQL_LOCAL_USER_UPDATE_TRIGGER = """
-CREATE TRIGGER local_user_after_update_trigger AFTER UPDATE ON local_user
-FOR EACH ROW
-EXECUTE PROCEDURE update_user_domain_id();
-"""
-
-MYSQL_NONLOCAL_USER_INSERT_TRIGGER = """
-CREATE TRIGGER nonlocal_user_after_insert_trigger
-AFTER INSERT
- ON nonlocal_user FOR EACH ROW
-BEGIN
- UPDATE user SET domain_id = NEW.domain_id
- WHERE id = NEW.user_id and domain_id IS NULL;
-END;
-"""
-
-# define the nonlocal_user triggers for insert and update
-MYSQL_NONLOCAL_USER_UPDATE_TRIGGER = """
-CREATE TRIGGER nonlocal_user_after_update_trigger
-AFTER UPDATE
- ON nonlocal_user FOR EACH ROW
-BEGIN
- UPDATE user SET domain_id = NEW.domain_id
- WHERE id = NEW.user_id and domain_id <> NEW.domain_id;
-END;
-"""
-
-SQLITE_NONLOCAL_USER_INSERT_TRIGGER = """
-CREATE TRIGGER nonlocal_user_after_insert_trigger
-AFTER INSERT
- ON nonlocal_user
-BEGIN
- UPDATE user SET domain_id = NEW.domain_id
- WHERE id = NEW.user_id and domain_id IS NULL;
-END;
-"""
-
-SQLITE_NONLOCAL_USER_UPDATE_TRIGGER = """
-CREATE TRIGGER nonlocal_user_after_update_trigger
-AFTER UPDATE
- ON nonlocal_user
-BEGIN
- UPDATE user SET domain_id = NEW.domain_id
- WHERE id = NEW.user_id and domain_id <> NEW.domain_id;
-END;
-"""
-
-POSTGRESQL_NONLOCAL_USER_INSERT_TRIGGER = """
-CREATE TRIGGER nonlocal_user_after_insert_trigger AFTER INSERT ON nonlocal_user
-FOR EACH ROW
-EXECUTE PROCEDURE update_user_domain_id();
-"""
-
-POSTGRESQL_NONLOCAL_USER_UPDATE_TRIGGER = """
-CREATE TRIGGER nonlocal_user_after_update_trigger AFTER UPDATE ON nonlocal_user
-FOR EACH ROW
-EXECUTE PROCEDURE update_user_domain_id();
-"""
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- user = sql.Table('user', meta, autoload=True)
- project = sql.Table('project', meta, autoload=True)
- domain_id = sql.Column('domain_id', sql.String(64),
- sql.ForeignKey(project.c.id), nullable=True)
- user.create_column(domain_id)
-
- if upgrades.USE_TRIGGERS:
- if migrate_engine.name == 'postgresql':
- local_user_insert_trigger = POSTGRESQL_LOCAL_USER_INSERT_TRIGGER
- local_user_update_trigger = POSTGRESQL_LOCAL_USER_UPDATE_TRIGGER
- nonlocal_user_insert_trigger = (
- POSTGRESQL_NONLOCAL_USER_INSERT_TRIGGER)
- nonlocal_user_update_trigger = (
- POSTGRESQL_NONLOCAL_USER_UPDATE_TRIGGER)
- elif migrate_engine.name == 'sqlite':
- local_user_insert_trigger = SQLITE_LOCAL_USER_INSERT_TRIGGER
- local_user_update_trigger = SQLITE_LOCAL_USER_UPDATE_TRIGGER
- nonlocal_user_insert_trigger = SQLITE_NONLOCAL_USER_INSERT_TRIGGER
- nonlocal_user_update_trigger = SQLITE_NONLOCAL_USER_UPDATE_TRIGGER
- else:
- local_user_insert_trigger = MYSQL_LOCAL_USER_INSERT_TRIGGER
- local_user_update_trigger = MYSQL_LOCAL_USER_UPDATE_TRIGGER
- nonlocal_user_insert_trigger = MYSQL_NONLOCAL_USER_INSERT_TRIGGER
- nonlocal_user_update_trigger = MYSQL_NONLOCAL_USER_UPDATE_TRIGGER
- migrate_engine.execute(local_user_insert_trigger)
- migrate_engine.execute(local_user_update_trigger)
- migrate_engine.execute(nonlocal_user_insert_trigger)
- migrate_engine.execute(nonlocal_user_update_trigger)
diff --git a/keystone/common/sql/expand_repo/versions/015_expand_update_federated_user_domain.py b/keystone/common/sql/expand_repo/versions/015_expand_update_federated_user_domain.py
deleted file mode 100644
index 5a078aef4..000000000
--- a/keystone/common/sql/expand_repo/versions/015_expand_update_federated_user_domain.py
+++ /dev/null
@@ -1,69 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sql
-
-from keystone.common.sql import upgrades
-
-
-MYSQL_INSERT_TRIGGER = """
-CREATE TRIGGER federated_user_insert_trigger
-AFTER INSERT
- ON federated_user FOR EACH ROW
-BEGIN
- UPDATE user SET domain_id = (
- SELECT domain_id FROM identity_provider WHERE id = NEW.idp_id)
- WHERE id = NEW.user_id and domain_id IS NULL;
-END;
-"""
-
-SQLITE_INSERT_TRIGGER = """
-CREATE TRIGGER federated_user_insert_trigger
-AFTER INSERT
- ON federated_user
-BEGIN
- UPDATE user SET domain_id = (
- SELECT domain_id FROM identity_provider WHERE id = NEW.idp_id)
- WHERE id = NEW.user_id and domain_id IS NULL;
-END;
-"""
-
-POSTGRESQL_INSERT_TRIGGER = """
-CREATE OR REPLACE FUNCTION update_federated_user_domain_id()
- RETURNS trigger AS
-$BODY$
-BEGIN
- UPDATE "user" SET domain_id = (
- SELECT domain_id FROM identity_provider WHERE id = NEW.idp_id)
- WHERE id = NEW.user_id and domain_id IS NULL;
- RETURN NULL;
-END
-$BODY$ LANGUAGE plpgsql;
-
-CREATE TRIGGER federated_user_insert_trigger AFTER INSERT ON federated_user
-FOR EACH ROW
-EXECUTE PROCEDURE update_federated_user_domain_id();
-"""
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- if upgrades.USE_TRIGGERS:
- if migrate_engine.name == 'postgresql':
- insert_trigger = POSTGRESQL_INSERT_TRIGGER
- elif migrate_engine.name == 'sqlite':
- insert_trigger = SQLITE_INSERT_TRIGGER
- else:
- insert_trigger = MYSQL_INSERT_TRIGGER
- migrate_engine.execute(insert_trigger)
diff --git a/keystone/common/sql/expand_repo/versions/016_expand_add_user_options.py b/keystone/common/sql/expand_repo/versions/016_expand_add_user_options.py
deleted file mode 100644
index eec3378c7..000000000
--- a/keystone/common/sql/expand_repo/versions/016_expand_add_user_options.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sql
-
-from keystone.common import sql as ks_sql
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
- user_table = sql.Table('user', meta, autoload=True)
-
- user_option = sql.Table(
- 'user_option',
- meta,
- sql.Column('user_id', sql.String(64), sql.ForeignKey(user_table.c.id,
- ondelete='CASCADE'), nullable=False, primary_key=True),
- sql.Column('option_id', sql.String(4), nullable=False,
- primary_key=True),
- sql.Column('option_value', ks_sql.JsonBlob, nullable=True),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
-
- user_option.create(migrate_engine, checkfirst=True)
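
The 016 expand script above creates the user_option table with a composite primary key and a CASCADE foreign key to user.id. A hedged sketch of the equivalent op.create_table call in an assumed Alembic revision; keystone's JsonBlob column type is project-specific, so sa.Text stands in for option_value here.

    import sqlalchemy as sa
    from alembic import op


    def upgrade():
        op.create_table(
            'user_option',
            sa.Column('user_id', sa.String(64),
                      sa.ForeignKey('user.id', ondelete='CASCADE'),
                      primary_key=True, nullable=False),
            sa.Column('option_id', sa.String(4),
                      primary_key=True, nullable=False),
            # The deleted script used keystone's JsonBlob; Text is a stand-in.
            sa.Column('option_value', sa.Text(), nullable=True),
            mysql_engine='InnoDB',
            mysql_charset='utf8',
        )
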
diff --git a/keystone/common/sql/expand_repo/versions/017_placeholder.py b/keystone/common/sql/expand_repo/versions/017_placeholder.py
deleted file mode 100644
index cd0769c5e..000000000
--- a/keystone/common/sql/expand_repo/versions/017_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Ocata backports. Do not use this number for new
-# Pike work. New Pike work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/expand_repo/versions/018_placeholder.py b/keystone/common/sql/expand_repo/versions/018_placeholder.py
deleted file mode 100644
index cd0769c5e..000000000
--- a/keystone/common/sql/expand_repo/versions/018_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Ocata backports. Do not use this number for new
-# Pike work. New Pike work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/expand_repo/versions/019_placeholder.py b/keystone/common/sql/expand_repo/versions/019_placeholder.py
deleted file mode 100644
index cd0769c5e..000000000
--- a/keystone/common/sql/expand_repo/versions/019_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Ocata backports. Do not use this number for new
-# Pike work. New Pike work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/expand_repo/versions/020_placeholder.py b/keystone/common/sql/expand_repo/versions/020_placeholder.py
deleted file mode 100644
index cd0769c5e..000000000
--- a/keystone/common/sql/expand_repo/versions/020_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Ocata backports. Do not use this number for new
-# Pike work. New Pike work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/expand_repo/versions/021_placeholder.py b/keystone/common/sql/expand_repo/versions/021_placeholder.py
deleted file mode 100644
index cd0769c5e..000000000
--- a/keystone/common/sql/expand_repo/versions/021_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Ocata backports. Do not use this number for new
-# Pike work. New Pike work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/expand_repo/versions/022_expand_add_default_project_id_index.py b/keystone/common/sql/expand_repo/versions/022_expand_add_default_project_id_index.py
deleted file mode 100644
index 37413d0f9..000000000
--- a/keystone/common/sql/expand_repo/versions/022_expand_add_default_project_id_index.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sql
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- user = sql.Table('user', meta, autoload=True)
- sql.Index('ix_default_project_id', user.c.default_project_id).create()
diff --git a/keystone/common/sql/expand_repo/versions/023_expand_add_second_password_column_for_expanded_hash_sizes.py b/keystone/common/sql/expand_repo/versions/023_expand_add_second_password_column_for_expanded_hash_sizes.py
deleted file mode 100644
index ebd2b8bbf..000000000
--- a/keystone/common/sql/expand_repo/versions/023_expand_add_second_password_column_for_expanded_hash_sizes.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sql
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
-    # NOTE(notmorgan): To support the full range of scrypt and pbkdf2 password
- # hash lengths, this should be closer to varchar(1500) instead of
- # varchar(255).
- password_hash = sql.Column('password_hash', sql.String(255), nullable=True)
- password_table = sql.Table('password', meta, autoload=True)
- password_table.create_column(password_hash)
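
The 023 expand script above only appends a nullable password_hash varchar(255) column to the password table; the NOTE about scrypt and pbkdf2 concerns the column width, not the mechanics. The one-call Alembic equivalent, again as a hypothetical sketch rather than the real replacement:

    import sqlalchemy as sa
    from alembic import op


    def upgrade():
        # Nullable during the expand phase, exactly as in the deleted script.
        op.add_column('password',
                      sa.Column('password_hash', sa.String(255), nullable=True))
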
diff --git a/keystone/common/sql/expand_repo/versions/024_expand_create_created_at_int_columns.py b/keystone/common/sql/expand_repo/versions/024_expand_create_created_at_int_columns.py
deleted file mode 100644
index d836a861f..000000000
--- a/keystone/common/sql/expand_repo/versions/024_expand_create_created_at_int_columns.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-import sqlalchemy as sql
-
-from keystone.common import sql as ks_sql
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
-    # NOTE(morgan): column is nullable here for migration purposes;
-    # it is set to not-nullable in the contract phase to ensure we can handle
- # rolling upgrades in a sane way. This differs from the model in
- # keystone.identity.backends.sql_model by design.
- created_at = sql.Column('created_at_int', ks_sql.DateTimeInt(),
- nullable=True)
- expires_at = sql.Column('expires_at_int', ks_sql.DateTimeInt(),
- nullable=True)
- password_table = sql.Table('password', meta, autoload=True)
- password_table.create_column(created_at)
- password_table.create_column(expires_at)
diff --git a/keystone/common/sql/expand_repo/versions/025_placeholder.py b/keystone/common/sql/expand_repo/versions/025_placeholder.py
deleted file mode 100644
index a96cd6f36..000000000
--- a/keystone/common/sql/expand_repo/versions/025_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Pike backports. Do not use this number for new
-# Queens work. New Queens work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/expand_repo/versions/026_placeholder.py b/keystone/common/sql/expand_repo/versions/026_placeholder.py
deleted file mode 100644
index a96cd6f36..000000000
--- a/keystone/common/sql/expand_repo/versions/026_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Pike backports. Do not use this number for new
-# Queens work. New Queens work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/expand_repo/versions/027_placeholder.py b/keystone/common/sql/expand_repo/versions/027_placeholder.py
deleted file mode 100644
index a96cd6f36..000000000
--- a/keystone/common/sql/expand_repo/versions/027_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Pike backports. Do not use this number for new
-# Queens work. New Queens work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/expand_repo/versions/028_placeholder.py b/keystone/common/sql/expand_repo/versions/028_placeholder.py
deleted file mode 100644
index a96cd6f36..000000000
--- a/keystone/common/sql/expand_repo/versions/028_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Pike backports. Do not use this number for new
-# Queens work. New Queens work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/expand_repo/versions/029_placeholder.py b/keystone/common/sql/expand_repo/versions/029_placeholder.py
deleted file mode 100644
index a96cd6f36..000000000
--- a/keystone/common/sql/expand_repo/versions/029_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Pike backports. Do not use this number for new
-# Queens work. New Queens work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/expand_repo/versions/030_expand_add_project_tags_table.py b/keystone/common/sql/expand_repo/versions/030_expand_add_project_tags_table.py
deleted file mode 100644
index 71ff49d43..000000000
--- a/keystone/common/sql/expand_repo/versions/030_expand_add_project_tags_table.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sql
-
-
-def upgrade(migrate_engine):
-
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- project_table = sql.Table('project', meta, autoload=True)
-
- # NOTE(lamt) To make tag names case sensitive on MySQL, the 'name' column
- # would need a collation, which is incompatible with PostgreSQL. Unicode
- # is used instead, mirroring nova's server tag model:
- # https://github.com/openstack/nova/blob/master/nova/db/sqlalchemy/models.py
- project_tags_table = sql.Table(
- 'project_tag',
- meta,
- sql.Column('project_id',
- sql.String(64),
- sql.ForeignKey(project_table.c.id, ondelete='CASCADE'),
- nullable=False,
- primary_key=True),
- sql.Column('name',
- sql.Unicode(255),
- nullable=False,
- primary_key=True),
- sql.UniqueConstraint('project_id', 'name'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- project_tags_table.create(migrate_engine, checkfirst=True)
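The removed 030 script above shows the sqlalchemy-migrate way of creating the project_tag table. Purely for illustration, a minimal sketch of the same table expressed as an Alembic revision might look like the following; the revision identifiers are placeholders, and nothing in this sketch is part of the diff itself.

# Hypothetical Alembic counterpart to the removed 030 expand script.
# Revision identifiers are placeholders; not part of this change.
import sqlalchemy as sa
from alembic import op

revision = 'xxx_project_tags_sketch'
down_revision = None


def upgrade():
    # Unicode keeps tag names case sensitive on MySQL without relying on a
    # PostgreSQL-incompatible collation, matching the removed script.
    op.create_table(
        'project_tag',
        sa.Column('project_id', sa.String(64),
                  sa.ForeignKey('project.id', ondelete='CASCADE'),
                  nullable=False, primary_key=True),
        sa.Column('name', sa.Unicode(255), nullable=False, primary_key=True),
        sa.UniqueConstraint('project_id', 'name'),
        mysql_engine='InnoDB',
        mysql_charset='utf8',
    )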
diff --git a/keystone/common/sql/expand_repo/versions/031_expand_system_assignment_table.py b/keystone/common/sql/expand_repo/versions/031_expand_system_assignment_table.py
deleted file mode 100644
index 45af9863b..000000000
--- a/keystone/common/sql/expand_repo/versions/031_expand_system_assignment_table.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sql
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
- system_assignment = sql.Table(
- 'system_assignment',
- meta,
- sql.Column('type', sql.String(64), nullable=False),
- sql.Column('actor_id', sql.String(64), nullable=False),
- sql.Column('target_id', sql.String(64), nullable=False),
- sql.Column('role_id', sql.String(64), nullable=False),
- sql.Column('inherited', sql.Boolean, default=False, nullable=False),
- sql.PrimaryKeyConstraint(
- 'type', 'actor_id', 'target_id', 'role_id', 'inherited'
- ),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
- system_assignment.create()
diff --git a/keystone/common/sql/expand_repo/versions/032_expand_add_expired_at_int_to_trust.py b/keystone/common/sql/expand_repo/versions/032_expand_add_expired_at_int_to_trust.py
deleted file mode 100644
index fd5d6ad65..000000000
--- a/keystone/common/sql/expand_repo/versions/032_expand_add_expired_at_int_to_trust.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-from migrate import UniqueConstraint
-import sqlalchemy as sql
-
-from keystone.common import sql as ks_sql
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- # NOTE(morgan): the column is nullable here for migration purposes; it is
- # set to not-nullable in the contract phase so that rolling upgrades can be
- # handled in a sane way. This differs from the model in
- # keystone.identity.backends.sql_model by design.
- expires_at = sql.Column('expires_at_int', ks_sql.DateTimeInt())
- trust_table = sql.Table('trust', meta, autoload=True)
- trust_table.create_column(expires_at)
-
- UniqueConstraint('trustor_user_id', 'trustee_user_id', 'project_id',
- 'impersonation', 'expires_at', 'expires_at_int',
- table=trust_table,
- name='duplicate_trust_constraint_expanded').create()
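The NOTE(morgan) comment in the removed 032 script captures the expand/contract pattern: add the new column as nullable during the expand phase and only tighten it later. A hedged sketch of that same expand step, re-expressed with Alembic, is shown below; sa.BigInteger merely stands in for keystone's DateTimeInt custom type, and none of this is part of the diff.

# Illustrative only: the 032 expand step re-expressed with Alembic.
# sa.BigInteger is a stand-in for keystone's DateTimeInt custom type.
import sqlalchemy as sa
from alembic import op


def upgrade():
    # Nullable during expand; a later contract phase is what would enforce
    # NOT NULL once every node understands the new column.
    op.add_column('trust',
                  sa.Column('expires_at_int', sa.BigInteger(), nullable=True))
    op.create_unique_constraint(
        'duplicate_trust_constraint_expanded',
        'trust',
        ['trustor_user_id', 'trustee_user_id', 'project_id',
         'impersonation', 'expires_at', 'expires_at_int'],
    )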
diff --git a/keystone/common/sql/expand_repo/versions/033_expand_add_limits_tables.py b/keystone/common/sql/expand_repo/versions/033_expand_add_limits_tables.py
deleted file mode 100644
index cd6149c14..000000000
--- a/keystone/common/sql/expand_repo/versions/033_expand_add_limits_tables.py
+++ /dev/null
@@ -1,68 +0,0 @@
-# Copyright 2018 SUSE Linux Gmbh
-# Copyright 2018 Huawei
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import migrate
-import sqlalchemy as sql
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- service_table = sql.Table('service', meta, autoload=True)
- region_table = sql.Table('region', meta, autoload=True)
- project_table = sql.Table('project', meta, autoload=True)
-
- registered_limit_table = sql.Table(
- 'registered_limit',
- meta,
- sql.Column('id', sql.String(length=64), primary_key=True),
- sql.Column('service_id',
- sql.String(255),
- sql.ForeignKey(service_table.c.id)),
- sql.Column('region_id',
- sql.String(64),
- sql.ForeignKey(region_table.c.id), nullable=True),
- sql.Column('resource_name', sql.String(255)),
- sql.Column('default_limit', sql.Integer, nullable=False),
- sql.UniqueConstraint('service_id', 'region_id', 'resource_name'),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
- registered_limit_table.create(migrate_engine, checkfirst=True)
-
- limit_table = sql.Table(
- 'limit',
- meta,
- sql.Column('id', sql.String(length=64), primary_key=True),
- sql.Column('project_id',
- sql.String(64),
- sql.ForeignKey(project_table.c.id)),
- sql.Column('service_id', sql.String(255)),
- sql.Column('region_id', sql.String(64), nullable=True),
- sql.Column('resource_name', sql.String(255)),
- sql.Column('resource_limit', sql.Integer, nullable=False),
- sql.UniqueConstraint('project_id', 'service_id', 'region_id',
- 'resource_name'),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
- limit_table.create(migrate_engine, checkfirst=True)
-
- migrate.ForeignKeyConstraint(
- columns=[limit_table.c.service_id,
- limit_table.c.region_id,
- limit_table.c.resource_name],
- refcolumns=[registered_limit_table.c.service_id,
- registered_limit_table.c.region_id,
- registered_limit_table.c.resource_name]).create()
diff --git a/keystone/common/sql/expand_repo/versions/034_expand_add_application_credential_table.py b/keystone/common/sql/expand_repo/versions/034_expand_add_application_credential_table.py
deleted file mode 100644
index 3ddb812bb..000000000
--- a/keystone/common/sql/expand_repo/versions/034_expand_add_application_credential_table.py
+++ /dev/null
@@ -1,52 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sql
-
-from keystone.common import sql as ks_sql
-
-
-def upgrade(migrate_engine):
-
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- application_credential = sql.Table(
- 'application_credential', meta,
- sql.Column('internal_id', sql.Integer, primary_key=True,
- nullable=False),
- sql.Column('id', sql.String(length=64), nullable=False),
- sql.Column('name', sql.String(length=255), nullable=False),
- sql.Column('secret_hash', sql.String(length=255), nullable=False),
- sql.Column('description', sql.Text),
- sql.Column('user_id', sql.String(length=64), nullable=False),
- sql.Column('project_id', sql.String(64), nullable=False),
- sql.Column('expires_at', ks_sql.DateTimeInt()),
- sql.Column('allow_application_credential_creation', sql.Boolean),
- sql.UniqueConstraint('user_id', 'name',
- name='duplicate_app_cred_constraint'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- application_credential_role = sql.Table(
- 'application_credential_role', meta,
- sql.Column('application_credential_id', sql.Integer,
- sql.ForeignKey(application_credential.c.internal_id,
- ondelete='CASCADE'),
- primary_key=True, nullable=False),
- sql.Column('role_id', sql.String(length=64), primary_key=True,
- nullable=False),
- mysql_engine='InnoDB', mysql_charset='utf8')
-
- application_credential.create(migrate_engine, checkfirst=True)
- application_credential_role.create(migrate_engine, checkfirst=True)
diff --git a/keystone/common/sql/expand_repo/versions/036_expand_rename_application_credential_restriction_column.py b/keystone/common/sql/expand_repo/versions/036_expand_rename_application_credential_restriction_column.py
deleted file mode 100644
index 5d5b3ef06..000000000
--- a/keystone/common/sql/expand_repo/versions/036_expand_rename_application_credential_restriction_column.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sql
-
-
-def upgrade(migrate_engine):
-
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- table = sql.Table(
- 'application_credential', meta, autoload=True
- )
- # MySQL and PostgreSQL can handle a column rename.
- # Only SQLite is special. Since SQLite can't support an online upgrade
- # anyway, just brute-force the migration by copying the table.
- if migrate_engine.name == 'sqlite':
- old_table = table
-
- args = []
- for column in old_table.columns:
- if column.name != 'allow_application_credential_creation':
- args.append(column.copy())
- unrestricted = sql.Column('unrestricted', sql.Boolean)
- args.append(unrestricted)
- constraint = sql.UniqueConstraint('user_id', 'name',
- name='duplicate_app_cred_constraint')
- args.append(constraint)
- new_table = sql.Table('application_credential_temp',
- old_table.metadata, *args)
- new_table.create(migrate_engine, checkfirst=True)
- else:
- unrestricted = sql.Column('unrestricted', sql.Boolean())
- table.create_column(unrestricted)
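The comment in the removed 036 script explains why SQLite needs a copy-the-table workaround for a column rename. If the same rename were written with Alembic (an assumption made only for illustration), batch mode would hide that workaround by rebuilding the table on SQLite and issuing a plain column rename elsewhere. Note that this sketch performs the rename in a single step rather than the expand/migrate/contract split used by the removed scripts.

# Sketch only: a single-step rename via Alembic batch mode, which rebuilds
# the table on SQLite and emits a plain column rename on other backends.
import sqlalchemy as sa
from alembic import op


def upgrade():
    with op.batch_alter_table('application_credential') as batch_op:
        batch_op.alter_column('allow_application_credential_creation',
                              new_column_name='unrestricted',
                              existing_type=sa.Boolean())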
diff --git a/keystone/common/sql/expand_repo/versions/037_expand_remove_service_and_region_fk_for_registered_limit.py b/keystone/common/sql/expand_repo/versions/037_expand_remove_service_and_region_fk_for_registered_limit.py
deleted file mode 100644
index 9cb40b454..000000000
--- a/keystone/common/sql/expand_repo/versions/037_expand_remove_service_and_region_fk_for_registered_limit.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/expand_repo/versions/038_placeholder.py b/keystone/common/sql/expand_repo/versions/038_placeholder.py
deleted file mode 100644
index 71faccf92..000000000
--- a/keystone/common/sql/expand_repo/versions/038_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Queens backports. Do not use this number for new
-# Rocky work. New Rocky work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/expand_repo/versions/039_placeholder.py b/keystone/common/sql/expand_repo/versions/039_placeholder.py
deleted file mode 100644
index 71faccf92..000000000
--- a/keystone/common/sql/expand_repo/versions/039_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Queens backports. Do not use this number for new
-# Rocky work. New Rocky work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/expand_repo/versions/040_placeholder.py b/keystone/common/sql/expand_repo/versions/040_placeholder.py
deleted file mode 100644
index 71faccf92..000000000
--- a/keystone/common/sql/expand_repo/versions/040_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Queens backports. Do not use this number for new
-# Rocky work. New Rocky work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/expand_repo/versions/041_placeholder.py b/keystone/common/sql/expand_repo/versions/041_placeholder.py
deleted file mode 100644
index 71faccf92..000000000
--- a/keystone/common/sql/expand_repo/versions/041_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Queens backports. Do not use this number for new
-# Rocky work. New Rocky work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/expand_repo/versions/042_placeholder.py b/keystone/common/sql/expand_repo/versions/042_placeholder.py
deleted file mode 100644
index 71faccf92..000000000
--- a/keystone/common/sql/expand_repo/versions/042_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Queens backports. Do not use this number for new
-# Rocky work. New Rocky work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/expand_repo/versions/043_placeholder.py b/keystone/common/sql/expand_repo/versions/043_placeholder.py
deleted file mode 100644
index 71faccf92..000000000
--- a/keystone/common/sql/expand_repo/versions/043_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Queens backports. Do not use this number for new
-# Rocky work. New Rocky work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/expand_repo/versions/044_placeholder.py b/keystone/common/sql/expand_repo/versions/044_placeholder.py
deleted file mode 100644
index 71faccf92..000000000
--- a/keystone/common/sql/expand_repo/versions/044_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Queens backports. Do not use this number for new
-# Rocky work. New Rocky work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/expand_repo/versions/045_expand_add_description_to_limit.py b/keystone/common/sql/expand_repo/versions/045_expand_add_description_to_limit.py
deleted file mode 100644
index 76ea72d54..000000000
--- a/keystone/common/sql/expand_repo/versions/045_expand_add_description_to_limit.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sql
-
-
-def upgrade(migrate_engine):
-
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- registered_limit_table = sql.Table(
- 'registered_limit', meta, autoload=True
- )
- description = sql.Column('description', sql.Text)
- registered_limit_table.create_column(description)
-
- limit_table = sql.Table('limit', meta, autoload=True)
- description = sql.Column('description', sql.Text)
- limit_table.create_column(description)
diff --git a/keystone/common/sql/expand_repo/versions/046_expand_old_password_data_to_password_hash_column.py b/keystone/common/sql/expand_repo/versions/046_expand_old_password_data_to_password_hash_column.py
deleted file mode 100644
index 8aa15c1ef..000000000
--- a/keystone/common/sql/expand_repo/versions/046_expand_old_password_data_to_password_hash_column.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/expand_repo/versions/047_expand_update_pk_for_unified_limit.py b/keystone/common/sql/expand_repo/versions/047_expand_update_pk_for_unified_limit.py
deleted file mode 100644
index 13ed1b635..000000000
--- a/keystone/common/sql/expand_repo/versions/047_expand_update_pk_for_unified_limit.py
+++ /dev/null
@@ -1,103 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import migrate
-import sqlalchemy as sql
-
-
-MYSQL_CREATE_ID_PRIMARY_KEY_COLUMN = """
-ALTER TABLE `%s` ADD `internal_id` INT NOT NULL AUTO_INCREMENT PRIMARY KEY;
-"""
-
-POSTGRESQL_CREATE_ID_PRIMARY_KEY_COLUMN = """
-ALTER TABLE "%s" ADD COLUMN "internal_id" SERIAL PRIMARY KEY;
-"""
-
-
-def upgrade(migrate_engine):
-
- # For both the registered_limit and limit tables on MySQL and PostgreSQL:
- #
- # 1. drop the primary key on the `id` column.
- # 2. create an auto-increment `internal_id` column as the new primary key.
- # 3. add a unique constraint on the `id` column.
- #
- # SQLite, however, cannot add a primary key to an existing table, so for
- # SQLite these steps are used instead (registered_limit as an example):
- #
- # 1. Add a new table `registered_limit_new` that contains the `internal_id`
- # column.
- # 2. Migrate the data from `registered_limit` to `registered_limit_new`.
- # 3. Drop `registered_limit` and rename `registered_limit_new` to
- # `registered_limit`.
-
- meta = sql.MetaData()
- meta.bind = migrate_engine
- registered_limit_table = sql.Table('registered_limit', meta, autoload=True)
- limit_table = sql.Table('limit', meta, autoload=True)
-
- if migrate_engine.name != 'sqlite':
- pk = migrate.PrimaryKeyConstraint('id', table=registered_limit_table)
- pk.drop()
- if migrate_engine.name == 'mysql':
- migrate_engine.execute(
- MYSQL_CREATE_ID_PRIMARY_KEY_COLUMN % 'registered_limit')
- else:
- migrate_engine.execute(
- POSTGRESQL_CREATE_ID_PRIMARY_KEY_COLUMN % 'registered_limit')
- unique_constraint = migrate.UniqueConstraint(
- 'id', table=registered_limit_table)
- unique_constraint.create()
-
- pk = migrate.PrimaryKeyConstraint('id', table=limit_table)
- pk.drop()
- if migrate_engine.name == 'mysql':
- migrate_engine.execute(
- MYSQL_CREATE_ID_PRIMARY_KEY_COLUMN % 'limit')
- else:
- migrate_engine.execute(
- POSTGRESQL_CREATE_ID_PRIMARY_KEY_COLUMN % 'limit')
- unique_constraint = migrate.UniqueConstraint('id', table=limit_table)
- unique_constraint.create()
- else:
- # SQLite case
- registered_limit_table_new = sql.Table(
- 'registered_limit_new',
- meta,
- sql.Column('internal_id', sql.Integer, primary_key=True),
- sql.Column('id', sql.String(length=64), unique=True),
- sql.Column('service_id',
- sql.String(64)),
- sql.Column('region_id',
- sql.String(64),
- nullable=True),
- sql.Column('resource_name', sql.String(255)),
- sql.Column('default_limit', sql.Integer, nullable=False),
- sql.Column('description', sql.Text),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
- registered_limit_table_new.create(migrate_engine, checkfirst=True)
-
- limit_table_new = sql.Table(
- 'limit_new',
- meta,
- sql.Column('internal_id', sql.Integer, primary_key=True),
- sql.Column('id', sql.String(length=64), unique=True),
- sql.Column('project_id', sql.String(64)),
- sql.Column('service_id', sql.String(64)),
- sql.Column('region_id', sql.String(64), nullable=True),
- sql.Column('resource_name', sql.String(255)),
- sql.Column('resource_limit', sql.Integer, nullable=False),
- sql.Column('description', sql.Text),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
- limit_table_new.create(migrate_engine, checkfirst=True)
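To make the strategy described in the removed 047 comment block more concrete, here is a hedged sketch of the non-SQLite path only (drop the primary key on id, add an auto-increment internal_id primary key, keep id unique), written against Alembic's op API. The constraint names are assumed, the raw SQL shown is the PostgreSQL form, and the SQLite rebuild path is omitted; nothing here is part of the actual change.

# Illustrative sketch of the MySQL/PostgreSQL path described above.
# Constraint names are assumed; the SQLite copy-and-rebuild path is omitted.
from alembic import op


def upgrade():
    for table in ('registered_limit', 'limit'):
        # Assumed default primary-key constraint name (PostgreSQL style).
        op.drop_constraint('%s_pkey' % table, table, type_='primary')
        # PostgreSQL form; MySQL would use AUTO_INCREMENT instead of SERIAL.
        op.execute('ALTER TABLE "%s" ADD COLUMN "internal_id" '
                   'SERIAL PRIMARY KEY' % table)
        op.create_unique_constraint('%s_id_key' % table, table, ['id'])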
diff --git a/keystone/common/sql/expand_repo/versions/048_expand_add_registered_limit_id_column_for_limit.py b/keystone/common/sql/expand_repo/versions/048_expand_add_registered_limit_id_column_for_limit.py
deleted file mode 100644
index 05ee9c826..000000000
--- a/keystone/common/sql/expand_repo/versions/048_expand_add_registered_limit_id_column_for_limit.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sql
-
-
-def upgrade(migrate_engine):
-
- meta = sql.MetaData()
- meta.bind = migrate_engine
- registered_limit_table = sql.Table('registered_limit', meta, autoload=True)
- limit_table = sql.Table('limit', meta, autoload=True)
-
- registered_limit_id = sql.Column(
- 'registered_limit_id', sql.String(64),
- sql.ForeignKey(registered_limit_table.c.id))
- limit_table.create_column(registered_limit_id)
-
- if migrate_engine.name == 'sqlite':
- meta = sql.MetaData()
- meta.bind = migrate_engine
- # "limit_new" is the table created in 047 expand script for SQLite
- # case.
- try:
- limit_table_new = sql.Table('limit_new', meta, autoload=True)
- registered_limit_id = sql.Column(
- 'registered_limit_id', sql.String(64),
- sql.ForeignKey(registered_limit_table.c.id))
- limit_table_new.create_column(registered_limit_id)
- except sql.exc.NoSuchTableError:
- pass
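The removed 048 script adds the registered_limit_id foreign key and then repeats the work for the transitional limit_new table that 047 may have created on SQLite. A minimal, purely illustrative sketch of the same idea follows, with the table-existence check done through SQLAlchemy's inspector (assuming SQLAlchemy 1.4+, where Inspector.has_table is available); it is not part of this diff.

# Sketch: add the FK column, and mirror it onto 'limit_new' when that
# transitional SQLite table from 047 exists. Illustrative only.
import sqlalchemy as sa
from alembic import op


def upgrade():
    op.add_column('limit',
                  sa.Column('registered_limit_id', sa.String(64),
                            sa.ForeignKey('registered_limit.id')))
    bind = op.get_bind()
    if bind.dialect.name == 'sqlite' and sa.inspect(bind).has_table('limit_new'):
        op.add_column('limit_new',
                      sa.Column('registered_limit_id', sa.String(64),
                                sa.ForeignKey('registered_limit.id')))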
diff --git a/keystone/common/sql/expand_repo/versions/049_placeholder.py b/keystone/common/sql/expand_repo/versions/049_placeholder.py
deleted file mode 100644
index 8f51a8962..000000000
--- a/keystone/common/sql/expand_repo/versions/049_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Rocky backports. Do not use this number for new
-# Stein work. New Stein work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/expand_repo/versions/050_placeholder.py b/keystone/common/sql/expand_repo/versions/050_placeholder.py
deleted file mode 100644
index 8f51a8962..000000000
--- a/keystone/common/sql/expand_repo/versions/050_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Rocky backports. Do not use this number for new
-# Stein work. New Stein work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/expand_repo/versions/051_placeholder.py b/keystone/common/sql/expand_repo/versions/051_placeholder.py
deleted file mode 100644
index 8f51a8962..000000000
--- a/keystone/common/sql/expand_repo/versions/051_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Rocky backports. Do not use this number for new
-# Stein work. New Stein work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/expand_repo/versions/052_placeholder.py b/keystone/common/sql/expand_repo/versions/052_placeholder.py
deleted file mode 100644
index 8f51a8962..000000000
--- a/keystone/common/sql/expand_repo/versions/052_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Rocky backports. Do not use this number for new
-# Stein work. New Stein work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/expand_repo/versions/053_expand_add_role_description_to_role_table.py b/keystone/common/sql/expand_repo/versions/053_expand_add_role_description_to_role_table.py
deleted file mode 100644
index 99e41ff15..000000000
--- a/keystone/common/sql/expand_repo/versions/053_expand_add_role_description_to_role_table.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sql
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- role_table = sql.Table('role', meta, autoload=True)
- description = sql.Column('description', sql.String(255),
- nullable=True)
- role_table.create_column(description)
diff --git a/keystone/common/sql/expand_repo/versions/054_expand_drop_old_passoword_column.py b/keystone/common/sql/expand_repo/versions/054_expand_drop_old_passoword_column.py
deleted file mode 100644
index 8aa15c1ef..000000000
--- a/keystone/common/sql/expand_repo/versions/054_expand_drop_old_passoword_column.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/expand_repo/versions/055_expand_add_domain_to_limit.py b/keystone/common/sql/expand_repo/versions/055_expand_add_domain_to_limit.py
deleted file mode 100644
index c0f88ee57..000000000
--- a/keystone/common/sql/expand_repo/versions/055_expand_add_domain_to_limit.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sql
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- limit_table = sql.Table('limit', meta, autoload=True)
- domain_id = sql.Column('domain_id', sql.String(64), nullable=True)
- limit_table.create_column(domain_id)
-
- if migrate_engine.name == 'sqlite':
- meta = sql.MetaData()
- meta.bind = migrate_engine
- # "limit_new" is the table created in 047 expand script for SQLite
- # case.
- try:
- limit_table_new = sql.Table('limit_new', meta, autoload=True)
- domain_id = sql.Column('domain_id', sql.String(64), nullable=True)
- limit_table_new.create_column(domain_id)
- except sql.exc.NoSuchTableError:
- pass
diff --git a/keystone/common/sql/expand_repo/versions/056_expand_add_application_credential_access_rules.py b/keystone/common/sql/expand_repo/versions/056_expand_add_application_credential_access_rules.py
deleted file mode 100644
index 5df205b00..000000000
--- a/keystone/common/sql/expand_repo/versions/056_expand_add_application_credential_access_rules.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# Copyright 2019 SUSE Linux GmbH
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sql
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- application_credential = sql.Table(
- 'application_credential', meta, autoload=True)
- access_rule = sql.Table(
- 'access_rule', meta,
- sql.Column('id', sql.Integer, primary_key=True, nullable=False),
- sql.Column('service', sql.String(64)),
- sql.Column('path', sql.String(128)),
- sql.Column('method', sql.String(16)),
- mysql_engine='InnoDB', mysql_charset='utf8'
- )
- app_cred_access_rule = sql.Table(
- 'application_credential_access_rule', meta,
- sql.Column('application_credential_id', sql.Integer,
- sql.ForeignKey(application_credential.c.internal_id,
- ondelete='CASCADE'),
- primary_key=True, nullable=False),
- sql.Column('access_rule_id', sql.Integer,
- sql.ForeignKey(access_rule.c.id,
- ondelete='CASCADE'),
- primary_key=True, nullable=False),
- mysql_engine='InnoDB', mysql_charset='utf8'
- )
- access_rule.create(migrate_engine, checkfirst=True)
- app_cred_access_rule.create(migrate_engine, checkfirst=True)
diff --git a/keystone/common/sql/expand_repo/versions/057_placeholder.py b/keystone/common/sql/expand_repo/versions/057_placeholder.py
deleted file mode 100644
index dff6bd138..000000000
--- a/keystone/common/sql/expand_repo/versions/057_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Stein backports. Do not use this number for new
-# Train work. New Train work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/expand_repo/versions/058_placeholder.py b/keystone/common/sql/expand_repo/versions/058_placeholder.py
deleted file mode 100644
index dff6bd138..000000000
--- a/keystone/common/sql/expand_repo/versions/058_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Stein backports. Do not use this number for new
-# Train work. New Train work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/expand_repo/versions/059_placeholder.py b/keystone/common/sql/expand_repo/versions/059_placeholder.py
deleted file mode 100644
index dff6bd138..000000000
--- a/keystone/common/sql/expand_repo/versions/059_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Stein backports. Do not use this number for new
-# Train work. New Train work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/expand_repo/versions/060_placeholder.py b/keystone/common/sql/expand_repo/versions/060_placeholder.py
deleted file mode 100644
index dff6bd138..000000000
--- a/keystone/common/sql/expand_repo/versions/060_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Stein backports. Do not use this number for new
-# Train work. New Train work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/expand_repo/versions/061_placeholder.py b/keystone/common/sql/expand_repo/versions/061_placeholder.py
deleted file mode 100644
index dff6bd138..000000000
--- a/keystone/common/sql/expand_repo/versions/061_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Stein backports. Do not use this number for new
-# Train work. New Train work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/expand_repo/versions/062_expand_extract_redelegation_data_from_extras.py b/keystone/common/sql/expand_repo/versions/062_expand_extract_redelegation_data_from_extras.py
deleted file mode 100644
index 7e3019eb5..000000000
--- a/keystone/common/sql/expand_repo/versions/062_expand_extract_redelegation_data_from_extras.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sql
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- trust_table = sql.Table('trust', meta, autoload=True)
- trust_id_column = sql.Column(
- 'redelegated_trust_id',
- sql.String(64),
- nullable=True)
- count_column = sql.Column(
- 'redelegation_count',
- sql.Integer,
- nullable=True)
-
- trust_table.create_column(trust_id_column)
- trust_table.create_column(count_column)
diff --git a/keystone/common/sql/expand_repo/versions/063_expand_drop_limit_columns.py b/keystone/common/sql/expand_repo/versions/063_expand_drop_limit_columns.py
deleted file mode 100644
index 8aa15c1ef..000000000
--- a/keystone/common/sql/expand_repo/versions/063_expand_drop_limit_columns.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/expand_repo/versions/064_expand_add_remote_id_attribute_to_federation_protocol_table.py b/keystone/common/sql/expand_repo/versions/064_expand_add_remote_id_attribute_to_federation_protocol_table.py
deleted file mode 100644
index e16c90eeb..000000000
--- a/keystone/common/sql/expand_repo/versions/064_expand_add_remote_id_attribute_to_federation_protocol_table.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sql
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- federation_protocol_table = sql.Table(
- 'federation_protocol', meta, autoload=True)
- remote_id_attribute = sql.Column('remote_id_attribute', sql.String(64))
- federation_protocol_table.create_column(remote_id_attribute)
diff --git a/keystone/common/sql/expand_repo/versions/065_expand_add_user_external_id_to_access_rule.py b/keystone/common/sql/expand_repo/versions/065_expand_add_user_external_id_to_access_rule.py
deleted file mode 100644
index 1f687ccf3..000000000
--- a/keystone/common/sql/expand_repo/versions/065_expand_add_user_external_id_to_access_rule.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# Copyright 2019 SUSE Linux GmbH
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import migrate
-import sqlalchemy as sql
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- access_rule = sql.Table('access_rule', meta, autoload=True)
-
- external_id = sql.Column('external_id', sql.String(64))
- access_rule.create_column(external_id)
- sql.Index('external_id', access_rule.c.external_id).create()
- unique_constraint_id = migrate.UniqueConstraint('external_id',
- table=access_rule)
- unique_constraint_id.create()
-
- user_id = sql.Column('user_id', sql.String(64))
- access_rule.create_column(user_id)
- sql.Index('user_id', access_rule.c.user_id).create()
- unique_constraint_rule_for_user = migrate.UniqueConstraint(
- 'user_id', 'service', 'path', 'method',
- name='duplicate_access_rule_for_user_constraint',
- table=access_rule)
- unique_constraint_rule_for_user.create()
diff --git a/keystone/common/sql/expand_repo/versions/066_expand_add_role_and_project_option_tables.py b/keystone/common/sql/expand_repo/versions/066_expand_add_role_and_project_option_tables.py
deleted file mode 100644
index a051b5336..000000000
--- a/keystone/common/sql/expand_repo/versions/066_expand_add_role_and_project_option_tables.py
+++ /dev/null
@@ -1,51 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sql
-
-from keystone.common import sql as ks_sql
-
-
-def upgrade(migrate_engine):
-
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- role_table = sql.Table('role', meta, autoload=True)
- project_table = sql.Table('project', meta, autoload=True)
-
- role_resource_options_table = sql.Table(
- 'role_option',
- meta,
- sql.Column('role_id', sql.String(64), sql.ForeignKey(role_table.c.id,
- ondelete='CASCADE'), nullable=False, primary_key=True),
- sql.Column('option_id', sql.String(4), nullable=False,
- primary_key=True),
- sql.Column('option_value', ks_sql.JsonBlob, nullable=True),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
- project_resource_options_table = sql.Table(
- 'project_option',
- meta,
- sql.Column('project_id', sql.String(64),
- sql.ForeignKey(project_table.c.id, ondelete='CASCADE'),
- nullable=False, primary_key=True),
- sql.Column('option_id', sql.String(4), nullable=False,
- primary_key=True),
- sql.Column('option_value', ks_sql.JsonBlob, nullable=True),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- project_resource_options_table.create()
- role_resource_options_table.create()
diff --git a/keystone/common/sql/expand_repo/versions/067_placeholder.py b/keystone/common/sql/expand_repo/versions/067_placeholder.py
deleted file mode 100644
index 8522ef3ce..000000000
--- a/keystone/common/sql/expand_repo/versions/067_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Train backports. Do not use this number for new
-# Ussuri work. New Ussuri work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/expand_repo/versions/068_placeholder.py b/keystone/common/sql/expand_repo/versions/068_placeholder.py
deleted file mode 100644
index 8522ef3ce..000000000
--- a/keystone/common/sql/expand_repo/versions/068_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Train backports. Do not use this number for new
-# Ussuri work. New Ussuri work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/expand_repo/versions/069_placeholder.py b/keystone/common/sql/expand_repo/versions/069_placeholder.py
deleted file mode 100644
index 8522ef3ce..000000000
--- a/keystone/common/sql/expand_repo/versions/069_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Train backports. Do not use this number for new
-# Ussuri work. New Ussuri work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/expand_repo/versions/070_placeholder.py b/keystone/common/sql/expand_repo/versions/070_placeholder.py
deleted file mode 100644
index 8522ef3ce..000000000
--- a/keystone/common/sql/expand_repo/versions/070_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Train backports. Do not use this number for new
-# Ussuri work. New Ussuri work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/expand_repo/versions/071_placeholder.py b/keystone/common/sql/expand_repo/versions/071_placeholder.py
deleted file mode 100644
index 8522ef3ce..000000000
--- a/keystone/common/sql/expand_repo/versions/071_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Train backports. Do not use this number for new
-# Ussuri work. New Ussuri work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/expand_repo/versions/072_expand_drop_domain_id_fk.py b/keystone/common/sql/expand_repo/versions/072_expand_drop_domain_id_fk.py
deleted file mode 100644
index bb90c3de3..000000000
--- a/keystone/common/sql/expand_repo/versions/072_expand_drop_domain_id_fk.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# Copyright 2019 SUSE LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Train backports. Do not use this number for new
-# Ussuri work. New Ussuri work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/expand_repo/versions/073_expand_expiring_group_membership.py b/keystone/common/sql/expand_repo/versions/073_expand_expiring_group_membership.py
deleted file mode 100644
index 8577ee052..000000000
--- a/keystone/common/sql/expand_repo/versions/073_expand_expiring_group_membership.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sql
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- identity_provider = sql.Table('identity_provider', meta, autoload=True)
- authorization_ttl = sql.Column('authorization_ttl', sql.Integer,
- nullable=True)
- identity_provider.create_column(authorization_ttl)
-
- user_table = sql.Table('user', meta, autoload=True)
- group_table = sql.Table('group', meta, autoload=True)
- idp_table = sql.Table('identity_provider', meta, autoload=True)
-
- expiring_user_group_membership = sql.Table(
- 'expiring_user_group_membership', meta,
-
- sql.Column('user_id', sql.String(64),
- sql.ForeignKey(user_table.c.id), primary_key=True),
- sql.Column('group_id', sql.String(64),
- sql.ForeignKey(group_table.c.id), primary_key=True),
- sql.Column('idp_id',
- sql.String(64),
- sql.ForeignKey(idp_table.c.id,
- ondelete='CASCADE'),
- primary_key=True),
- sql.Column('last_verified', sql.DateTime(), nullable=False),
-
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- expiring_user_group_membership.create(migrate_engine, checkfirst=True)
diff --git a/keystone/common/sql/contract_repo/__init__.py b/keystone/common/sql/legacy_migrations/__init__.py
index e69de29bb..e69de29bb 100644
--- a/keystone/common/sql/contract_repo/__init__.py
+++ b/keystone/common/sql/legacy_migrations/__init__.py
diff --git a/keystone/common/sql/legacy_migrations/contract_repo/README.rst b/keystone/common/sql/legacy_migrations/contract_repo/README.rst
new file mode 100644
index 000000000..6ecb178cc
--- /dev/null
+++ b/keystone/common/sql/legacy_migrations/contract_repo/README.rst
@@ -0,0 +1,13 @@
+Contract repo migrations
+========================
+
+.. warning::
+
+ This repo is deprecated and will be removed in a future release. All new
+ migrations should be alembic-based and placed in
+ ``keystone/common/sql/migrations``.
+
+Contract-style or destructive migrations for the database.
+
+This is a database migration repository. More information at
+https://opendev.org/x/sqlalchemy-migrate
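
For readers who have not seen the alembic layout that the warning above points to, a minimal revision module under keystone/common/sql/migrations looks roughly like the sketch below. The revision identifiers and the table being created are hypothetical placeholders, not taken from keystone; the sketch is only meant to contrast the alembic op-based style with the sqlalchemy-migrate scripts being relocated in this change.

    """add example_widget table

    Revision ID: abc123def456
    Revises: 0123456789ab
    """
    from alembic import op
    import sqlalchemy as sa

    # Hypothetical revision identifiers, for illustration only.
    revision = 'abc123def456'
    down_revision = '0123456789ab'


    def upgrade():
        # Alembic migrations go through the 'op' facade instead of binding
        # a MetaData object to the engine as the legacy scripts below do.
        op.create_table(
            'example_widget',
            sa.Column('id', sa.String(64), primary_key=True),
            sa.Column('name', sa.String(255), nullable=False),
        )
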
diff --git a/keystone/common/sql/contract_repo/versions/__init__.py b/keystone/common/sql/legacy_migrations/contract_repo/__init__.py
index e69de29bb..e69de29bb 100644
--- a/keystone/common/sql/contract_repo/versions/__init__.py
+++ b/keystone/common/sql/legacy_migrations/contract_repo/__init__.py
diff --git a/keystone/common/sql/contract_repo/manage.py b/keystone/common/sql/legacy_migrations/contract_repo/manage.py
index 41cba1adb..41cba1adb 100644
--- a/keystone/common/sql/contract_repo/manage.py
+++ b/keystone/common/sql/legacy_migrations/contract_repo/manage.py
diff --git a/keystone/common/sql/contract_repo/migrate.cfg b/keystone/common/sql/legacy_migrations/contract_repo/migrate.cfg
index fd50aa546..fd50aa546 100644
--- a/keystone/common/sql/contract_repo/migrate.cfg
+++ b/keystone/common/sql/legacy_migrations/contract_repo/migrate.cfg
diff --git a/keystone/common/sql/contract_repo/versions/001_contract_initial_null_migration.py b/keystone/common/sql/legacy_migrations/contract_repo/versions/073_contract_initial_migration.py
index 1cd34e617..1cd34e617 100644
--- a/keystone/common/sql/contract_repo/versions/001_contract_initial_null_migration.py
+++ b/keystone/common/sql/legacy_migrations/contract_repo/versions/073_contract_initial_migration.py
diff --git a/keystone/common/sql/contract_repo/versions/074_placeholder.py b/keystone/common/sql/legacy_migrations/contract_repo/versions/074_placeholder.py
index 2b09cbc99..2b09cbc99 100644
--- a/keystone/common/sql/contract_repo/versions/074_placeholder.py
+++ b/keystone/common/sql/legacy_migrations/contract_repo/versions/074_placeholder.py
diff --git a/keystone/common/sql/contract_repo/versions/075_placeholder.py b/keystone/common/sql/legacy_migrations/contract_repo/versions/075_placeholder.py
index 2b09cbc99..2b09cbc99 100644
--- a/keystone/common/sql/contract_repo/versions/075_placeholder.py
+++ b/keystone/common/sql/legacy_migrations/contract_repo/versions/075_placeholder.py
diff --git a/keystone/common/sql/contract_repo/versions/076_placeholder.py b/keystone/common/sql/legacy_migrations/contract_repo/versions/076_placeholder.py
index 2b09cbc99..2b09cbc99 100644
--- a/keystone/common/sql/contract_repo/versions/076_placeholder.py
+++ b/keystone/common/sql/legacy_migrations/contract_repo/versions/076_placeholder.py
diff --git a/keystone/common/sql/contract_repo/versions/077_placeholder.py b/keystone/common/sql/legacy_migrations/contract_repo/versions/077_placeholder.py
index 2b09cbc99..2b09cbc99 100644
--- a/keystone/common/sql/contract_repo/versions/077_placeholder.py
+++ b/keystone/common/sql/legacy_migrations/contract_repo/versions/077_placeholder.py
diff --git a/keystone/common/sql/contract_repo/versions/078_placeholder.py b/keystone/common/sql/legacy_migrations/contract_repo/versions/078_placeholder.py
index 2b09cbc99..2b09cbc99 100644
--- a/keystone/common/sql/contract_repo/versions/078_placeholder.py
+++ b/keystone/common/sql/legacy_migrations/contract_repo/versions/078_placeholder.py
diff --git a/keystone/common/sql/data_migration_repo/versions/074_placeholder.py b/keystone/common/sql/legacy_migrations/contract_repo/versions/079_contract_update_local_id_limit.py
index 2b09cbc99..2b09cbc99 100644
--- a/keystone/common/sql/data_migration_repo/versions/074_placeholder.py
+++ b/keystone/common/sql/legacy_migrations/contract_repo/versions/079_contract_update_local_id_limit.py
diff --git a/keystone/common/sql/data_migration_repo/__init__.py b/keystone/common/sql/legacy_migrations/contract_repo/versions/__init__.py
index e69de29bb..e69de29bb 100644
--- a/keystone/common/sql/data_migration_repo/__init__.py
+++ b/keystone/common/sql/legacy_migrations/contract_repo/versions/__init__.py
diff --git a/keystone/common/sql/legacy_migrations/data_migration_repo/README.rst b/keystone/common/sql/legacy_migrations/data_migration_repo/README.rst
new file mode 100644
index 000000000..0b4202f9f
--- /dev/null
+++ b/keystone/common/sql/legacy_migrations/data_migration_repo/README.rst
@@ -0,0 +1,13 @@
+Data migration repo migrations
+==============================
+
+.. warning::
+
+ This repo is deprecated and will be removed in a future release. All new
+ migrations should be alembic-based and placed in
+ ``keystone/common/sql/migrations``.
+
+Data migrations for the database.
+
+This is a database migration repository. More information at
+https://opendev.org/x/sqlalchemy-migrate
diff --git a/keystone/common/sql/data_migration_repo/versions/__init__.py b/keystone/common/sql/legacy_migrations/data_migration_repo/__init__.py
index e69de29bb..e69de29bb 100644
--- a/keystone/common/sql/data_migration_repo/versions/__init__.py
+++ b/keystone/common/sql/legacy_migrations/data_migration_repo/__init__.py
diff --git a/keystone/common/sql/data_migration_repo/manage.py b/keystone/common/sql/legacy_migrations/data_migration_repo/manage.py
index 41cba1adb..41cba1adb 100644
--- a/keystone/common/sql/data_migration_repo/manage.py
+++ b/keystone/common/sql/legacy_migrations/data_migration_repo/manage.py
diff --git a/keystone/common/sql/data_migration_repo/migrate.cfg b/keystone/common/sql/legacy_migrations/data_migration_repo/migrate.cfg
index 97f8e1d0e..97f8e1d0e 100644
--- a/keystone/common/sql/data_migration_repo/migrate.cfg
+++ b/keystone/common/sql/legacy_migrations/data_migration_repo/migrate.cfg
diff --git a/keystone/common/sql/migrate_repo/versions/089_add_root_of_all_domains.py b/keystone/common/sql/legacy_migrations/data_migration_repo/versions/073_migrate_initial_migration.py
index 477c719a6..d05b151b8 100644
--- a/keystone/common/sql/migrate_repo/versions/089_add_root_of_all_domains.py
+++ b/keystone/common/sql/legacy_migrations/data_migration_repo/versions/073_migrate_initial_migration.py
@@ -10,11 +10,12 @@
# License for the specific language governing permissions and limitations
# under the License.
-import sqlalchemy as sql
+# A null initial migration to open this repo. Do not re-use or replace this
+# with a real migration; add additional ones in subsequent version scripts.
+import sqlalchemy as sql
+import sqlalchemy.orm
-_PROJECT_TABLE_NAME = 'project'
-_DOMAIN_TABLE_NAME = 'domain'
NULL_DOMAIN_ID = '<<keystone.domain.root>>'
@@ -37,39 +38,18 @@ def upgrade(migrate_engine):
'domain_id': NULL_DOMAIN_ID,
'is_domain': True,
'parent_id': None,
- 'extra': '{}'
+ 'extra': '{}',
}
return project_ref
- def _generate_root_domain():
- # Generate a similar root for the domain table, this is an interim
- # step so as to allow continuation of current project domain_id FK.
- #
- # This special domain is filtered out by the driver, so is never
- # visible to the manager or API.
-
- domain_ref = {
- 'id': NULL_DOMAIN_ID,
- 'name': NULL_DOMAIN_ID,
- 'enabled': False,
- 'extra': '{}'
- }
- return domain_ref
-
meta = sql.MetaData()
meta.bind = migrate_engine
session = sql.orm.sessionmaker(bind=migrate_engine)()
- project_table = sql.Table(_PROJECT_TABLE_NAME, meta, autoload=True)
- domain_table = sql.Table(_DOMAIN_TABLE_NAME, meta, autoload=True)
-
- root_domain = _generate_root_domain()
- new_entry = domain_table.insert().values(**root_domain)
- session.execute(new_entry)
- session.commit()
+ project = sql.Table('project', meta, autoload=True)
root_domain_project = _generate_root_domain_project()
- new_entry = project_table.insert().values(**root_domain_project)
+ new_entry = project.insert().values(**root_domain_project)
session.execute(new_entry)
session.commit()
diff --git a/keystone/common/sql/expand_repo/versions/074_placeholder.py b/keystone/common/sql/legacy_migrations/data_migration_repo/versions/074_placeholder.py
index 2b09cbc99..2b09cbc99 100644
--- a/keystone/common/sql/expand_repo/versions/074_placeholder.py
+++ b/keystone/common/sql/legacy_migrations/data_migration_repo/versions/074_placeholder.py
diff --git a/keystone/common/sql/data_migration_repo/versions/075_placeholder.py b/keystone/common/sql/legacy_migrations/data_migration_repo/versions/075_placeholder.py
index 2b09cbc99..2b09cbc99 100644
--- a/keystone/common/sql/data_migration_repo/versions/075_placeholder.py
+++ b/keystone/common/sql/legacy_migrations/data_migration_repo/versions/075_placeholder.py
diff --git a/keystone/common/sql/data_migration_repo/versions/076_placeholder.py b/keystone/common/sql/legacy_migrations/data_migration_repo/versions/076_placeholder.py
index 2b09cbc99..2b09cbc99 100644
--- a/keystone/common/sql/data_migration_repo/versions/076_placeholder.py
+++ b/keystone/common/sql/legacy_migrations/data_migration_repo/versions/076_placeholder.py
diff --git a/keystone/common/sql/data_migration_repo/versions/077_placeholder.py b/keystone/common/sql/legacy_migrations/data_migration_repo/versions/077_placeholder.py
index 2b09cbc99..2b09cbc99 100644
--- a/keystone/common/sql/data_migration_repo/versions/077_placeholder.py
+++ b/keystone/common/sql/legacy_migrations/data_migration_repo/versions/077_placeholder.py
diff --git a/keystone/common/sql/data_migration_repo/versions/078_placeholder.py b/keystone/common/sql/legacy_migrations/data_migration_repo/versions/078_placeholder.py
index 2b09cbc99..2b09cbc99 100644
--- a/keystone/common/sql/data_migration_repo/versions/078_placeholder.py
+++ b/keystone/common/sql/legacy_migrations/data_migration_repo/versions/078_placeholder.py
diff --git a/keystone/common/sql/expand_repo/versions/075_placeholder.py b/keystone/common/sql/legacy_migrations/data_migration_repo/versions/079_migrate_update_local_id_limit.py
index 2b09cbc99..2b09cbc99 100644
--- a/keystone/common/sql/expand_repo/versions/075_placeholder.py
+++ b/keystone/common/sql/legacy_migrations/data_migration_repo/versions/079_migrate_update_local_id_limit.py
diff --git a/keystone/common/sql/migrate_repo/__init__.py b/keystone/common/sql/legacy_migrations/data_migration_repo/versions/__init__.py
index e69de29bb..e69de29bb 100644
--- a/keystone/common/sql/migrate_repo/__init__.py
+++ b/keystone/common/sql/legacy_migrations/data_migration_repo/versions/__init__.py
diff --git a/keystone/common/sql/legacy_migrations/expand_repo/README.rst b/keystone/common/sql/legacy_migrations/expand_repo/README.rst
new file mode 100644
index 000000000..6019e2d6c
--- /dev/null
+++ b/keystone/common/sql/legacy_migrations/expand_repo/README.rst
@@ -0,0 +1,13 @@
+Expand repo migrations
+======================
+
+.. warning::
+
+ This repo is deprecated and will be removed in a future release. All new
+ migrations should be alembic-based and placed in
+ ``keystone/common/sql/migrations``.
+
+Expand-style or additive migrations for the database.
+
+This is a database migration repository. More information at
+https://opendev.org/x/sqlalchemy-migrate
diff --git a/keystone/common/sql/expand_repo/__init__.py b/keystone/common/sql/legacy_migrations/expand_repo/__init__.py
index 84e0fb83b..84e0fb83b 100644
--- a/keystone/common/sql/expand_repo/__init__.py
+++ b/keystone/common/sql/legacy_migrations/expand_repo/__init__.py
diff --git a/keystone/common/sql/expand_repo/manage.py b/keystone/common/sql/legacy_migrations/expand_repo/manage.py
index 41cba1adb..41cba1adb 100644
--- a/keystone/common/sql/expand_repo/manage.py
+++ b/keystone/common/sql/legacy_migrations/expand_repo/manage.py
diff --git a/keystone/common/sql/expand_repo/migrate.cfg b/keystone/common/sql/legacy_migrations/expand_repo/migrate.cfg
index 74a33e330..74a33e330 100644
--- a/keystone/common/sql/expand_repo/migrate.cfg
+++ b/keystone/common/sql/legacy_migrations/expand_repo/migrate.cfg
diff --git a/keystone/common/sql/legacy_migrations/expand_repo/versions/073_expand_initial_migration.py b/keystone/common/sql/legacy_migrations/expand_repo/versions/073_expand_initial_migration.py
new file mode 100644
index 000000000..00efa6ee1
--- /dev/null
+++ b/keystone/common/sql/legacy_migrations/expand_repo/versions/073_expand_initial_migration.py
@@ -0,0 +1,1183 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+import textwrap
+
+import migrate
+from oslo_log import log
+import sqlalchemy as sql
+
+from keystone.assignment.backends import sql as assignment_sql
+from keystone.common import sql as ks_sql
+import keystone.conf
+from keystone.identity.mapping_backends import mapping as mapping_backend
+
+CONF = keystone.conf.CONF
+LOG = log.getLogger(__name__)
+
+# FIXME(stephenfin): Remove this as soon as we're done reworking the
+# migrations. Until then, this is necessary to allow us to use the native
+# sqlalchemy-migrate tooling (which won't register opts). Alternatively, maybe
+# the server default *shouldn't* rely on a (changeable) config option value?
+try:
+ service_provider_relay_state_prefix_default = CONF.saml.relay_state_prefix
+except Exception:
+ service_provider_relay_state_prefix_default = 'ss:mem:'
+
+
+def upgrade(migrate_engine):
+ meta = sql.MetaData()
+ meta.bind = migrate_engine
+
+ if migrate_engine.name == 'mysql':
+ # In Folsom we explicitly converted migrate_version to UTF8.
+ migrate_engine.execute(
+ 'ALTER TABLE migrate_version CONVERT TO CHARACTER SET utf8'
+ )
+ # Set default DB charset to UTF8.
+ migrate_engine.execute(
+ 'ALTER DATABASE %s DEFAULT CHARACTER SET utf8'
+ % migrate_engine.url.database
+ )
+
+ application_credential = sql.Table(
+ 'application_credential',
+ meta,
+ sql.Column(
+ 'internal_id', sql.Integer, primary_key=True, nullable=False
+ ),
+ sql.Column('id', sql.String(length=64), nullable=False),
+ sql.Column('name', sql.String(length=255), nullable=False),
+ sql.Column('secret_hash', sql.String(length=255), nullable=False),
+ sql.Column('description', sql.Text),
+ sql.Column('user_id', sql.String(length=64), nullable=False),
+ sql.Column('project_id', sql.String(64), nullable=True),
+ sql.Column('expires_at', ks_sql.DateTimeInt()),
+ sql.Column('system', sql.String(64), nullable=True),
+ sql.Column('unrestricted', sql.Boolean),
+ sql.UniqueConstraint(
+ 'user_id', 'name', name='duplicate_app_cred_constraint'
+ ),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ assignment = sql.Table(
+ 'assignment',
+ meta,
+ sql.Column(
+ 'type',
+ sql.Enum(
+ assignment_sql.AssignmentType.USER_PROJECT,
+ assignment_sql.AssignmentType.GROUP_PROJECT,
+ assignment_sql.AssignmentType.USER_DOMAIN,
+ assignment_sql.AssignmentType.GROUP_DOMAIN,
+ name='type',
+ ),
+ nullable=False,
+ ),
+ sql.Column('actor_id', sql.String(64), nullable=False),
+ sql.Column('target_id', sql.String(64), nullable=False),
+ sql.Column('role_id', sql.String(64), nullable=False),
+ sql.Column('inherited', sql.Boolean, default=False, nullable=False),
+ sql.PrimaryKeyConstraint(
+ 'type',
+ 'actor_id',
+ 'target_id',
+ 'role_id',
+ 'inherited',
+ ),
+ sql.Index('ix_actor_id', 'actor_id'),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ access_rule = sql.Table(
+ 'access_rule',
+ meta,
+ sql.Column('id', sql.Integer, primary_key=True, nullable=False),
+ sql.Column('service', sql.String(64)),
+ sql.Column('path', sql.String(128)),
+ sql.Column('method', sql.String(16)),
+ sql.Column('external_id', sql.String(64)),
+ sql.Column('user_id', sql.String(64)),
+ sql.UniqueConstraint(
+ 'external_id',
+ name='access_rule_external_id_key',
+ ),
+ sql.UniqueConstraint(
+ 'user_id',
+ 'service',
+ 'path',
+ 'method',
+ name='duplicate_access_rule_for_user_constraint',
+ ),
+ sql.Index('user_id', 'user_id'),
+ sql.Index('external_id', 'external_id'),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ config_register = sql.Table(
+ 'config_register',
+ meta,
+ sql.Column('type', sql.String(64), primary_key=True),
+ sql.Column('domain_id', sql.String(64), nullable=False),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ consumer = sql.Table(
+ 'consumer',
+ meta,
+ sql.Column('id', sql.String(64), primary_key=True, nullable=False),
+ sql.Column('description', sql.String(64), nullable=True),
+ sql.Column('secret', sql.String(64), nullable=False),
+ sql.Column('extra', sql.Text(), nullable=False),
+ )
+
+ credential = sql.Table(
+ 'credential',
+ meta,
+ sql.Column('id', sql.String(length=64), primary_key=True),
+ sql.Column('user_id', sql.String(length=64), nullable=False),
+ sql.Column('project_id', sql.String(length=64)),
+ sql.Column('type', sql.String(length=255), nullable=False),
+ sql.Column('extra', ks_sql.JsonBlob.impl),
+ sql.Column('key_hash', sql.String(64), nullable=False),
+ sql.Column(
+ 'encrypted_blob',
+ ks_sql.Text,
+ nullable=False,
+ ),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ group = sql.Table(
+ 'group',
+ meta,
+ sql.Column('id', sql.String(length=64), primary_key=True),
+ sql.Column('domain_id', sql.String(length=64), nullable=False),
+ sql.Column('name', sql.String(length=64), nullable=False),
+ sql.Column('description', sql.Text),
+ sql.Column('extra', ks_sql.JsonBlob.impl),
+ migrate.UniqueConstraint(
+ 'domain_id',
+ 'name',
+ name='ixu_group_name_domain_id',
+ ),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ id_mapping = sql.Table(
+ 'id_mapping',
+ meta,
+ sql.Column('public_id', sql.String(64), primary_key=True),
+ sql.Column('domain_id', sql.String(64), nullable=False),
+ sql.Column('local_id', sql.String(64), nullable=False),
+ sql.Column(
+ 'entity_type',
+ sql.Enum(
+ mapping_backend.EntityType.USER,
+ mapping_backend.EntityType.GROUP,
+ name='entity_type',
+ ),
+ nullable=False,
+ ),
+ migrate.UniqueConstraint(
+ 'domain_id',
+ 'local_id',
+ 'entity_type',
+ name='domain_id',
+ ),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ identity_provider = sql.Table(
+ 'identity_provider',
+ meta,
+ sql.Column('id', sql.String(64), primary_key=True),
+ sql.Column('enabled', sql.Boolean, nullable=False),
+ sql.Column('description', sql.Text(), nullable=True),
+ sql.Column('domain_id', sql.String(64), nullable=False),
+ sql.Column('authorization_ttl', sql.Integer, nullable=True),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ idp_remote_ids = sql.Table(
+ 'idp_remote_ids',
+ meta,
+ sql.Column(
+ 'idp_id',
+ sql.String(64),
+ sql.ForeignKey(identity_provider.c.id, ondelete='CASCADE'),
+ ),
+ sql.Column('remote_id', sql.String(255), primary_key=True),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ mapping = sql.Table(
+ 'mapping',
+ meta,
+ sql.Column('id', sql.String(64), primary_key=True),
+ sql.Column('rules', sql.Text(), nullable=False),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ policy = sql.Table(
+ 'policy',
+ meta,
+ sql.Column('id', sql.String(length=64), primary_key=True),
+ sql.Column('type', sql.String(length=255), nullable=False),
+ sql.Column('blob', ks_sql.JsonBlob, nullable=False),
+ sql.Column('extra', ks_sql.JsonBlob.impl),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ policy_association = sql.Table(
+ 'policy_association',
+ meta,
+ sql.Column('id', sql.String(64), primary_key=True),
+ sql.Column('policy_id', sql.String(64), nullable=False),
+ sql.Column('endpoint_id', sql.String(64), nullable=True),
+ sql.Column('service_id', sql.String(64), nullable=True),
+ sql.Column('region_id', sql.String(64), nullable=True),
+ sql.UniqueConstraint('endpoint_id', 'service_id', 'region_id'),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ project = sql.Table(
+ 'project',
+ meta,
+ sql.Column('id', sql.String(length=64), primary_key=True),
+ sql.Column('name', sql.String(length=64), nullable=False),
+ sql.Column('extra', ks_sql.JsonBlob.impl),
+ sql.Column('description', sql.Text),
+ sql.Column('enabled', sql.Boolean),
+ sql.Column(
+ 'domain_id',
+ sql.String(length=64),
+ sql.ForeignKey(
+ 'project.id',
+ name='project_domain_id_fkey',
+ ),
+ nullable=False,
+ ),
+ sql.Column(
+ 'parent_id',
+ sql.String(64),
+ sql.ForeignKey(
+ 'project.id',
+ name='project_parent_id_fkey',
+ ),
+ nullable=True,
+ ),
+ sql.Column(
+ 'is_domain',
+ sql.Boolean,
+ nullable=False,
+ server_default='0',
+ default=False,
+ ),
+ migrate.UniqueConstraint(
+ 'domain_id',
+ 'name',
+ name='ixu_project_name_domain_id',
+ ),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ project_endpoint = sql.Table(
+ 'project_endpoint',
+ meta,
+ sql.Column(
+ 'endpoint_id', sql.String(64), primary_key=True, nullable=False
+ ),
+ sql.Column(
+ 'project_id', sql.String(64), primary_key=True, nullable=False
+ ),
+ )
+
+ project_option = sql.Table(
+ 'project_option',
+ meta,
+ sql.Column(
+ 'project_id',
+ sql.String(64),
+ sql.ForeignKey(project.c.id, ondelete='CASCADE'),
+ nullable=False,
+ primary_key=True,
+ ),
+ sql.Column(
+ 'option_id', sql.String(4), nullable=False, primary_key=True
+ ),
+ sql.Column('option_value', ks_sql.JsonBlob, nullable=True),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ # NOTE(lamt) To allow tag name to be case sensitive for MySQL, the 'name'
+ # column needs to use collation, which is incompatible with Postgresql.
+ # Using unicode to mirror nova's server tag:
+ # https://github.com/openstack/nova/blob/master/nova/db/sqlalchemy/models.py
+ project_tag = sql.Table(
+ 'project_tag',
+ meta,
+ sql.Column(
+ 'project_id',
+ sql.String(64),
+ sql.ForeignKey(project.c.id, ondelete='CASCADE'),
+ nullable=False,
+ primary_key=True,
+ ),
+ sql.Column('name', sql.Unicode(255), nullable=False, primary_key=True),
+ sql.UniqueConstraint('project_id', 'name'),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ region = sql.Table(
+ 'region',
+ meta,
+ sql.Column('id', sql.String(255), primary_key=True),
+ sql.Column('description', sql.String(255), nullable=False),
+ sql.Column('parent_region_id', sql.String(255), nullable=True),
+ sql.Column('extra', sql.Text()),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ registered_limit = sql.Table(
+ 'registered_limit',
+ meta,
+ sql.Column('id', sql.String(length=64), nullable=False),
+ sql.Column('service_id', sql.String(255)),
+ sql.Column('region_id', sql.String(64), nullable=True),
+ sql.Column('resource_name', sql.String(255)),
+ sql.Column('default_limit', sql.Integer, nullable=False),
+ sql.Column('description', sql.Text),
+ sql.Column('internal_id', sql.Integer, primary_key=True),
+ # NOTE(stephenfin): Name chosen to preserve backwards compatibility
+ # with names used for primary key unique constraints
+ sql.UniqueConstraint('id', name='registered_limit_id_key'),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ request_token = sql.Table(
+ 'request_token',
+ meta,
+ sql.Column('id', sql.String(64), primary_key=True, nullable=False),
+ sql.Column('request_secret', sql.String(64), nullable=False),
+ sql.Column('verifier', sql.String(64), nullable=True),
+ sql.Column('authorizing_user_id', sql.String(64), nullable=True),
+ sql.Column('requested_project_id', sql.String(64), nullable=False),
+ sql.Column('role_ids', sql.Text(), nullable=True),
+ sql.Column(
+ 'consumer_id',
+ sql.String(64),
+ sql.ForeignKey(consumer.c.id),
+ nullable=False,
+ index=True,
+ ),
+ sql.Column('expires_at', sql.String(64), nullable=True),
+ )
+
+ revocation_event = sql.Table(
+ 'revocation_event',
+ meta,
+ sql.Column('id', sql.Integer, primary_key=True),
+ sql.Column('domain_id', sql.String(64)),
+ sql.Column('project_id', sql.String(64)),
+ sql.Column('user_id', sql.String(64)),
+ sql.Column('role_id', sql.String(64)),
+ sql.Column('trust_id', sql.String(64)),
+ sql.Column('consumer_id', sql.String(64)),
+ sql.Column('access_token_id', sql.String(64)),
+ sql.Column('issued_before', sql.DateTime(), nullable=False),
+ sql.Column('expires_at', sql.DateTime()),
+ sql.Column('revoked_at', sql.DateTime(), nullable=False),
+ sql.Column('audit_id', sql.String(32), nullable=True),
+ sql.Column('audit_chain_id', sql.String(32), nullable=True),
+ # NOTE(stephenfin): The '_new' suffix here is due to migration 095,
+ # which changed the 'id' column from String(64) to Integer. It did this
+ # by creating a 'revocation_event_new' table and populating it with
+ # data from the 'revocation_event' table before deleting the
+ # 'revocation_event' table and renaming the 'revocation_event_new'
+ # table to 'revocation_event'. Because the 'revoked_at' column had
+ # 'index=True', sqlalchemy automatically generated the index name as
+        # 'ix_{table}_{column}'. However, when initially created, '{table}'
+ # was 'revocation_event_new' so the index got that name. We may wish to
+ # rename this eventually.
+ sql.Index('ix_revocation_event_new_revoked_at', 'revoked_at'),
+ sql.Index('ix_revocation_event_issued_before', 'issued_before'),
+ sql.Index(
+ 'ix_revocation_event_project_id_issued_before',
+ 'project_id',
+ 'issued_before',
+ ),
+ sql.Index(
+ 'ix_revocation_event_user_id_issued_before',
+ 'user_id',
+ 'issued_before',
+ ),
+ sql.Index(
+ 'ix_revocation_event_audit_id_issued_before',
+ 'audit_id',
+ 'issued_before',
+ ),
+ )
+
+ role = sql.Table(
+ 'role',
+ meta,
+ sql.Column('id', sql.String(length=64), primary_key=True),
+ sql.Column('name', sql.String(length=255), nullable=False),
+ sql.Column('extra', ks_sql.JsonBlob.impl),
+ sql.Column(
+ 'domain_id',
+ sql.String(64),
+ nullable=False,
+ server_default='<<null>>',
+ ),
+ sql.Column('description', sql.String(255), nullable=True),
+ migrate.UniqueConstraint(
+ 'name',
+ 'domain_id',
+ name='ixu_role_name_domain_id',
+ ),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ role_option = sql.Table(
+ 'role_option',
+ meta,
+ sql.Column(
+ 'role_id',
+ sql.String(64),
+ sql.ForeignKey(role.c.id, ondelete='CASCADE'),
+ nullable=False,
+ primary_key=True,
+ ),
+ sql.Column(
+ 'option_id', sql.String(4), nullable=False, primary_key=True
+ ),
+ sql.Column('option_value', ks_sql.JsonBlob, nullable=True),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ sensitive_config = sql.Table(
+ 'sensitive_config',
+ meta,
+ sql.Column('domain_id', sql.String(64), primary_key=True),
+ sql.Column('group', sql.String(255), primary_key=True),
+ sql.Column('option', sql.String(255), primary_key=True),
+ sql.Column('value', ks_sql.JsonBlob.impl, nullable=False),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ service = sql.Table(
+ 'service',
+ meta,
+ sql.Column('id', sql.String(length=64), primary_key=True),
+ sql.Column('type', sql.String(length=255)),
+ sql.Column(
+ 'enabled',
+ sql.Boolean,
+ nullable=False,
+ default=True,
+ server_default='1',
+ ),
+ sql.Column('extra', ks_sql.JsonBlob.impl),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ service_provider = sql.Table(
+ 'service_provider',
+ meta,
+ sql.Column('auth_url', sql.String(256), nullable=False),
+ sql.Column('id', sql.String(64), primary_key=True),
+ sql.Column('enabled', sql.Boolean, nullable=False),
+ sql.Column('description', sql.Text(), nullable=True),
+ sql.Column('sp_url', sql.String(256), nullable=False),
+ sql.Column(
+ 'relay_state_prefix',
+ sql.String(256),
+ nullable=False,
+ server_default=service_provider_relay_state_prefix_default,
+ ),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ system_assignment = sql.Table(
+ 'system_assignment',
+ meta,
+ sql.Column('type', sql.String(64), nullable=False),
+ sql.Column('actor_id', sql.String(64), nullable=False),
+ sql.Column('target_id', sql.String(64), nullable=False),
+ sql.Column('role_id', sql.String(64), nullable=False),
+ sql.Column('inherited', sql.Boolean, default=False, nullable=False),
+ sql.PrimaryKeyConstraint(
+ 'type', 'actor_id', 'target_id', 'role_id', 'inherited'
+ ),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ token = sql.Table(
+ 'token',
+ meta,
+ sql.Column('id', sql.String(length=64), primary_key=True),
+ sql.Column('expires', sql.DateTime, default=None),
+ sql.Column('extra', ks_sql.JsonBlob.impl),
+ sql.Column('valid', sql.Boolean, default=True, nullable=False),
+ sql.Column('trust_id', sql.String(length=64)),
+ sql.Column('user_id', sql.String(length=64)),
+ sql.Index('ix_token_expires', 'expires'),
+ sql.Index('ix_token_expires_valid', 'expires', 'valid'),
+ sql.Index('ix_token_user_id', 'user_id'),
+ sql.Index('ix_token_trust_id', 'trust_id'),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ trust = sql.Table(
+ 'trust',
+ meta,
+ sql.Column('id', sql.String(length=64), primary_key=True),
+ sql.Column('trustor_user_id', sql.String(length=64), nullable=False),
+ sql.Column('trustee_user_id', sql.String(length=64), nullable=False),
+ sql.Column('project_id', sql.String(length=64)),
+ sql.Column('impersonation', sql.Boolean, nullable=False),
+ sql.Column('deleted_at', sql.DateTime),
+ sql.Column('expires_at', sql.DateTime),
+ sql.Column('remaining_uses', sql.Integer, nullable=True),
+ sql.Column('extra', ks_sql.JsonBlob.impl),
+ sql.Column('expires_at_int', ks_sql.DateTimeInt()),
+ sql.UniqueConstraint(
+ 'trustor_user_id',
+ 'trustee_user_id',
+ 'project_id',
+ 'impersonation',
+ 'expires_at',
+ 'expires_at_int',
+ name='duplicate_trust_constraint_expanded',
+ ),
+ sql.Column(
+ 'redelegated_trust_id',
+ sql.String(64),
+ nullable=True,
+ ),
+ sql.Column(
+ 'redelegation_count',
+ sql.Integer,
+ nullable=True,
+ ),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ trust_role = sql.Table(
+ 'trust_role',
+ meta,
+ sql.Column(
+ 'trust_id', sql.String(length=64), primary_key=True, nullable=False
+ ),
+ sql.Column(
+ 'role_id', sql.String(length=64), primary_key=True, nullable=False
+ ),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ user = sql.Table(
+ 'user',
+ meta,
+ sql.Column('id', sql.String(length=64), primary_key=True),
+ sql.Column('extra', ks_sql.JsonBlob.impl),
+ sql.Column('enabled', sql.Boolean),
+ sql.Column('default_project_id', sql.String(length=64)),
+ sql.Column('created_at', sql.DateTime(), nullable=True),
+ sql.Column('last_active_at', sql.Date(), nullable=True),
+ sql.Column('domain_id', sql.String(64), nullable=False),
+ sql.UniqueConstraint('id', 'domain_id', name='ixu_user_id_domain_id'),
+ sql.Index('ix_default_project_id', 'default_project_id'),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ user_group_membership = sql.Table(
+ 'user_group_membership',
+ meta,
+ sql.Column(
+ 'user_id',
+ sql.String(length=64),
+ sql.ForeignKey(
+ user.c.id,
+ name='fk_user_group_membership_user_id',
+ ),
+ primary_key=True,
+ ),
+ sql.Column(
+ 'group_id',
+ sql.String(length=64),
+ sql.ForeignKey(
+ group.c.id,
+ name='fk_user_group_membership_group_id',
+ ),
+ primary_key=True,
+ ),
+ # NOTE(stevemar): The index was named 'group_id' in
+ # 050_fk_consistent_indexes.py and needs to be preserved
+ sql.Index('group_id', 'group_id'),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ user_option = sql.Table(
+ 'user_option',
+ meta,
+ sql.Column(
+ 'user_id',
+ sql.String(64),
+ sql.ForeignKey(user.c.id, ondelete='CASCADE'),
+ nullable=False,
+ primary_key=True,
+ ),
+ sql.Column(
+ 'option_id', sql.String(4), nullable=False, primary_key=True
+ ),
+ sql.Column('option_value', ks_sql.JsonBlob, nullable=True),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ whitelisted_config = sql.Table(
+ 'whitelisted_config',
+ meta,
+ sql.Column('domain_id', sql.String(64), primary_key=True),
+ sql.Column('group', sql.String(255), primary_key=True),
+ sql.Column('option', sql.String(255), primary_key=True),
+ sql.Column('value', ks_sql.JsonBlob.impl, nullable=False),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ access_token = sql.Table(
+ 'access_token',
+ meta,
+ sql.Column('id', sql.String(64), primary_key=True, nullable=False),
+ sql.Column('access_secret', sql.String(64), nullable=False),
+ sql.Column(
+ 'authorizing_user_id', sql.String(64), nullable=False, index=True
+ ),
+ sql.Column('project_id', sql.String(64), nullable=False),
+ sql.Column('role_ids', sql.Text(), nullable=False),
+ sql.Column(
+ 'consumer_id',
+ sql.String(64),
+ sql.ForeignKey(consumer.c.id),
+ nullable=False,
+ index=True,
+ ),
+ sql.Column('expires_at', sql.String(64), nullable=True),
+ )
+
+ application_credential_role = sql.Table(
+ 'application_credential_role',
+ meta,
+ sql.Column(
+ 'application_credential_id',
+ sql.Integer,
+ sql.ForeignKey(
+ application_credential.c.internal_id, ondelete='CASCADE'
+ ),
+ primary_key=True,
+ nullable=False,
+ ),
+ sql.Column(
+ 'role_id', sql.String(length=64), primary_key=True, nullable=False
+ ),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ application_credential_access_rule = sql.Table(
+ 'application_credential_access_rule',
+ meta,
+ sql.Column(
+ 'application_credential_id',
+ sql.Integer,
+ sql.ForeignKey(
+ application_credential.c.internal_id, ondelete='CASCADE'
+ ),
+ primary_key=True,
+ nullable=False,
+ ),
+ sql.Column(
+ 'access_rule_id',
+ sql.Integer,
+ sql.ForeignKey(access_rule.c.id, ondelete='CASCADE'),
+ primary_key=True,
+ nullable=False,
+ ),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ endpoint = sql.Table(
+ 'endpoint',
+ meta,
+ sql.Column('id', sql.String(length=64), primary_key=True),
+ sql.Column('legacy_endpoint_id', sql.String(length=64)),
+ sql.Column('interface', sql.String(length=8), nullable=False),
+ sql.Column(
+ 'service_id',
+ sql.String(length=64),
+ sql.ForeignKey(
+ service.c.id,
+ name='endpoint_service_id_fkey',
+ ),
+ nullable=False,
+ ),
+ sql.Column('url', sql.Text, nullable=False),
+ sql.Column('extra', ks_sql.JsonBlob.impl),
+ sql.Column(
+ 'enabled',
+ sql.Boolean,
+ nullable=False,
+ default=True,
+ server_default='1',
+ ),
+ sql.Column(
+ 'region_id',
+ sql.String(length=255),
+ sql.ForeignKey(
+ region.c.id,
+ name='fk_endpoint_region_id',
+ ),
+ nullable=True,
+ ),
+ # NOTE(stevemar): The index was named 'service_id' in
+ # 050_fk_consistent_indexes.py and needs to be preserved
+ sql.Index('service_id', 'service_id'),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ endpoint_group = sql.Table(
+ 'endpoint_group',
+ meta,
+ sql.Column('id', sql.String(64), primary_key=True),
+ sql.Column('name', sql.String(255), nullable=False),
+ sql.Column('description', sql.Text, nullable=True),
+ sql.Column('filters', sql.Text(), nullable=False),
+ )
+
+ expiring_user_group_membership = sql.Table(
+ 'expiring_user_group_membership',
+ meta,
+ sql.Column(
+ 'user_id',
+ sql.String(64),
+ sql.ForeignKey(user.c.id),
+ primary_key=True,
+ ),
+ sql.Column(
+ 'group_id',
+ sql.String(64),
+ sql.ForeignKey(group.c.id),
+ primary_key=True,
+ ),
+ sql.Column(
+ 'idp_id',
+ sql.String(64),
+ sql.ForeignKey(identity_provider.c.id, ondelete='CASCADE'),
+ primary_key=True,
+ ),
+ sql.Column('last_verified', sql.DateTime(), nullable=False),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ federation_protocol = sql.Table(
+ 'federation_protocol',
+ meta,
+ sql.Column('id', sql.String(64), primary_key=True),
+ sql.Column(
+ 'idp_id',
+ sql.String(64),
+ sql.ForeignKey(identity_provider.c.id, ondelete='CASCADE'),
+ primary_key=True,
+ ),
+ sql.Column('mapping_id', sql.String(64), nullable=False),
+ sql.Column('remote_id_attribute', sql.String(64)),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ implied_role = sql.Table(
+ 'implied_role',
+ meta,
+ sql.Column(
+ 'prior_role_id',
+ sql.String(length=64),
+ sql.ForeignKey(
+ role.c.id,
+ name='implied_role_prior_role_id_fkey',
+ ondelete='CASCADE',
+ ),
+ primary_key=True,
+ ),
+ sql.Column(
+ 'implied_role_id',
+ sql.String(length=64),
+ sql.ForeignKey(
+ role.c.id,
+ name='implied_role_implied_role_id_fkey',
+ ondelete='CASCADE',
+ ),
+ primary_key=True,
+ ),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ limit = sql.Table(
+ 'limit',
+ meta,
+ sql.Column('id', sql.String(length=64), nullable=False),
+ sql.Column('project_id', sql.String(64), nullable=True),
+ sql.Column('resource_limit', sql.Integer, nullable=False),
+ sql.Column('description', sql.Text),
+ sql.Column('internal_id', sql.Integer, primary_key=True),
+ # FIXME(stephenfin): This should have a foreign key constraint on
+ # registered_limit.id, but sqlalchemy-migrate clearly didn't handle
+ # creating a column with embedded FK info as was attempted in 048
+ sql.Column(
+ 'registered_limit_id',
+ sql.String(64),
+ ),
+ sql.Column('domain_id', sql.String(64), nullable=True),
+ # NOTE(stephenfin): Name chosen to preserve backwards compatibility
+ # with names used for primary key unique constraints
+ sql.UniqueConstraint('id', name='limit_id_key'),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ local_user = sql.Table(
+ 'local_user',
+ meta,
+ sql.Column('id', sql.Integer, primary_key=True, nullable=False),
+ sql.Column(
+ 'user_id',
+ sql.String(64),
+ nullable=False,
+ unique=True,
+ ),
+ sql.Column('domain_id', sql.String(64), nullable=False),
+ sql.Column('name', sql.String(255), nullable=False),
+ sql.Column('failed_auth_count', sql.Integer, nullable=True),
+ sql.Column('failed_auth_at', sql.DateTime(), nullable=True),
+ sql.ForeignKeyConstraint(
+ ['user_id', 'domain_id'],
+ [user.c.id, user.c.domain_id],
+ name='local_user_user_id_fkey',
+ onupdate='CASCADE',
+ ondelete='CASCADE',
+ ),
+ sql.UniqueConstraint('domain_id', 'name'),
+ )
+
+ nonlocal_user = sql.Table(
+ 'nonlocal_user',
+ meta,
+ sql.Column('domain_id', sql.String(64), primary_key=True),
+ sql.Column('name', sql.String(255), primary_key=True),
+ sql.Column(
+ 'user_id',
+ sql.String(64),
+ nullable=False,
+ ),
+ sql.ForeignKeyConstraint(
+ ['user_id', 'domain_id'],
+ [user.c.id, user.c.domain_id],
+ name='nonlocal_user_user_id_fkey',
+ onupdate='CASCADE',
+ ondelete='CASCADE',
+ ),
+ sql.UniqueConstraint('user_id', name='ixu_nonlocal_user_user_id'),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ password = sql.Table(
+ 'password',
+ meta,
+ sql.Column('id', sql.Integer, primary_key=True, nullable=False),
+ sql.Column(
+ 'local_user_id',
+ sql.Integer,
+ sql.ForeignKey(local_user.c.id, ondelete='CASCADE'),
+ nullable=False,
+ ),
+ sql.Column('expires_at', sql.DateTime(), nullable=True),
+ sql.Column(
+ 'self_service',
+ sql.Boolean,
+ nullable=False,
+ server_default='0',
+ default=False,
+ ),
+        # NOTE(notmorgan): To support the full range of scrypt and pbkdf2
+ # password hash lengths, this should be closer to varchar(1500) instead
+ # of varchar(255).
+ sql.Column('password_hash', sql.String(255), nullable=True),
+ sql.Column(
+ 'created_at_int',
+ ks_sql.DateTimeInt(),
+ nullable=False,
+ default=0,
+ server_default='0',
+ ),
+ sql.Column('expires_at_int', ks_sql.DateTimeInt(), nullable=True),
+ sql.Column(
+ 'created_at',
+ sql.DateTime(),
+ nullable=False,
+ default=datetime.datetime.utcnow,
+ ),
+ )
+
+ project_endpoint_group = sql.Table(
+ 'project_endpoint_group',
+ meta,
+ sql.Column(
+ 'endpoint_group_id',
+ sql.String(64),
+ sql.ForeignKey(endpoint_group.c.id),
+ nullable=False,
+ ),
+ sql.Column('project_id', sql.String(64), nullable=False),
+ sql.PrimaryKeyConstraint('endpoint_group_id', 'project_id'),
+ )
+
+ federated_user = sql.Table(
+ 'federated_user',
+ meta,
+ sql.Column('id', sql.Integer, primary_key=True, nullable=False),
+ sql.Column(
+ 'user_id',
+ sql.String(64),
+ sql.ForeignKey(user.c.id, ondelete='CASCADE'),
+ nullable=False,
+ ),
+ sql.Column(
+ 'idp_id',
+ sql.String(64),
+ sql.ForeignKey(identity_provider.c.id, ondelete='CASCADE'),
+ nullable=False,
+ ),
+ sql.Column('protocol_id', sql.String(64), nullable=False),
+ sql.Column('unique_id', sql.String(255), nullable=False),
+ sql.Column('display_name', sql.String(255), nullable=True),
+ sql.ForeignKeyConstraint(
+ ['protocol_id', 'idp_id'],
+ [federation_protocol.c.id, federation_protocol.c.idp_id],
+ name='federated_user_protocol_id_fkey',
+ ondelete='CASCADE',
+ ),
+ sql.UniqueConstraint('idp_id', 'protocol_id', 'unique_id'),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ # create all tables
+ tables = [
+ access_rule,
+ application_credential,
+ assignment,
+ config_register,
+ consumer,
+ credential,
+ group,
+ id_mapping,
+ identity_provider,
+ idp_remote_ids,
+ mapping,
+ policy,
+ policy_association,
+ project,
+ project_endpoint,
+ project_option,
+ project_tag,
+ region,
+ registered_limit,
+ request_token,
+ revocation_event,
+ role,
+ role_option,
+ sensitive_config,
+ service,
+ service_provider,
+ system_assignment,
+ token,
+ trust,
+ trust_role,
+ user,
+ user_group_membership,
+ user_option,
+ whitelisted_config,
+
+ access_token,
+ application_credential_access_rule,
+ application_credential_role,
+ endpoint,
+ endpoint_group,
+ expiring_user_group_membership,
+ federation_protocol,
+ implied_role,
+ limit,
+ local_user,
+ nonlocal_user,
+ password,
+ project_endpoint_group,
+
+ federated_user,
+ ]
+
+ for table in tables:
+ try:
+ table.create()
+ except Exception:
+ LOG.exception('Exception while creating table: %r', table)
+ raise
+
+ fkeys = []
+
+ if migrate_engine.name == 'sqlite':
+ # NOTE(stevemar): We need to keep this FK constraint due to 073, but
+ # only for sqlite, once we collapse 073 we can remove this constraint
+ fkeys.append(
+ {
+ 'columns': [assignment.c.role_id],
+ 'references': [role.c.id],
+ 'name': 'fk_assignment_role_id',
+ },
+ )
+
+ for fkey in fkeys:
+ migrate.ForeignKeyConstraint(
+ columns=fkey['columns'],
+ refcolumns=fkey['references'],
+ name=fkey.get('name'),
+ ondelete=fkey.get('ondelete'),
+ onupdate=fkey.get('onupdate'),
+ ).create()
+
+ # TODO(stephenfin): Remove these procedures in a future contract migration
+
+ if migrate_engine.name == 'postgresql':
+ error_message = (
+ 'Credential migration in progress. Cannot perform '
+ 'writes to credential table.'
+ )
+ credential_update_trigger = textwrap.dedent(f"""
+ CREATE OR REPLACE FUNCTION keystone_read_only_update()
+ RETURNS trigger AS
+ $BODY$
+ BEGIN
+ IF NEW.encrypted_blob IS NULL THEN
+ RAISE EXCEPTION '{error_message}';
+ END IF;
+ IF NEW.encrypted_blob IS NOT NULL AND OLD.blob IS NULL THEN
+ RAISE EXCEPTION '{error_message}';
+ END IF;
+ RETURN NEW;
+ END
+ $BODY$ LANGUAGE plpgsql;
+ """)
+ migrate_engine.execute(credential_update_trigger)
+
+ error_message = (
+ 'Identity provider migration in progress. Cannot '
+ 'insert new rows into the identity_provider table at '
+ 'this time.'
+ )
+ identity_provider_insert_trigger = textwrap.dedent(f"""
+ CREATE OR REPLACE FUNCTION keystone_read_only_insert()
+ RETURNS trigger AS
+ $BODY$
+ BEGIN
+ RAISE EXCEPTION '{error_message}';
+ END
+ $BODY$ LANGUAGE plpgsql;
+ """)
+ migrate_engine.execute(identity_provider_insert_trigger)
+
+ federated_user_insert_trigger = textwrap.dedent("""
+ CREATE OR REPLACE FUNCTION update_federated_user_domain_id()
+ RETURNS trigger AS
+ $BODY$
+ BEGIN
+ UPDATE "user" SET domain_id = (
+ SELECT domain_id FROM identity_provider WHERE id = NEW.idp_id)
+ WHERE id = NEW.user_id and domain_id IS NULL;
+ RETURN NULL;
+ END
+ $BODY$ LANGUAGE plpgsql;
+ """)
+ migrate_engine.execute(federated_user_insert_trigger)
+
+ local_user_insert_trigger = textwrap.dedent("""
+ CREATE OR REPLACE FUNCTION update_user_domain_id()
+ RETURNS trigger AS
+ $BODY$
+ BEGIN
+ UPDATE "user" SET domain_id = NEW.domain_id
+ WHERE id = NEW.user_id;
+ RETURN NULL;
+ END
+ $BODY$ LANGUAGE plpgsql;
+ """)
+ migrate_engine.execute(local_user_insert_trigger)
+
+ # FIXME(stephenfin): Remove these indexes. They're left over from attempts
+ # to remove foreign key constraints in past migrations. Apparently
+ # sqlalchemy-migrate didn't do the job fully and left behind indexes
+ if migrate_engine.name == 'mysql':
+ sql.Index('region_id', registered_limit.c.region_id).create()
+
+ # FIXME(stephenfin): This should be dropped when we add the FK
+ # constraint to this column
+ sql.Index('registered_limit_id', limit.c.registered_limit_id).create()
+
+ # FIXME(stephenfin): These are leftover from when we removed a FK
+        # constraint and should probably be dropped
+ sql.Index('domain_id', identity_provider.c.domain_id).create()
+ sql.Index('domain_id', user.c.domain_id).create()
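
Each version script in this legacy tree, including the large squashed one above, follows the sqlalchemy-migrate contract: the module exposes upgrade(migrate_engine), and the versioning machinery applies scripts in numeric order while tracking progress in the migrate_version table named in migrate.cfg. The sketch below shows, in rough terms, how such a repository is driven through migrate.versioning.api; keystone itself wraps this behind keystone-manage db_sync, and the URL, path, and starting version shown here are illustrative placeholders only.

    from migrate.versioning import api as versioning_api

    # Placeholder values for illustration only.
    db_url = 'sqlite:///keystone.db'
    repo = 'keystone/common/sql/legacy_migrations/expand_repo'

    # Stamp the database as version-controlled, then run every later
    # version script's upgrade() in sequence. The starting version is
    # chosen so that upgrade() picks up the scripts present in this repo
    # (numbered 073 onwards after this change).
    versioning_api.version_control(db_url, repo, version=72)
    versioning_api.upgrade(db_url, repo)
    print(versioning_api.db_version(db_url, repo))
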
diff --git a/keystone/common/sql/expand_repo/versions/076_placeholder.py b/keystone/common/sql/legacy_migrations/expand_repo/versions/074_placeholder.py
index 2b09cbc99..2b09cbc99 100644
--- a/keystone/common/sql/expand_repo/versions/076_placeholder.py
+++ b/keystone/common/sql/legacy_migrations/expand_repo/versions/074_placeholder.py
diff --git a/keystone/common/sql/expand_repo/versions/077_placeholder.py b/keystone/common/sql/legacy_migrations/expand_repo/versions/075_placeholder.py
index 2b09cbc99..2b09cbc99 100644
--- a/keystone/common/sql/expand_repo/versions/077_placeholder.py
+++ b/keystone/common/sql/legacy_migrations/expand_repo/versions/075_placeholder.py
diff --git a/keystone/common/sql/expand_repo/versions/078_placeholder.py b/keystone/common/sql/legacy_migrations/expand_repo/versions/076_placeholder.py
index 2b09cbc99..2b09cbc99 100644
--- a/keystone/common/sql/expand_repo/versions/078_placeholder.py
+++ b/keystone/common/sql/legacy_migrations/expand_repo/versions/076_placeholder.py
diff --git a/keystone/common/sql/legacy_migrations/expand_repo/versions/077_placeholder.py b/keystone/common/sql/legacy_migrations/expand_repo/versions/077_placeholder.py
new file mode 100644
index 000000000..2b09cbc99
--- /dev/null
+++ b/keystone/common/sql/legacy_migrations/expand_repo/versions/077_placeholder.py
@@ -0,0 +1,18 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# This is a placeholder for Ussuri backports. Do not use this number for new
+# Victoria work. New Victoria work starts after all the placeholders.
+
+
+def upgrade(migrate_engine):
+ pass
diff --git a/keystone/common/sql/legacy_migrations/expand_repo/versions/078_placeholder.py b/keystone/common/sql/legacy_migrations/expand_repo/versions/078_placeholder.py
new file mode 100644
index 000000000..2b09cbc99
--- /dev/null
+++ b/keystone/common/sql/legacy_migrations/expand_repo/versions/078_placeholder.py
@@ -0,0 +1,18 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# This is a placeholder for Ussuri backports. Do not use this number for new
+# Victoria work. New Victoria work starts after all the placeholders.
+
+
+def upgrade(migrate_engine):
+ pass
diff --git a/keystone/common/sql/contract_repo/versions/054_contract_drop_old_passoword_column.py b/keystone/common/sql/legacy_migrations/expand_repo/versions/079_expand_update_local_id_limit.py
index 1b33173c6..20db83851 100644
--- a/keystone/common/sql/contract_repo/versions/054_contract_drop_old_passoword_column.py
+++ b/keystone/common/sql/legacy_migrations/expand_repo/versions/079_expand_update_local_id_limit.py
@@ -14,8 +14,11 @@ import sqlalchemy as sql
def upgrade(migrate_engine):
+
meta = sql.MetaData()
meta.bind = migrate_engine
- password_table = sql.Table('password', meta, autoload=True)
- password_table.c.password.drop()
+ id_mapping_table = sql.Table(
+ 'id_mapping', meta, autoload=True
+ )
+ id_mapping_table.c.local_id.alter(type=sql.String(255))
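
The relocated script above widens id_mapping.local_id using sqlalchemy-migrate's column .alter() helper. For comparison only, the same change written in the alembic style that new keystone/common/sql/migrations work is expected to use would look roughly like the following sketch; the existing_type is assumed from the id_mapping table definition earlier in this diff.

    from alembic import op
    import sqlalchemy as sa


    def upgrade():
        # Widen id_mapping.local_id from 64 to 255 characters.
        op.alter_column(
            'id_mapping',
            'local_id',
            existing_type=sa.String(64),
            type_=sa.String(255),
        )
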
diff --git a/keystone/common/sql/expand_repo/versions/__init__.py b/keystone/common/sql/legacy_migrations/expand_repo/versions/__init__.py
index 84e0fb83b..84e0fb83b 100644
--- a/keystone/common/sql/expand_repo/versions/__init__.py
+++ b/keystone/common/sql/legacy_migrations/expand_repo/versions/__init__.py
diff --git a/keystone/common/sql/migrate_repo/README b/keystone/common/sql/migrate_repo/README
deleted file mode 100644
index 131117104..000000000
--- a/keystone/common/sql/migrate_repo/README
+++ /dev/null
@@ -1,4 +0,0 @@
-This is a database migration repository.
-
-More information at
-https://opendev.org/openstack/sqlalchemy-migrate
diff --git a/keystone/common/sql/migrate_repo/manage.py b/keystone/common/sql/migrate_repo/manage.py
deleted file mode 100644
index 41cba1adb..000000000
--- a/keystone/common/sql/migrate_repo/manage.py
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/usr/bin/env python
-
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from migrate.versioning.shell import main
-
-if __name__ == '__main__':
- main(debug='False')
diff --git a/keystone/common/sql/migrate_repo/migrate.cfg b/keystone/common/sql/migrate_repo/migrate.cfg
deleted file mode 100644
index db531bb41..000000000
--- a/keystone/common/sql/migrate_repo/migrate.cfg
+++ /dev/null
@@ -1,25 +0,0 @@
-[db_settings]
-# Used to identify which repository this database is versioned under.
-# You can use the name of your project.
-repository_id=keystone
-
-# The name of the database table used to track the schema version.
-# This name shouldn't already be used by your project.
-# If this is changed once a database is under version control, you'll need to
-# change the table name in each database too.
-version_table=migrate_version
-
-# When committing a change script, Migrate will attempt to generate the
-# sql for all supported databases; normally, if one of them fails - probably
-# because you don't have that database installed - it is ignored and the
-# commit continues, perhaps ending successfully.
-# Databases in this list MUST compile successfully during a commit, or the
-# entire commit will fail. List the databases your application will actually
-# be using to ensure your updates to that database work properly.
-# This must be a list; example: ['postgres','sqlite']
-required_dbs=[]
-
-# When creating new change scripts, Migrate will stamp the new script with
-# a version number. By default this is latest_version + 1. You can set this
-# to 'true' to tell Migrate to use the UTC timestamp instead.
-use_timestamp_numbering=False
diff --git a/keystone/common/sql/migrate_repo/versions/067_kilo.py b/keystone/common/sql/migrate_repo/versions/067_kilo.py
deleted file mode 100644
index a6dbed670..000000000
--- a/keystone/common/sql/migrate_repo/versions/067_kilo.py
+++ /dev/null
@@ -1,317 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-import migrate
-from oslo_log import log
-import sqlalchemy as sql
-
-from keystone.assignment.backends import sql as assignment_sql
-from keystone.common import sql as ks_sql
-from keystone.identity.mapping_backends import mapping as mapping_backend
-
-
-LOG = log.getLogger(__name__)
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- if migrate_engine.name == 'mysql':
- # In Folsom we explicitly converted migrate_version to UTF8.
- migrate_engine.execute(
- 'ALTER TABLE migrate_version CONVERT TO CHARACTER SET utf8')
- # Set default DB charset to UTF8.
- migrate_engine.execute(
- 'ALTER DATABASE %s DEFAULT CHARACTER SET utf8' %
- migrate_engine.url.database)
-
- credential = sql.Table(
- 'credential', meta,
- sql.Column('id', sql.String(length=64), primary_key=True),
- sql.Column('user_id', sql.String(length=64), nullable=False),
- sql.Column('project_id', sql.String(length=64)),
- sql.Column('blob', ks_sql.JsonBlob, nullable=False),
- sql.Column('type', sql.String(length=255), nullable=False),
- sql.Column('extra', ks_sql.JsonBlob.impl),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
-
- domain = sql.Table(
- 'domain', meta,
- sql.Column('id', sql.String(length=64), primary_key=True),
- sql.Column('name', sql.String(length=64), nullable=False),
- sql.Column('enabled', sql.Boolean, default=True, nullable=False),
- sql.Column('extra', ks_sql.JsonBlob.impl),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
-
- endpoint = sql.Table(
- 'endpoint', meta,
- sql.Column('id', sql.String(length=64), primary_key=True),
- sql.Column('legacy_endpoint_id', sql.String(length=64)),
- sql.Column('interface', sql.String(length=8), nullable=False),
- sql.Column('service_id', sql.String(length=64), nullable=False),
- sql.Column('url', sql.Text, nullable=False),
- sql.Column('extra', ks_sql.JsonBlob.impl),
- sql.Column('enabled', sql.Boolean, nullable=False, default=True,
- server_default='1'),
- sql.Column('region_id', sql.String(length=255), nullable=True),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
-
- group = sql.Table(
- 'group', meta,
- sql.Column('id', sql.String(length=64), primary_key=True),
- sql.Column('domain_id', sql.String(length=64), nullable=False),
- sql.Column('name', sql.String(length=64), nullable=False),
- sql.Column('description', sql.Text),
- sql.Column('extra', ks_sql.JsonBlob.impl),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
-
- policy = sql.Table(
- 'policy', meta,
- sql.Column('id', sql.String(length=64), primary_key=True),
- sql.Column('type', sql.String(length=255), nullable=False),
- sql.Column('blob', ks_sql.JsonBlob, nullable=False),
- sql.Column('extra', ks_sql.JsonBlob.impl),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
-
- project = sql.Table(
- 'project', meta,
- sql.Column('id', sql.String(length=64), primary_key=True),
- sql.Column('name', sql.String(length=64), nullable=False),
- sql.Column('extra', ks_sql.JsonBlob.impl),
- sql.Column('description', sql.Text),
- sql.Column('enabled', sql.Boolean),
- sql.Column('domain_id', sql.String(length=64), nullable=False),
- sql.Column('parent_id', sql.String(64), nullable=True),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
-
- role = sql.Table(
- 'role', meta,
- sql.Column('id', sql.String(length=64), primary_key=True),
- sql.Column('name', sql.String(length=255), nullable=False),
- sql.Column('extra', ks_sql.JsonBlob.impl),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
-
- service = sql.Table(
- 'service', meta,
- sql.Column('id', sql.String(length=64), primary_key=True),
- sql.Column('type', sql.String(length=255)),
- sql.Column('enabled', sql.Boolean, nullable=False, default=True,
- server_default='1'),
- sql.Column('extra', ks_sql.JsonBlob.impl),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
-
- token = sql.Table(
- 'token', meta,
- sql.Column('id', sql.String(length=64), primary_key=True),
- sql.Column('expires', sql.DateTime, default=None),
- sql.Column('extra', ks_sql.JsonBlob.impl),
- sql.Column('valid', sql.Boolean, default=True, nullable=False),
- sql.Column('trust_id', sql.String(length=64)),
- sql.Column('user_id', sql.String(length=64)),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
-
- trust = sql.Table(
- 'trust', meta,
- sql.Column('id', sql.String(length=64), primary_key=True),
- sql.Column('trustor_user_id', sql.String(length=64), nullable=False),
- sql.Column('trustee_user_id', sql.String(length=64), nullable=False),
- sql.Column('project_id', sql.String(length=64)),
- sql.Column('impersonation', sql.Boolean, nullable=False),
- sql.Column('deleted_at', sql.DateTime),
- sql.Column('expires_at', sql.DateTime),
- sql.Column('remaining_uses', sql.Integer, nullable=True),
- sql.Column('extra', ks_sql.JsonBlob.impl),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
-
- trust_role = sql.Table(
- 'trust_role', meta,
- sql.Column('trust_id', sql.String(length=64), primary_key=True,
- nullable=False),
- sql.Column('role_id', sql.String(length=64), primary_key=True,
- nullable=False),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
-
- user = sql.Table(
- 'user', meta,
- sql.Column('id', sql.String(length=64), primary_key=True),
- sql.Column('name', sql.String(length=255), nullable=False),
- sql.Column('extra', ks_sql.JsonBlob.impl),
- sql.Column('password', sql.String(length=128)),
- sql.Column('enabled', sql.Boolean),
- sql.Column('domain_id', sql.String(length=64), nullable=False),
- sql.Column('default_project_id', sql.String(length=64)),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
-
- user_group_membership = sql.Table(
- 'user_group_membership', meta,
- sql.Column('user_id', sql.String(length=64), primary_key=True),
- sql.Column('group_id', sql.String(length=64), primary_key=True),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
-
- region = sql.Table(
- 'region',
- meta,
- sql.Column('id', sql.String(255), primary_key=True),
- sql.Column('description', sql.String(255), nullable=False),
- sql.Column('parent_region_id', sql.String(255), nullable=True),
- sql.Column('extra', sql.Text()),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
-
- assignment = sql.Table(
- 'assignment',
- meta,
- sql.Column('type', sql.Enum(
- assignment_sql.AssignmentType.USER_PROJECT,
- assignment_sql.AssignmentType.GROUP_PROJECT,
- assignment_sql.AssignmentType.USER_DOMAIN,
- assignment_sql.AssignmentType.GROUP_DOMAIN,
- name='type'),
- nullable=False),
- sql.Column('actor_id', sql.String(64), nullable=False),
- sql.Column('target_id', sql.String(64), nullable=False),
- sql.Column('role_id', sql.String(64), nullable=False),
- sql.Column('inherited', sql.Boolean, default=False, nullable=False),
- sql.PrimaryKeyConstraint('type', 'actor_id', 'target_id', 'role_id'),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
-
- mapping = sql.Table(
- 'id_mapping',
- meta,
- sql.Column('public_id', sql.String(64), primary_key=True),
- sql.Column('domain_id', sql.String(64), nullable=False),
- sql.Column('local_id', sql.String(64), nullable=False),
- sql.Column('entity_type', sql.Enum(
- mapping_backend.EntityType.USER,
- mapping_backend.EntityType.GROUP,
- name='entity_type'),
- nullable=False),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
-
- domain_config_whitelist = sql.Table(
- 'whitelisted_config',
- meta,
- sql.Column('domain_id', sql.String(64), primary_key=True),
- sql.Column('group', sql.String(255), primary_key=True),
- sql.Column('option', sql.String(255), primary_key=True),
- sql.Column('value', ks_sql.JsonBlob.impl, nullable=False),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
-
- domain_config_sensitive = sql.Table(
- 'sensitive_config',
- meta,
- sql.Column('domain_id', sql.String(64), primary_key=True),
- sql.Column('group', sql.String(255), primary_key=True),
- sql.Column('option', sql.String(255), primary_key=True),
- sql.Column('value', ks_sql.JsonBlob.impl, nullable=False),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
-
- # create all tables
- tables = [credential, domain, endpoint, group, policy, project, role,
- service, token, trust, trust_role, user, user_group_membership,
- region, assignment, mapping, domain_config_whitelist,
- domain_config_sensitive]
-
- for table in tables:
- try:
- table.create()
- except Exception:
- LOG.exception('Exception while creating table: %r', table)
- raise
-
- # Unique Constraints
- migrate.UniqueConstraint(user.c.domain_id,
- user.c.name,
- name='ixu_user_name_domain_id').create()
- migrate.UniqueConstraint(group.c.domain_id,
- group.c.name,
- name='ixu_group_name_domain_id').create()
- migrate.UniqueConstraint(role.c.name,
- name='ixu_role_name').create()
- migrate.UniqueConstraint(project.c.domain_id,
- project.c.name,
- name='ixu_project_name_domain_id').create()
- migrate.UniqueConstraint(domain.c.name,
- name='ixu_domain_name').create()
- migrate.UniqueConstraint(mapping.c.domain_id,
- mapping.c.local_id,
- mapping.c.entity_type,
- name='domain_id').create()
-
- # Indexes
- sql.Index('ix_token_expires', token.c.expires).create()
- sql.Index('ix_token_expires_valid', token.c.expires,
- token.c.valid).create()
- sql.Index('ix_actor_id', assignment.c.actor_id).create()
- sql.Index('ix_token_user_id', token.c.user_id).create()
- sql.Index('ix_token_trust_id', token.c.trust_id).create()
- # NOTE(stevemar): The two indexes below were named 'service_id' and
- # 'group_id' in 050_fk_consistent_indexes.py, and need to be preserved
- sql.Index('service_id', endpoint.c.service_id).create()
- sql.Index('group_id', user_group_membership.c.group_id).create()
-
- fkeys = [
- {'columns': [endpoint.c.service_id],
- 'references': [service.c.id]},
-
- {'columns': [user_group_membership.c.group_id],
- 'references': [group.c.id],
- 'name': 'fk_user_group_membership_group_id'},
-
- {'columns': [user_group_membership.c.user_id],
- 'references': [user.c.id],
- 'name': 'fk_user_group_membership_user_id'},
-
- {'columns': [project.c.domain_id],
- 'references': [domain.c.id],
- 'name': 'fk_project_domain_id'},
-
- {'columns': [endpoint.c.region_id],
- 'references': [region.c.id],
- 'name': 'fk_endpoint_region_id'},
-
- {'columns': [project.c.parent_id],
- 'references': [project.c.id],
- 'name': 'project_parent_id_fkey'},
- ]
-
- if migrate_engine.name == 'sqlite':
- # NOTE(stevemar): We need to keep this FK constraint due to 073, but
- # only for sqlite; once we collapse 073 we can remove this constraint.
- fkeys.append(
- {'columns': [assignment.c.role_id],
- 'references': [role.c.id],
- 'name': 'fk_assignment_role_id'})
-
- for fkey in fkeys:
- migrate.ForeignKeyConstraint(columns=fkey['columns'],
- refcolumns=fkey['references'],
- name=fkey.get('name')).create()
diff --git a/keystone/common/sql/migrate_repo/versions/068_placeholder.py b/keystone/common/sql/migrate_repo/versions/068_placeholder.py
deleted file mode 100644
index 111df9d4a..000000000
--- a/keystone/common/sql/migrate_repo/versions/068_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Kilo backports. Do not use this number for new
-# Liberty work. New Liberty work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/migrate_repo/versions/069_placeholder.py b/keystone/common/sql/migrate_repo/versions/069_placeholder.py
deleted file mode 100644
index 111df9d4a..000000000
--- a/keystone/common/sql/migrate_repo/versions/069_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Kilo backports. Do not use this number for new
-# Liberty work. New Liberty work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/migrate_repo/versions/070_placeholder.py b/keystone/common/sql/migrate_repo/versions/070_placeholder.py
deleted file mode 100644
index 111df9d4a..000000000
--- a/keystone/common/sql/migrate_repo/versions/070_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Kilo backports. Do not use this number for new
-# Liberty work. New Liberty work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/migrate_repo/versions/071_placeholder.py b/keystone/common/sql/migrate_repo/versions/071_placeholder.py
deleted file mode 100644
index 111df9d4a..000000000
--- a/keystone/common/sql/migrate_repo/versions/071_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Kilo backports. Do not use this number for new
-# Liberty work. New Liberty work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/migrate_repo/versions/072_placeholder.py b/keystone/common/sql/migrate_repo/versions/072_placeholder.py
deleted file mode 100644
index 111df9d4a..000000000
--- a/keystone/common/sql/migrate_repo/versions/072_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Kilo backports. Do not use this number for new
-# Liberty work. New Liberty work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/migrate_repo/versions/073_insert_assignment_inherited_pk.py b/keystone/common/sql/migrate_repo/versions/073_insert_assignment_inherited_pk.py
deleted file mode 100644
index dc0905b5f..000000000
--- a/keystone/common/sql/migrate_repo/versions/073_insert_assignment_inherited_pk.py
+++ /dev/null
@@ -1,113 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import migrate
-import sqlalchemy as sql
-from sqlalchemy.orm import sessionmaker
-
-from keystone.assignment.backends import sql as assignment_sql
-
-
-def upgrade(migrate_engine):
- """Insert inherited column to assignment table PK constraints.
-
- For non-SQLite databases, it changes the constraint in the existing table.
-
- For SQLite, since changing constraints is not supported, it recreates the
- assignment table with the new PK constraint and migrates the existing data.
-
- """
- ASSIGNMENT_TABLE_NAME = 'assignment'
-
- metadata = sql.MetaData()
- metadata.bind = migrate_engine
-
- # Retrieve the existing assignment table
- assignment_table = sql.Table(ASSIGNMENT_TABLE_NAME, metadata,
- autoload=True)
-
- if migrate_engine.name == 'sqlite':
- ACTOR_ID_INDEX_NAME = 'ix_actor_id'
- TMP_ASSIGNMENT_TABLE_NAME = 'tmp_assignment'
-
- # Define the new assignment table with a temporary name
- new_assignment_table = sql.Table(
- TMP_ASSIGNMENT_TABLE_NAME, metadata,
- sql.Column('type', sql.Enum(
- assignment_sql.AssignmentType.USER_PROJECT,
- assignment_sql.AssignmentType.GROUP_PROJECT,
- assignment_sql.AssignmentType.USER_DOMAIN,
- assignment_sql.AssignmentType.GROUP_DOMAIN,
- name='type'),
- nullable=False),
- sql.Column('actor_id', sql.String(64), nullable=False),
- sql.Column('target_id', sql.String(64), nullable=False),
- sql.Column('role_id', sql.String(64), sql.ForeignKey('role.id'),
- nullable=False),
- sql.Column('inherited', sql.Boolean, default=False,
- nullable=False),
- sql.PrimaryKeyConstraint('type', 'actor_id', 'target_id',
- 'role_id', 'inherited'),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
-
- # Create the new assignment table
- new_assignment_table.create(migrate_engine, checkfirst=True)
-
- # Change the index from the existing assignment table to the new one
- sql.Index(ACTOR_ID_INDEX_NAME, assignment_table.c.actor_id).drop()
- sql.Index(ACTOR_ID_INDEX_NAME,
- new_assignment_table.c.actor_id).create()
-
- # Instantiate session
- maker = sessionmaker(bind=migrate_engine)
- session = maker()
-
- # Migrate existing data
- insert = new_assignment_table.insert().from_select(
- assignment_table.c, select=session.query(assignment_table))
- session.execute(insert)
- session.commit()
-
- # Drop the existing assignment table, in favor of the new one
- assignment_table.deregister()
- assignment_table.drop()
-
- # Finally, rename the new table to the original assignment table name
- new_assignment_table.rename(ASSIGNMENT_TABLE_NAME)
- elif migrate_engine.name == 'ibm_db_sa':
- # Recreate the existing constraint, marking the inherited column as PK
- # for DB2.
-
- # This is a workaround for the general case handled in the else branch below.
- # Due to a bug in the DB2 sqlalchemy dialect, Column.alter() actually
- # creates a primary key over only the "inherited" column. This is wrong
- # because the primary key for the table actually covers other columns
- # too, not just the "inherited" column. Since the primary key already
- # exists for the table after the Column.alter() call, it causes the
- # next line to fail with an error that the primary key already exists.
-
- # The workaround here skips doing the Column.alter(). This causes a
- # warning message since the metadata is out of sync. We can remove this
- # workaround once the DB2 sqlalchemy dialect is fixed.
- # DB2 Issue: https://code.google.com/p/ibm-db/issues/detail?id=173
-
- migrate.PrimaryKeyConstraint(table=assignment_table).drop()
- migrate.PrimaryKeyConstraint(
- assignment_table.c.type, assignment_table.c.actor_id,
- assignment_table.c.target_id, assignment_table.c.role_id,
- assignment_table.c.inherited).create()
- else:
- # Recreate the existing constraint, marking the inherited column as PK
- migrate.PrimaryKeyConstraint(table=assignment_table).drop()
- assignment_table.c.inherited.alter(primary_key=True)
- migrate.PrimaryKeyConstraint(table=assignment_table).create()
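A minimal sketch of the recreate-and-copy pattern the SQLite branch above relies on: SQLite cannot alter an existing primary key, so the table is rebuilt and the rows copied over with insert().from_select(). The sketch uses plain SQLAlchemy (not the sqlalchemy-migrate API of the deleted script) and a cut-down, two-column version of the assignment table.

import sqlalchemy as sa

engine = sa.create_engine('sqlite://')
meta = sa.MetaData()

# Old layout: the primary key does not yet include 'inherited'.
old = sa.Table('assignment', meta,
               sa.Column('actor_id', sa.String(64), primary_key=True),
               sa.Column('inherited', sa.Boolean, nullable=False))
# New layout: same columns, wider primary key.
new = sa.Table('tmp_assignment', meta,
               sa.Column('actor_id', sa.String(64)),
               sa.Column('inherited', sa.Boolean, nullable=False),
               sa.PrimaryKeyConstraint('actor_id', 'inherited'))
meta.create_all(engine)

with engine.begin() as conn:
    conn.execute(old.insert(), [{'actor_id': 'u1', 'inherited': False}])
    # Copy every existing row into the table carrying the new primary key...
    conn.execute(new.insert().from_select(list(old.c.keys()),
                                          sa.select(old)))
    # ...then drop the original and rename the replacement into place.
    old.drop(conn)
    conn.execute(sa.text('ALTER TABLE tmp_assignment RENAME TO assignment'))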
diff --git a/keystone/common/sql/migrate_repo/versions/074_add_is_domain_project.py b/keystone/common/sql/migrate_repo/versions/074_add_is_domain_project.py
deleted file mode 100644
index dcb89b07c..000000000
--- a/keystone/common/sql/migrate_repo/versions/074_add_is_domain_project.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sql
-
-
-_PROJECT_TABLE_NAME = 'project'
-_IS_DOMAIN_COLUMN_NAME = 'is_domain'
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- project_table = sql.Table(_PROJECT_TABLE_NAME, meta, autoload=True)
- is_domain = sql.Column(_IS_DOMAIN_COLUMN_NAME, sql.Boolean, nullable=False,
- server_default='0', default=False)
- project_table.create_column(is_domain)
diff --git a/keystone/common/sql/migrate_repo/versions/075_confirm_config_registration.py b/keystone/common/sql/migrate_repo/versions/075_confirm_config_registration.py
deleted file mode 100644
index 576842c69..000000000
--- a/keystone/common/sql/migrate_repo/versions/075_confirm_config_registration.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sql
-
-REGISTRATION_TABLE = 'config_register'
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- registration_table = sql.Table(
- REGISTRATION_TABLE,
- meta,
- sql.Column('type', sql.String(64), primary_key=True),
- sql.Column('domain_id', sql.String(64), nullable=False),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
- registration_table.create(migrate_engine, checkfirst=True)
diff --git a/keystone/common/sql/migrate_repo/versions/076_placeholder.py b/keystone/common/sql/migrate_repo/versions/076_placeholder.py
deleted file mode 100644
index 9f6e84156..000000000
--- a/keystone/common/sql/migrate_repo/versions/076_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Liberty backports. Do not use this number for new
-# Mitaka work. New Mitaka work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/migrate_repo/versions/077_placeholder.py b/keystone/common/sql/migrate_repo/versions/077_placeholder.py
deleted file mode 100644
index 9f6e84156..000000000
--- a/keystone/common/sql/migrate_repo/versions/077_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Liberty backports. Do not use this number for new
-# Mitaka work. New Mitaka work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/migrate_repo/versions/078_placeholder.py b/keystone/common/sql/migrate_repo/versions/078_placeholder.py
deleted file mode 100644
index 9f6e84156..000000000
--- a/keystone/common/sql/migrate_repo/versions/078_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Liberty backports. Do not use this number for new
-# Mitaka work. New Mitaka work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/migrate_repo/versions/079_placeholder.py b/keystone/common/sql/migrate_repo/versions/079_placeholder.py
deleted file mode 100644
index 9f6e84156..000000000
--- a/keystone/common/sql/migrate_repo/versions/079_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Liberty backports. Do not use this number for new
-# Mitaka work. New Mitaka work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/migrate_repo/versions/080_placeholder.py b/keystone/common/sql/migrate_repo/versions/080_placeholder.py
deleted file mode 100644
index 9f6e84156..000000000
--- a/keystone/common/sql/migrate_repo/versions/080_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Liberty backports. Do not use this number for new
-# Mitaka work. New Mitaka work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/migrate_repo/versions/081_add_endpoint_policy_table.py b/keystone/common/sql/migrate_repo/versions/081_add_endpoint_policy_table.py
deleted file mode 100644
index f8bffa1cd..000000000
--- a/keystone/common/sql/migrate_repo/versions/081_add_endpoint_policy_table.py
+++ /dev/null
@@ -1,54 +0,0 @@
-# Copyright 2014 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sql
-
-from keystone.common.sql import upgrades
-
-
-def upgrade(migrate_engine):
- try:
- extension_version = upgrades.get_db_version(
- extension='endpoint_policy',
- engine=migrate_engine)
- except Exception:
- extension_version = 0
-
- # This migration corresponds to endpoint_policy extension migration 1. Only
- # update if it has not been run.
- if extension_version >= 1:
- return
-
- # Upgrade operations go here. Don't create your own engine; bind
- # migrate_engine to your metadata
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- endpoint_policy_table = sql.Table(
- 'policy_association',
- meta,
- sql.Column('id', sql.String(64), primary_key=True),
- sql.Column('policy_id', sql.String(64),
- nullable=False),
- sql.Column('endpoint_id', sql.String(64),
- nullable=True),
- sql.Column('service_id', sql.String(64),
- nullable=True),
- sql.Column('region_id', sql.String(64),
- nullable=True),
- sql.UniqueConstraint('endpoint_id', 'service_id', 'region_id'),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
-
- endpoint_policy_table.create(migrate_engine, checkfirst=True)
diff --git a/keystone/common/sql/migrate_repo/versions/082_add_federation_tables.py b/keystone/common/sql/migrate_repo/versions/082_add_federation_tables.py
deleted file mode 100644
index bbda554a4..000000000
--- a/keystone/common/sql/migrate_repo/versions/082_add_federation_tables.py
+++ /dev/null
@@ -1,97 +0,0 @@
-# Copyright 2014 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sql
-
-from keystone.common.sql import upgrades
-import keystone.conf
-
-CONF = keystone.conf.CONF
-_RELAY_STATE_PREFIX = 'relay_state_prefix'
-
-
-def upgrade(migrate_engine):
- try:
- extension_version = upgrades.get_db_version(
- extension='federation',
- engine=migrate_engine)
- except Exception:
- extension_version = 0
-
- # This migration corresponds to federation extension migration 8. Only
- # update if it has not been run.
- if extension_version >= 8:
- return
-
- # Upgrade operations go here. Don't create your own engine; bind
- # migrate_engine to your metadata
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- idp_table = sql.Table(
- 'identity_provider',
- meta,
- sql.Column('id', sql.String(64), primary_key=True),
- sql.Column('enabled', sql.Boolean, nullable=False),
- sql.Column('description', sql.Text(), nullable=True),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
- idp_table.create(migrate_engine, checkfirst=True)
-
- federation_protocol_table = sql.Table(
- 'federation_protocol',
- meta,
- sql.Column('id', sql.String(64), primary_key=True),
- sql.Column('idp_id', sql.String(64),
- sql.ForeignKey('identity_provider.id', ondelete='CASCADE'),
- primary_key=True),
- sql.Column('mapping_id', sql.String(64), nullable=False),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
- federation_protocol_table.create(migrate_engine, checkfirst=True)
-
- mapping_table = sql.Table(
- 'mapping',
- meta,
- sql.Column('id', sql.String(64), primary_key=True),
- sql.Column('rules', sql.Text(), nullable=False),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
- mapping_table.create(migrate_engine, checkfirst=True)
-
- relay_state_prefix_default = CONF.saml.relay_state_prefix
- sp_table = sql.Table(
- 'service_provider',
- meta,
- sql.Column('auth_url', sql.String(256), nullable=False),
- sql.Column('id', sql.String(64), primary_key=True),
- sql.Column('enabled', sql.Boolean, nullable=False),
- sql.Column('description', sql.Text(), nullable=True),
- sql.Column('sp_url', sql.String(256), nullable=False),
- sql.Column(_RELAY_STATE_PREFIX, sql.String(256), nullable=False,
- server_default=relay_state_prefix_default),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
- sp_table.create(migrate_engine, checkfirst=True)
-
- idp_table = sql.Table('identity_provider', meta, autoload=True)
- remote_id_table = sql.Table(
- 'idp_remote_ids',
- meta,
- sql.Column('idp_id', sql.String(64),
- sql.ForeignKey('identity_provider.id', ondelete='CASCADE')),
- sql.Column('remote_id', sql.String(255), primary_key=True),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
- remote_id_table.create(migrate_engine, checkfirst=True)
diff --git a/keystone/common/sql/migrate_repo/versions/083_add_oauth1_tables.py b/keystone/common/sql/migrate_repo/versions/083_add_oauth1_tables.py
deleted file mode 100644
index 8bbccb962..000000000
--- a/keystone/common/sql/migrate_repo/versions/083_add_oauth1_tables.py
+++ /dev/null
@@ -1,75 +0,0 @@
-# Copyright 2014 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sql
-
-from keystone.common.sql import upgrades
-
-
-def upgrade(migrate_engine):
- try:
- extension_version = upgrades.get_db_version(
- extension='oauth1',
- engine=migrate_engine)
- except Exception:
- extension_version = 0
-
- # This migration corresponds to oauth extension migration 5. Only
- # update if it has not been run.
- if extension_version >= 5:
- return
-
- # Upgrade operations go here. Don't create your own engine; bind
- # migrate_engine to your metadata
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- consumer_table = sql.Table(
- 'consumer',
- meta,
- sql.Column('id', sql.String(64), primary_key=True, nullable=False),
- sql.Column('description', sql.String(64), nullable=True),
- sql.Column('secret', sql.String(64), nullable=False),
- sql.Column('extra', sql.Text(), nullable=False))
- consumer_table.create(migrate_engine, checkfirst=True)
-
- request_token_table = sql.Table(
- 'request_token',
- meta,
- sql.Column('id', sql.String(64), primary_key=True, nullable=False),
- sql.Column('request_secret', sql.String(64), nullable=False),
- sql.Column('verifier', sql.String(64), nullable=True),
- sql.Column('authorizing_user_id', sql.String(64), nullable=True),
- sql.Column('requested_project_id', sql.String(64), nullable=False),
- sql.Column('role_ids', sql.Text(), nullable=True),
- sql.Column('consumer_id', sql.String(64),
- sql.ForeignKey('consumer.id'),
- nullable=False, index=True),
- sql.Column('expires_at', sql.String(64), nullable=True))
- request_token_table.create(migrate_engine, checkfirst=True)
-
- access_token_table = sql.Table(
- 'access_token',
- meta,
- sql.Column('id', sql.String(64), primary_key=True, nullable=False),
- sql.Column('access_secret', sql.String(64), nullable=False),
- sql.Column('authorizing_user_id', sql.String(64),
- nullable=False, index=True),
- sql.Column('project_id', sql.String(64), nullable=False),
- sql.Column('role_ids', sql.Text(), nullable=False),
- sql.Column('consumer_id', sql.String(64),
- sql.ForeignKey('consumer.id'),
- nullable=False, index=True),
- sql.Column('expires_at', sql.String(64), nullable=True))
- access_token_table.create(migrate_engine, checkfirst=True)
diff --git a/keystone/common/sql/migrate_repo/versions/084_add_revoke_tables.py b/keystone/common/sql/migrate_repo/versions/084_add_revoke_tables.py
deleted file mode 100644
index 0cf37e7d7..000000000
--- a/keystone/common/sql/migrate_repo/versions/084_add_revoke_tables.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# Copyright 2014 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sql
-
-from keystone.common.sql import upgrades
-
-
-def upgrade(migrate_engine):
- try:
- extension_version = upgrades.get_db_version(
- extension='revoke',
- engine=migrate_engine)
- except Exception:
- extension_version = 0
-
- # This migration corresponds to revoke extension migration 2. Only
- # update if it has not been run.
- if extension_version >= 2:
- return
-
- # Upgrade operations go here. Don't create your own engine; bind
- # migrate_engine to your metadata
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- service_table = sql.Table(
- 'revocation_event',
- meta,
- sql.Column('id', sql.String(64), primary_key=True),
- sql.Column('domain_id', sql.String(64)),
- sql.Column('project_id', sql.String(64)),
- sql.Column('user_id', sql.String(64)),
- sql.Column('role_id', sql.String(64)),
- sql.Column('trust_id', sql.String(64)),
- sql.Column('consumer_id', sql.String(64)),
- sql.Column('access_token_id', sql.String(64)),
- sql.Column('issued_before', sql.DateTime(), nullable=False),
- sql.Column('expires_at', sql.DateTime()),
- sql.Column('revoked_at', sql.DateTime(), index=True, nullable=False),
- sql.Column('audit_id', sql.String(32), nullable=True),
- sql.Column('audit_chain_id', sql.String(32), nullable=True))
-
- service_table.create(migrate_engine, checkfirst=True)
diff --git a/keystone/common/sql/migrate_repo/versions/085_add_endpoint_filtering_table.py b/keystone/common/sql/migrate_repo/versions/085_add_endpoint_filtering_table.py
deleted file mode 100644
index 0283bc335..000000000
--- a/keystone/common/sql/migrate_repo/versions/085_add_endpoint_filtering_table.py
+++ /dev/null
@@ -1,70 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sql
-
-from keystone.common.sql import upgrades
-
-
-def upgrade(migrate_engine):
- try:
- extension_version = upgrades.get_db_version(
- extension='endpoint_filter',
- engine=migrate_engine)
- except Exception:
- extension_version = 0
-
- # This migration corresponds to endpoint_filter extension migration 2. Only
- # update if it has not been run.
- if extension_version >= 2:
- return
-
- # Upgrade operations go here. Don't create your own engine; bind
- # migrate_engine to your metadata
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- EP_GROUP_ID = 'endpoint_group_id'
- PROJECT_ID = 'project_id'
-
- endpoint_filtering_table = sql.Table(
- 'project_endpoint',
- meta,
- sql.Column(
- 'endpoint_id',
- sql.String(64),
- primary_key=True,
- nullable=False),
- sql.Column(
- 'project_id',
- sql.String(64),
- primary_key=True,
- nullable=False))
- endpoint_filtering_table.create(migrate_engine, checkfirst=True)
-
- endpoint_group_table = sql.Table(
- 'endpoint_group',
- meta,
- sql.Column('id', sql.String(64), primary_key=True),
- sql.Column('name', sql.String(255), nullable=False),
- sql.Column('description', sql.Text, nullable=True),
- sql.Column('filters', sql.Text(), nullable=False))
- endpoint_group_table.create(migrate_engine, checkfirst=True)
-
- project_endpoint_group_table = sql.Table(
- 'project_endpoint_group',
- meta,
- sql.Column(EP_GROUP_ID, sql.String(64),
- sql.ForeignKey('endpoint_group.id'), nullable=False),
- sql.Column(PROJECT_ID, sql.String(64), nullable=False),
- sql.PrimaryKeyConstraint(EP_GROUP_ID, PROJECT_ID))
- project_endpoint_group_table.create(migrate_engine, checkfirst=True)
diff --git a/keystone/common/sql/migrate_repo/versions/086_add_duplicate_constraint_trusts.py b/keystone/common/sql/migrate_repo/versions/086_add_duplicate_constraint_trusts.py
deleted file mode 100644
index 2b115ea42..000000000
--- a/keystone/common/sql/migrate_repo/versions/086_add_duplicate_constraint_trusts.py
+++ /dev/null
@@ -1,26 +0,0 @@
-# Copyright 2015 Intel Corporation
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from migrate import UniqueConstraint
-from sqlalchemy import MetaData, Table
-
-
-def upgrade(migrate_engine):
- meta = MetaData(bind=migrate_engine)
- trusts = Table('trust', meta, autoload=True)
-
- UniqueConstraint('trustor_user_id', 'trustee_user_id', 'project_id',
- 'impersonation', 'expires_at', table=trusts,
- name='duplicate_trust_constraint').create()
diff --git a/keystone/common/sql/migrate_repo/versions/087_implied_roles.py b/keystone/common/sql/migrate_repo/versions/087_implied_roles.py
deleted file mode 100644
index 7713ce8fa..000000000
--- a/keystone/common/sql/migrate_repo/versions/087_implied_roles.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-import migrate
-import sqlalchemy as sql
-
-
-ROLE_TABLE = 'role'
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- implied_role = sql.Table(
- 'implied_role', meta,
- sql.Column('prior_role_id', sql.String(length=64), primary_key=True),
- sql.Column(
- 'implied_role_id', sql.String(length=64), primary_key=True),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
- implied_role.create()
- role = sql.Table(ROLE_TABLE, meta, autoload=True)
- fkeys = [
- {'columns': [implied_role.c.prior_role_id],
- 'references': [role.c.id]},
- {'columns': [implied_role.c.implied_role_id],
- 'references': [role.c.id]},
- ]
- for fkey in fkeys:
- migrate.ForeignKeyConstraint(columns=fkey['columns'],
- refcolumns=fkey['references'],
- name=fkey.get('name')).create()
diff --git a/keystone/common/sql/migrate_repo/versions/088_domain_specific_roles.py b/keystone/common/sql/migrate_repo/versions/088_domain_specific_roles.py
deleted file mode 100644
index 8b792dfa7..000000000
--- a/keystone/common/sql/migrate_repo/versions/088_domain_specific_roles.py
+++ /dev/null
@@ -1,60 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import migrate
-import sqlalchemy as sql
-
-
-_ROLE_NAME_NEW_CONSTRAINT = 'ixu_role_name_domain_id'
-_ROLE_TABLE_NAME = 'role'
-_ROLE_NAME_COLUMN_NAME = 'name'
-_DOMAIN_ID_COLUMN_NAME = 'domain_id'
-_NULL_DOMAIN_ID = '<<null>>'
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- role_table = sql.Table(_ROLE_TABLE_NAME, meta, autoload=True)
- domain_id = sql.Column(_DOMAIN_ID_COLUMN_NAME, sql.String(64),
- nullable=False, server_default=_NULL_DOMAIN_ID)
-
- # NOTE(morganfainberg): the `role_name` unique constraint is not
- # guaranteed to be a fixed name, such as 'ixu_role_name', so we need to
- # search for the correct constraint that only affects role_table.c.name
- # and drop that constraint.
- to_drop = None
- if migrate_engine.name == 'mysql':
- for c in role_table.indexes:
- if (c.unique and len(c.columns) == 1 and
- _ROLE_NAME_COLUMN_NAME in c.columns):
- to_drop = c
- break
- else:
- for c in role_table.constraints:
- if len(c.columns) == 1 and _ROLE_NAME_COLUMN_NAME in c.columns:
- to_drop = c
- break
-
- if to_drop is not None:
- migrate.UniqueConstraint(role_table.c.name,
- name=to_drop.name).drop()
-
- # perform changes after constraint is dropped.
- if 'domain_id' not in role_table.columns:
- # Only create the column if it doesn't already exist.
- role_table.create_column(domain_id)
-
- migrate.UniqueConstraint(role_table.c.name,
- role_table.c.domain_id,
- name=_ROLE_NAME_NEW_CONSTRAINT).create()
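The constraint-discovery trick in the NOTE above (the unique constraint on role.name may carry any name, so it must be located before it can be dropped) can also be expressed with SQLAlchemy's inspector. A small sketch under that assumption, with a cut-down role table:

import sqlalchemy as sa

engine = sa.create_engine('sqlite://')
meta = sa.MetaData()
role = sa.Table('role', meta,
                sa.Column('id', sa.String(64), primary_key=True),
                sa.Column('name', sa.String(255), nullable=False),
                sa.UniqueConstraint('name', name='ixu_role_name'))
meta.create_all(engine)

# Find whichever unique constraint covers exactly the 'name' column,
# whatever it happens to be called on this particular deployment.
inspector = sa.inspect(engine)
to_drop = None
for uc in inspector.get_unique_constraints('role'):
    if uc['column_names'] == ['name']:
        to_drop = uc['name']
        break
print(to_drop)  # 'ixu_role_name' here, but the point is not to assume it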
diff --git a/keystone/common/sql/migrate_repo/versions/090_add_local_user_and_password_tables.py b/keystone/common/sql/migrate_repo/versions/090_add_local_user_and_password_tables.py
deleted file mode 100644
index 800ba47e8..000000000
--- a/keystone/common/sql/migrate_repo/versions/090_add_local_user_and_password_tables.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sql
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- user = sql.Table('user', meta, autoload=True)
-
- local_user = sql.Table(
- 'local_user',
- meta,
- sql.Column('id', sql.Integer, primary_key=True, nullable=False),
- sql.Column('user_id', sql.String(64),
- sql.ForeignKey(user.c.id, ondelete='CASCADE'),
- nullable=False, unique=True),
- sql.Column('domain_id', sql.String(64), nullable=False),
- sql.Column('name', sql.String(255), nullable=False),
- sql.UniqueConstraint('domain_id', 'name'))
- local_user.create(migrate_engine, checkfirst=True)
-
- password = sql.Table(
- 'password',
- meta,
- sql.Column('id', sql.Integer, primary_key=True, nullable=False),
- sql.Column('local_user_id', sql.Integer,
- sql.ForeignKey(local_user.c.id, ondelete='CASCADE'),
- nullable=False),
- sql.Column('password', sql.String(128), nullable=False))
- password.create(migrate_engine, checkfirst=True)
diff --git a/keystone/common/sql/migrate_repo/versions/091_migrate_data_to_local_user_and_password_tables.py b/keystone/common/sql/migrate_repo/versions/091_migrate_data_to_local_user_and_password_tables.py
deleted file mode 100644
index 7e98b99b7..000000000
--- a/keystone/common/sql/migrate_repo/versions/091_migrate_data_to_local_user_and_password_tables.py
+++ /dev/null
@@ -1,84 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import migrate
-import sqlalchemy as sql
-from sqlalchemy import func
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- user_table = sql.Table('user', meta, autoload=True)
- local_user_table = sql.Table('local_user', meta, autoload=True)
- password_table = sql.Table('password', meta, autoload=True)
-
- # migrate data to local_user table
- local_user_values = []
- for row in user_table.select().execute():
- # Skip rows that already exist in `local_user`; this could
- # happen if we run into a partially migrated table due to
- # bug #1549705.
- filter_by = local_user_table.c.user_id == row['id']
- user_count = sql.select([func.count()]).select_from(
- local_user_table).where(filter_by).execute().fetchone()[0]
- if user_count == 0:
- local_user_values.append({'user_id': row['id'],
- 'domain_id': row['domain_id'],
- 'name': row['name']})
- if local_user_values:
- local_user_table.insert().values(local_user_values).execute()
-
- # migrate data to password table
- sel = (
- sql.select([user_table, local_user_table], use_labels=True)
- .select_from(user_table.join(local_user_table, user_table.c.id ==
- local_user_table.c.user_id))
- )
- user_rows = sel.execute()
- password_values = []
- for row in user_rows:
- if row['user_password']:
- password_values.append({'local_user_id': row['local_user_id'],
- 'password': row['user_password']})
- if password_values:
- password_table.insert().values(password_values).execute()
-
- # NOTE(gnuoy): the `domain_id` unique constraint is not guaranteed to
- # be a fixed name, such as 'ixu_user_name_domain_id', so we need to
- # search for the correct constraint that covers user_table.c.domain_id
- # and user_table.c.name, and drop that constraint. (Fix based on
- # morganfainberg's fix in 088_domain_specific_roles.py)
- to_drop = None
- if migrate_engine.name == 'mysql':
- for index in user_table.indexes:
- if (index.unique and len(index.columns) == 2 and
- 'domain_id' in index.columns and 'name' in index.columns):
- to_drop = index
- break
- else:
- for index in user_table.constraints:
- if (len(index.columns) == 2 and 'domain_id' in index.columns and
- 'name' in index.columns):
- to_drop = index
- break
- # remove domain_id and name unique constraint
- if migrate_engine.name != 'sqlite' and to_drop is not None:
- migrate.UniqueConstraint(user_table.c.domain_id,
- user_table.c.name,
- name=to_drop.name).drop()
-
- # drop user columns
- user_table.c.domain_id.drop()
- user_table.c.name.drop()
- user_table.c.password.drop()
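A small, self-contained sketch of the count-before-insert guard used above, which lets a rerun after a partial failure (bug #1549705) copy only the rows that are still missing. It uses hypothetical, cut-down user/local_user tables and plain SQLAlchemy rather than the migrate_repo helpers.

import sqlalchemy as sa

engine = sa.create_engine('sqlite://')
meta = sa.MetaData()
user = sa.Table('user', meta,
                sa.Column('id', sa.String(64), primary_key=True))
local_user = sa.Table('local_user', meta,
                      sa.Column('user_id', sa.String(64), primary_key=True))
meta.create_all(engine)

with engine.begin() as conn:
    conn.execute(user.insert(), [{'id': 'abc'}, {'id': 'def'}])
    # Pretend 'abc' was already copied by an earlier, interrupted run.
    conn.execute(local_user.insert(), [{'user_id': 'abc'}])

    for row in conn.execute(sa.select(user)).all():
        already_migrated = conn.execute(
            sa.select(sa.func.count())
            .select_from(local_user)
            .where(local_user.c.user_id == row.id)
        ).scalar()
        if not already_migrated:
            # Only rows missing from local_user are copied, so re-running
            # the migration after a partial failure does not duplicate data.
            conn.execute(local_user.insert().values(user_id=row.id))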
diff --git a/keystone/common/sql/migrate_repo/versions/092_make_implied_roles_fks_cascaded.py b/keystone/common/sql/migrate_repo/versions/092_make_implied_roles_fks_cascaded.py
deleted file mode 100644
index 5e8418999..000000000
--- a/keystone/common/sql/migrate_repo/versions/092_make_implied_roles_fks_cascaded.py
+++ /dev/null
@@ -1,46 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-import migrate
-import sqlalchemy as sql
-
-
-ROLE_TABLE = 'role'
-IMPLIED_ROLE_TABLE = 'implied_role'
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- role = sql.Table(ROLE_TABLE, meta, autoload=True)
- implied_role = sql.Table(IMPLIED_ROLE_TABLE, meta, autoload=True)
-
- fkeys = [
- {'columns': [implied_role.c.prior_role_id],
- 'references': [role.c.id]},
- {'columns': [implied_role.c.implied_role_id],
- 'references': [role.c.id]},
- ]
-
- # NOTE(stevemar): We need to divide these into two separate loops; otherwise
- # they may clobber each other and we end up with only one foreign key.
- for fkey in fkeys:
- migrate.ForeignKeyConstraint(columns=fkey['columns'],
- refcolumns=fkey['references'],
- name=fkey.get('name')).drop()
- for fkey in fkeys:
- migrate.ForeignKeyConstraint(columns=fkey['columns'],
- refcolumns=fkey['references'],
- name=fkey.get('name'),
- ondelete="CASCADE").create()
diff --git a/keystone/common/sql/migrate_repo/versions/093_migrate_domains_to_projects.py b/keystone/common/sql/migrate_repo/versions/093_migrate_domains_to_projects.py
deleted file mode 100644
index e0d6a4ee5..000000000
--- a/keystone/common/sql/migrate_repo/versions/093_migrate_domains_to_projects.py
+++ /dev/null
@@ -1,125 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import json
-
-import sqlalchemy as sql
-
-from keystone.common.sql import upgrades
-
-
-_PROJECT_TABLE_NAME = 'project'
-_DOMAIN_TABLE_NAME = 'domain'
-_PARENT_ID_COLUMN_NAME = 'parent_id'
-_DOMAIN_ID_COLUMN_NAME = 'domain_id'
-
-# Above the driver level, the domain_id of a project acting as a domain is
-# None. However, in order to enable sql integrity constraints to still operate
-# on this column, we create a special "root of all domains" row, with an ID of
-# NULL_DOMAIN_ID, which all projects acting as a domain reference in their
-# domain_id attribute. Neither this special row nor NULL_DOMAIN_ID is ever
-# exposed outside of the sql driver layer.
-NULL_DOMAIN_ID = '<<keystone.domain.root>>'
-
-
-def list_existing_project_constraints(project_table, domain_table):
- constraints = [{'table': project_table,
- 'fk_column': _PARENT_ID_COLUMN_NAME,
- 'ref_column': project_table.c.id},
- {'table': project_table,
- 'fk_column': _DOMAIN_ID_COLUMN_NAME,
- 'ref_column': domain_table.c.id}]
-
- return constraints
-
-
-def list_new_project_constraints(project_table):
- constraints = [{'table': project_table,
- 'fk_column': _PARENT_ID_COLUMN_NAME,
- 'ref_column': project_table.c.id},
- {'table': project_table,
- 'fk_column': _DOMAIN_ID_COLUMN_NAME,
- 'ref_column': project_table.c.id}]
-
- return constraints
-
-
-def upgrade(migrate_engine):
-
- def _project_from_domain(domain):
- # Creates a project dict with is_domain=True from the provided
- # domain.
-
- description = None
- extra = {}
- if domain.extra is not None:
- # 'description' property is an extra attribute in domains but a
- # first class attribute in projects
- extra = json.loads(domain.extra)
- description = extra.pop('description', None)
-
- return {
- 'id': domain.id,
- 'name': domain.name,
- 'enabled': domain.enabled,
- 'description': description,
- 'domain_id': NULL_DOMAIN_ID,
- 'is_domain': True,
- 'parent_id': None,
- 'extra': json.dumps(extra)
- }
-
- meta = sql.MetaData()
- meta.bind = migrate_engine
- session = sql.orm.sessionmaker(bind=migrate_engine)()
-
- project_table = sql.Table(_PROJECT_TABLE_NAME, meta, autoload=True)
- domain_table = sql.Table(_DOMAIN_TABLE_NAME, meta, autoload=True)
-
- # NOTE(htruta): Remove the parent_id constraint during the migration
- # because, for every root project inside a domain, we will set its
- # parent_id to the project acting as its domain. We re-enable the
- # constraint at the end of this method. We also remove the domain_id
- # constraint, which is recreated at the end as a FK to project.id.
- upgrades.remove_constraints(
- list_existing_project_constraints(project_table, domain_table))
-
- # For each domain, create a project acting as a domain. We ignore the
- # "root of all domains" row, since we already have one of these in the
- # project table.
- domains = list(domain_table.select().execute())
- for domain in domains:
- if domain.id == NULL_DOMAIN_ID:
- continue
- is_domain_project = _project_from_domain(domain)
- new_entry = project_table.insert().values(**is_domain_project)
- session.execute(new_entry)
- session.commit()
-
- # For each project that has no parent (i.e. a top level project), update
- # its parent_id to point at the project acting as its domain. We ignore
- # the "root of all domains" row, since its parent_id must always be None.
- projects = list(project_table.select().execute())
- for project in projects:
- if (project.parent_id is not None or project.is_domain or
- project.id == NULL_DOMAIN_ID):
- continue
- values = {'parent_id': project.domain_id}
- update = project_table.update().where(
- project_table.c.id == project.id).values(values)
- session.execute(update)
- session.commit()
-
- upgrades.add_constraints(
- list_new_project_constraints(project_table))
-
- session.close()
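The migration above temporarily drops the project table's foreign keys, rewrites the data, and then re-adds the constraints. A minimal sketch of that drop/recreate pattern expressed with Alembic operations (the constraint name is taken from the new initial version further down; the data-migration step itself is elided):

    from alembic import op

    def upgrade():
        # Drop the FK so rows can be re-pointed while the data migration runs.
        op.drop_constraint(
            'project_parent_id_fkey', 'project', type_='foreignkey')

        # ... data migration: repoint parent_id of top-level projects ...

        # Recreate the FK once the data is consistent again.
        op.create_foreign_key(
            'project_parent_id_fkey', 'project', 'project',
            ['parent_id'], ['id'],
        )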
diff --git a/keystone/common/sql/migrate_repo/versions/094_add_federated_user_table.py b/keystone/common/sql/migrate_repo/versions/094_add_federated_user_table.py
deleted file mode 100644
index 8bc64ec1f..000000000
--- a/keystone/common/sql/migrate_repo/versions/094_add_federated_user_table.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import migrate
-import sqlalchemy as sql
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- user_table = sql.Table('user', meta, autoload=True)
- idp_table = sql.Table('identity_provider', meta, autoload=True)
- protocol_table = sql.Table('federation_protocol', meta, autoload=True)
-
- federated_table = sql.Table(
- 'federated_user',
- meta,
- sql.Column('id', sql.Integer, primary_key=True, nullable=False),
- sql.Column('user_id', sql.String(64),
- sql.ForeignKey(user_table.c.id, ondelete='CASCADE'),
- nullable=False),
- sql.Column('idp_id', sql.String(64),
- sql.ForeignKey(idp_table.c.id, ondelete='CASCADE'),
- nullable=False),
- sql.Column('protocol_id', sql.String(64), nullable=False),
- sql.Column('unique_id', sql.String(255), nullable=False),
- sql.Column('display_name', sql.String(255), nullable=True),
- sql.UniqueConstraint('idp_id', 'protocol_id', 'unique_id'),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
- federated_table.create(migrate_engine, checkfirst=True)
-
- migrate.ForeignKeyConstraint(
- columns=[federated_table.c.protocol_id, federated_table.c.idp_id],
- refcolumns=[protocol_table.c.id, protocol_table.c.idp_id]).create()
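The deleted migration adds the composite foreign key (protocol_id, idp_id) → federation_protocol(id, idp_id) after table creation, since sqlalchemy-migrate could not declare it inline. A minimal sketch of the same constraint declared inline with plain SQLAlchemy (column names taken from the migration above; the MetaData setup is illustrative, and federation_protocol would need to be present in the same MetaData before emitting DDL):

    import sqlalchemy as sa

    metadata = sa.MetaData()

    federated_user = sa.Table(
        'federated_user',
        metadata,
        sa.Column('id', sa.Integer, primary_key=True),
        sa.Column('user_id', sa.String(64), nullable=False),
        sa.Column('idp_id', sa.String(64), nullable=False),
        sa.Column('protocol_id', sa.String(64), nullable=False),
        sa.Column('unique_id', sa.String(255), nullable=False),
        sa.Column('display_name', sa.String(255), nullable=True),
        # Composite FK: both columns must match an (id, idp_id) pair in
        # federation_protocol, mirroring the constraint created above.
        sa.ForeignKeyConstraint(
            ['protocol_id', 'idp_id'],
            ['federation_protocol.id', 'federation_protocol.idp_id'],
        ),
        sa.UniqueConstraint('idp_id', 'protocol_id', 'unique_id'),
    )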
diff --git a/keystone/common/sql/migrate_repo/versions/095_add_integer_pkey_to_revocation_event_table.py b/keystone/common/sql/migrate_repo/versions/095_add_integer_pkey_to_revocation_event_table.py
deleted file mode 100644
index 7a75f7b19..000000000
--- a/keystone/common/sql/migrate_repo/versions/095_add_integer_pkey_to_revocation_event_table.py
+++ /dev/null
@@ -1,62 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sql
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- # You can specify primary keys when creating tables; however, adding
- # auto-increment integer primary keys to existing tables is not
- # supported in a cross-engine compatible way. Thus, the approach is to:
- # (1) create a new revocation_event table with an int pkey,
- # (2) migrate data from the old table to the new table,
- # (3) delete the old revocation_event table, and
- # (4) rename the new table to revocation_event.
- revocation_table = sql.Table('revocation_event', meta, autoload=True)
-
- revocation_table_new = sql.Table(
- 'revocation_event_new',
- meta,
- sql.Column('id', sql.Integer, primary_key=True),
- sql.Column('domain_id', sql.String(64)),
- sql.Column('project_id', sql.String(64)),
- sql.Column('user_id', sql.String(64)),
- sql.Column('role_id', sql.String(64)),
- sql.Column('trust_id', sql.String(64)),
- sql.Column('consumer_id', sql.String(64)),
- sql.Column('access_token_id', sql.String(64)),
- sql.Column('issued_before', sql.DateTime(), nullable=False),
- sql.Column('expires_at', sql.DateTime()),
- sql.Column('revoked_at', sql.DateTime(), index=True, nullable=False),
- sql.Column('audit_id', sql.String(32), nullable=True),
- sql.Column('audit_chain_id', sql.String(32), nullable=True))
- revocation_table_new.create(migrate_engine, checkfirst=True)
-
- revocation_table_new.insert().from_select(['domain_id',
- 'project_id',
- 'user_id',
- 'role_id',
- 'trust_id',
- 'consumer_id',
- 'access_token_id',
- 'issued_before',
- 'expires_at',
- 'revoked_at',
- 'audit_id',
- 'audit_chain_id'],
- revocation_table.select())
-
- revocation_table.drop()
- revocation_table_new.rename('revocation_event')
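The copy/migrate/rename sequence described in the comment above is the usual workaround when an auto-increment primary key cannot be added in place. A minimal sketch of the same four steps using Alembic operations (the table layout is abbreviated; the full column list is the one in the deleted migration):

    from alembic import op
    import sqlalchemy as sa

    def upgrade():
        # (1) create the replacement table with the integer primary key
        op.create_table(
            'revocation_event_new',
            sa.Column('id', sa.Integer, primary_key=True),
            sa.Column('user_id', sa.String(64)),
            sa.Column('issued_before', sa.DateTime(), nullable=False),
            # ... remaining columns as in the original table ...
        )
        # (2) copy the existing rows; the new id is generated by the engine
        op.execute(
            'INSERT INTO revocation_event_new (user_id, issued_before) '
            'SELECT user_id, issued_before FROM revocation_event'
        )
        # (3) drop the old table and (4) rename the replacement into place
        op.drop_table('revocation_event')
        op.rename_table('revocation_event_new', 'revocation_event')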
diff --git a/keystone/common/sql/migrate_repo/versions/096_drop_role_name_constraint.py b/keystone/common/sql/migrate_repo/versions/096_drop_role_name_constraint.py
deleted file mode 100644
index 0156de217..000000000
--- a/keystone/common/sql/migrate_repo/versions/096_drop_role_name_constraint.py
+++ /dev/null
@@ -1,50 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import migrate
-import sqlalchemy as sql
-
-_ROLE_TABLE_NAME = 'role'
-_ROLE_NAME_COLUMN_NAME = 'name'
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- role_table = sql.Table(_ROLE_TABLE_NAME, meta, autoload=True)
-
- # NOTE(morganfainberg): the `role_name` unique constraint is not
- # guaranteed to be named 'ixu_role_name', so we need to search for the
- # correct constraint that only affects role_table.c.name and drop
- # that constraint.
- #
- # This is an idempotent change that reflects the fix to migration
- # 88 if the role_name unique constraint was not named consistently and
- # someone manually fixed the migrations / db without dropping the
- # old constraint.
- to_drop = None
- if migrate_engine.name == 'mysql':
- for c in role_table.indexes:
- if (c.unique and len(c.columns) == 1 and
- _ROLE_NAME_COLUMN_NAME in c.columns):
- to_drop = c
- break
- else:
- for c in role_table.constraints:
- if len(c.columns) == 1 and _ROLE_NAME_COLUMN_NAME in c.columns:
- to_drop = c
- break
-
- if to_drop is not None:
- migrate.UniqueConstraint(role_table.c.name,
- name=to_drop.name).drop()
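The engine-specific search above (and the user-table variants later in this diff) exists because MySQL exposes the unique constraint as a unique index, while other backends report it as a table-level constraint. A roughly equivalent lookup inside an Alembic migration, using the runtime inspector (names here are illustrative, not taken from a specific keystone migration):

    from alembic import op
    import sqlalchemy as sa

    def upgrade():
        bind = op.get_bind()
        inspector = sa.inspect(bind)

        if bind.engine.name == 'mysql':
            # MySQL reports the unique constraint as a unique index.
            for index in inspector.get_indexes('role'):
                if index['unique'] and index['column_names'] == ['name']:
                    op.drop_index(index['name'], table_name='role')
                    break
        else:
            # Other backends report it as a unique constraint.
            for uc in inspector.get_unique_constraints('role'):
                if uc['column_names'] == ['name']:
                    op.drop_constraint(uc['name'], 'role', type_='unique')
                    break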
diff --git a/keystone/common/sql/migrate_repo/versions/097_drop_user_name_domainid_constraint.py b/keystone/common/sql/migrate_repo/versions/097_drop_user_name_domainid_constraint.py
deleted file mode 100644
index d99d6aa6a..000000000
--- a/keystone/common/sql/migrate_repo/versions/097_drop_user_name_domainid_constraint.py
+++ /dev/null
@@ -1,67 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import migrate
-import sqlalchemy as sql
-
-_USER_TABLE_NAME = 'user'
-_USER_NAME_COLUMN_NAME = 'name'
-_USER_DOMAINID_COLUMN_NAME = 'domain_id'
-_USER_PASSWORD_COLUMN_NAME = 'password' # nosec
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- user_table = sql.Table(_USER_TABLE_NAME, meta, autoload=True)
-
- # NOTE(gnuoy): the name & domain_id unique constraint is not guaranteed
- # to have a fixed name, such as 'ixu_user_name_domain_id', so we need
- # to search for the constraint that affects only user_table.c.name and
- # user_table.c.domain_id and drop that constraint. (Fix based on
- # morganfainberg's fix in 088_domain_specific_roles.py)
- #
- # This is an idempotent change that reflects the fix to migration
- # 91 if the user name & domain_id unique constraint was not named
- # consistently and someone manually fixed the migrations / db
- # without dropping the old constraint.
- to_drop = None
- if migrate_engine.name == 'mysql':
- for index in user_table.indexes:
- if (index.unique and len(index.columns) == 2 and
- _USER_DOMAINID_COLUMN_NAME in index.columns and
- _USER_NAME_COLUMN_NAME in index.columns):
- to_drop = index
- break
- else:
- for index in user_table.constraints:
- if (len(index.columns) == 2 and
- _USER_DOMAINID_COLUMN_NAME in index.columns and
- _USER_NAME_COLUMN_NAME in index.columns):
- to_drop = index
- break
-
- # remove domain_id and name unique constraint
- if to_drop is not None:
- migrate.UniqueConstraint(user_table.c.domain_id,
- user_table.c.name,
- name=to_drop.name).drop()
-
- # If migration 91 was aborted due to Bug #1572341 then columns may not
- # have been dropped.
- if _USER_DOMAINID_COLUMN_NAME in user_table.c:
- user_table.c.domain_id.drop()
- if _USER_NAME_COLUMN_NAME in user_table.c:
- user_table.c.name.drop()
- if _USER_PASSWORD_COLUMN_NAME in user_table.c:
- user_table.c.password.drop()
diff --git a/keystone/common/sql/migrate_repo/versions/098_placeholder.py b/keystone/common/sql/migrate_repo/versions/098_placeholder.py
deleted file mode 100644
index 295e0f45b..000000000
--- a/keystone/common/sql/migrate_repo/versions/098_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Mitaka backports. Do not use this number for new
-# Newton work. New Newton work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/migrate_repo/versions/099_placeholder.py b/keystone/common/sql/migrate_repo/versions/099_placeholder.py
deleted file mode 100644
index 295e0f45b..000000000
--- a/keystone/common/sql/migrate_repo/versions/099_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Mitaka backports. Do not use this number for new
-# Newton work. New Newton work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/migrate_repo/versions/100_placeholder.py b/keystone/common/sql/migrate_repo/versions/100_placeholder.py
deleted file mode 100644
index 295e0f45b..000000000
--- a/keystone/common/sql/migrate_repo/versions/100_placeholder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for Mitaka backports. Do not use this number for new
-# Newton work. New Newton work starts after all the placeholders.
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/keystone/common/sql/migrate_repo/versions/101_drop_role_name_constraint.py b/keystone/common/sql/migrate_repo/versions/101_drop_role_name_constraint.py
deleted file mode 100644
index ff90d441d..000000000
--- a/keystone/common/sql/migrate_repo/versions/101_drop_role_name_constraint.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import migrate
-import sqlalchemy as sql
-
-_ROLE_TABLE_NAME = 'role'
-_ROLE_NAME_COLUMN_NAME = 'name'
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- role_table = sql.Table(_ROLE_TABLE_NAME, meta, autoload=True)
-
- # NOTE(morganfainberg): the `role_name` unique constraint is not
- # guaranteed to be named 'ixu_role_name', so we need to search for the
- # correct constraint that only affects role_table.c.name and drop
- # that constraint.
- #
- # This is an idempotent change that reflects the fix to migration
- # 88 if the role_name unique constraint was not named consistently and
- # someone manually fixed the migrations / db without dropping the
- # old constraint.
- # This is a copy of migration 96 to catch any/all deployments that
- # are close to master. Migration 96 will be backported to
- # stable/mitaka.
- to_drop = None
- if migrate_engine.name == 'mysql':
- for c in role_table.indexes:
- if (c.unique and len(c.columns) == 1 and
- _ROLE_NAME_COLUMN_NAME in c.columns):
- to_drop = c
- break
- else:
- for c in role_table.constraints:
- if len(c.columns) == 1 and _ROLE_NAME_COLUMN_NAME in c.columns:
- to_drop = c
- break
-
- if to_drop is not None:
- migrate.UniqueConstraint(role_table.c.name,
- name=to_drop.name).drop()
diff --git a/keystone/common/sql/migrate_repo/versions/102_drop_domain_table.py b/keystone/common/sql/migrate_repo/versions/102_drop_domain_table.py
deleted file mode 100644
index 85eb8e104..000000000
--- a/keystone/common/sql/migrate_repo/versions/102_drop_domain_table.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sql
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- domain_table = sql.Table('domain', meta, autoload=True)
- domain_table.drop()
diff --git a/keystone/common/sql/migrate_repo/versions/103_add_nonlocal_user_table.py b/keystone/common/sql/migrate_repo/versions/103_add_nonlocal_user_table.py
deleted file mode 100644
index 2b5e6c296..000000000
--- a/keystone/common/sql/migrate_repo/versions/103_add_nonlocal_user_table.py
+++ /dev/null
@@ -1,32 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sql
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- user_table = sql.Table('user', meta, autoload=True)
-
- nonlocal_user_table = sql.Table(
- 'nonlocal_user',
- meta,
- sql.Column('domain_id', sql.String(64), primary_key=True),
- sql.Column('name', sql.String(255), primary_key=True),
- sql.Column('user_id', sql.String(64),
- sql.ForeignKey(user_table.c.id, ondelete='CASCADE'),
- nullable=False),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
- nonlocal_user_table.create(migrate_engine, checkfirst=True)
diff --git a/keystone/common/sql/migrate_repo/versions/104_drop_user_name_domainid_constraint.py b/keystone/common/sql/migrate_repo/versions/104_drop_user_name_domainid_constraint.py
deleted file mode 100644
index a8740c594..000000000
--- a/keystone/common/sql/migrate_repo/versions/104_drop_user_name_domainid_constraint.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import migrate
-import sqlalchemy as sql
-
-_USER_TABLE_NAME = 'user'
-_USER_NAME_COLUMN_NAME = 'name'
-_USER_DOMAINID_COLUMN_NAME = 'domain_id'
-_USER_PASSWORD_COLUMN_NAME = 'password' # nosec
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- user_table = sql.Table(_USER_TABLE_NAME, meta, autoload=True)
-
- # NOTE(gnuoy): the name & domain_id unique constraint is not guaranteed
- # to have a fixed name, such as 'ixu_user_name_domain_id', so we need
- # to search for the constraint that affects only user_table.c.name and
- # user_table.c.domain_id and drop that constraint. (Fix based on
- # morganfainberg's fix in 088_domain_specific_roles.py)
- #
- # This is an idempotent change that reflects the fix to migration
- # 91 if the user name & domain_id unique constraint was not named
- # consistently and someone manually fixed the migrations / db
- # without dropping the old constraint.
- # This is a copy of migration 97 to catch any/all deployments that
- # are close to master. Migration 97 will be backported to
- # stable/mitaka.
-
- to_drop = None
- if migrate_engine.name == 'mysql':
- for index in user_table.indexes:
- if (index.unique and len(index.columns) == 2 and
- _USER_DOMAINID_COLUMN_NAME in index.columns and
- _USER_NAME_COLUMN_NAME in index.columns):
- to_drop = index
- break
- else:
- for index in user_table.constraints:
- if (len(index.columns) == 2 and
- _USER_DOMAINID_COLUMN_NAME in index.columns and
- _USER_NAME_COLUMN_NAME in index.columns):
- to_drop = index
- break
-
- # remove domain_id and name unique constraint
- if to_drop is not None:
- migrate.UniqueConstraint(user_table.c.domain_id,
- user_table.c.name,
- name=to_drop.name).drop()
-
- # If migration 91 was aborted due to Bug #1572341 then columns may not
- # have been dropped.
- if _USER_DOMAINID_COLUMN_NAME in user_table.c:
- user_table.c.domain_id.drop()
- if _USER_NAME_COLUMN_NAME in user_table.c:
- user_table.c.name.drop()
- if _USER_PASSWORD_COLUMN_NAME in user_table.c:
- user_table.c.password.drop()
diff --git a/keystone/common/sql/migrate_repo/versions/105_add_password_date_columns.py b/keystone/common/sql/migrate_repo/versions/105_add_password_date_columns.py
deleted file mode 100644
index e12b82c28..000000000
--- a/keystone/common/sql/migrate_repo/versions/105_add_password_date_columns.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import datetime
-
-import sqlalchemy as sql
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- created_at = sql.Column('created_at', sql.DateTime(), nullable=True)
- expires_at = sql.Column('expires_at', sql.DateTime(), nullable=True)
- password_table = sql.Table('password', meta, autoload=True)
- password_table.create_column(created_at)
- password_table.create_column(expires_at)
-
- now = datetime.datetime.utcnow()
- stmt = password_table.update().values(created_at=now)
- stmt.execute()
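The deleted migration follows the common two-step pattern for new audit columns: add the nullable columns, then backfill a value for rows that already exist. A minimal sketch of the same pattern with Alembic, using a lightweight table construct so no model import is needed (the column list is abbreviated):

    import datetime

    from alembic import op
    import sqlalchemy as sa

    def upgrade():
        # Step 1: add the new nullable columns.
        op.add_column(
            'password', sa.Column('created_at', sa.DateTime(), nullable=True))
        op.add_column(
            'password', sa.Column('expires_at', sa.DateTime(), nullable=True))

        # Step 2: backfill created_at for existing rows.
        password = sa.table('password', sa.column('created_at', sa.DateTime()))
        op.execute(
            password.update().values(created_at=datetime.datetime.utcnow())
        )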
diff --git a/keystone/common/sql/migrate_repo/versions/106_allow_password_column_to_be_nullable.py b/keystone/common/sql/migrate_repo/versions/106_allow_password_column_to_be_nullable.py
deleted file mode 100644
index 24abe1539..000000000
--- a/keystone/common/sql/migrate_repo/versions/106_allow_password_column_to_be_nullable.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sql
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- password_table = sql.Table('password', meta, autoload=True)
- password_table.c.password.alter(nullable=True)
diff --git a/keystone/common/sql/migrate_repo/versions/107_add_user_date_columns.py b/keystone/common/sql/migrate_repo/versions/107_add_user_date_columns.py
deleted file mode 100644
index 64a1d1163..000000000
--- a/keystone/common/sql/migrate_repo/versions/107_add_user_date_columns.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import datetime
-
-import sqlalchemy as sql
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- created_at = sql.Column('created_at', sql.DateTime(), nullable=True)
- last_active_at = sql.Column('last_active_at', sql.Date(), nullable=True)
- user_table = sql.Table('user', meta, autoload=True)
- user_table.create_column(created_at)
- user_table.create_column(last_active_at)
-
- now = datetime.datetime.utcnow()
- stmt = user_table.update().values(created_at=now)
- stmt.execute()
diff --git a/keystone/common/sql/migrate_repo/versions/108_add_failed_auth_columns.py b/keystone/common/sql/migrate_repo/versions/108_add_failed_auth_columns.py
deleted file mode 100644
index a20487c4d..000000000
--- a/keystone/common/sql/migrate_repo/versions/108_add_failed_auth_columns.py
+++ /dev/null
@@ -1,26 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sql
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- failed_auth_count = sql.Column('failed_auth_count', sql.Integer,
- nullable=True)
- failed_auth_at = sql.Column('failed_auth_at', sql.DateTime(),
- nullable=True)
- local_user_table = sql.Table('local_user', meta, autoload=True)
- local_user_table.create_column(failed_auth_count)
- local_user_table.create_column(failed_auth_at)
diff --git a/keystone/common/sql/migrate_repo/versions/109_add_password_self_service_column.py b/keystone/common/sql/migrate_repo/versions/109_add_password_self_service_column.py
deleted file mode 100644
index 1c85ead25..000000000
--- a/keystone/common/sql/migrate_repo/versions/109_add_password_self_service_column.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sql
-
-
-def upgrade(migrate_engine):
- meta = sql.MetaData()
- meta.bind = migrate_engine
-
- self_service_column = sql.Column('self_service', sql.Boolean,
- nullable=False, server_default='0',
- default=False)
- password_table = sql.Table('password', meta, autoload=True)
- password_table.create_column(self_service_column)
diff --git a/keystone/common/sql/migrate_repo/versions/__init__.py b/keystone/common/sql/migrate_repo/versions/__init__.py
deleted file mode 100644
index e69de29bb..000000000
--- a/keystone/common/sql/migrate_repo/versions/__init__.py
+++ /dev/null
diff --git a/keystone/common/sql/migrations/README.rst b/keystone/common/sql/migrations/README.rst
new file mode 100644
index 000000000..33b7373b9
--- /dev/null
+++ b/keystone/common/sql/migrations/README.rst
@@ -0,0 +1,15 @@
+Migrations for the database
+===========================
+
+This directory contains migrations for the database. These are implemented
+using `alembic`__, a lightweight database migration tool designed for usage
+with `SQLAlchemy`__.
+
+The best place to start understanding Alembic is with its own `tutorial`__. You
+can also play around with the :command:`alembic` command::
+
+ $ alembic --help
+
+.. __: https://alembic.sqlalchemy.org/en/latest/
+.. __: https://www.sqlalchemy.org/
+.. __: https://alembic.sqlalchemy.org/en/latest/tutorial.html
diff --git a/keystone/common/sql/migrations/env.py b/keystone/common/sql/migrations/env.py
new file mode 100644
index 000000000..2d116f1bd
--- /dev/null
+++ b/keystone/common/sql/migrations/env.py
@@ -0,0 +1,80 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from logging.config import fileConfig
+
+from alembic import context
+from sqlalchemy import engine_from_config
+from sqlalchemy import pool
+
+# this is the Alembic Config object, which provides
+# access to the values within the .ini file in use.
+config = context.config
+
+# Interpret the config file for Python logging unless we're told not to.
+# This sets up the loggers defined in the config file.
+if config.attributes.get('configure_logger', True):
+ fileConfig(config.config_file_name)
+
+# add your model's MetaData object here
+# for 'autogenerate' support
+# from myapp import mymodel
+# target_metadata = mymodel.Base.metadata
+target_metadata = None
+
+
+def run_migrations_offline():
+ """Run migrations in 'offline' mode.
+
+ This configures the context with just a URL and not an Engine, though an
+ Engine is acceptable here as well. By skipping the Engine creation we
+ don't even need a DBAPI to be available.
+
+ Calls to context.execute() here emit the given string to the script output.
+ """
+ url = config.get_main_option("sqlalchemy.url")
+ context.configure(
+ url=url,
+ target_metadata=target_metadata,
+ literal_binds=True,
+ dialect_opts={"paramstyle": "named"},
+ )
+
+ with context.begin_transaction():
+ context.run_migrations()
+
+
+def run_migrations_online():
+ """Run migrations in 'online' mode.
+
+ In this scenario we need to create an Engine and associate a connection
+ with the context.
+ """
+ connectable = engine_from_config(
+ config.get_section(config.config_ini_section),
+ prefix="sqlalchemy.",
+ poolclass=pool.NullPool,
+ )
+
+ with connectable.connect() as connection:
+ context.configure(
+ connection=connection, target_metadata=target_metadata
+ )
+
+ with context.begin_transaction():
+ context.run_migrations()
+
+
+if context.is_offline_mode():
+ run_migrations_offline()
+else:
+ run_migrations_online()
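env.py is what the alembic tooling loads for both modes: run_migrations_online() when a real database connection is available, and run_migrations_offline() when only the SQL output is wanted. A minimal sketch of driving those same entry points from Python through the command API (the path to alembic.ini is an assumption for illustration):

    from alembic import command
    from alembic.config import Config

    # Path is illustrative; point this at the alembic.ini shipped with the
    # project being migrated.
    cfg = Config('keystone/common/sql/alembic.ini')

    # Online mode: connects and applies every revision up to head.
    command.upgrade(cfg, 'head')

    # Offline mode: emits the SQL that would be run, without connecting.
    command.upgrade(cfg, 'head', sql=True)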
diff --git a/keystone/common/sql/expand_repo/versions/035_expand_add_system_column_to_application_credential_table.py b/keystone/common/sql/migrations/script.py.mako
index 7f389508a..a9957ef6e 100644
--- a/keystone/common/sql/expand_repo/versions/035_expand_add_system_column_to_application_credential_table.py
+++ b/keystone/common/sql/migrations/script.py.mako
@@ -2,7 +2,7 @@
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
@@ -10,16 +10,23 @@
# License for the specific language governing permissions and limitations
# under the License.
-import sqlalchemy as sql
+"""${message}
+Revision ID: ${up_revision}
+Revises: ${down_revision | comma,n}
+Create Date: ${create_date}
+"""
-def upgrade(migrate_engine):
+from alembic import op
+import sqlalchemy as sa
+${imports if imports else ""}
- meta = sql.MetaData()
- meta.bind = migrate_engine
+# revision identifiers, used by Alembic.
+revision = ${repr(up_revision)}
+down_revision = ${repr(down_revision)}
+branch_labels = ${repr(branch_labels)}
+depends_on = ${repr(depends_on)}
- system = sql.Column('system', sql.String(64), nullable=True)
- application_credential_table = sql.Table(
- 'application_credential', meta, autoload=True
- )
- application_credential_table.create_column(system)
+
+def upgrade():
+ ${upgrades if upgrades else "pass"}
diff --git a/keystone/common/sql/migrations/versions/27e647c0fad4_initial_version.py b/keystone/common/sql/migrations/versions/27e647c0fad4_initial_version.py
new file mode 100644
index 000000000..eec97c573
--- /dev/null
+++ b/keystone/common/sql/migrations/versions/27e647c0fad4_initial_version.py
@@ -0,0 +1,1106 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Initial version
+
+Revision ID: 27e647c0fad4
+Revises:
+Create Date: 2021-12-23 11:13:26.305412
+"""
+
+import datetime
+import textwrap
+
+from alembic import op
+from oslo_log import log
+import sqlalchemy as sql
+
+from keystone.assignment.backends import sql as assignment_sql
+from keystone.common import sql as ks_sql
+import keystone.conf
+from keystone.identity.mapping_backends import mapping as mapping_backend
+
+# revision identifiers, used by Alembic.
+revision = '27e647c0fad4'
+down_revision = None
+depends_on = None
+
+CONF = keystone.conf.CONF
+LOG = log.getLogger(__name__)
+
+NULL_DOMAIN_ID = '<<keystone.domain.root>>'
+
+# FIXME(stephenfin): Remove this as soon as we're done reworking the
+# migrations. Until then, this is necessary to allow us to use the native
+# alembic tooling (which won't register opts). Alternatively, maybe
+# the server default *shouldn't* rely on a (changeable) config option value?
+try:
+ service_provider_relay_state_prefix_default = CONF.saml.relay_state_prefix
+except Exception:
+ service_provider_relay_state_prefix_default = 'ss:mem:'
+
+
+def upgrade():
+ bind = op.get_bind()
+
+ if bind.engine.name == 'mysql':
+ # Set default DB charset to UTF8.
+ op.execute(
+ 'ALTER DATABASE %s DEFAULT CHARACTER SET utf8'
+ % bind.engine.url.database
+ )
+
+ op.create_table(
+ 'application_credential',
+ sql.Column(
+ 'internal_id', sql.Integer, primary_key=True, nullable=False
+ ),
+ sql.Column('id', sql.String(length=64), nullable=False),
+ sql.Column('name', sql.String(length=255), nullable=False),
+ sql.Column('secret_hash', sql.String(length=255), nullable=False),
+ sql.Column('description', sql.Text),
+ sql.Column('user_id', sql.String(length=64), nullable=False),
+ sql.Column('project_id', sql.String(64), nullable=True),
+ sql.Column('expires_at', ks_sql.DateTimeInt()),
+ sql.Column('system', sql.String(64), nullable=True),
+ sql.Column('unrestricted', sql.Boolean),
+ sql.UniqueConstraint(
+ 'user_id', 'name', name='duplicate_app_cred_constraint'
+ ),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ op.create_table(
+ 'assignment',
+ sql.Column(
+ 'type',
+ sql.Enum(
+ assignment_sql.AssignmentType.USER_PROJECT,
+ assignment_sql.AssignmentType.GROUP_PROJECT,
+ assignment_sql.AssignmentType.USER_DOMAIN,
+ assignment_sql.AssignmentType.GROUP_DOMAIN,
+ name='type',
+ ),
+ nullable=False,
+ ),
+ sql.Column('actor_id', sql.String(64), nullable=False),
+ sql.Column('target_id', sql.String(64), nullable=False),
+ sql.Column('role_id', sql.String(64), nullable=False),
+ sql.Column('inherited', sql.Boolean, default=False, nullable=False),
+ sql.PrimaryKeyConstraint(
+ 'type',
+ 'actor_id',
+ 'target_id',
+ 'role_id',
+ 'inherited',
+ ),
+ sql.Index('ix_actor_id', 'actor_id'),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ op.create_table(
+ 'access_rule',
+ sql.Column('id', sql.Integer, primary_key=True, nullable=False),
+ sql.Column('service', sql.String(64)),
+ sql.Column('path', sql.String(128)),
+ sql.Column('method', sql.String(16)),
+ sql.Column('external_id', sql.String(64)),
+ sql.Column('user_id', sql.String(64)),
+ sql.UniqueConstraint(
+ 'external_id',
+ name='access_rule_external_id_key',
+ ),
+ sql.UniqueConstraint(
+ 'user_id',
+ 'service',
+ 'path',
+ 'method',
+ name='duplicate_access_rule_for_user_constraint',
+ ),
+ sql.Index('user_id', 'user_id'),
+ sql.Index('external_id', 'external_id'),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ op.create_table(
+ 'config_register',
+ sql.Column('type', sql.String(64), primary_key=True),
+ sql.Column('domain_id', sql.String(64), nullable=False),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ op.create_table(
+ 'consumer',
+ sql.Column('id', sql.String(64), primary_key=True, nullable=False),
+ sql.Column('description', sql.String(64), nullable=True),
+ sql.Column('secret', sql.String(64), nullable=False),
+ sql.Column('extra', sql.Text(), nullable=False),
+ )
+
+ op.create_table(
+ 'credential',
+ sql.Column('id', sql.String(length=64), primary_key=True),
+ sql.Column('user_id', sql.String(length=64), nullable=False),
+ sql.Column('project_id', sql.String(length=64)),
+ sql.Column('type', sql.String(length=255), nullable=False),
+ sql.Column('extra', ks_sql.JsonBlob.impl),
+ sql.Column('key_hash', sql.String(64), nullable=False),
+ sql.Column(
+ 'encrypted_blob',
+ ks_sql.Text,
+ nullable=False,
+ ),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ op.create_table(
+ 'group',
+ sql.Column('id', sql.String(length=64), primary_key=True),
+ sql.Column('domain_id', sql.String(length=64), nullable=False),
+ sql.Column('name', sql.String(length=64), nullable=False),
+ sql.Column('description', sql.Text),
+ sql.Column('extra', ks_sql.JsonBlob.impl),
+ sql.UniqueConstraint(
+ 'domain_id',
+ 'name',
+ name='ixu_group_name_domain_id',
+ ),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ op.create_table(
+ 'id_mapping',
+ sql.Column('public_id', sql.String(64), primary_key=True),
+ sql.Column('domain_id', sql.String(64), nullable=False),
+ sql.Column('local_id', sql.String(255), nullable=False),
+ sql.Column(
+ 'entity_type',
+ sql.Enum(
+ mapping_backend.EntityType.USER,
+ mapping_backend.EntityType.GROUP,
+ name='entity_type',
+ ),
+ nullable=False,
+ ),
+ sql.UniqueConstraint(
+ 'domain_id',
+ 'local_id',
+ 'entity_type',
+ name='domain_id',
+ ),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ op.create_table(
+ 'identity_provider',
+ sql.Column('id', sql.String(64), primary_key=True),
+ sql.Column('enabled', sql.Boolean, nullable=False),
+ sql.Column('description', sql.Text(), nullable=True),
+ sql.Column('domain_id', sql.String(64), nullable=False),
+ sql.Column('authorization_ttl', sql.Integer, nullable=True),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ op.create_table(
+ 'idp_remote_ids',
+ sql.Column(
+ 'idp_id',
+ sql.String(64),
+ sql.ForeignKey('identity_provider.id', ondelete='CASCADE'),
+ ),
+ sql.Column('remote_id', sql.String(255), primary_key=True),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ op.create_table(
+ 'mapping',
+ sql.Column('id', sql.String(64), primary_key=True),
+ sql.Column('rules', sql.Text(), nullable=False),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ op.create_table(
+ 'policy',
+ sql.Column('id', sql.String(length=64), primary_key=True),
+ sql.Column('type', sql.String(length=255), nullable=False),
+ sql.Column('blob', ks_sql.JsonBlob, nullable=False),
+ sql.Column('extra', ks_sql.JsonBlob.impl),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ op.create_table(
+ 'policy_association',
+ sql.Column('id', sql.String(64), primary_key=True),
+ sql.Column('policy_id', sql.String(64), nullable=False),
+ sql.Column('endpoint_id', sql.String(64), nullable=True),
+ sql.Column('service_id', sql.String(64), nullable=True),
+ sql.Column('region_id', sql.String(64), nullable=True),
+ sql.UniqueConstraint('endpoint_id', 'service_id', 'region_id'),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ op.create_table(
+ 'project',
+ sql.Column('id', sql.String(length=64), primary_key=True),
+ sql.Column('name', sql.String(length=64), nullable=False),
+ sql.Column('extra', ks_sql.JsonBlob.impl),
+ sql.Column('description', sql.Text),
+ sql.Column('enabled', sql.Boolean),
+ sql.Column(
+ 'domain_id',
+ sql.String(length=64),
+ sql.ForeignKey(
+ 'project.id',
+ name='project_domain_id_fkey',
+ ),
+ nullable=False,
+ ),
+ sql.Column(
+ 'parent_id',
+ sql.String(64),
+ sql.ForeignKey(
+ 'project.id',
+ name='project_parent_id_fkey',
+ ),
+ nullable=True,
+ ),
+ sql.Column(
+ 'is_domain',
+ sql.Boolean,
+ nullable=False,
+ server_default='0',
+ default=False,
+ ),
+ sql.UniqueConstraint(
+ 'domain_id',
+ 'name',
+ name='ixu_project_name_domain_id',
+ ),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ op.create_table(
+ 'project_endpoint',
+ sql.Column(
+ 'endpoint_id', sql.String(64), primary_key=True, nullable=False
+ ),
+ sql.Column(
+ 'project_id', sql.String(64), primary_key=True, nullable=False
+ ),
+ )
+
+ op.create_table(
+ 'project_option',
+ sql.Column(
+ 'project_id',
+ sql.String(64),
+ sql.ForeignKey('project.id', ondelete='CASCADE'),
+ nullable=False,
+ primary_key=True,
+ ),
+ sql.Column(
+ 'option_id', sql.String(4), nullable=False, primary_key=True
+ ),
+ sql.Column('option_value', ks_sql.JsonBlob, nullable=True),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ # NOTE(lamt) To allow tag name to be case sensitive for MySQL, the 'name'
+ # column needs to use collation, which is incompatible with Postgresql.
+ # Using unicode to mirror nova's server tag:
+ # https://github.com/openstack/nova/blob/master/nova/db/sqlalchemy/models.py
+ op.create_table(
+ 'project_tag',
+ sql.Column(
+ 'project_id',
+ sql.String(64),
+ sql.ForeignKey('project.id', ondelete='CASCADE'),
+ nullable=False,
+ primary_key=True,
+ ),
+ sql.Column('name', sql.Unicode(255), nullable=False, primary_key=True),
+ sql.UniqueConstraint('project_id', 'name'),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ op.create_table(
+ 'region',
+ sql.Column('id', sql.String(255), primary_key=True),
+ sql.Column('description', sql.String(255), nullable=False),
+ sql.Column('parent_region_id', sql.String(255), nullable=True),
+ sql.Column('extra', sql.Text()),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ op.create_table(
+ 'registered_limit',
+ sql.Column('id', sql.String(length=64), nullable=False),
+ sql.Column('service_id', sql.String(255)),
+ sql.Column('region_id', sql.String(64), nullable=True),
+ sql.Column('resource_name', sql.String(255)),
+ sql.Column('default_limit', sql.Integer, nullable=False),
+ sql.Column('description', sql.Text),
+ sql.Column('internal_id', sql.Integer, primary_key=True),
+ # NOTE(stephenfin): Name chosen to preserve backwards compatibility
+ # with names used for primary key unique constraints
+ sql.UniqueConstraint('id', name='registered_limit_id_key'),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ op.create_table(
+ 'request_token',
+ sql.Column('id', sql.String(64), primary_key=True, nullable=False),
+ sql.Column('request_secret', sql.String(64), nullable=False),
+ sql.Column('verifier', sql.String(64), nullable=True),
+ sql.Column('authorizing_user_id', sql.String(64), nullable=True),
+ sql.Column('requested_project_id', sql.String(64), nullable=False),
+ sql.Column('role_ids', sql.Text(), nullable=True),
+ sql.Column(
+ 'consumer_id',
+ sql.String(64),
+ sql.ForeignKey('consumer.id'),
+ nullable=False,
+ index=True,
+ ),
+ sql.Column('expires_at', sql.String(64), nullable=True),
+ )
+
+ op.create_table(
+ 'revocation_event',
+ sql.Column('id', sql.Integer, primary_key=True),
+ sql.Column('domain_id', sql.String(64)),
+ sql.Column('project_id', sql.String(64)),
+ sql.Column('user_id', sql.String(64)),
+ sql.Column('role_id', sql.String(64)),
+ sql.Column('trust_id', sql.String(64)),
+ sql.Column('consumer_id', sql.String(64)),
+ sql.Column('access_token_id', sql.String(64)),
+ sql.Column('issued_before', sql.DateTime(), nullable=False),
+ sql.Column('expires_at', sql.DateTime()),
+ sql.Column('revoked_at', sql.DateTime(), nullable=False),
+ sql.Column('audit_id', sql.String(32), nullable=True),
+ sql.Column('audit_chain_id', sql.String(32), nullable=True),
+ # NOTE(stephenfin): The '_new' suffix here is due to migration 095,
+ # which changed the 'id' column from String(64) to Integer. It did this
+ # by creating a 'revocation_event_new' table and populating it with
+ # data from the 'revocation_event' table before deleting the
+ # 'revocation_event' table and renaming the 'revocation_event_new'
+ # table to 'revocation_event'. Because the 'revoked_at' column had
+ # 'index=True', sqlalchemy automatically generated the index name as
+ # 'ix_{table}_{column}'. However, when initially created, '{table}'
+ # was 'revocation_event_new' so the index got that name. We may wish to
+ # rename this eventually.
+ sql.Index('ix_revocation_event_new_revoked_at', 'revoked_at'),
+ sql.Index('ix_revocation_event_issued_before', 'issued_before'),
+ sql.Index(
+ 'ix_revocation_event_project_id_issued_before',
+ 'project_id',
+ 'issued_before',
+ ),
+ sql.Index(
+ 'ix_revocation_event_user_id_issued_before',
+ 'user_id',
+ 'issued_before',
+ ),
+ sql.Index(
+ 'ix_revocation_event_audit_id_issued_before',
+ 'audit_id',
+ 'issued_before',
+ ),
+ )
+
+ op.create_table(
+ 'role',
+ sql.Column('id', sql.String(length=64), primary_key=True),
+ sql.Column('name', sql.String(length=255), nullable=False),
+ sql.Column('extra', ks_sql.JsonBlob.impl),
+ sql.Column(
+ 'domain_id',
+ sql.String(64),
+ nullable=False,
+ server_default='<<null>>',
+ ),
+ sql.Column('description', sql.String(255), nullable=True),
+ sql.UniqueConstraint(
+ 'name',
+ 'domain_id',
+ name='ixu_role_name_domain_id',
+ ),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ op.create_table(
+ 'role_option',
+ sql.Column(
+ 'role_id',
+ sql.String(64),
+ sql.ForeignKey('role.id', ondelete='CASCADE'),
+ nullable=False,
+ primary_key=True,
+ ),
+ sql.Column(
+ 'option_id', sql.String(4), nullable=False, primary_key=True
+ ),
+ sql.Column('option_value', ks_sql.JsonBlob, nullable=True),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ op.create_table(
+ 'sensitive_config',
+ sql.Column('domain_id', sql.String(64), primary_key=True),
+ sql.Column('group', sql.String(255), primary_key=True),
+ sql.Column('option', sql.String(255), primary_key=True),
+ sql.Column('value', ks_sql.JsonBlob.impl, nullable=False),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ op.create_table(
+ 'service',
+ sql.Column('id', sql.String(length=64), primary_key=True),
+ sql.Column('type', sql.String(length=255)),
+ sql.Column(
+ 'enabled',
+ sql.Boolean,
+ nullable=False,
+ default=True,
+ server_default='1',
+ ),
+ sql.Column('extra', ks_sql.JsonBlob.impl),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ op.create_table(
+ 'service_provider',
+ sql.Column('auth_url', sql.String(256), nullable=False),
+ sql.Column('id', sql.String(64), primary_key=True),
+ sql.Column('enabled', sql.Boolean, nullable=False),
+ sql.Column('description', sql.Text(), nullable=True),
+ sql.Column('sp_url', sql.String(256), nullable=False),
+ sql.Column(
+ 'relay_state_prefix',
+ sql.String(256),
+ nullable=False,
+ server_default=service_provider_relay_state_prefix_default,
+ ),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ op.create_table(
+ 'system_assignment',
+ sql.Column('type', sql.String(64), nullable=False),
+ sql.Column('actor_id', sql.String(64), nullable=False),
+ sql.Column('target_id', sql.String(64), nullable=False),
+ sql.Column('role_id', sql.String(64), nullable=False),
+ sql.Column('inherited', sql.Boolean, default=False, nullable=False),
+ sql.PrimaryKeyConstraint(
+ 'type', 'actor_id', 'target_id', 'role_id', 'inherited'
+ ),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ op.create_table(
+ 'token',
+ sql.Column('id', sql.String(length=64), primary_key=True),
+ sql.Column('expires', sql.DateTime, default=None),
+ sql.Column('extra', ks_sql.JsonBlob.impl),
+ sql.Column('valid', sql.Boolean, default=True, nullable=False),
+ sql.Column('trust_id', sql.String(length=64)),
+ sql.Column('user_id', sql.String(length=64)),
+ sql.Index('ix_token_expires', 'expires'),
+ sql.Index('ix_token_expires_valid', 'expires', 'valid'),
+ sql.Index('ix_token_user_id', 'user_id'),
+ sql.Index('ix_token_trust_id', 'trust_id'),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ op.create_table(
+ 'trust',
+ sql.Column('id', sql.String(length=64), primary_key=True),
+ sql.Column('trustor_user_id', sql.String(length=64), nullable=False),
+ sql.Column('trustee_user_id', sql.String(length=64), nullable=False),
+ sql.Column('project_id', sql.String(length=64)),
+ sql.Column('impersonation', sql.Boolean, nullable=False),
+ sql.Column('deleted_at', sql.DateTime),
+ sql.Column('expires_at', sql.DateTime),
+ sql.Column('remaining_uses', sql.Integer, nullable=True),
+ sql.Column('extra', ks_sql.JsonBlob.impl),
+ sql.Column('expires_at_int', ks_sql.DateTimeInt()),
+ sql.UniqueConstraint(
+ 'trustor_user_id',
+ 'trustee_user_id',
+ 'project_id',
+ 'impersonation',
+ 'expires_at',
+ 'expires_at_int',
+ name='duplicate_trust_constraint_expanded',
+ ),
+ sql.Column(
+ 'redelegated_trust_id',
+ sql.String(64),
+ nullable=True,
+ ),
+ sql.Column(
+ 'redelegation_count',
+ sql.Integer,
+ nullable=True,
+ ),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ op.create_table(
+ 'trust_role',
+ sql.Column(
+ 'trust_id', sql.String(length=64), primary_key=True, nullable=False
+ ),
+ sql.Column(
+ 'role_id', sql.String(length=64), primary_key=True, nullable=False
+ ),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ op.create_table(
+ 'user',
+ sql.Column('id', sql.String(length=64), primary_key=True),
+ sql.Column('extra', ks_sql.JsonBlob.impl),
+ sql.Column('enabled', sql.Boolean),
+ sql.Column('default_project_id', sql.String(length=64)),
+ sql.Column('created_at', sql.DateTime(), nullable=True),
+ sql.Column('last_active_at', sql.Date(), nullable=True),
+ sql.Column('domain_id', sql.String(64), nullable=False),
+ sql.UniqueConstraint('id', 'domain_id', name='ixu_user_id_domain_id'),
+ sql.Index('ix_default_project_id', 'default_project_id'),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ op.create_table(
+ 'user_group_membership',
+ sql.Column(
+ 'user_id',
+ sql.String(length=64),
+ sql.ForeignKey(
+ 'user.id',
+ name='fk_user_group_membership_user_id',
+ ),
+ primary_key=True,
+ ),
+ sql.Column(
+ 'group_id',
+ sql.String(length=64),
+ sql.ForeignKey(
+ 'group.id',
+ name='fk_user_group_membership_group_id',
+ ),
+ primary_key=True,
+ ),
+ # NOTE(stevemar): The index was named 'group_id' in
+ # 050_fk_consistent_indexes.py and needs to be preserved
+ sql.Index('group_id', 'group_id'),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ op.create_table(
+ 'user_option',
+ sql.Column(
+ 'user_id',
+ sql.String(64),
+ sql.ForeignKey('user.id', ondelete='CASCADE'),
+ nullable=False,
+ primary_key=True,
+ ),
+ sql.Column(
+ 'option_id', sql.String(4), nullable=False, primary_key=True
+ ),
+ sql.Column('option_value', ks_sql.JsonBlob, nullable=True),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ op.create_table(
+ 'whitelisted_config',
+ sql.Column('domain_id', sql.String(64), primary_key=True),
+ sql.Column('group', sql.String(255), primary_key=True),
+ sql.Column('option', sql.String(255), primary_key=True),
+ sql.Column('value', ks_sql.JsonBlob.impl, nullable=False),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ op.create_table(
+ 'access_token',
+ sql.Column('id', sql.String(64), primary_key=True, nullable=False),
+ sql.Column('access_secret', sql.String(64), nullable=False),
+ sql.Column(
+ 'authorizing_user_id', sql.String(64), nullable=False, index=True
+ ),
+ sql.Column('project_id', sql.String(64), nullable=False),
+ sql.Column('role_ids', sql.Text(), nullable=False),
+ sql.Column(
+ 'consumer_id',
+ sql.String(64),
+ sql.ForeignKey('consumer.id'),
+ nullable=False,
+ index=True,
+ ),
+ sql.Column('expires_at', sql.String(64), nullable=True),
+ )
+
+ op.create_table(
+ 'application_credential_role',
+ sql.Column(
+ 'application_credential_id',
+ sql.Integer,
+ sql.ForeignKey(
+ 'application_credential.internal_id', ondelete='CASCADE'
+ ),
+ primary_key=True,
+ nullable=False,
+ ),
+ sql.Column(
+ 'role_id', sql.String(length=64), primary_key=True, nullable=False
+ ),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ op.create_table(
+ 'application_credential_access_rule',
+ sql.Column(
+ 'application_credential_id',
+ sql.Integer,
+ sql.ForeignKey(
+ 'application_credential.internal_id', ondelete='CASCADE'
+ ),
+ primary_key=True,
+ nullable=False,
+ ),
+ sql.Column(
+ 'access_rule_id',
+ sql.Integer,
+ sql.ForeignKey('access_rule.id', ondelete='CASCADE'),
+ primary_key=True,
+ nullable=False,
+ ),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ op.create_table(
+ 'endpoint',
+ sql.Column('id', sql.String(length=64), primary_key=True),
+ sql.Column('legacy_endpoint_id', sql.String(length=64)),
+ sql.Column('interface', sql.String(length=8), nullable=False),
+ sql.Column(
+ 'service_id',
+ sql.String(length=64),
+ sql.ForeignKey(
+ 'service.id',
+ name='endpoint_service_id_fkey',
+ ),
+ nullable=False,
+ ),
+ sql.Column('url', sql.Text, nullable=False),
+ sql.Column('extra', ks_sql.JsonBlob.impl),
+ sql.Column(
+ 'enabled',
+ sql.Boolean,
+ nullable=False,
+ default=True,
+ server_default='1',
+ ),
+ sql.Column(
+ 'region_id',
+ sql.String(length=255),
+ sql.ForeignKey(
+ 'region.id',
+ name='fk_endpoint_region_id',
+ ),
+ nullable=True,
+ ),
+ # NOTE(stevemar): The index was named 'service_id' in
+ # 050_fk_consistent_indexes.py and needs to be preserved
+ sql.Index('service_id', 'service_id'),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ op.create_table(
+ 'endpoint_group',
+ sql.Column('id', sql.String(64), primary_key=True),
+ sql.Column('name', sql.String(255), nullable=False),
+ sql.Column('description', sql.Text, nullable=True),
+ sql.Column('filters', sql.Text(), nullable=False),
+ )
+
+ op.create_table(
+ 'expiring_user_group_membership',
+ sql.Column(
+ 'user_id',
+ sql.String(64),
+ sql.ForeignKey('user.id'),
+ primary_key=True,
+ ),
+ sql.Column(
+ 'group_id',
+ sql.String(64),
+ sql.ForeignKey('group.id'),
+ primary_key=True,
+ ),
+ sql.Column(
+ 'idp_id',
+ sql.String(64),
+ sql.ForeignKey('identity_provider.id', ondelete='CASCADE'),
+ primary_key=True,
+ ),
+ sql.Column('last_verified', sql.DateTime(), nullable=False),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ op.create_table(
+ 'federation_protocol',
+ sql.Column('id', sql.String(64), primary_key=True),
+ sql.Column(
+ 'idp_id',
+ sql.String(64),
+ sql.ForeignKey('identity_provider.id', ondelete='CASCADE'),
+ primary_key=True,
+ ),
+ sql.Column('mapping_id', sql.String(64), nullable=False),
+ sql.Column('remote_id_attribute', sql.String(64)),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ op.create_table(
+ 'implied_role',
+ sql.Column(
+ 'prior_role_id',
+ sql.String(length=64),
+ sql.ForeignKey(
+ 'role.id',
+ name='implied_role_prior_role_id_fkey',
+ ondelete='CASCADE',
+ ),
+ primary_key=True,
+ ),
+ sql.Column(
+ 'implied_role_id',
+ sql.String(length=64),
+ sql.ForeignKey(
+ 'role.id',
+ name='implied_role_implied_role_id_fkey',
+ ondelete='CASCADE',
+ ),
+ primary_key=True,
+ ),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ op.create_table(
+ 'limit',
+ sql.Column('id', sql.String(length=64), nullable=False),
+ sql.Column('project_id', sql.String(64), nullable=True),
+ sql.Column('resource_limit', sql.Integer, nullable=False),
+ sql.Column('description', sql.Text),
+ sql.Column('internal_id', sql.Integer, primary_key=True),
+ # FIXME(stephenfin): This should have a foreign key constraint on
+ # registered_limit.id, but sqlalchemy-migrate clearly didn't handle
+ # creating a column with embedded FK info as was attempted in 048
+ sql.Column(
+ 'registered_limit_id',
+ sql.String(64),
+ ),
+ sql.Column('domain_id', sql.String(64), nullable=True),
+ # NOTE(stephenfin): Name chosen to preserve backwards compatibility
+ # with names used for primary key unique constraints
+ sql.UniqueConstraint('id', name='limit_id_key'),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ op.create_table(
+ 'local_user',
+ sql.Column('id', sql.Integer, primary_key=True, nullable=False),
+ sql.Column(
+ 'user_id',
+ sql.String(64),
+ nullable=False,
+ unique=True,
+ ),
+ sql.Column('domain_id', sql.String(64), nullable=False),
+ sql.Column('name', sql.String(255), nullable=False),
+ sql.Column('failed_auth_count', sql.Integer, nullable=True),
+ sql.Column('failed_auth_at', sql.DateTime(), nullable=True),
+ sql.ForeignKeyConstraint(
+ ['user_id', 'domain_id'],
+ ['user.id', 'user.domain_id'],
+ name='local_user_user_id_fkey',
+ onupdate='CASCADE',
+ ondelete='CASCADE',
+ ),
+ sql.UniqueConstraint('domain_id', 'name'),
+ )
+
+ op.create_table(
+ 'nonlocal_user',
+ sql.Column('domain_id', sql.String(64), primary_key=True),
+ sql.Column('name', sql.String(255), primary_key=True),
+ sql.Column(
+ 'user_id',
+ sql.String(64),
+ nullable=False,
+ ),
+ sql.ForeignKeyConstraint(
+ ['user_id', 'domain_id'],
+ ['user.id', 'user.domain_id'],
+ name='nonlocal_user_user_id_fkey',
+ onupdate='CASCADE',
+ ondelete='CASCADE',
+ ),
+ sql.UniqueConstraint('user_id', name='ixu_nonlocal_user_user_id'),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ op.create_table(
+ 'password',
+ sql.Column('id', sql.Integer, primary_key=True, nullable=False),
+ sql.Column(
+ 'local_user_id',
+ sql.Integer,
+ sql.ForeignKey('local_user.id', ondelete='CASCADE'),
+ nullable=False,
+ ),
+ sql.Column('expires_at', sql.DateTime(), nullable=True),
+ sql.Column(
+ 'self_service',
+ sql.Boolean,
+ nullable=False,
+ server_default='0',
+ default=False,
+ ),
+ # NOTE(notmorgan): To support the full range of scrypt and pbkdf2
+ # password hash lengths, this should be closer to varchar(1500) instead
+ # of varchar(255).
+ sql.Column('password_hash', sql.String(255), nullable=True),
+ sql.Column(
+ 'created_at_int',
+ ks_sql.DateTimeInt(),
+ nullable=False,
+ default=0,
+ server_default='0',
+ ),
+ sql.Column('expires_at_int', ks_sql.DateTimeInt(), nullable=True),
+ sql.Column(
+ 'created_at',
+ sql.DateTime(),
+ nullable=False,
+ default=datetime.datetime.utcnow,
+ ),
+ )
+
+ op.create_table(
+ 'project_endpoint_group',
+ sql.Column(
+ 'endpoint_group_id',
+ sql.String(64),
+ sql.ForeignKey('endpoint_group.id'),
+ nullable=False,
+ ),
+ sql.Column('project_id', sql.String(64), nullable=False),
+ sql.PrimaryKeyConstraint('endpoint_group_id', 'project_id'),
+ )
+
+ op.create_table(
+ 'federated_user',
+ sql.Column('id', sql.Integer, primary_key=True, nullable=False),
+ sql.Column(
+ 'user_id',
+ sql.String(64),
+ sql.ForeignKey('user.id', ondelete='CASCADE'),
+ nullable=False,
+ ),
+ sql.Column(
+ 'idp_id',
+ sql.String(64),
+ sql.ForeignKey('identity_provider.id', ondelete='CASCADE'),
+ nullable=False,
+ ),
+ sql.Column('protocol_id', sql.String(64), nullable=False),
+ sql.Column('unique_id', sql.String(255), nullable=False),
+ sql.Column('display_name', sql.String(255), nullable=True),
+ sql.ForeignKeyConstraint(
+ ['protocol_id', 'idp_id'],
+ ['federation_protocol.id', 'federation_protocol.idp_id'],
+ name='federated_user_protocol_id_fkey',
+ ondelete='CASCADE',
+ ),
+ sql.UniqueConstraint('idp_id', 'protocol_id', 'unique_id'),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ if bind.engine.name == 'sqlite':
+ # NOTE(stevemar): We need to keep this FK constraint due to 073, but
+ # only for sqlite; once we collapse 073 we can remove this constraint
+ with op.batch_alter_table('assignment') as batch_op:
+ batch_op.create_foreign_key(
+ 'fk_assignment_role_id',
+ 'role',
+ ['role_id'],
+ ['id'],
+ )
+
+ # TODO(stephenfin): Remove these procedures in a future contract migration
+
+ if bind.engine.name == 'postgresql':
+ error_message = (
+ 'Credential migration in progress. Cannot perform '
+ 'writes to credential table.'
+ )
+ credential_update_trigger = textwrap.dedent(f"""
+ CREATE OR REPLACE FUNCTION keystone_read_only_update()
+ RETURNS trigger AS
+ $BODY$
+ BEGIN
+ IF NEW.encrypted_blob IS NULL THEN
+ RAISE EXCEPTION '{error_message}';
+ END IF;
+ IF NEW.encrypted_blob IS NOT NULL AND OLD.blob IS NULL THEN
+ RAISE EXCEPTION '{error_message}';
+ END IF;
+ RETURN NEW;
+ END
+ $BODY$ LANGUAGE plpgsql;
+ """)
+ op.execute(credential_update_trigger)
+
+ error_message = (
+ 'Identity provider migration in progress. Cannot '
+ 'insert new rows into the identity_provider table at '
+ 'this time.'
+ )
+ identity_provider_insert_trigger = textwrap.dedent(f"""
+ CREATE OR REPLACE FUNCTION keystone_read_only_insert()
+ RETURNS trigger AS
+ $BODY$
+ BEGIN
+ RAISE EXCEPTION '{error_message}';
+ END
+ $BODY$ LANGUAGE plpgsql;
+ """)
+ op.execute(identity_provider_insert_trigger)
+
+ federated_user_insert_trigger = textwrap.dedent("""
+ CREATE OR REPLACE FUNCTION update_federated_user_domain_id()
+ RETURNS trigger AS
+ $BODY$
+ BEGIN
+ UPDATE "user" SET domain_id = (
+ SELECT domain_id FROM identity_provider WHERE id = NEW.idp_id)
+ WHERE id = NEW.user_id and domain_id IS NULL;
+ RETURN NULL;
+ END
+ $BODY$ LANGUAGE plpgsql;
+ """)
+ op.execute(federated_user_insert_trigger)
+
+ local_user_insert_trigger = textwrap.dedent("""
+ CREATE OR REPLACE FUNCTION update_user_domain_id()
+ RETURNS trigger AS
+ $BODY$
+ BEGIN
+ UPDATE "user" SET domain_id = NEW.domain_id
+ WHERE id = NEW.user_id;
+ RETURN NULL;
+ END
+ $BODY$ LANGUAGE plpgsql;
+ """)
+ op.execute(local_user_insert_trigger)
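These statements create only the trigger functions; the TODO above flags them for removal in a future contract migration. If one of them were still to be wired up to its table, the binding would be a CREATE TRIGGER along these lines (a sketch only; the trigger name is invented, not taken from this migration):

    op.execute(textwrap.dedent("""
        CREATE TRIGGER federated_user_insert_trigger
        AFTER INSERT ON federated_user
        FOR EACH ROW
        EXECUTE PROCEDURE update_federated_user_domain_id();
    """))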
+
+ # FIXME(stephenfin): Remove these indexes. They're left over from attempts
+ # to remove foreign key constraints in past migrations. Apparently
+ # sqlalchemy-migrate didn't do the job fully and left behind indexes.
+ if bind.engine.name == 'mysql':
+ op.create_index('region_id', 'registered_limit', ['region_id'])
+
+ # FIXME(stephenfin): This should be dropped when we add the FK
+ # constraint to this column
+ op.create_index(
+ 'registered_limit_id',
+ 'limit',
+ ['registered_limit_id'],
+ )
+
+ # FIXME(stephenfin): These are leftover from when we removed a FK
+ # constraint and should probably be dropped
+ op.create_index('domain_id', 'identity_provider', ['domain_id'])
+ op.create_index('domain_id', 'user', ['domain_id'])
+
+ # data migration
+
+ def _generate_root_domain_project():
+ # Generate a project that will act as a root for all domains, in order
+ # for us to be able to use a FK constraint on domain_id. Projects
+ # acting as a domain will not reference this as their parent_id, just
+ # as domain_id.
+ #
+ # This special project is filtered out by the driver, so is never
+ # visible to the manager or API.
+
+ project_ref = {
+ 'id': NULL_DOMAIN_ID,
+ 'name': NULL_DOMAIN_ID,
+ 'enabled': False,
+ 'description': '',
+ 'domain_id': NULL_DOMAIN_ID,
+ 'is_domain': True,
+ 'parent_id': None,
+ 'extra': '{}',
+ }
+ return project_ref
+
+ bind = op.get_bind()
+ meta = sql.MetaData()
+ project = sql.Table('project', meta, autoload_with=bind)
+
+ root_domain_project = _generate_root_domain_project()
+ op.execute(project.insert().values(**root_domain_project))
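Because the sentinel row exists only to satisfy the domain_id foreign key, the resource driver has to exclude it from every listing. A minimal sketch of that kind of filter, assuming a plain SQLAlchemy select against the reflected table (an illustration, not the driver's actual query):

    # Hypothetical: hide the root-domain sentinel from callers.
    stmt = project.select().where(project.c.id != NULL_DOMAIN_ID)
    visible_projects = bind.execute(stmt).fetchall()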
diff --git a/keystone/common/sql/migrations/versions/CONTRACT_HEAD b/keystone/common/sql/migrations/versions/CONTRACT_HEAD
new file mode 100644
index 000000000..8dc296b9c
--- /dev/null
+++ b/keystone/common/sql/migrations/versions/CONTRACT_HEAD
@@ -0,0 +1 @@
+e25ffa003242
diff --git a/keystone/common/sql/migrations/versions/EXPAND_HEAD b/keystone/common/sql/migrations/versions/EXPAND_HEAD
new file mode 100644
index 000000000..b2bd55d17
--- /dev/null
+++ b/keystone/common/sql/migrations/versions/EXPAND_HEAD
@@ -0,0 +1 @@
+29e87d24a316
diff --git a/keystone/common/sql/expand_repo/versions/001_expand_initial_null_migration.py b/keystone/common/sql/migrations/versions/yoga/contract/e25ffa003242_initial.py
index 1cd34e617..bb36d3b2b 100644
--- a/keystone/common/sql/expand_repo/versions/001_expand_initial_null_migration.py
+++ b/keystone/common/sql/migrations/versions/yoga/contract/e25ffa003242_initial.py
@@ -2,7 +2,7 @@
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
@@ -10,9 +10,18 @@
# License for the specific language governing permissions and limitations
# under the License.
-# A null initial migration to open this repo. Do not re-use replace this with
-# a real migration, add additional ones in subsequent version scripts.
+"""Initial no-op Yoga contract migration.
+Revision ID: e25ffa003242
+Revises: 27e647c0fad4
+Create Date: 2022-01-21 00:00:00.000000
+"""
-def upgrade(migrate_engine):
+# revision identifiers, used by Alembic.
+revision = 'e25ffa003242'
+down_revision = '27e647c0fad4'
+branch_labels = ('contract',)
+
+
+def upgrade():
pass
diff --git a/keystone/common/sql/data_migration_repo/versions/001_data_initial_null_migration.py b/keystone/common/sql/migrations/versions/yoga/expand/29e87d24a316_initial.py
index 1cd34e617..8fd4c5a84 100644
--- a/keystone/common/sql/data_migration_repo/versions/001_data_initial_null_migration.py
+++ b/keystone/common/sql/migrations/versions/yoga/expand/29e87d24a316_initial.py
@@ -2,7 +2,7 @@
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
@@ -10,9 +10,18 @@
# License for the specific language governing permissions and limitations
# under the License.
-# A null initial migration to open this repo. Do not re-use replace this with
-# a real migration, add additional ones in subsequent version scripts.
+"""Initial no-op Yoga expand migration.
+Revision ID: 29e87d24a316
+Revises: 27e647c0fad4
+Create Date: 2022-01-21 00:00:00.000000
+"""
-def upgrade(migrate_engine):
+# revision identifiers, used by Alembic.
+revision = '29e87d24a316'
+down_revision = '27e647c0fad4'
+branch_labels = ('expand',)
+
+
+def upgrade():
pass
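Later schema changes are added as new revision scripts under the matching branch directory, chaining down_revision to the previous head. A hedged sketch of what a follow-on expand revision could look like (the revision ID, table, and column below are invented for illustration):

    """Add an example column.

    Revision ID: 0123456789ab
    Revises: 29e87d24a316
    Create Date: 2022-02-01 00:00:00.000000
    """

    import sqlalchemy as sa
    from alembic import op

    # revision identifiers, used by Alembic.
    revision = '0123456789ab'
    down_revision = '29e87d24a316'
    branch_labels = None


    def upgrade():
        # Expand migrations must be purely additive.
        op.add_column(
            'example_table', sa.Column('example', sa.String(64), nullable=True)
        )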
diff --git a/keystone/common/sql/upgrades.py b/keystone/common/sql/upgrades.py
index 8bfe453cf..f463771f2 100644
--- a/keystone/common/sql/upgrades.py
+++ b/keystone/common/sql/upgrades.py
@@ -16,226 +16,154 @@
import os
-import migrate
-from migrate import exceptions
-from migrate.versioning import api as versioning_api
+from migrate import exceptions as migrate_exceptions
+from migrate.versioning import api as migrate_api
+from migrate.versioning import repository as migrate_repository
from oslo_db import exception as db_exception
-from oslo_db.sqlalchemy import migration
-import sqlalchemy
+import sqlalchemy as sa
from keystone.common import sql
from keystone import exception
from keystone.i18n import _
+INITIAL_VERSION = 72
+LATEST_VERSION = 79
+EXPAND_BRANCH = 'expand'
+DATA_MIGRATION_BRANCH = 'data_migration'
+CONTRACT_BRANCH = 'contract'
-USE_TRIGGERS = True
-
-LEGACY_REPO = 'migrate_repo'
-EXPAND_REPO = 'expand_repo'
-DATA_MIGRATION_REPO = 'data_migration_repo'
-CONTRACT_REPO = 'contract_repo'
-
-
-class Repository(object):
- def __init__(self, engine, repo_name):
- self.repo_name = repo_name
-
- self.repo_path = find_repo(self.repo_name)
- self.min_version = (
- get_init_version(abs_path=self.repo_path))
- self.schema_ = versioning_api.ControlledSchema.create(
- engine, self.repo_path, self.min_version)
- self.max_version = self.schema_.repository.version().version
-
- def upgrade(self, version=None, current_schema=None):
- version = version or self.max_version
- err = ''
- upgrade = True
- version = versioning_api._migrate_version(
- self.schema_, version, upgrade, err)
- validate_upgrade_order(self.repo_name, target_repo_version=version)
- if not current_schema:
- current_schema = self.schema_
- changeset = current_schema.changeset(version)
- for ver, change in changeset:
- self.schema_.runchange(ver, change, changeset.step)
-
- if self.schema_.version != version:
- raise Exception(
- 'Actual version (%s) of %s does not equal expected '
- 'version (%s)' % (
- self.schema_.version, self.repo_name, version))
-
- @property
- def version(self):
- with sql.session_for_read() as session:
- return migration.db_version(
- session.get_bind(), self.repo_path, self.min_version)
-
-
-# Different RDBMSs use different schemes for naming the Foreign Key
-# Constraints. SQLAlchemy does not yet attempt to determine the name
-# for the constraint, and instead attempts to deduce it from the column.
-# This fails on MySQL.
-def get_constraints_names(table, column_name):
- fkeys = [fk.name for fk in table.constraints
- if (isinstance(fk, sqlalchemy.ForeignKeyConstraint) and
- column_name in fk.columns)]
- return fkeys
-
-
-# remove_constraints and add_constraints both accept a list of dictionaries
-# that contain:
-# {'table': a sqlalchemy table. The constraint is added to dropped from
-# this table.
-# 'fk_column': the name of a column on the above table, The constraint
-# is added to or dropped from this column
-# 'ref_column':a sqlalchemy column object. This is the reference column
-# for the constraint.
-def remove_constraints(constraints):
- for constraint_def in constraints:
- constraint_names = get_constraints_names(constraint_def['table'],
- constraint_def['fk_column'])
- for constraint_name in constraint_names:
- migrate.ForeignKeyConstraint(
- columns=[getattr(constraint_def['table'].c,
- constraint_def['fk_column'])],
- refcolumns=[constraint_def['ref_column']],
- name=constraint_name).drop()
-
-
-def add_constraints(constraints):
- for constraint_def in constraints:
-
- if constraint_def['table'].kwargs.get('mysql_engine') == 'MyISAM':
- # Don't try to create constraint when using MyISAM because it's
- # not supported.
- continue
-
- ref_col = constraint_def['ref_column']
- ref_engine = ref_col.table.kwargs.get('mysql_engine')
- if ref_engine == 'MyISAM':
- # Don't try to create constraint when using MyISAM because it's
- # not supported.
- continue
-
- migrate.ForeignKeyConstraint(
- columns=[getattr(constraint_def['table'].c,
- constraint_def['fk_column'])],
- refcolumns=[constraint_def['ref_column']]).create()
-
-
-def find_repo(repo_name):
- """Return the absolute path to the named repository."""
- path = os.path.abspath(os.path.join(
- os.path.dirname(sql.__file__), repo_name))
-
- if not os.path.isdir(path):
- raise exception.MigrationNotProvided(sql.__name__, path)
-
- return path
-
-
-def _sync_common_repo(version):
- abs_path = find_repo(LEGACY_REPO)
- init_version = get_init_version()
- with sql.session_for_write() as session:
- engine = session.get_bind()
- _assert_not_schema_downgrade(version=version)
- migration.db_sync(engine, abs_path, version=version,
- init_version=init_version, sanity_check=False)
+def _get_migrate_repo_path(branch):
+ abs_path = os.path.abspath(
+ os.path.join(
+ os.path.dirname(sql.__file__),
+ 'legacy_migrations',
+ f'{branch}_repo',
+ )
+ )
-def _sync_repo(repo_name):
- abs_path = find_repo(repo_name)
- with sql.session_for_write() as session:
- engine = session.get_bind()
- # Register the repo with the version control API
- # If it already knows about the repo, it will throw
- # an exception that we can safely ignore
- try:
- migration.db_version_control(engine, abs_path)
- except (migration.exception.DBMigrationError,
- exceptions.DatabaseAlreadyControlledError): # nosec
- pass
- init_version = get_init_version(abs_path=abs_path)
- migration.db_sync(engine, abs_path,
- init_version=init_version, sanity_check=False)
+ if not os.path.isdir(abs_path):
+ raise exception.MigrationNotProvided(sql.__name__, abs_path)
+ return abs_path
-def get_init_version(abs_path=None):
- """Get the initial version of a migrate repository.
- :param abs_path: Absolute path to migrate repository.
- :return: initial version number or None, if DB is empty.
+def _find_migrate_repo(abs_path):
+ """Get the project's change script repository
+
+ :param abs_path: Absolute path to migrate repository
"""
- if abs_path is None:
- abs_path = find_repo(LEGACY_REPO)
+ if not os.path.exists(abs_path):
+ raise db_exception.DBMigrationError("Path %s not found" % abs_path)
+ return migrate_repository.Repository(abs_path)
- repo = migrate.versioning.repository.Repository(abs_path)
- # Sadly, Repository has a `latest` but not an `oldest`.
- # The value is a VerNum object which needs to be converted into an int.
- oldest = int(min(repo.versions.versions))
+def _migrate_db_version_control(engine, abs_path, version=None):
+ """Mark a database as under this repository's version control.
- if oldest < 1:
- return None
+ Once a database is under version control, schema changes should
+ only be done via change scripts in this repository.
- # The initial version is one less
- return oldest - 1
+ :param engine: SQLAlchemy engine instance for a given database
+ :param abs_path: Absolute path to migrate repository
+ :param version: Initial database version
+ """
+ repository = _find_migrate_repo(abs_path)
+ try:
+ migrate_api.version_control(engine, repository, version)
+ except migrate_exceptions.InvalidVersionError as ex:
+ raise db_exception.DBMigrationError("Invalid version : %s" % ex)
+ except migrate_exceptions.DatabaseAlreadyControlledError:
+ raise db_exception.DBMigrationError("Database is already controlled.")
-def _assert_not_schema_downgrade(version=None):
- if version is not None:
- try:
- current_ver = int(str(get_db_version()))
- if int(version) < current_ver:
- raise migration.exception.DBMigrationError(
- _("Unable to downgrade schema"))
- except exceptions.DatabaseNotControlledError: # nosec
- # NOTE(morganfainberg): The database is not controlled, this action
- # cannot be a downgrade.
- pass
+ return version
-def offline_sync_database_to_version(version=None):
- """Perform and off-line sync of the database.
-
- Migrate the database up to the latest version, doing the equivalent of
- the cycle of --expand, --migrate and --contract, for when an offline
- upgrade is being performed.
-
- If a version is specified then only migrate the database up to that
- version. Downgrading is not supported. If version is specified, then only
- the main database migration is carried out - and the expand, migration and
- contract phases will NOT be run.
+def _migrate_db_version(engine, abs_path, init_version):
+ """Show the current version of the repository.
+ :param engine: SQLAlchemy engine instance for a given database
+ :param abs_path: Absolute path to migrate repository
+ :param init_version: Initial database version
+ """
+ repository = _find_migrate_repo(abs_path)
+ try:
+ return migrate_api.db_version(engine, repository)
+ except migrate_exceptions.DatabaseNotControlledError:
+ pass
+
+ meta = sa.MetaData()
+ meta.reflect(bind=engine)
+ tables = meta.tables
+ if (
+ len(tables) == 0 or
+ 'alembic_version' in tables or
+ 'migrate_version' in tables
+ ):
+ _migrate_db_version_control(engine, abs_path, version=init_version)
+ return migrate_api.db_version(engine, repository)
+
+ msg = _(
+ "The database is not under version control, but has tables. "
+ "Please stamp the current version of the schema manually."
+ )
+ raise db_exception.DBMigrationError(msg)
+
+
+def _migrate_db_sync(engine, abs_path, version=None, init_version=0):
+ """Upgrade or downgrade a database.
+
+ Function runs the upgrade() or downgrade() functions in change scripts.
+
+ :param engine: SQLAlchemy engine instance for a given database
+ :param abs_path: Absolute path to migrate repository.
+ :param version: Database will upgrade/downgrade until this version.
+ If None - database will update to the latest available version.
+ :param init_version: Initial database version
"""
- global USE_TRIGGERS
- # This flags let's us bypass trigger setup & teardown for non-rolling
- # upgrades. We set this as a global variable immediately before handing off
- # to sqlalchemy-migrate, because we can't pass arguments directly to
- # migrations that depend on it. We could also register this as a CONF
- # option, but the idea here is that we aren't exposing a new API.
- USE_TRIGGERS = False
+ if version is not None:
+ try:
+ version = int(version)
+ except ValueError:
+ msg = _("version should be an integer")
+ raise db_exception.DBMigrationError(msg)
+
+ current_version = _migrate_db_version(engine, abs_path, init_version)
+ repository = _find_migrate_repo(abs_path)
- if version:
- _sync_common_repo(version)
+ if version is None or version > current_version:
+ try:
+ return migrate_api.upgrade(engine, repository, version)
+ except Exception as ex:
+ raise db_exception.DBMigrationError(ex)
else:
- expand_schema()
- migrate_data()
- contract_schema()
+ return migrate_api.downgrade(engine, repository, version)
-def get_db_version(repo=LEGACY_REPO):
+def get_db_version(branch=EXPAND_BRANCH):
+ abs_path = _get_migrate_repo_path(branch)
with sql.session_for_read() as session:
- repo = find_repo(repo)
- return migration.db_version(
- session.get_bind(), repo, get_init_version(repo))
+ return _migrate_db_version(
+ session.get_bind(),
+ abs_path,
+ INITIAL_VERSION,
+ )
+
+
+def _db_sync(branch):
+ abs_path = _get_migrate_repo_path(branch)
+ with sql.session_for_write() as session:
+ engine = session.get_bind()
+ _migrate_db_sync(
+ engine=engine,
+ abs_path=abs_path,
+ init_version=INITIAL_VERSION,
+ )
-def validate_upgrade_order(repo_name, target_repo_version=None):
+def _validate_upgrade_order(branch, target_repo_version=None):
"""Validate the state of the migration repositories.
This is run before allowing the db_sync command to execute. Ensure the
@@ -243,7 +171,7 @@ def validate_upgrade_order(repo_name, target_repo_version=None):
the upgrade process. I.e. expand's version is greater or equal to
migrate's, migrate's version is greater or equal to contract's.
- :param repo_name: The name of the repository that the user is trying to
+ :param branch: The name of the repository that the user is trying to
upgrade.
:param target_repo_version: The version to upgrade the repo. Otherwise, the
version will be upgraded to the latest version
@@ -251,33 +179,23 @@ def validate_upgrade_order(repo_name, target_repo_version=None):
"""
# Initialize a dict to have each key assigned a repo with their value being
# the repo that comes before.
- db_sync_order = {DATA_MIGRATION_REPO: EXPAND_REPO,
- CONTRACT_REPO: DATA_MIGRATION_REPO}
+ db_sync_order = {
+ DATA_MIGRATION_BRANCH: EXPAND_BRANCH,
+ CONTRACT_BRANCH: DATA_MIGRATION_BRANCH,
+ }
- if repo_name == LEGACY_REPO:
- return
- # If expand is being run, we validate that Legacy repo is at the maximum
- # version before running the additional schema expansions.
- elif repo_name == EXPAND_REPO:
- abs_path = find_repo(LEGACY_REPO)
- repo = migrate.versioning.repository.Repository(abs_path)
- if int(repo.latest) != get_db_version():
- raise db_exception.DBMigrationError(
- 'Your Legacy repo version is not up to date. Please refer to '
- 'https://docs.openstack.org/keystone/latest/admin/'
- 'identity-upgrading.html '
- 'to see the proper steps for rolling upgrades.')
+ if branch == EXPAND_BRANCH:
return
# find the latest version that the current command will upgrade to if there
# wasn't a version specified for upgrade.
if not target_repo_version:
- abs_path = find_repo(repo_name)
- repo = migrate.versioning.repository.Repository(abs_path)
+ abs_path = _get_migrate_repo_path(branch)
+ repo = _find_migrate_repo(abs_path)
target_repo_version = int(repo.latest)
# get current version of the command that runs before the current command.
- dependency_repo_version = get_db_version(repo=db_sync_order[repo_name])
+ dependency_repo_version = get_db_version(branch=db_sync_order[branch])
if dependency_repo_version < target_repo_version:
raise db_exception.DBMigrationError(
@@ -285,7 +203,7 @@ def validate_upgrade_order(repo_name, target_repo_version=None):
'https://docs.openstack.org/keystone/latest/admin/'
'identity-upgrading.html '
'to see the proper steps for rolling upgrades.' % (
- repo_name, db_sync_order[repo_name]))
+ branch, db_sync_order[branch]))
def expand_schema():
@@ -293,13 +211,9 @@ def expand_schema():
This is run manually by the keystone-manage command before the first
keystone node is migrated to the latest release.
-
"""
- # Make sure all the legacy migrations are run before we run any new
- # expand migrations.
- _sync_common_repo(version=None)
- validate_upgrade_order(EXPAND_REPO)
- _sync_repo(repo_name=EXPAND_REPO)
+ _validate_upgrade_order(EXPAND_BRANCH)
+ _db_sync(branch=EXPAND_BRANCH)
def migrate_data():
@@ -307,10 +221,9 @@ def migrate_data():
This is run manually by the keystone-manage command once the keystone
schema has been expanded for the new release.
-
"""
- validate_upgrade_order(DATA_MIGRATION_REPO)
- _sync_repo(repo_name=DATA_MIGRATION_REPO)
+ _validate_upgrade_order(DATA_MIGRATION_BRANCH)
+ _db_sync(branch=DATA_MIGRATION_BRANCH)
def contract_schema():
@@ -319,7 +232,26 @@ def contract_schema():
This is run manually by the keystone-manage command once the keystone
nodes have been upgraded to the latest release and will remove any old
tables/columns that are no longer required.
+ """
+ _validate_upgrade_order(CONTRACT_BRANCH)
+ _db_sync(branch=CONTRACT_BRANCH)
+
+def offline_sync_database_to_version(version=None):
+ """Perform and off-line sync of the database.
+
+ Migrate the database up to the latest version, doing the equivalent of
+ the cycle of --expand, --migrate and --contract, for when an offline
+ upgrade is being performed.
+
+ If a version is specified then only migrate the database up to that
+ version. Downgrading is not supported. If version is specified, then only
+ the main database migration is carried out - and the expand, migration and
+ contract phases will NOT be run.
"""
- validate_upgrade_order(CONTRACT_REPO)
- _sync_repo(repo_name=CONTRACT_REPO)
+ if version:
+ raise Exception('Specifying a version is no longer supported')
+
+ expand_schema()
+ migrate_data()
+ contract_schema()
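In practice the phases are still driven through keystone-manage; an offline upgrade now simply runs all three back to back, while rolling upgrades invoke them one at a time across the cluster:

    keystone-manage db_sync --expand    # additive schema changes
    keystone-manage db_sync --migrate   # data migrations
    keystone-manage db_sync --contract  # destructive schema changes
    keystone-manage db_sync             # offline: expand, migrate, contract in sequence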
diff --git a/keystone/common/utils.py b/keystone/common/utils.py
index 13140853a..70d277e52 100644
--- a/keystone/common/utils.py
+++ b/keystone/common/utils.py
@@ -16,7 +16,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import collections
+import collections.abc
+import contextlib
import grp
import hashlib
import itertools
@@ -81,7 +82,7 @@ def flatten_dict(d, parent_key=''):
items = []
for k, v in d.items():
new_key = parent_key + '.' + k if parent_key else k
- if isinstance(v, collections.MutableMapping):
+ if isinstance(v, collections.abc.MutableMapping):
items.extend(list(flatten_dict(v, new_key).items()))
else:
items.append((new_key, v))
@@ -489,3 +490,9 @@ def create_directory(directory, keystone_user_id=None, keystone_group_id=None):
'Unable to change the ownership of key repository without '
'a keystone user ID and keystone group ID both being '
'provided: %s', directory)
+
+
+@contextlib.contextmanager
+def nested_contexts(*contexts):
+ with contextlib.ExitStack() as stack:
+ yield [stack.enter_context(c) for c in contexts]
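nested_contexts is just contextlib.ExitStack with a friendlier call shape, handy when several patches or fixtures must be active at once. A small usage sketch (SomeClass and its methods are hypothetical):

    from unittest import mock

    from keystone.common import utils

    with utils.nested_contexts(
        mock.patch.object(SomeClass, 'method_a'),
        mock.patch.object(SomeClass, 'method_b'),
    ) as (mock_a, mock_b):
        # Both patches are active here and are unwound together on exit.
        ...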
diff --git a/keystone/conf/ldap.py b/keystone/conf/ldap.py
index 5943ff434..e9b89f9f6 100644
--- a/keystone/conf/ldap.py
+++ b/keystone/conf/ldap.py
@@ -411,11 +411,11 @@ use_pool` is also enabled.
pool_retry_max = cfg.IntOpt(
'pool_retry_max',
default=3,
- min=0,
+ min=1,
help=utils.fmt("""
-The maximum number of times to attempt reconnecting to the LDAP server before
-aborting. A value of zero prevents retries. This option has no effect unless
-`[ldap] use_pool` is also enabled.
+The maximum number of times to attempt connecting to the LDAP server before
+aborting. A value of one makes only one connection attempt.
+This option has no effect unless `[ldap] use_pool` is also enabled.
"""))
pool_retry_delay = cfg.FloatOpt(
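With the new minimum of 1, the option counts total connection attempts rather than retries after a failure. A hedged keystone.conf sketch:

    [ldap]
    use_pool = true
    # up to three attempts to connect to the LDAP server before aborting
    pool_retry_max = 3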
diff --git a/keystone/conf/memcache.py b/keystone/conf/memcache.py
index 97dc2c9e1..b4b8c8b06 100644
--- a/keystone/conf/memcache.py
+++ b/keystone/conf/memcache.py
@@ -19,6 +19,12 @@ from keystone.conf import utils
dead_retry = cfg.IntOpt(
'dead_retry',
default=5 * 60,
+ deprecated_for_removal=True,
+ deprecated_reason='This option has no effect. '
+ 'Configure ``keystone.conf [cache] '
+ 'memcache_dead_retry`` option to set the '
+ 'dead_retry of memcached instead. ',
+ deprecated_since='Y',
help=utils.fmt("""
Number of seconds memcached server is considered dead before it is tried again.
This is used by the key value store system.
@@ -28,7 +34,7 @@ socket_timeout = cfg.IntOpt(
'socket_timeout',
default=3,
deprecated_for_removal=True,
- deprecated_reason='This option is duplicated with oslo.cache. '
+ deprecated_reason='This option has no effect. '
'Configure ``keystone.conf [cache] '
'memcache_socket_timeout`` option to set the '
'socket_timeout of memcached instead. ',
@@ -41,6 +47,12 @@ store system.
pool_maxsize = cfg.IntOpt(
'pool_maxsize',
default=10,
+ deprecated_for_removal=True,
+ deprecated_reason='This option has no effect. '
+ 'Configure ``keystone.conf [cache] '
+ 'memcache_pool_maxsize`` option to set the '
+ 'pool_maxsize of memcached instead. ',
+ deprecated_since='Y',
help=utils.fmt("""
Max total number of open connections to every memcached server. This is used by
the key value store system.
@@ -49,6 +61,12 @@ the key value store system.
pool_unused_timeout = cfg.IntOpt(
'pool_unused_timeout',
default=60,
+ deprecated_for_removal=True,
+ deprecated_reason='This option has no effect. '
+ 'Configure ``keystone.conf [cache] '
+ 'memcache_pool_unused_timeout`` option to set the '
+ 'pool_unused_timeout of memcached instead. ',
+ deprecated_since='Y',
help=utils.fmt("""
Number of seconds a connection to memcached is held unused in the pool before
it is closed. This is used by the key value store system.
@@ -57,6 +75,12 @@ it is closed. This is used by the key value store system.
pool_connection_get_timeout = cfg.IntOpt(
'pool_connection_get_timeout',
default=10,
+ deprecated_for_removal=True,
+ deprecated_reason='This option has no effect. '
+ 'Configure ``keystone.conf [cache] '
+ 'memcache_pool_connection_get_timeout`` option to set '
+ 'the connection_get_timeout of memcached instead. ',
+ deprecated_since='Y',
help=utils.fmt("""
Number of seconds that an operation will wait to get a memcache client
connection. This is used by the key value store system.
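Each of these [memcache] knobs now defers to its oslo.cache counterpart; the replacements named in the deprecation messages live under [cache]. A sketch that mirrors the old defaults (values shown purely as an illustration):

    [cache]
    memcache_dead_retry = 300
    memcache_socket_timeout = 3
    memcache_pool_maxsize = 10
    memcache_pool_unused_timeout = 60
    memcache_pool_connection_get_timeout = 10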
diff --git a/keystone/credential/providers/fernet/core.py b/keystone/credential/providers/fernet/core.py
index 00e197a37..99de106fb 100644
--- a/keystone/credential/providers/fernet/core.py
+++ b/keystone/credential/providers/fernet/core.py
@@ -96,7 +96,7 @@ class Provider(core.Provider):
:returns: a decrypted credential
"""
key_utils = fernet_utils.FernetUtils(
- CONF.credential.key_repository, MAX_ACTIVE_KEYS)
+ CONF.credential.key_repository, MAX_ACTIVE_KEYS, 'credential')
keys = key_utils.load_keys(use_null_key=True)
fernet_keys = [fernet.Fernet(key) for key in keys]
crypto = fernet.MultiFernet(fernet_keys)
diff --git a/keystone/federation/idp.py b/keystone/federation/idp.py
index fd464f5c2..2f1a4fe5a 100644
--- a/keystone/federation/idp.py
+++ b/keystone/federation/idp.py
@@ -366,7 +366,11 @@ class SAMLGenerator(object):
"""
canonicalization_method = xmldsig.CanonicalizationMethod()
- canonicalization_method.algorithm = xmldsig.ALG_EXC_C14N
+ # TODO(stephenfin): Drop when we remove support for pysaml < 7.1.0
+ if hasattr(xmldsig, 'TRANSFORM_C14N'): # >= 7.1.0
+ canonicalization_method.algorithm = xmldsig.TRANSFORM_C14N
+ else: # < 7.1.0
+ canonicalization_method.algorithm = xmldsig.ALG_EXC_C14N
signature_method = xmldsig.SignatureMethod(
algorithm=xmldsig.SIG_RSA_SHA1)
@@ -374,7 +378,11 @@ class SAMLGenerator(object):
envelope_transform = xmldsig.Transform(
algorithm=xmldsig.TRANSFORM_ENVELOPED)
- c14_transform = xmldsig.Transform(algorithm=xmldsig.ALG_EXC_C14N)
+ # TODO(stephenfin): Drop when we remove support for pysaml < 7.1.0
+ if hasattr(xmldsig, 'TRANSFORM_C14N'): # >= 7.1.0
+ c14_transform = xmldsig.Transform(algorithm=xmldsig.TRANSFORM_C14N)
+ else: # < 7.1.0
+ c14_transform = xmldsig.Transform(algorithm=xmldsig.ALG_EXC_C14N)
transforms.transform = [envelope_transform, c14_transform]
digest_method = xmldsig.DigestMethod(algorithm=xmldsig.DIGEST_SHA1)
diff --git a/keystone/identity/backends/ldap/common.py b/keystone/identity/backends/ldap/common.py
index 4af42de29..1033a4efd 100644
--- a/keystone/identity/backends/ldap/common.py
+++ b/keystone/identity/backends/ldap/common.py
@@ -1401,9 +1401,24 @@ class BaseLdap(object):
pass
else:
try:
- obj[k] = v[0]
+ value = v[0]
except IndexError:
- obj[k] = None
+ value = None
+
+ # NOTE(xek): Some LDAP servers return values as the bytes data type.
+ # We convert them to strings here so that they are consistent with
+ # the other (SQL) backends.
+ # Bytes values caused issues in the past because they could be
+ # cached and then passed through str() when building LDAP filters,
+ # which resulted in an unexpected b'...' prefix.
+ if isinstance(value, bytes):
+ try:
+ value = value.decode('utf-8')
+ except UnicodeDecodeError:
+ LOG.error("Error decoding value %r (object id %r).",
+ value, res[0])
+ raise
+ obj[k] = value
return obj
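The decode matters because a cached bytes value that later goes through str() formatting silently grows a b'...' prefix, which corrupts any LDAP filter built from it. A two-line illustration:

    value = b'cn=admin'
    str(value)             # "b'cn=admin'" -- would corrupt a filter string
    value.decode('utf-8')  # 'cn=admin'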
diff --git a/keystone/identity/mapping_backends/sql.py b/keystone/identity/mapping_backends/sql.py
index 676d14492..6fadd6a0b 100644
--- a/keystone/identity/mapping_backends/sql.py
+++ b/keystone/identity/mapping_backends/sql.py
@@ -21,7 +21,7 @@ class IDMapping(sql.ModelBase, sql.ModelDictMixin):
__tablename__ = 'id_mapping'
public_id = sql.Column(sql.String(64), primary_key=True)
domain_id = sql.Column(sql.String(64), nullable=False)
- local_id = sql.Column(sql.String(64), nullable=False)
+ local_id = sql.Column(sql.String(255), nullable=False)
# NOTE(henry-nash): Postgres requires a name to be defined for an Enum
entity_type = sql.Column(
sql.Enum(identity_mapping.EntityType.USER,
diff --git a/keystone/identity/shadow_backends/sql.py b/keystone/identity/shadow_backends/sql.py
index 1d817c038..3e04b332d 100644
--- a/keystone/identity/shadow_backends/sql.py
+++ b/keystone/identity/shadow_backends/sql.py
@@ -98,7 +98,8 @@ class ShadowUsers(base.ShadowUsersDriverBase):
x for x in hints.filters if x['name'] not in ('idp_id',
'protocol_id',
'unique_id')]
- query = query.filter(sqlalchemy.and_(*statements))
+ if statements:
+ query = query.filter(sqlalchemy.and_(*statements))
return query
def get_federated_users(self, hints):
diff --git a/keystone/locale/de/LC_MESSAGES/keystone.po b/keystone/locale/de/LC_MESSAGES/keystone.po
index 8c3b16303..a126f83dc 100644
--- a/keystone/locale/de/LC_MESSAGES/keystone.po
+++ b/keystone/locale/de/LC_MESSAGES/keystone.po
@@ -12,7 +12,7 @@ msgid ""
msgstr ""
"Project-Id-Version: keystone VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2020-04-25 10:31+0000\n"
+"POT-Creation-Date: 2022-05-20 04:41+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -806,9 +806,6 @@ msgstr ""
"Region %(region_id)s kann nicht gelöscht werden, da sie oder ihr "
"untergeordnete Regionen über zugeordnete Endpunkte verfügen. "
-msgid "Unable to downgrade schema"
-msgstr "Das Schema konnte nicht herabgestuft werden."
-
#, python-format
msgid "Unable to locate domain config directory: %s"
msgstr "Domänenkonfigurationsverzeichnis wurde nicht gefunden: %s"
diff --git a/keystone/locale/en_GB/LC_MESSAGES/keystone.po b/keystone/locale/en_GB/LC_MESSAGES/keystone.po
index 191ed5596..aa775b0d7 100644
--- a/keystone/locale/en_GB/LC_MESSAGES/keystone.po
+++ b/keystone/locale/en_GB/LC_MESSAGES/keystone.po
@@ -8,15 +8,16 @@
# Andi Chandler <andi@gowling.com>, 2018. #zanata
# Andi Chandler <andi@gowling.com>, 2019. #zanata
# Andi Chandler <andi@gowling.com>, 2020. #zanata
+# Andi Chandler <andi@gowling.com>, 2022. #zanata
msgid ""
msgstr ""
"Project-Id-Version: keystone VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2021-01-08 19:57+0000\n"
+"POT-Creation-Date: 2022-05-20 04:41+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"PO-Revision-Date: 2020-10-28 02:12+0000\n"
+"PO-Revision-Date: 2022-05-25 08:57+0000\n"
"Last-Translator: Andi Chandler <andi@gowling.com>\n"
"Language: en_GB\n"
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
@@ -134,15 +135,6 @@ msgstr "Access token not found"
msgid "Additional authentications steps required."
msgstr "Additional authentications steps required."
-msgid ""
-"All extensions have been moved into keystone core and as such its migrations "
-"are maintained by the main keystone database control. Use the command: "
-"keystone-manage db_sync"
-msgstr ""
-"All extensions have been moved into Keystone core and as such its migrations "
-"are maintained by the main Keystone database control. Use the command: "
-"keystone-manage db_sync"
-
msgid "An unexpected error occurred when retrieving domain configs"
msgstr "An unexpected error occurred when retrieving domain configs"
@@ -1205,6 +1197,13 @@ msgstr "The action you have requested has not been implemented."
msgid "The authenticated user should match the trustor"
msgstr "The authenticated user should match the trustor"
+msgid ""
+"The database is not under version control, but has tables. Please stamp the "
+"current version of the schema manually."
+msgstr ""
+"The database is not under version control but has tables. Please stamp the "
+"current version of the schema manually."
+
#, python-format
msgid ""
"The given operator %(_op)s is not valid. It must be one of the following: "
@@ -1406,9 +1405,6 @@ msgstr ""
"Unable to delete region %(region_id)s because it or its child regions have "
"associated endpoints."
-msgid "Unable to downgrade schema"
-msgstr "Unable to downgrade schema"
-
#, python-format
msgid "Unable to establish a connection to LDAP Server (%(url)s)."
msgstr "Unable to establish a connection to LDAP Server (%(url)s)."
@@ -1760,3 +1756,6 @@ msgstr "tls_cacertdir %s not found or is not a directory"
#, python-format
msgid "tls_cacertfile %s not found or is not a file"
msgstr "tls_cacertfile %s not found or is not a file"
+
+msgid "version should be an integer"
+msgstr "version should be an integer"
diff --git a/keystone/locale/es/LC_MESSAGES/keystone.po b/keystone/locale/es/LC_MESSAGES/keystone.po
index 6bce54265..d585f728a 100644
--- a/keystone/locale/es/LC_MESSAGES/keystone.po
+++ b/keystone/locale/es/LC_MESSAGES/keystone.po
@@ -15,7 +15,7 @@ msgid ""
msgstr ""
"Project-Id-Version: keystone VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2019-09-30 20:17+0000\n"
+"POT-Creation-Date: 2022-05-20 04:41+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -748,9 +748,6 @@ msgstr ""
"No se puede suprimir la región %(region_id)s porque sus regiones secundarias "
"tienen puntos finales asociados."
-msgid "Unable to downgrade schema"
-msgstr "No se ha podido degradar el esquema"
-
#, python-format
msgid "Unable to locate domain config directory: %s"
msgstr "No se ha podido localizar el directorio config de dominio: %s"
diff --git a/keystone/locale/fr/LC_MESSAGES/keystone.po b/keystone/locale/fr/LC_MESSAGES/keystone.po
index 66540fd97..6d69341b5 100644
--- a/keystone/locale/fr/LC_MESSAGES/keystone.po
+++ b/keystone/locale/fr/LC_MESSAGES/keystone.po
@@ -14,7 +14,7 @@ msgid ""
msgstr ""
"Project-Id-Version: keystone VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2019-09-30 20:17+0000\n"
+"POT-Creation-Date: 2022-05-20 04:41+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -741,9 +741,6 @@ msgstr ""
"Impossible de supprimer la région %(region_id)s car la région ou ses régions "
"enfant ont des noeuds finals associés."
-msgid "Unable to downgrade schema"
-msgstr "Impossible de rétrograder le schéma"
-
#, python-format
msgid "Unable to locate domain config directory: %s"
msgstr "Impossible de localiser le répertoire de configuration domaine: %s"
diff --git a/keystone/locale/it/LC_MESSAGES/keystone.po b/keystone/locale/it/LC_MESSAGES/keystone.po
index 2bc580c20..c9384b0b8 100644
--- a/keystone/locale/it/LC_MESSAGES/keystone.po
+++ b/keystone/locale/it/LC_MESSAGES/keystone.po
@@ -8,7 +8,7 @@ msgid ""
msgstr ""
"Project-Id-Version: keystone VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2019-09-30 20:17+0000\n"
+"POT-Creation-Date: 2022-05-20 04:41+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -740,9 +740,6 @@ msgstr ""
"Impossibile eliminare la regione %(region_id)s perché la regione o le "
"relative regioni child hanno degli endpoint associati."
-msgid "Unable to downgrade schema"
-msgstr "Impossibile eseguire il downgrade dello schema"
-
#, python-format
msgid "Unable to locate domain config directory: %s"
msgstr "Impossibile individuare la directory config del dominio: %s"
diff --git a/keystone/locale/ja/LC_MESSAGES/keystone.po b/keystone/locale/ja/LC_MESSAGES/keystone.po
index 433c673b8..e62f4f492 100644
--- a/keystone/locale/ja/LC_MESSAGES/keystone.po
+++ b/keystone/locale/ja/LC_MESSAGES/keystone.po
@@ -9,7 +9,7 @@ msgid ""
msgstr ""
"Project-Id-Version: keystone VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2019-09-30 20:17+0000\n"
+"POT-Creation-Date: 2022-05-20 04:41+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -729,9 +729,6 @@ msgstr ""
"リージョン %(region_id)s またはその子リージョンがエンドポイントに関連付けられ"
"ているため、このリージョンを削除できません。"
-msgid "Unable to downgrade schema"
-msgstr "スキーマをダウングレードすることができません"
-
#, python-format
msgid "Unable to locate domain config directory: %s"
msgstr "ドメイン設定ディレクトリーが見つかりません: %s"
diff --git a/keystone/locale/ko_KR/LC_MESSAGES/keystone.po b/keystone/locale/ko_KR/LC_MESSAGES/keystone.po
index 102b67fa6..8c278558c 100644
--- a/keystone/locale/ko_KR/LC_MESSAGES/keystone.po
+++ b/keystone/locale/ko_KR/LC_MESSAGES/keystone.po
@@ -11,7 +11,7 @@ msgid ""
msgstr ""
"Project-Id-Version: keystone VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2019-09-30 20:17+0000\n"
+"POT-Creation-Date: 2022-05-20 04:41+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -712,9 +712,6 @@ msgstr ""
"리젼 %(region_id)s 또는 하위 리젼에 연관된 엔드포인트가 있어 삭제할 수 없습니"
"다."
-msgid "Unable to downgrade schema"
-msgstr "스키마를 다운그레이드할 수 없음"
-
#, python-format
msgid "Unable to locate domain config directory: %s"
msgstr "%s: 도메인 설정 디렉토리를 찾을 수 없습니다."
diff --git a/keystone/locale/pt_BR/LC_MESSAGES/keystone.po b/keystone/locale/pt_BR/LC_MESSAGES/keystone.po
index 853478f93..7516816b7 100644
--- a/keystone/locale/pt_BR/LC_MESSAGES/keystone.po
+++ b/keystone/locale/pt_BR/LC_MESSAGES/keystone.po
@@ -13,7 +13,7 @@ msgid ""
msgstr ""
"Project-Id-Version: keystone VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2019-09-30 20:17+0000\n"
+"POT-Creation-Date: 2022-05-20 04:41+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -728,9 +728,6 @@ msgstr ""
"Não foi possível excluir a região %(region_id)s, uma vez que ela ou suas "
"regiões filhas possuem terminais associados."
-msgid "Unable to downgrade schema"
-msgstr "Não é possível fazer downgrade do esquema"
-
#, python-format
msgid "Unable to locate domain config directory: %s"
msgstr "Não é possível localizar diretório de configuração de domínio: %s"
diff --git a/keystone/locale/ru/LC_MESSAGES/keystone.po b/keystone/locale/ru/LC_MESSAGES/keystone.po
index 542b138f2..56e50d9c0 100644
--- a/keystone/locale/ru/LC_MESSAGES/keystone.po
+++ b/keystone/locale/ru/LC_MESSAGES/keystone.po
@@ -11,7 +11,7 @@ msgid ""
msgstr ""
"Project-Id-Version: keystone VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2019-09-30 20:17+0000\n"
+"POT-Creation-Date: 2022-05-20 04:41+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -723,9 +723,6 @@ msgstr ""
"Не удалось удалить регион %(region_id)s: регион или его дочерние регионы "
"имеют связанные конечные точки."
-msgid "Unable to downgrade schema"
-msgstr "Не удается понизить версию схемы"
-
#, python-format
msgid "Unable to locate domain config directory: %s"
msgstr "Не удалось найти каталог конфигурации домена: %s"
diff --git a/keystone/locale/zh_CN/LC_MESSAGES/keystone.po b/keystone/locale/zh_CN/LC_MESSAGES/keystone.po
index 27b9c6f4e..cb194dc71 100644
--- a/keystone/locale/zh_CN/LC_MESSAGES/keystone.po
+++ b/keystone/locale/zh_CN/LC_MESSAGES/keystone.po
@@ -11,16 +11,18 @@
# 颜海峰 <yanheven@gmail.com>, 2014
# Andreas Jaeger <jaegerandi@gmail.com>, 2016. #zanata
# Eric Lei <1165970798@qq.com>, 2016. #zanata
+# Research and Development Center UnitedStack <dev@unitedstack.com>, 2022. #zanata
msgid ""
msgstr ""
"Project-Id-Version: keystone VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2019-09-30 20:17+0000\n"
+"POT-Creation-Date: 2022-07-01 18:11+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"PO-Revision-Date: 2016-09-28 03:23+0000\n"
-"Last-Translator: Eric Lei <1165970798@qq.com>\n"
+"PO-Revision-Date: 2022-06-14 12:29+0000\n"
+"Last-Translator: Research and Development Center UnitedStack "
+"<dev@unitedstack.com>\n"
"Language: zh_CN\n"
"Plural-Forms: nplurals=1; plural=0;\n"
"Generated-By: Babel 2.0\n"
@@ -654,9 +656,6 @@ msgid ""
"associated endpoints."
msgstr "无法删除区域 %(region_id)s,因为它或它的子区域具有关联的端点。"
-msgid "Unable to downgrade schema"
-msgstr "无法对模式进行降级"
-
#, python-format
msgid "Unable to locate domain config directory: %s"
msgstr "找不到指定的域配置目录:%s"
@@ -732,6 +731,14 @@ msgstr "用户类型 %s 不受支持"
msgid "You are not authorized to perform the requested action."
msgstr "您没有授权完成所请求的操作。"
+msgid ""
+"You cannot change your password at this time due to password policy "
+"disallowing password changes. Please contact your administrator to reset "
+"your password."
+msgstr ""
+"因为密码策略被设置为禁止修改密码,目前您不能更改密码。请联系管理员重置您的密"
+"码。"
+
#, python-format
msgid ""
"You cannot change your password at this time due to the minimum password "
@@ -740,7 +747,7 @@ msgid ""
"contact your administrator to reset your password."
msgstr ""
"没有达到密码最小使用时长,目前您不能更改密码。一旦您修改了密码,在下次可被修"
-"改前该密码必须使用%(min_age_days)d天.请在%(days_left)d天后重试,或者联系管理"
+"改前该密码必须使用%(min_age_days)d天。请在%(days_left)d天后重试,或者联系管理"
"员重置您的密码。"
msgid ""
diff --git a/keystone/locale/zh_TW/LC_MESSAGES/keystone.po b/keystone/locale/zh_TW/LC_MESSAGES/keystone.po
index 4d0399c9d..4529c4bc0 100644
--- a/keystone/locale/zh_TW/LC_MESSAGES/keystone.po
+++ b/keystone/locale/zh_TW/LC_MESSAGES/keystone.po
@@ -8,7 +8,7 @@ msgid ""
msgstr ""
"Project-Id-Version: keystone VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2019-09-30 20:17+0000\n"
+"POT-Creation-Date: 2022-05-20 04:41+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -638,9 +638,6 @@ msgid ""
"associated endpoints."
msgstr "無法刪除區域 %(region_id)s,因為此區域或其子區域具有相關聯的端點。"
-msgid "Unable to downgrade schema"
-msgstr "無法將綱目降級"
-
#, python-format
msgid "Unable to locate domain config directory: %s"
msgstr "找不到網域配置目錄:%s"
diff --git a/keystone/notifications.py b/keystone/notifications.py
index e536ebdd4..a59b1d0ba 100644
--- a/keystone/notifications.py
+++ b/keystone/notifications.py
@@ -580,6 +580,8 @@ class CadfNotificationWrapper(object):
taxonomy.OUTCOME_FAILURE,
target, self.event_type,
reason=audit_reason)
+ if isinstance(ex, exception.AccountLocked):
+ raise exception.Unauthorized
raise
except Exception:
# For authentication failure send a CADF event as well
diff --git a/keystone/server/flask/application.py b/keystone/server/flask/application.py
index 12d59b289..537bd45ac 100644
--- a/keystone/server/flask/application.py
+++ b/keystone/server/flask/application.py
@@ -81,10 +81,8 @@ def _handle_keystone_exception(error):
LOG.warning(
"Authorization failed. %(exception)s from %(remote_addr)s",
{'exception': error, 'remote_addr': flask.request.remote_addr})
- elif isinstance(error, exception.UnexpectedError):
- LOG.exception(str(error))
else:
- LOG.warning(str(error))
+ LOG.exception(str(error))
# Render the exception to something user "friendly"
error_message = error.args[0]
diff --git a/keystone/tests/unit/assignment/test_backends.py b/keystone/tests/unit/assignment/test_backends.py
index cdf89664a..4add5649a 100644
--- a/keystone/tests/unit/assignment/test_backends.py
+++ b/keystone/tests/unit/assignment/test_backends.py
@@ -3694,9 +3694,9 @@ class ImpliedRoleTests(AssignmentTestHelperMixin):
expected_implied_role_ref = {
'prior_role_id': prior_role_ref['id'],
'implied_role_id': implied_role_ref['id']}
- self.assertDictContainsSubset(
- expected_implied_role_ref,
- implied_role)
+ self.assertLessEqual(
+ expected_implied_role_ref.items(),
+ implied_role.items())
PROVIDERS.role_api.delete_implied_role(
prior_role_ref['id'],
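assertDictContainsSubset has long been deprecated in unittest (and is removed in Python 3.12); comparing dict item views gives the same subset check because items() supports set-style operators. For example:

    expected = {'prior_role_id': 'abc', 'implied_role_id': 'def'}
    actual = dict(expected, extra=None)

    # True: every (key, value) pair in expected also appears in actual.
    assert expected.items() <= actual.items()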
diff --git a/keystone/tests/unit/base_classes.py b/keystone/tests/unit/base_classes.py
index 95bf7fa02..9bf3b50eb 100644
--- a/keystone/tests/unit/base_classes.py
+++ b/keystone/tests/unit/base_classes.py
@@ -31,7 +31,7 @@ class TestCaseWithBootstrap(core.BaseTestCase):
Re-implementation of TestCase that doesn't load a bunch of fixtures by
hand and instead uses the bootstrap process. This makes it so that our base
tests have the same things available to us as operators after they run
- boostrap. It also makes our tests DRY and pushes setup required for
+ bootstrap. It also makes our tests DRY and pushes setup required for
specific tests into the actual test class, instead of pushing it into a
generic structure that gets loaded for every test.
@@ -46,7 +46,7 @@ class TestCaseWithBootstrap(core.BaseTestCase):
ksfixtures.KeyRepository(
self.config_fixture,
'fernet_tokens',
- CONF.fernet_tokens.max_active_keys
+ CONF.fernet_tokens.max_active_keys,
)
)
@@ -54,7 +54,7 @@ class TestCaseWithBootstrap(core.BaseTestCase):
ksfixtures.KeyRepository(
self.config_fixture,
'fernet_receipts',
- CONF.fernet_receipts.max_active_keys
+ CONF.fernet_receipts.max_active_keys,
)
)
@@ -72,7 +72,8 @@ class TestCaseWithBootstrap(core.BaseTestCase):
try:
PROVIDERS.resource_api.create_domain(
default_fixtures.ROOT_DOMAIN['id'],
- default_fixtures.ROOT_DOMAIN)
+ default_fixtures.ROOT_DOMAIN,
+ )
except exception.Conflict:
pass
diff --git a/keystone/tests/unit/catalog/test_backends.py b/keystone/tests/unit/catalog/test_backends.py
index b2989def4..513e5c3c3 100644
--- a/keystone/tests/unit/catalog/test_backends.py
+++ b/keystone/tests/unit/catalog/test_backends.py
@@ -111,20 +111,23 @@ class CatalogTests(object):
PROVIDERS.catalog_api.get_region(region_id)
# update the region bypassing catalog_api
PROVIDERS.catalog_api.driver.update_region(region_id, updated_region)
- self.assertDictContainsSubset(
- new_region, PROVIDERS.catalog_api.get_region(region_id)
+ self.assertLessEqual(
+ new_region.items(),
+ PROVIDERS.catalog_api.get_region(region_id).items()
)
PROVIDERS.catalog_api.get_region.invalidate(
PROVIDERS.catalog_api, region_id
)
- self.assertDictContainsSubset(
- updated_region, PROVIDERS.catalog_api.get_region(region_id)
+ self.assertLessEqual(
+ updated_region.items(),
+ PROVIDERS.catalog_api.get_region(region_id).items()
)
# delete the region
PROVIDERS.catalog_api.driver.delete_region(region_id)
# still get the old region
- self.assertDictContainsSubset(
- updated_region, PROVIDERS.catalog_api.get_region(region_id)
+ self.assertLessEqual(
+ updated_region.items(),
+ PROVIDERS.catalog_api.get_region(region_id).items()
)
PROVIDERS.catalog_api.get_region.invalidate(
PROVIDERS.catalog_api, region_id
@@ -342,20 +345,23 @@ class CatalogTests(object):
PROVIDERS.catalog_api.driver.update_service(
service_id, updated_service
)
- self.assertDictContainsSubset(
- new_service, PROVIDERS.catalog_api.get_service(service_id)
+ self.assertLessEqual(
+ new_service.items(),
+ PROVIDERS.catalog_api.get_service(service_id).items()
)
PROVIDERS.catalog_api.get_service.invalidate(
PROVIDERS.catalog_api, service_id
)
- self.assertDictContainsSubset(
- updated_service, PROVIDERS.catalog_api.get_service(service_id)
+ self.assertLessEqual(
+ updated_service.items(),
+ PROVIDERS.catalog_api.get_service(service_id).items()
)
# delete bypassing catalog api
PROVIDERS.catalog_api.driver.delete_service(service_id)
- self.assertDictContainsSubset(
- updated_service, PROVIDERS.catalog_api.get_service(service_id)
+ self.assertLessEqual(
+ updated_service.items(),
+ PROVIDERS.catalog_api.get_service(service_id).items()
)
PROVIDERS.catalog_api.get_service.invalidate(
PROVIDERS.catalog_api, service_id
@@ -416,12 +422,12 @@ class CatalogTests(object):
PROVIDERS.catalog_api.get_endpoint(endpoint['id'])
# delete the service bypassing catalog api
PROVIDERS.catalog_api.driver.delete_service(service['id'])
- self.assertDictContainsSubset(endpoint,
- PROVIDERS.catalog_api.
- get_endpoint(endpoint['id']))
- self.assertDictContainsSubset(service,
- PROVIDERS.catalog_api.
- get_service(service['id']))
+ self.assertLessEqual(
+ endpoint.items(),
+ PROVIDERS.catalog_api.get_endpoint(endpoint['id']).items())
+ self.assertLessEqual(
+ service.items(),
+ PROVIDERS.catalog_api.get_service(service['id']).items())
PROVIDERS.catalog_api.get_endpoint.invalidate(
PROVIDERS.catalog_api, endpoint['id']
)
diff --git a/keystone/tests/unit/common/sql/test_upgrades.py b/keystone/tests/unit/common/sql/test_upgrades.py
new file mode 100644
index 000000000..c6c4a2e56
--- /dev/null
+++ b/keystone/tests/unit/common/sql/test_upgrades.py
@@ -0,0 +1,252 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+import tempfile
+from unittest import mock
+
+from migrate import exceptions as migrate_exception
+from migrate.versioning import api as migrate_api
+from migrate.versioning import repository as migrate_repository
+from oslo_db import exception as db_exception
+from oslo_db.sqlalchemy import enginefacade
+from oslo_db.sqlalchemy import test_fixtures as db_fixtures
+from oslotest import base as test_base
+import sqlalchemy
+
+from keystone.common.sql import upgrades
+from keystone.common import utils
+
+
+class TestMigrationCommon(
+ db_fixtures.OpportunisticDBTestMixin, test_base.BaseTestCase,
+):
+
+ def setUp(self):
+ super().setUp()
+
+ self.engine = enginefacade.writer.get_engine()
+
+ self.path = tempfile.mkdtemp('test_migration')
+ self.path1 = tempfile.mkdtemp('test_migration')
+ self.return_value = '/home/openstack/migrations'
+ self.return_value1 = '/home/extension/migrations'
+ self.init_version = 1
+ self.test_version = 123
+
+ self.patcher_repo = mock.patch.object(migrate_repository, 'Repository')
+ self.repository = self.patcher_repo.start()
+ self.repository.side_effect = [self.return_value, self.return_value1]
+
+ self.mock_api_db = mock.patch.object(migrate_api, 'db_version')
+ self.mock_api_db_version = self.mock_api_db.start()
+ self.mock_api_db_version.return_value = self.test_version
+
+ def tearDown(self):
+ os.rmdir(self.path)
+ self.mock_api_db.stop()
+ self.patcher_repo.stop()
+ super().tearDown()
+
+ def test_find_migrate_repo_path_not_found(self):
+ self.assertRaises(
+ db_exception.DBMigrationError,
+ upgrades._find_migrate_repo,
+ "/foo/bar/",
+ )
+
+ def test_find_migrate_repo_called_once(self):
+ my_repository = upgrades._find_migrate_repo(self.path)
+ self.repository.assert_called_once_with(self.path)
+ self.assertEqual(self.return_value, my_repository)
+
+ def test_find_migrate_repo_called_few_times(self):
+ repo1 = upgrades._find_migrate_repo(self.path)
+ repo2 = upgrades._find_migrate_repo(self.path1)
+ self.assertNotEqual(repo1, repo2)
+
+ def test_db_version_control(self):
+ with utils.nested_contexts(
+ mock.patch.object(upgrades, '_find_migrate_repo'),
+ mock.patch.object(migrate_api, 'version_control'),
+ ) as (mock_find_repo, mock_version_control):
+ mock_find_repo.return_value = self.return_value
+
+ version = upgrades._migrate_db_version_control(
+ self.engine, self.path, self.test_version)
+
+ self.assertEqual(self.test_version, version)
+ mock_version_control.assert_called_once_with(
+ self.engine, self.return_value, self.test_version)
+
+ @mock.patch.object(upgrades, '_find_migrate_repo')
+ @mock.patch.object(migrate_api, 'version_control')
+ def test_db_version_control_version_less_than_actual_version(
+ self, mock_version_control, mock_find_repo,
+ ):
+ mock_find_repo.return_value = self.return_value
+ mock_version_control.side_effect = \
+ migrate_exception.DatabaseAlreadyControlledError
+ self.assertRaises(
+ db_exception.DBMigrationError,
+ upgrades._migrate_db_version_control, self.engine,
+ self.path, self.test_version - 1)
+
+ @mock.patch.object(upgrades, '_find_migrate_repo')
+ @mock.patch.object(migrate_api, 'version_control')
+ def test_db_version_control_version_greater_than_actual_version(
+ self, mock_version_control, mock_find_repo,
+ ):
+ mock_find_repo.return_value = self.return_value
+ mock_version_control.side_effect = \
+ migrate_exception.InvalidVersionError
+ self.assertRaises(
+ db_exception.DBMigrationError,
+ upgrades._migrate_db_version_control, self.engine,
+ self.path, self.test_version + 1)
+
+ def test_db_version_return(self):
+ ret_val = upgrades._migrate_db_version(
+ self.engine, self.path, self.init_version)
+ self.assertEqual(self.test_version, ret_val)
+
+ def test_db_version_raise_not_controlled_error_first(self):
+ with mock.patch.object(
+ upgrades, '_migrate_db_version_control',
+ ) as mock_ver:
+ self.mock_api_db_version.side_effect = [
+ migrate_exception.DatabaseNotControlledError('oups'),
+ self.test_version]
+
+ ret_val = upgrades._migrate_db_version(
+ self.engine, self.path, self.init_version)
+ self.assertEqual(self.test_version, ret_val)
+ mock_ver.assert_called_once_with(
+ self.engine, self.path, version=self.init_version)
+
+ def test_db_version_raise_not_controlled_error_tables(self):
+ with mock.patch.object(sqlalchemy, 'MetaData') as mock_meta:
+ self.mock_api_db_version.side_effect = \
+ migrate_exception.DatabaseNotControlledError('oups')
+ my_meta = mock.MagicMock()
+ my_meta.tables = {'a': 1, 'b': 2}
+ mock_meta.return_value = my_meta
+
+ self.assertRaises(
+ db_exception.DBMigrationError, upgrades._migrate_db_version,
+ self.engine, self.path, self.init_version)
+
+ @mock.patch.object(migrate_api, 'version_control')
+ def test_db_version_raise_not_controlled_error_no_tables(self, mock_vc):
+ with mock.patch.object(sqlalchemy, 'MetaData') as mock_meta:
+ self.mock_api_db_version.side_effect = (
+ migrate_exception.DatabaseNotControlledError('oups'),
+ self.init_version)
+ my_meta = mock.MagicMock()
+ my_meta.tables = {}
+ mock_meta.return_value = my_meta
+
+ upgrades._migrate_db_version(
+ self.engine, self.path, self.init_version)
+
+ mock_vc.assert_called_once_with(
+ self.engine, self.return_value1, self.init_version)
+
+ @mock.patch.object(migrate_api, 'version_control')
+ def test_db_version_raise_not_controlled_alembic_tables(self, mock_vc):
+        # When tables exist but the alembic control table
+        # (alembic_version) is among them, still attempt to version the db.
+        # This simulates the case where there are multiple repos (different
+        # abs_paths) and a different path has been versioned already.
+ with mock.patch.object(sqlalchemy, 'MetaData') as mock_meta:
+ self.mock_api_db_version.side_effect = [
+ migrate_exception.DatabaseNotControlledError('oups'), None]
+ my_meta = mock.MagicMock()
+ my_meta.tables = {'alembic_version': 1, 'b': 2}
+ mock_meta.return_value = my_meta
+
+ upgrades._migrate_db_version(
+ self.engine, self.path, self.init_version)
+
+ mock_vc.assert_called_once_with(
+ self.engine, self.return_value1, self.init_version)
+
+ @mock.patch.object(migrate_api, 'version_control')
+ def test_db_version_raise_not_controlled_migrate_tables(self, mock_vc):
+        # When tables exist but the sqlalchemy-migrate control table
+        # (migrate_version) is among them, still attempt to version the db.
+        # This simulates the case where there are multiple repos (different
+        # abs_paths) and a different path has been versioned already.
+ with mock.patch.object(sqlalchemy, 'MetaData') as mock_meta:
+ self.mock_api_db_version.side_effect = [
+ migrate_exception.DatabaseNotControlledError('oups'), None]
+ my_meta = mock.MagicMock()
+ my_meta.tables = {'migrate_version': 1, 'b': 2}
+ mock_meta.return_value = my_meta
+
+ upgrades._migrate_db_version(
+ self.engine, self.path, self.init_version)
+
+ mock_vc.assert_called_once_with(
+ self.engine, self.return_value1, self.init_version)
+
+ def test_db_sync_wrong_version(self):
+ self.assertRaises(
+ db_exception.DBMigrationError,
+ upgrades._migrate_db_sync, self.engine, self.path, 'foo')
+
+ @mock.patch.object(migrate_api, 'upgrade')
+ def test_db_sync_script_not_present(self, upgrade):
+        # For a non-existent upgrade script file, sqlalchemy-migrate raises
+        # VersionNotFoundError, which is wrapped in DBMigrationError.
+ upgrade.side_effect = migrate_exception.VersionNotFoundError
+ self.assertRaises(
+ db_exception.DBMigrationError,
+ upgrades._migrate_db_sync, self.engine, self.path,
+ self.test_version + 1)
+
+ @mock.patch.object(migrate_api, 'upgrade')
+ def test_db_sync_known_error_raised(self, upgrade):
+ upgrade.side_effect = migrate_exception.KnownError
+ self.assertRaises(
+ db_exception.DBMigrationError,
+ upgrades._migrate_db_sync, self.engine, self.path,
+ self.test_version + 1)
+
+ def test_db_sync_upgrade(self):
+ init_ver = 55
+ with utils.nested_contexts(
+ mock.patch.object(upgrades, '_find_migrate_repo'),
+ mock.patch.object(migrate_api, 'upgrade')
+ ) as (mock_find_repo, mock_upgrade):
+ mock_find_repo.return_value = self.return_value
+ self.mock_api_db_version.return_value = self.test_version - 1
+
+ upgrades._migrate_db_sync(
+ self.engine, self.path, self.test_version, init_ver)
+
+ mock_upgrade.assert_called_once_with(
+ self.engine, self.return_value, self.test_version)
+
+ def test_db_sync_downgrade(self):
+ with utils.nested_contexts(
+ mock.patch.object(upgrades, '_find_migrate_repo'),
+ mock.patch.object(migrate_api, 'downgrade')
+ ) as (mock_find_repo, mock_downgrade):
+ mock_find_repo.return_value = self.return_value
+ self.mock_api_db_version.return_value = self.test_version + 1
+
+ upgrades._migrate_db_sync(
+ self.engine, self.path, self.test_version)
+
+ mock_downgrade.assert_called_once_with(
+ self.engine, self.return_value, self.test_version)
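The new module above drives the private migration helpers almost entirely through mock.patch.object with side_effect sequences (the first call raises DatabaseNotControlledError, the next returns a version). A self-contained sketch of that mocking pattern, using only the standard library and hypothetical names:

    from unittest import mock

    class Repo:
        def db_version(self):
            raise NotImplementedError

    def current_version(repo):
        try:
            return repo.db_version()
        except RuntimeError:
            # First call failed: pretend we put the db under version
            # control, then ask again.
            return repo.db_version()

    repo = Repo()
    with mock.patch.object(Repo, 'db_version',
                           side_effect=[RuntimeError('not controlled'), 42]):
        # The first call raises, the second returns 42, mirroring how the
        # tests above walk _migrate_db_version through both branches.
        assert current_version(repo) == 42
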
diff --git a/keystone/tests/unit/common/test_notifications.py b/keystone/tests/unit/common/test_notifications.py
index b0fb720f1..2fa9f2612 100644
--- a/keystone/tests/unit/common/test_notifications.py
+++ b/keystone/tests/unit/common/test_notifications.py
@@ -802,7 +802,7 @@ class CADFNotificationsForPCIDSSEvents(BaseNotificationTest):
password = uuid.uuid4().hex
new_password = uuid.uuid4().hex
expected_responses = [AssertionError, AssertionError, AssertionError,
- exception.AccountLocked]
+ exception.Unauthorized]
user_ref = unit.new_user_ref(domain_id=self.domain_id,
password=password)
user_ref = PROVIDERS.identity_api.create_user(user_ref)
@@ -1045,7 +1045,7 @@ class TestEventCallbacks(test_v3.RestfulTestCase):
Foo()
project_ref = unit.new_project_ref(domain_id=self.domain_id)
PROVIDERS.resource_api.create_project(project_ref['id'], project_ref)
- self.assertItemsEqual(['cb1', 'cb0'], callback_called)
+ self.assertCountEqual(['cb1', 'cb0'], callback_called)
def test_invalid_event_callbacks(self):
@notifications.listener
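For context on this family of changes: assertItemsEqual was the Python 2.7 name that unittest renamed to assertCountEqual in Python 3.2; the replacement checks that two sequences hold the same elements with the same multiplicities, ignoring order. A minimal sketch of the semantics (hypothetical test case):

    import unittest

    class Example(unittest.TestCase):
        def test_order_does_not_matter(self):
            # Same elements, same counts, different order: passes.
            self.assertCountEqual(['cb1', 'cb0'], ['cb0', 'cb1'])
            # Despite the name, it compares elements, not just lengths.
            with self.assertRaises(AssertionError):
                self.assertCountEqual(['a', 'a'], ['a', 'b'])
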
diff --git a/keystone/tests/unit/config_files/backend_ldap_sql.conf b/keystone/tests/unit/config_files/backend_ldap_sql.conf
index 96a0ffa98..c50d8dd40 100644
--- a/keystone/tests/unit/config_files/backend_ldap_sql.conf
+++ b/keystone/tests/unit/config_files/backend_ldap_sql.conf
@@ -5,7 +5,7 @@
#connection = mysql+pymysql://keystone:keystone@localhost/keystone?charset=utf8
#To Test PostgreSQL:
#connection = postgresql://keystone:keystone@localhost/keystone?client_encoding=utf8
-idle_timeout = 200
+connection_recycle_time = 200
[ldap]
url = fake://memory
diff --git a/keystone/tests/unit/config_files/backend_multi_ldap_sql.conf b/keystone/tests/unit/config_files/backend_multi_ldap_sql.conf
index 5185770b7..2795d5e28 100644
--- a/keystone/tests/unit/config_files/backend_multi_ldap_sql.conf
+++ b/keystone/tests/unit/config_files/backend_multi_ldap_sql.conf
@@ -6,4 +6,4 @@ connection = sqlite://
#connection = mysql+pymysql://keystone:keystone@localhost/keystone?charset=utf8
#To Test PostgreSQL:
#connection = postgresql://keystone:keystone@localhost/keystone?client_encoding=utf8
-idle_timeout = 200
+connection_recycle_time = 200
diff --git a/keystone/tests/unit/config_files/backend_sql.conf b/keystone/tests/unit/config_files/backend_sql.conf
index f2828e2ef..bc15d2f8e 100644
--- a/keystone/tests/unit/config_files/backend_sql.conf
+++ b/keystone/tests/unit/config_files/backend_sql.conf
@@ -5,4 +5,4 @@
#connection = mysql+pymysql://keystone:keystone@localhost/keystone?charset=utf8
#To Test PostgreSQL:
#connection = postgresql://keystone:keystone@localhost/keystone?client_encoding=utf8
-idle_timeout = 200
+connection_recycle_time = 200
diff --git a/keystone/tests/unit/config_files/deprecated.conf b/keystone/tests/unit/config_files/deprecated.conf
deleted file mode 100644
index 515e663a3..000000000
--- a/keystone/tests/unit/config_files/deprecated.conf
+++ /dev/null
@@ -1,8 +0,0 @@
-# Options in this file are deprecated. See test_config.
-
-[sql]
-# These options were deprecated in Icehouse with the switch to oslo's
-# db.sqlalchemy.
-
-connection = sqlite://deprecated
-idle_timeout = 54321
diff --git a/keystone/tests/unit/config_files/deprecated_override.conf b/keystone/tests/unit/config_files/deprecated_override.conf
deleted file mode 100644
index 1d1c926fe..000000000
--- a/keystone/tests/unit/config_files/deprecated_override.conf
+++ /dev/null
@@ -1,15 +0,0 @@
-# Options in this file are deprecated. See test_config.
-
-[sql]
-# These options were deprecated in Icehouse with the switch to oslo's
-# db.sqlalchemy.
-
-connection = sqlite://deprecated
-idle_timeout = 54321
-
-
-[database]
-# These are the new options from the [sql] section.
-
-connection = sqlite://new
-idle_timeout = 65432
diff --git a/keystone/tests/unit/contrib/federation/test_utils.py b/keystone/tests/unit/contrib/federation/test_utils.py
index f233ac56e..f9153cb09 100644
--- a/keystone/tests/unit/contrib/federation/test_utils.py
+++ b/keystone/tests/unit/contrib/federation/test_utils.py
@@ -777,7 +777,7 @@ class MappingRuleEngineTests(unit.BaseTestCase):
self.assertIsNotNone(mapped_properties)
self.assertEqual('opilotte', mapped_properties['user']['name'])
self.assertListEqual([], mapped_properties['group_names'])
- self.assertItemsEqual(['abc123', 'ghi789', 'klm012'],
+ self.assertCountEqual(['abc123', 'ghi789', 'klm012'],
mapped_properties['group_ids'])
def test_rule_engine_group_ids_mapping_blacklist(self):
@@ -793,7 +793,7 @@ class MappingRuleEngineTests(unit.BaseTestCase):
self.assertIsNotNone(mapped_properties)
self.assertEqual('opilotte', mapped_properties['user']['name'])
self.assertListEqual([], mapped_properties['group_names'])
- self.assertItemsEqual(['abc123', 'ghi789', 'klm012'],
+ self.assertCountEqual(['abc123', 'ghi789', 'klm012'],
mapped_properties['group_ids'])
def test_rule_engine_group_ids_mapping_only_one_group(self):
@@ -810,7 +810,7 @@ class MappingRuleEngineTests(unit.BaseTestCase):
self.assertIsNotNone(mapped_properties)
self.assertEqual('opilotte', mapped_properties['user']['name'])
self.assertListEqual([], mapped_properties['group_names'])
- self.assertItemsEqual(['210mlk', '321cba'],
+ self.assertCountEqual(['210mlk', '321cba'],
mapped_properties['group_ids'])
def test_mapping_projects(self):
diff --git a/keystone/tests/unit/core.py b/keystone/tests/unit/core.py
index 918a87253..2a6c12038 100644
--- a/keystone/tests/unit/core.py
+++ b/keystone/tests/unit/core.py
@@ -11,6 +11,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+
import atexit
import base64
import contextlib
@@ -18,13 +19,14 @@ import datetime
import functools
import hashlib
import json
+import secrets
+
import ldap
import os
import shutil
import socket
import sys
import uuid
-import warnings
import fixtures
import flask
@@ -36,7 +38,6 @@ from oslo_context import fixture as oslo_ctx_fixture
from oslo_log import fixture as log_fixture
from oslo_log import log
from oslo_utils import timeutils
-from sqlalchemy import exc
import testtools
from testtools import testcase
@@ -422,9 +423,9 @@ def new_ec2_credential(user_id, project_id=None, blob=None, **kwargs):
def new_totp_credential(user_id, project_id=None, blob=None):
if not blob:
- # NOTE(notmorgan): 20 bytes of data from os.urandom for
+ # NOTE(notmorgan): 20 bytes of data from secrets.token_bytes for
# a totp secret.
- blob = base64.b32encode(os.urandom(20)).decode('utf-8')
+ blob = base64.b32encode(secrets.token_bytes(20)).decode('utf-8')
credential = new_credential_ref(user_id=user_id,
project_id=project_id,
blob=blob,
@@ -678,17 +679,9 @@ class BaseTestCase(testtools.TestCase):
self.useFixture(fixtures.MockPatchObject(sys, 'exit',
side_effect=UnexpectedExit))
self.useFixture(log_fixture.get_logging_handle_error_fixture())
+ self.stdlog = self.useFixture(ksfixtures.StandardLogging())
+ self.useFixture(ksfixtures.WarningsFixture())
- warnings.filterwarnings('error', category=DeprecationWarning,
- module='^keystone\\.')
- warnings.filterwarnings(
- 'ignore', category=DeprecationWarning,
- message=r"Using function/method 'db_version\(\)' is deprecated")
- warnings.simplefilter('error', exc.SAWarning)
- if hasattr(exc, "RemovedIn20Warning"):
- warnings.simplefilter('ignore', exc.RemovedIn20Warning)
-
- self.addCleanup(warnings.resetwarnings)
# Ensure we have an empty threadlocal context at the start of each
# test.
self.assertIsNone(oslo_context.get_current())
@@ -1023,8 +1016,8 @@ class TestCase(BaseTestCase):
value,
"%s != %s" % (expected, value))
- def assertNotEmpty(self, l):
- self.assertGreater(len(l), 0)
+ def assertNotEmpty(self, iterable):
+ self.assertGreater(len(iterable), 0)
def assertUserDictEqual(self, expected, observed, message=''):
"""Assert that a user dict is equal to another user dict.
diff --git a/keystone/tests/unit/endpoint_policy/backends/test_base.py b/keystone/tests/unit/endpoint_policy/backends/test_base.py
index 279b3d7f5..7c63fb388 100644
--- a/keystone/tests/unit/endpoint_policy/backends/test_base.py
+++ b/keystone/tests/unit/endpoint_policy/backends/test_base.py
@@ -95,7 +95,7 @@ class DriverTestCase(object):
policy_id=policy_id)
associations = self.driver.list_associations_for_policy(policy_id)
- self.assertItemsEqual([first, second], associations)
+ self.assertCountEqual([first, second], associations)
def test_delete_association_by_endpoint(self):
endpoint_id = uuid.uuid4().hex
diff --git a/keystone/tests/unit/identity/shadow_users/test_backend.py b/keystone/tests/unit/identity/shadow_users/test_backend.py
index ee89edf40..da43a2141 100644
--- a/keystone/tests/unit/identity/shadow_users/test_backend.py
+++ b/keystone/tests/unit/identity/shadow_users/test_backend.py
@@ -66,7 +66,7 @@ class ShadowUsersBackendTests(object):
user_created = PROVIDERS.shadow_users_api.create_nonlocal_user(user)
self.assertEqual(user_created['id'], user['id'])
user_found = PROVIDERS.shadow_users_api.get_user(user_created['id'])
- self.assertItemsEqual(user_created, user_found)
+ self.assertCountEqual(user_created, user_found)
def test_create_federated_user_unique_constraint(self):
user_dict = PROVIDERS.shadow_users_api.create_federated_user(
@@ -95,7 +95,7 @@ class ShadowUsersBackendTests(object):
self.federated_user["idp_id"],
self.federated_user["protocol_id"],
self.federated_user["unique_id"])
- self.assertItemsEqual(user_dict_create, user_dict_get)
+ self.assertCountEqual(user_dict_create, user_dict_get)
self.assertEqual(user_dict_create["id"], user_dict_get["id"])
def test_update_federated_user_display_name(self):
diff --git a/keystone/tests/unit/identity/test_backend_sql.py b/keystone/tests/unit/identity/test_backend_sql.py
index 8c7fb3103..0a990024d 100644
--- a/keystone/tests/unit/identity/test_backend_sql.py
+++ b/keystone/tests/unit/identity/test_backend_sql.py
@@ -613,7 +613,7 @@ class LockingOutUserTests(test_backend_sql.SqlTests):
)
# test locking out user after max failed attempts
self._fail_auth_repeatedly(self.user['id'])
- self.assertRaises(exception.AccountLocked,
+ self.assertRaises(exception.Unauthorized,
PROVIDERS.identity_api.authenticate,
user_id=self.user['id'],
password=uuid.uuid4().hex)
@@ -642,7 +642,7 @@ class LockingOutUserTests(test_backend_sql.SqlTests):
with self.make_request():
# lockout user
self._fail_auth_repeatedly(self.user['id'])
- self.assertRaises(exception.AccountLocked,
+ self.assertRaises(exception.Unauthorized,
PROVIDERS.identity_api.authenticate,
user_id=self.user['id'],
password=uuid.uuid4().hex)
@@ -661,7 +661,7 @@ class LockingOutUserTests(test_backend_sql.SqlTests):
with self.make_request():
# lockout user
self._fail_auth_repeatedly(self.user['id'])
- self.assertRaises(exception.AccountLocked,
+ self.assertRaises(exception.Unauthorized,
PROVIDERS.identity_api.authenticate,
user_id=self.user['id'],
password=uuid.uuid4().hex)
@@ -687,7 +687,7 @@ class LockingOutUserTests(test_backend_sql.SqlTests):
with self.make_request():
# lockout user
self._fail_auth_repeatedly(self.user['id'])
- self.assertRaises(exception.AccountLocked,
+ self.assertRaises(exception.Unauthorized,
PROVIDERS.identity_api.authenticate,
user_id=self.user['id'],
password=uuid.uuid4().hex)
@@ -697,7 +697,7 @@ class LockingOutUserTests(test_backend_sql.SqlTests):
# repeat failed auth the max times
self._fail_auth_repeatedly(self.user['id'])
# test user account is locked
- self.assertRaises(exception.AccountLocked,
+ self.assertRaises(exception.Unauthorized,
PROVIDERS.identity_api.authenticate,
user_id=self.user['id'],
password=uuid.uuid4().hex)
diff --git a/keystone/tests/unit/identity/test_backends.py b/keystone/tests/unit/identity/test_backends.py
index 43537f549..97d0863cd 100644
--- a/keystone/tests/unit/identity/test_backends.py
+++ b/keystone/tests/unit/identity/test_backends.py
@@ -91,7 +91,7 @@ class IdentityTests(object):
# it easier to authenticate in tests, but should
# not be returned by the api
user.pop('password')
- self.assertDictContainsSubset(user, user_ref)
+ self.assertLessEqual(user.items(), user_ref.items())
role_list = PROVIDERS.assignment_api.get_roles_for_user_and_project(
new_user['id'], self.project_baz['id'])
self.assertEqual(1, len(role_list))
@@ -164,12 +164,14 @@ class IdentityTests(object):
PROVIDERS.identity_api.get_user(ref['id'])
# update using identity api and get back updated user.
user_updated = PROVIDERS.identity_api.update_user(ref['id'], user)
- self.assertDictContainsSubset(
- PROVIDERS.identity_api.get_user(ref['id']), user_updated
+ self.assertLessEqual(
+ PROVIDERS.identity_api.get_user(ref['id']).items(),
+ user_updated.items()
)
- self.assertDictContainsSubset(
+ self.assertLessEqual(
PROVIDERS.identity_api.get_user_by_name(
- ref['name'], ref['domain_id']), user_updated
+ ref['name'], ref['domain_id']).items(),
+ user_updated.items()
)
def test_get_user_returns_not_found(self):
@@ -215,15 +217,16 @@ class IdentityTests(object):
)
user['description'] = uuid.uuid4().hex
user_updated = PROVIDERS.identity_api.update_user(ref['id'], user)
- self.assertDictContainsSubset(
- PROVIDERS.identity_api.get_user(ref['id']), user_updated
+ self.assertLessEqual(
+ PROVIDERS.identity_api.get_user(ref['id']).items(),
+ user_updated.items()
)
- self.assertDictContainsSubset(
+ self.assertLessEqual(
PROVIDERS.identity_api.get_user_by_name(
ref['name'],
ref['domain_id']
- ),
- user_updated
+ ).items(),
+ user_updated.items()
)
def test_get_user_by_name_returns_not_found(self):
@@ -833,12 +836,12 @@ class IdentityTests(object):
group = unit.new_group_ref(domain_id=domain['id'])
group = PROVIDERS.identity_api.create_group(group)
group_ref = PROVIDERS.identity_api.get_group(group['id'])
- self.assertDictContainsSubset(group, group_ref)
+ self.assertLessEqual(group.items(), group_ref.items())
group['name'] = uuid.uuid4().hex
PROVIDERS.identity_api.update_group(group['id'], group)
group_ref = PROVIDERS.identity_api.get_group(group['id'])
- self.assertDictContainsSubset(group, group_ref)
+ self.assertLessEqual(group.items(), group_ref.items())
PROVIDERS.identity_api.delete_group(group['id'])
self.assertRaises(exception.GroupNotFound,
@@ -908,8 +911,9 @@ class IdentityTests(object):
group['name'] = uuid.uuid4().hex
group_ref = PROVIDERS.identity_api.update_group(group['id'], group)
# after updating through identity api, get updated group
- self.assertDictContainsSubset(
- PROVIDERS.identity_api.get_group(group['id']), group_ref
+ self.assertLessEqual(
+ PROVIDERS.identity_api.get_group(group['id']).items(),
+ group_ref.items()
)
def test_create_duplicate_group_name_fails(self):
@@ -950,14 +954,14 @@ class IdentityTests(object):
user_ref = PROVIDERS.identity_api.get_user(user['id'])
del user_dict['password']
user_ref_dict = {x: user_ref[x] for x in user_ref}
- self.assertDictContainsSubset(user_dict, user_ref_dict)
+ self.assertLessEqual(user_dict.items(), user_ref_dict.items())
user_dict['password'] = uuid.uuid4().hex
PROVIDERS.identity_api.update_user(user['id'], user_dict)
user_ref = PROVIDERS.identity_api.get_user(user['id'])
del user_dict['password']
user_ref_dict = {x: user_ref[x] for x in user_ref}
- self.assertDictContainsSubset(user_dict, user_ref_dict)
+ self.assertLessEqual(user_dict.items(), user_ref_dict.items())
PROVIDERS.identity_api.delete_user(user['id'])
self.assertRaises(exception.UserNotFound,
diff --git a/keystone/tests/unit/ksfixtures/__init__.py b/keystone/tests/unit/ksfixtures/__init__.py
index 7a92c42cd..a5fedbfc8 100644
--- a/keystone/tests/unit/ksfixtures/__init__.py
+++ b/keystone/tests/unit/ksfixtures/__init__.py
@@ -16,4 +16,6 @@ from keystone.tests.unit.ksfixtures.backendloader import BackendLoader # noqa
from keystone.tests.unit.ksfixtures.cache import Cache # noqa
from keystone.tests.unit.ksfixtures.jws_key_repository import JWSKeyRepository # noqa
from keystone.tests.unit.ksfixtures.key_repository import KeyRepository # noqa
+from keystone.tests.unit.ksfixtures.logging import StandardLogging # noqa
from keystone.tests.unit.ksfixtures.policy import Policy # noqa
+from keystone.tests.unit.ksfixtures.warnings import WarningsFixture # noqa
diff --git a/keystone/tests/unit/ksfixtures/logging.py b/keystone/tests/unit/ksfixtures/logging.py
new file mode 100644
index 000000000..419880deb
--- /dev/null
+++ b/keystone/tests/unit/ksfixtures/logging.py
@@ -0,0 +1,114 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import logging as std_logging
+import os
+
+import fixtures
+
+
+class NullHandler(std_logging.Handler):
+ """Custom default NullHandler to attempt to format the record.
+
+ Used to detect formatting errors in debug level logs without saving the
+ logs.
+ """
+
+ def handle(self, record):
+ self.format(record)
+
+ def emit(self, record):
+ pass
+
+ def createLock(self):
+ self.lock = None
+
+
+class StandardLogging(fixtures.Fixture):
+ """Setup Logging redirection for tests.
+
+ There are a number of things we want to handle with logging in tests:
+
+ * Redirect the logging to somewhere that we can test or dump it later.
+
+    * Ensure that as many DEBUG messages as possible are actually
+       executed, to ensure they are syntactically valid (historically
+       they often have not been).
+
+ * Ensure that we create useful output for tests that doesn't
+ overwhelm the testing system (which means we can't capture the
+ 100 MB of debug logging on every run).
+
+    To do this we create a logger fixture at the root level which
+    defaults to INFO, and a NullHandler at DEBUG, which lets us
+    execute log messages at DEBUG but not keep the output.
+
+    To support local debugging, OS_DEBUG=True can be set in the
+    environment, which will print out the full debug logging.
+
+    There is also a set of overrides for particularly verbose
+    modules, which are set to log at less than INFO.
+ """
+
+ def setUp(self):
+ super().setUp()
+
+ # set root logger to debug
+ root = std_logging.getLogger()
+ root.setLevel(std_logging.DEBUG)
+
+ # supports collecting debug level for local runs
+ if os.environ.get('OS_DEBUG') in ('True', 'true', '1', 'yes'):
+ level = std_logging.DEBUG
+ else:
+ level = std_logging.INFO
+
+ # Collect logs
+ fs = '%(asctime)s %(levelname)s [%(name)s] %(message)s'
+ self.logger = self.useFixture(
+ fixtures.FakeLogger(format=fs, level=None))
+ # TODO(sdague): why can't we send level through the fake
+ # logger? Tests prove that it breaks, but it's worth getting
+ # to the bottom of.
+ root.handlers[0].setLevel(level)
+
+ if level > std_logging.DEBUG:
+ # Just attempt to format debug level logs, but don't save them
+ handler = NullHandler()
+ self.useFixture(fixtures.LogHandler(handler, nuke_handlers=False))
+ handler.setLevel(std_logging.DEBUG)
+
+ # Don't log every single DB migration step
+ std_logging.getLogger(
+ 'migrate.versioning.api').setLevel(std_logging.WARNING)
+ # Or alembic for model comparisons.
+ std_logging.getLogger('alembic').setLevel(std_logging.WARNING)
+ # Or oslo_db provisioning steps
+ std_logging.getLogger('oslo_db.sqlalchemy').setLevel(
+ std_logging.WARNING)
+
+ # At times we end up calling back into main() functions in
+ # testing. This has the possibility of calling logging.setup
+ # again, which completely unwinds the logging capture we've
+        # created here. Once we've set up the logging the way we want,
+ # disable the ability for the test to change this.
+ def fake_logging_setup(*args):
+ pass
+
+ self.useFixture(
+ fixtures.MonkeyPatch('oslo_log.log.setup', fake_logging_setup))
+
+ def delete_stored_logs(self):
+        # NOTE(gibi): this depends on the internals of fixtures.FakeLogger.
+        # This could be enhanced once
+        # https://github.com/testing-cabal/fixtures/pull/42 is released.
+ self.logger._output.truncate(0)
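As a usage sketch of the fixture above: INFO-and-above records are captured by the wrapped FakeLogger, while DEBUG records are only formatted and dropped unless OS_DEBUG=True is exported before the run. The test class and assertion below are hypothetical and assume keystone's test tree is importable, with the fixture attached the same way core.py does above:

    import logging

    from oslotest import base as test_base

    from keystone.tests.unit import ksfixtures

    LOG = logging.getLogger(__name__)

    class ExampleTest(test_base.BaseTestCase):
        def setUp(self):
            super().setUp()
            self.stdlog = self.useFixture(ksfixtures.StandardLogging())

        def test_emits_warning(self):
            LOG.warning('something noteworthy happened')
            # The captured (INFO and above) output is exposed through the
            # wrapped FakeLogger's output property.
            self.assertIn('something noteworthy happened',
                          self.stdlog.logger.output)
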
diff --git a/keystone/tests/unit/ksfixtures/warnings.py b/keystone/tests/unit/ksfixtures/warnings.py
new file mode 100644
index 000000000..43519925f
--- /dev/null
+++ b/keystone/tests/unit/ksfixtures/warnings.py
@@ -0,0 +1,79 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import warnings
+
+import fixtures
+from sqlalchemy import exc as sqla_exc
+
+
+class WarningsFixture(fixtures.Fixture):
+ """Filters out warnings during test runs."""
+
+ def setUp(self):
+ super().setUp()
+
+ self._original_warning_filters = warnings.filters[:]
+
+        # NOTE(stephenfin): Make deprecation warnings only happen once.
+        # Otherwise the output gets extremely noisy, given the way that
+        # upstream python libs use them.
+ warnings.simplefilter('once', DeprecationWarning)
+
+ warnings.filterwarnings(
+ 'error',
+ category=DeprecationWarning,
+ module='^keystone\\.',
+ )
+
+ warnings.filterwarnings(
+ 'ignore',
+ message=(
+ 'Policy enforcement is depending on the value of '
+ '(token|group_ids). '
+ 'This key is deprecated. Please update your policy '
+ 'file to use the standard policy values.'
+ ),
+ )
+
+ # NOTE(stephenfin): Ignore scope check UserWarnings from oslo.policy.
+ warnings.filterwarnings(
+ 'ignore',
+ message="Policy .* failed scope check",
+ category=UserWarning,
+ )
+
+ # TODO(stephenfin): This will be fixed once we drop sqlalchemy-migrate
+ warnings.filterwarnings(
+ 'ignore',
+ category=DeprecationWarning,
+ message=r"Using function/method 'db_version\(\)' is deprecated",
+ )
+
+ # TODO(stephenfin): Remove these when we drop support for
+ # sqlalchemy-migrate
+ warnings.filterwarnings(
+ 'ignore',
+ category=sqla_exc.SADeprecationWarning,
+ module='migrate.versioning',
+ )
+
+ # TODO(stephenfin): We should filter on the specific RemovedIn20Warning
+        # warnings that affect us, so that we can slowly start addressing them.
+ warnings.simplefilter('error', sqla_exc.SAWarning)
+ if hasattr(sqla_exc, 'RemovedIn20Warning'):
+ warnings.simplefilter('ignore', sqla_exc.RemovedIn20Warning)
+
+ self.addCleanup(self._reset_warning_filters)
+
+ def _reset_warning_filters(self):
+ warnings.filters[:] = self._original_warning_filters
diff --git a/keystone/tests/unit/policy/backends/test_base.py b/keystone/tests/unit/policy/backends/test_base.py
index e7f39a249..94f07b050 100644
--- a/keystone/tests/unit/policy/backends/test_base.py
+++ b/keystone/tests/unit/policy/backends/test_base.py
@@ -38,7 +38,7 @@ class DriverTestCase(object):
policies = self.driver.list_policies()
- self.assertItemsEqual([self.policy, another_policy], policies)
+ self.assertCountEqual([self.policy, another_policy], policies)
def test_get_policy(self):
self.assertEqual(self.policy,
diff --git a/keystone/tests/unit/resource/test_backends.py b/keystone/tests/unit/resource/test_backends.py
index 627789533..18935ccdc 100644
--- a/keystone/tests/unit/resource/test_backends.py
+++ b/keystone/tests/unit/resource/test_backends.py
@@ -847,12 +847,12 @@ class ResourceTests(object):
project = unit.new_project_ref(domain_id=domain['id'])
PROVIDERS.resource_api.create_project(project['id'], project)
project_ref = PROVIDERS.resource_api.get_project(project['id'])
- self.assertDictContainsSubset(project, project_ref)
+ self.assertLessEqual(project.items(), project_ref.items())
project['name'] = uuid.uuid4().hex
PROVIDERS.resource_api.update_project(project['id'], project)
project_ref = PROVIDERS.resource_api.get_project(project['id'])
- self.assertDictContainsSubset(project, project_ref)
+ self.assertLessEqual(project.items(), project_ref.items())
PROVIDERS.resource_api.delete_project(project['id'])
self.assertRaises(exception.ProjectNotFound,
@@ -1381,20 +1381,23 @@ class ResourceTests(object):
domain_id, updated_project_domain_ref
)
# Verify get_domain still returns the domain
- self.assertDictContainsSubset(
- domain_ref, PROVIDERS.resource_api.get_domain(domain_id))
+ self.assertLessEqual(
+ domain_ref.items(),
+ PROVIDERS.resource_api.get_domain(domain_id).items())
# Invalidate cache
PROVIDERS.resource_api.get_domain.invalidate(
PROVIDERS.resource_api, domain_id
)
# Verify get_domain returns the updated domain
- self.assertDictContainsSubset(
- updated_domain_ref, PROVIDERS.resource_api.get_domain(domain_id))
+ self.assertLessEqual(
+ updated_domain_ref.items(),
+ PROVIDERS.resource_api.get_domain(domain_id).items())
# Update the domain back to original ref, using the assignment api
# manager
PROVIDERS.resource_api.update_domain(domain_id, domain_ref)
- self.assertDictContainsSubset(
- domain_ref, PROVIDERS.resource_api.get_domain(domain_id))
+ self.assertLessEqual(
+ domain_ref.items(),
+ PROVIDERS.resource_api.get_domain(domain_id).items())
# Make sure domain is 'disabled', bypass resource api manager
project_domain_ref_disabled = project_domain_ref.copy()
project_domain_ref_disabled['enabled'] = False
@@ -1407,8 +1410,9 @@ class ResourceTests(object):
# Delete domain, bypassing resource api manager
PROVIDERS.resource_api.driver.delete_project(domain_id)
# Verify get_domain still returns the domain
- self.assertDictContainsSubset(
- domain_ref, PROVIDERS.resource_api.get_domain(domain_id))
+ self.assertLessEqual(
+ domain_ref.items(),
+ PROVIDERS.resource_api.get_domain(domain_id).items())
# Invalidate cache
PROVIDERS.resource_api.get_domain.invalidate(
PROVIDERS.resource_api, domain_id
@@ -1467,26 +1471,29 @@ class ResourceTests(object):
project_id, updated_project
)
# Verify get_project still returns the original project_ref
- self.assertDictContainsSubset(
- project, PROVIDERS.resource_api.get_project(project_id))
+ self.assertLessEqual(
+ project.items(),
+ PROVIDERS.resource_api.get_project(project_id).items())
# Invalidate cache
PROVIDERS.resource_api.get_project.invalidate(
PROVIDERS.resource_api, project_id
)
# Verify get_project now returns the new project
- self.assertDictContainsSubset(
- updated_project,
- PROVIDERS.resource_api.get_project(project_id))
+ self.assertLessEqual(
+ updated_project.items(),
+ PROVIDERS.resource_api.get_project(project_id).items())
# Update project using the resource_api manager back to original
PROVIDERS.resource_api.update_project(project['id'], project)
# Verify get_project returns the original project_ref
- self.assertDictContainsSubset(
- project, PROVIDERS.resource_api.get_project(project_id))
+ self.assertLessEqual(
+ project.items(),
+ PROVIDERS.resource_api.get_project(project_id).items())
# Delete project bypassing resource
PROVIDERS.resource_api.driver.delete_project(project_id)
# Verify get_project still returns the project_ref
- self.assertDictContainsSubset(
- project, PROVIDERS.resource_api.get_project(project_id))
+ self.assertLessEqual(
+ project.items(),
+ PROVIDERS.resource_api.get_project(project_id).items())
# Invalidate cache
PROVIDERS.resource_api.get_project.invalidate(
PROVIDERS.resource_api, project_id
diff --git a/keystone/tests/unit/test_associate_project_endpoint_extension.py b/keystone/tests/unit/test_associate_project_endpoint_extension.py
index ad1d5275c..78673f21a 100644
--- a/keystone/tests/unit/test_associate_project_endpoint_extension.py
+++ b/keystone/tests/unit/test_associate_project_endpoint_extension.py
@@ -352,7 +352,7 @@ class EndpointFilterCRUDTestCase(EndpointFilterTestCase):
ep_id_list = [catalog[0]['endpoints'][0]['id'],
catalog[0]['endpoints'][1]['id']]
- self.assertItemsEqual([self.endpoint_id, endpoint_id2], ep_id_list)
+ self.assertCountEqual([self.endpoint_id, endpoint_id2], ep_id_list)
@unit.skip_if_cache_disabled('catalog')
def test_remove_endpoint_from_project_invalidates_cache(self):
@@ -381,7 +381,7 @@ class EndpointFilterCRUDTestCase(EndpointFilterTestCase):
ep_id_list = [catalog[0]['endpoints'][0]['id'],
catalog[0]['endpoints'][1]['id']]
self.assertEqual(2, len(catalog[0]['endpoints']))
- self.assertItemsEqual([self.endpoint_id, endpoint_id2], ep_id_list)
+ self.assertCountEqual([self.endpoint_id, endpoint_id2], ep_id_list)
# remove the endpoint2 from the default project, bypassing
# catalog_api API manager.
@@ -1340,7 +1340,7 @@ class EndpointGroupCRUDTestCase(EndpointFilterTestCase):
ep_id_list = [catalog[0]['endpoints'][0]['id'],
catalog[0]['endpoints'][1]['id']]
- self.assertItemsEqual([self.endpoint_id, endpoint_id2], ep_id_list)
+ self.assertCountEqual([self.endpoint_id, endpoint_id2], ep_id_list)
@unit.skip_if_cache_disabled('catalog')
def test_remove_endpoint_group_from_project_invalidates_cache(self):
@@ -1382,7 +1382,7 @@ class EndpointGroupCRUDTestCase(EndpointFilterTestCase):
ep_id_list = [catalog[0]['endpoints'][0]['id'],
catalog[0]['endpoints'][1]['id']]
- self.assertItemsEqual([self.endpoint_id, endpoint_id2], ep_id_list)
+ self.assertCountEqual([self.endpoint_id, endpoint_id2], ep_id_list)
# remove endpoint_group project association, bypassing
# catalog_api API manager.
diff --git a/keystone/tests/unit/test_backend_id_mapping_sql.py b/keystone/tests/unit/test_backend_id_mapping_sql.py
index baee34e99..7df4ad3d4 100644
--- a/keystone/tests/unit/test_backend_id_mapping_sql.py
+++ b/keystone/tests/unit/test_backend_id_mapping_sql.py
@@ -33,7 +33,7 @@ class SqlIDMappingTable(test_backend_sql.SqlModels):
def test_id_mapping(self):
cols = (('public_id', sql.String, 64),
('domain_id', sql.String, 64),
- ('local_id', sql.String, 64),
+ ('local_id', sql.String, 255),
('entity_type', sql.Enum, None))
self.assertExpectedSchema('id_mapping', cols)
@@ -169,6 +169,26 @@ class SqlIDMapping(test_backend_sql.SqlTests):
self.assertEqual(
public_id, PROVIDERS.id_mapping_api.get_public_id(local_entity))
+ def test_id_mapping_handles_ids_greater_than_64_characters(self):
+ initial_mappings = len(mapping_sql.list_id_mappings())
+ local_id = 'Aa' * 100
+ local_entity = {'domain_id': self.domainA['id'],
+ 'local_id': local_id,
+ 'entity_type': mapping.EntityType.GROUP}
+
+ # Check no mappings for the new local entity
+ self.assertIsNone(PROVIDERS.id_mapping_api.get_public_id(local_entity))
+
+ # Create the new mapping and then read it back
+ public_id = PROVIDERS.id_mapping_api.create_id_mapping(local_entity)
+ self.assertThat(mapping_sql.list_id_mappings(),
+ matchers.HasLength(initial_mappings + 1))
+ self.assertEqual(
+ public_id, PROVIDERS.id_mapping_api.get_public_id(local_entity))
+ self.assertEqual(
+ local_id,
+ PROVIDERS.id_mapping_api.get_id_mapping(public_id)['local_id'])
+
def test_delete_public_id_is_silent(self):
# Test that deleting an invalid public key is silent
PROVIDERS.id_mapping_api.delete_id_mapping(uuid.uuid4().hex)
@@ -368,7 +388,7 @@ class SqlIDMapping(test_backend_sql.SqlTests):
)
)
domain_a_mappings = [m.to_dict() for m in domain_a_mappings]
- self.assertItemsEqual(local_entities[:2], domain_a_mappings)
+ self.assertCountEqual(local_entities[:2], domain_a_mappings)
def test_get_domain_mapping_list_by_user_entity_type(self):
local_entities = self._prepare_domain_mappings_for_list()
@@ -384,7 +404,7 @@ class SqlIDMapping(test_backend_sql.SqlTests):
)
domain_b_mappings_user = [m.to_dict()
for m in domain_b_mappings_user]
- self.assertItemsEqual(local_entities[-2:], domain_b_mappings_user)
+ self.assertCountEqual(local_entities[-2:], domain_b_mappings_user)
def test_get_domain_mapping_list_by_group_entity_type(self):
local_entities = self._prepare_domain_mappings_for_list()
@@ -401,4 +421,4 @@ class SqlIDMapping(test_backend_sql.SqlTests):
)
)
domain_b_mappings_group = domain_b_mappings_group.first().to_dict()
- self.assertItemsEqual(local_entities[2], domain_b_mappings_group)
+ self.assertCountEqual(local_entities[2], domain_b_mappings_group)
diff --git a/keystone/tests/unit/test_backend_ldap.py b/keystone/tests/unit/test_backend_ldap.py
index adb354764..bd804d7e7 100644
--- a/keystone/tests/unit/test_backend_ldap.py
+++ b/keystone/tests/unit/test_backend_ldap.py
@@ -804,7 +804,7 @@ class BaseLDAPIdentity(LDAPTestSetup, IdentityTests, AssignmentTests,
del user_dict['password']
user_ref = PROVIDERS.identity_api.get_user(user['id'])
user_ref_dict = {x: user_ref[x] for x in user_ref}
- self.assertDictContainsSubset(user_dict, user_ref_dict)
+ self.assertLessEqual(user_dict.items(), user_ref_dict.items())
user_dict['password'] = uuid.uuid4().hex
PROVIDERS.identity_api.update_user(user['id'], user_dict)
@@ -814,7 +814,7 @@ class BaseLDAPIdentity(LDAPTestSetup, IdentityTests, AssignmentTests,
del user_dict['password']
user_ref = PROVIDERS.identity_api.get_user(user['id'])
user_ref_dict = {x: user_ref[x] for x in user_ref}
- self.assertDictContainsSubset(user_dict, user_ref_dict)
+ self.assertLessEqual(user_dict.items(), user_ref_dict.items())
# The group and domain CRUD tests below override the standard ones in
# unit.identity.test_backends.py so that we can exclude the update name
@@ -860,8 +860,9 @@ class BaseLDAPIdentity(LDAPTestSetup, IdentityTests, AssignmentTests,
PROVIDERS.identity_api.get_group(group['id'])
group['description'] = uuid.uuid4().hex
group_ref = PROVIDERS.identity_api.update_group(group['id'], group)
- self.assertDictContainsSubset(
- PROVIDERS.identity_api.get_group(group['id']), group_ref
+ self.assertLessEqual(
+ PROVIDERS.identity_api.get_group(group['id']).items(),
+ group_ref.items()
)
@unit.skip_if_cache_disabled('identity')
@@ -878,14 +879,15 @@ class BaseLDAPIdentity(LDAPTestSetup, IdentityTests, AssignmentTests,
PROVIDERS.identity_api.get_user(ref['id'])
# update using identity api and get back updated user.
user_updated = PROVIDERS.identity_api.update_user(ref['id'], user)
- self.assertDictContainsSubset(
- PROVIDERS.identity_api.get_user(ref['id']), user_updated
+ self.assertLessEqual(
+ PROVIDERS.identity_api.get_user(ref['id']).items(),
+ user_updated.items()
)
- self.assertDictContainsSubset(
+ self.assertLessEqual(
PROVIDERS.identity_api.get_user_by_name(
ref['name'], ref['domain_id']
- ),
- user_updated
+ ).items(),
+ user_updated.items()
)
@unit.skip_if_cache_disabled('identity')
@@ -899,14 +901,15 @@ class BaseLDAPIdentity(LDAPTestSetup, IdentityTests, AssignmentTests,
)
user['description'] = uuid.uuid4().hex
user_updated = PROVIDERS.identity_api.update_user(ref['id'], user)
- self.assertDictContainsSubset(
- PROVIDERS.identity_api.get_user(ref['id']), user_updated
+ self.assertLessEqual(
+ PROVIDERS.identity_api.get_user(ref['id']).items(),
+ user_updated.items()
)
- self.assertDictContainsSubset(
+ self.assertLessEqual(
PROVIDERS.identity_api.get_user_by_name(
ref['name'], ref['domain_id']
- ),
- user_updated
+ ).items(),
+ user_updated.items()
)
def test_create_user_none_mapping(self):
@@ -1633,26 +1636,29 @@ class LDAPIdentity(BaseLDAPIdentity):
project_id, updated_project
)
# Verify get_project still returns the original project_ref
- self.assertDictContainsSubset(
- project, PROVIDERS.resource_api.get_project(project_id))
+ self.assertLessEqual(
+ project.items(),
+ PROVIDERS.resource_api.get_project(project_id).items())
# Invalidate cache
PROVIDERS.resource_api.get_project.invalidate(
PROVIDERS.resource_api, project_id
)
# Verify get_project now returns the new project
- self.assertDictContainsSubset(
- updated_project,
- PROVIDERS.resource_api.get_project(project_id))
+ self.assertLessEqual(
+ updated_project.items(),
+ PROVIDERS.resource_api.get_project(project_id).items())
# Update project using the resource_api manager back to original
PROVIDERS.resource_api.update_project(project['id'], project)
# Verify get_project returns the original project_ref
- self.assertDictContainsSubset(
- project, PROVIDERS.resource_api.get_project(project_id))
+ self.assertLessEqual(
+ project.items(),
+ PROVIDERS.resource_api.get_project(project_id).items())
# Delete project bypassing resource_api
PROVIDERS.resource_api.driver.delete_project(project_id)
# Verify get_project still returns the project_ref
- self.assertDictContainsSubset(
- project, PROVIDERS.resource_api.get_project(project_id))
+ self.assertLessEqual(
+ project.items(),
+ PROVIDERS.resource_api.get_project(project_id).items())
# Invalidate cache
PROVIDERS.resource_api.get_project.invalidate(
PROVIDERS.resource_api, project_id
diff --git a/keystone/tests/unit/test_backend_sql.py b/keystone/tests/unit/test_backend_sql.py
index 7e53dce43..8738d3d68 100644
--- a/keystone/tests/unit/test_backend_sql.py
+++ b/keystone/tests/unit/test_backend_sql.py
@@ -244,7 +244,7 @@ class SqlModels(SqlTests):
else:
actual_schema.append((column.name, type(column.type), None))
- self.assertItemsEqual(expected_schema, actual_schema)
+ self.assertCountEqual(expected_schema, actual_schema)
def test_user_model(self):
cols = (('id', sql.String, 64),
@@ -1194,7 +1194,7 @@ class SqlCatalog(SqlTests, catalog_tests.CatalogTests):
)
self.assertThat(catalog_ref, matchers.HasLength(2))
srv_id_list = [catalog_ref[0]['id'], catalog_ref[1]['id']]
- self.assertItemsEqual([srv_1['id'], srv_2['id']], srv_id_list)
+ self.assertCountEqual([srv_1['id'], srv_2['id']], srv_id_list)
class SqlPolicy(SqlTests, policy_tests.PolicyTests):
diff --git a/keystone/tests/unit/test_backend_templated.py b/keystone/tests/unit/test_backend_templated.py
index 4b56b37b2..ad103eb1b 100644
--- a/keystone/tests/unit/test_backend_templated.py
+++ b/keystone/tests/unit/test_backend_templated.py
@@ -93,7 +93,7 @@ class TestTemplatedCatalog(unit.TestCase, catalog_tests.CatalogTests):
expected_endpoints = e.pop('endpoints')
observed_endpoints = o.pop('endpoints')
self.assertDictEqual(e, o)
- self.assertItemsEqual(expected_endpoints, observed_endpoints)
+ self.assertCountEqual(expected_endpoints, observed_endpoints)
def test_get_v3_catalog(self):
user_id = uuid.uuid4().hex
@@ -232,7 +232,7 @@ class TestTemplatedCatalog(unit.TestCase, catalog_tests.CatalogTests):
'enabled': True,
'name': "'Identity Service'",
'id': 'identity'}]
- self.assertItemsEqual(exp_services, services)
+ self.assertCountEqual(exp_services, services)
# NOTE(dstanek): the following methods have been overridden
# from unit.catalog.test_backends.CatalogTests.
diff --git a/keystone/tests/unit/test_cli.py b/keystone/tests/unit/test_cli.py
index e2c42ec8a..2f9bed064 100644
--- a/keystone/tests/unit/test_cli.py
+++ b/keystone/tests/unit/test_cli.py
@@ -25,7 +25,6 @@ import fixtures
import freezegun
import http.client
import oslo_config.fixture
-from oslo_db.sqlalchemy import migration
from oslo_log import log
from oslo_serialization import jsonutils
from oslo_upgradecheck import upgradecheck
@@ -755,18 +754,28 @@ class CliDBSyncTestCase(unit.BaseTestCase):
self.version = None
def setUp(self):
- super(CliDBSyncTestCase, self).setUp()
+ super().setUp()
self.config_fixture = self.useFixture(oslo_config.fixture.Config(CONF))
self.config_fixture.register_cli_opt(cli.command_opt)
- upgrades.offline_sync_database_to_version = mock.Mock()
- upgrades.expand_schema = mock.Mock()
- upgrades.migrate_data = mock.Mock()
- upgrades.contract_schema = mock.Mock()
+
+ self.patchers = patchers = [
+ mock.patch.object(upgrades, "offline_sync_database_to_version"),
+ mock.patch.object(upgrades, "expand_schema"),
+ mock.patch.object(upgrades, "migrate_data"),
+ mock.patch.object(upgrades, "contract_schema"),
+ ]
+ for p in patchers:
+ p.start()
self.command_check = False
self.command_expand = False
self.command_migrate = False
self.command_contract = False
+ def tearDown(self):
+ for p in self.patchers:
+ p.stop()
+ super().tearDown()
+
def _assert_correct_call(self, mocked_function):
for func in [upgrades.offline_sync_database_to_version,
upgrades.expand_schema,
@@ -805,17 +814,6 @@ class CliDBSyncTestCase(unit.BaseTestCase):
cli.DbSync.main()
self._assert_correct_call(upgrades.contract_schema)
- @mock.patch('keystone.cmd.cli.upgrades.get_db_version')
- def test_db_sync_check_when_database_is_empty(self, mocked_get_db_version):
- e = migration.exception.DBMigrationError("Invalid version")
- mocked_get_db_version.side_effect = e
- checker = cli.DbSync()
-
- log_info = self.useFixture(fixtures.FakeLogger(level=log.INFO))
- status = checker.check_db_sync_status()
- self.assertIn("not currently under version control", log_info.output)
- self.assertEqual(status, 2)
-
class TestMappingPopulate(unit.SQLDriverOverrides, unit.TestCase):
diff --git a/keystone/tests/unit/test_config.py b/keystone/tests/unit/test_config.py
index 634ccd9fe..321906b4f 100644
--- a/keystone/tests/unit/test_config.py
+++ b/keystone/tests/unit/test_config.py
@@ -41,35 +41,3 @@ class ConfigTestCase(unit.TestCase):
self.assertIsNone(CONF.auth.token)
# Check config.set_config_defaults() has set [profiler]enabled.
self.assertEqual(False, CONF.profiler.enabled)
-
-
-class DeprecatedTestCase(unit.TestCase):
- """Test using the original (deprecated) name for renamed options."""
-
- def config_files(self):
- config_files = super(DeprecatedTestCase, self).config_files()
- config_files.append(unit.dirs.tests_conf('deprecated.conf'))
- return config_files
-
- def test_sql(self):
- # Options in [sql] were moved to [database] in Icehouse for the change
- # to use oslo-incubator's db.sqlalchemy.sessions.
-
- self.assertEqual('sqlite://deprecated', CONF.database.connection)
- self.assertEqual(54321, CONF.database.idle_timeout)
-
-
-class DeprecatedOverrideTestCase(unit.TestCase):
- """Test using the deprecated AND new name for renamed options."""
-
- def config_files(self):
- config_files = super(DeprecatedOverrideTestCase, self).config_files()
- config_files.append(unit.dirs.tests_conf('deprecated_override.conf'))
- return config_files
-
- def test_sql(self):
- # Options in [sql] were moved to [database] in Icehouse for the change
- # to use oslo-incubator's db.sqlalchemy.sessions.
-
- self.assertEqual('sqlite://new', CONF.database.connection)
- self.assertEqual(65432, CONF.database.idle_timeout)
diff --git a/keystone/tests/unit/test_contrib_s3_core.py b/keystone/tests/unit/test_contrib_s3_core.py
index 90328938d..a9c8acd7c 100644
--- a/keystone/tests/unit/test_contrib_s3_core.py
+++ b/keystone/tests/unit/test_contrib_s3_core.py
@@ -121,9 +121,40 @@ class S3ContribCore(test_v3.RestfulTestCase):
self.assertIsNone(s3tokens.S3Resource._check_signature(
creds_ref, credentials))
+ def test_good_iam_signature_v4(self):
+ creds_ref = {'secret':
+ u'e7a7a2240136494986991a6598d9fb9f'}
+ credentials = {'token':
+ 'QVdTNC1ITUFDLVNIQTI1NgoyMDE1MDgyNFQxMTIwNDFaCjIw'
+ 'MTUwODI0L1JlZ2lvbk9uZS9pYW0vYXdzNF9yZXF1ZXN0CmYy'
+ 'MjE1NTgwZWViOWExNjczNTFiZDkzZTg2YzNiNmYwNGE5Mjhm'
+ 'NWM1NTIwYTM5MzVhNDUzNTQwYTA5NTY0YjU=',
+ 'signature':
+ 'db4e15b3040f6afaa9d9d16002de2fc3425b'
+ 'eea0c6ea8c1b2bb674f052030b7d'}
+
+ self.assertIsNone(s3tokens.S3Resource._check_signature(
+ creds_ref, credentials))
+
+ def test_good_sts_signature_v4(self):
+ creds_ref = {'secret':
+ u'e7a7a2240136494986991a6598d9fb9f'}
+ credentials = {'token':
+ 'QVdTNC1ITUFDLVNIQTI1NgoyMDE1MDgyNFQxMTIwNDFaCjIw'
+ 'MTUwODI0L1JlZ2lvbk9uZS9zdHMvYXdzNF9yZXF1ZXN0CmYy'
+ 'MjE1NTgwZWViOWExNjczNTFiZDkzZTg2YzNiNmYwNGE5Mjhm'
+ 'NWM1NTIwYTM5MzVhNDUzNTQwYTA5NTY0YjU=',
+ 'signature':
+ '3aa0b6f1414b92b2a32584068f83c6d09b7f'
+ 'daa11d4ea58912bbf1d8616ef56d'}
+
+ self.assertIsNone(s3tokens.S3Resource._check_signature(
+ creds_ref, credentials))
+
def test_bad_signature_v4(self):
creds_ref = {'secret':
u'e7a7a2240136494986991a6598d9fb9f'}
+ # the signature is wrong on an otherwise correctly formed token
credentials = {'token':
'QVdTNC1ITUFDLVNIQTI1NgoyMDE1MDgyNFQxMTIwNDFaCjIw'
'MTUwODI0L1JlZ2lvbk9uZS9zMy9hd3M0X3JlcXVlc3QKZjIy'
@@ -135,6 +166,57 @@ class S3ContribCore(test_v3.RestfulTestCase):
s3tokens.S3Resource._check_signature,
creds_ref, credentials)
+ def test_bad_service_v4(self):
+ creds_ref = {'secret':
+ u'e7a7a2240136494986991a6598d9fb9f'}
+ # use 'bad' as the service scope instead of a recognised service
+ credentials = {'token':
+ 'QVdTNC1ITUFDLVNIQTI1NgoyMDE1MDgyNFQxMTIwNDFaCjIw'
+ 'MTUwODI0L1JlZ2lvbk9uZS9iYWQvYXdzNF9yZXF1ZXN0CmYy'
+ 'MjE1NTgwZWViOWExNjczNTFiZDkzZTg2YzNiNmYwNGE5Mjhm'
+ 'NWM1NTIwYTM5MzVhNDUzNTQwYTA5NTY0YjU=',
+ 'signature':
+ '1a2dec50eb1bba97887d1103c2ead6a39911'
+ '98c4be2537cf14d40b64cceb888b'}
+
+ self.assertRaises(exception.Unauthorized,
+ s3tokens.S3Resource._check_signature,
+ creds_ref, credentials)
+
+ def test_bad_signing_key_v4(self):
+ creds_ref = {'secret':
+ u'e7a7a2240136494986991a6598d9fb9f'}
+ # signed with aws4_badrequest instead of aws4_request
+ credentials = {'token':
+ 'QVdTNC1ITUFDLVNIQTI1NgoyMDE1MDgyNFQxMTIwNDFaCjIw'
+ 'MTUwODI0L1JlZ2lvbk9uZS9zMy9hd3M0X3JlcXVlc3QKZjIy'
+ 'MTU1ODBlZWI5YTE2NzM1MWJkOTNlODZjM2I2ZjA0YTkyOGY1'
+ 'YzU1MjBhMzkzNWE0NTM1NDBhMDk1NjRiNQ==',
+ 'signature':
+ '52d02211a3767d00b2104ab28c9859003b0e'
+ '9c8735cd10de7975f3b1212cca41'}
+
+ self.assertRaises(exception.Unauthorized,
+ s3tokens.S3Resource._check_signature,
+ creds_ref, credentials)
+
+ def test_bad_short_scope_v4(self):
+ creds_ref = {'secret':
+ u'e7a7a2240136494986991a6598d9fb9f'}
+ # credential scope has too few parts, missing final /aws4_request
+ credentials = {'token':
+ 'QVdTNC1ITUFDLVNIQTI1NgoyMDE1MDgyNFQxMTIwNDFaCjIw'
+ 'MTUwODI0L1JlZ2lvbk9uZS9zMwpmMjIxNTU4MGVlYjlhMTY3'
+ 'MzUxYmQ5M2U4NmMzYjZmMDRhOTI4ZjVjNTUyMGEzOTM1YTQ1'
+ 'MzU0MGEwOTU2NGI1',
+ 'signature':
+ '28a075f1ee41e96c431153914998443ff0f5'
+ '5fe93d31b37181f13ff4865942a2'}
+
+ self.assertRaises(exception.Unauthorized,
+ s3tokens.S3Resource._check_signature,
+ creds_ref, credentials)
+
def test_bad_token_v4(self):
creds_ref = {'secret':
u'e7a7a2240136494986991a6598d9fb9f'}
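For context on the tokens used in these signature tests: they are base64-encoded AWS Signature V4 strings-to-sign, whose third line carries the credential scope (date/region/service/aws4_request); the new cases swap the service in that scope or truncate it. A small decode sketch, reusing the token from the iam case above (the decoded text described in the comment is an assumption inferred from the SigV4 format and the test names):

    import base64

    token = (
        'QVdTNC1ITUFDLVNIQTI1NgoyMDE1MDgyNFQxMTIwNDFaCjIw'
        'MTUwODI0L1JlZ2lvbk9uZS9pYW0vYXdzNF9yZXF1ZXN0CmYy'
        'MjE1NTgwZWViOWExNjczNTFiZDkzZTg2YzNiNmYwNGE5Mjhm'
        'NWM1NTIwYTM5MzVhNDUzNTQwYTA5NTY0YjU='
    )
    string_to_sign = base64.b64decode(token).decode('utf-8')
    # A SigV4 string-to-sign has four lines:
    #   AWS4-HMAC-SHA256
    #   <timestamp>
    #   <date>/<region>/<service>/aws4_request   <- the credential scope
    #   <hex sha256 of the canonical request>
    print(string_to_sign.splitlines()[2])
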
diff --git a/keystone/tests/unit/test_hacking_checks.py b/keystone/tests/unit/test_hacking_checks.py
index f7be762c0..9afe4d4e3 100644
--- a/keystone/tests/unit/test_hacking_checks.py
+++ b/keystone/tests/unit/test_hacking_checks.py
@@ -49,7 +49,7 @@ class BaseStyleCheck(unit.BaseTestCase):
def assert_has_errors(self, code, expected_errors=None):
actual_errors = [e[:3] for e in self.run_check(code)]
- self.assertItemsEqual(expected_errors or [], actual_errors)
+ self.assertCountEqual(expected_errors or [], actual_errors)
class TestCheckForMutableDefaultArgs(BaseStyleCheck):
diff --git a/keystone/tests/unit/test_policy.py b/keystone/tests/unit/test_policy.py
index d58ea6399..d0feec639 100644
--- a/keystone/tests/unit/test_policy.py
+++ b/keystone/tests/unit/test_policy.py
@@ -151,7 +151,7 @@ class PolicyScopeTypesEnforcementTestCase(unit.TestCase):
def test_warning_message_is_logged_if_enforce_scope_is_false(self):
self.config_fixture.config(group='oslo_policy', enforce_scope=False)
expected_msg = (
- 'Policy foo failed scope check. The token used to make the '
+ 'Policy "foo": "" failed scope check. The token used to make the '
'request was project scoped but the policy requires [\'system\'] '
'scope. This behavior may change in the future where using the '
'intended scope is required'
@@ -228,7 +228,7 @@ class PolicyJsonTestCase(unit.TestCase):
yield str(target)
doc_targets = list(read_doc_targets())
- self.assertItemsEqual(policy_keys, doc_targets + policy_rule_keys)
+ self.assertCountEqual(policy_keys, doc_targets + policy_rule_keys)
class GeneratePolicyFileTestCase(unit.TestCase):
diff --git a/keystone/tests/unit/test_sql_banned_operations.py b/keystone/tests/unit/test_sql_banned_operations.py
index 9916657da..2a9be1029 100644
--- a/keystone/tests/unit/test_sql_banned_operations.py
+++ b/keystone/tests/unit/test_sql_banned_operations.py
@@ -12,7 +12,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-
import os
import fixtures
@@ -25,10 +24,9 @@ from oslotest import base as test_base
import sqlalchemy
import testtools
-from keystone.common.sql import contract_repo
-from keystone.common.sql import data_migration_repo
-from keystone.common.sql import expand_repo
-from keystone.common.sql import migrate_repo
+from keystone.common.sql.legacy_migrations import contract_repo
+from keystone.common.sql.legacy_migrations import data_migration_repo
+from keystone.common.sql.legacy_migrations import expand_repo
from keystone.common.sql import upgrades
@@ -39,9 +37,8 @@ class DBOperationNotAllowed(Exception):
class BannedDBSchemaOperations(fixtures.Fixture):
"""Ban some operations for migrations."""
- def __init__(self, banned_ops=None,
- migration_repo=migrate_repo.__file__):
- super(BannedDBSchemaOperations, self).__init__()
+ def __init__(self, banned_ops, migration_repo):
+ super().__init__()
self._banned_ops = banned_ops or {}
self._migration_repo = migration_repo
@@ -54,7 +51,7 @@ class BannedDBSchemaOperations(fixtures.Fixture):
resource_op, repo_name))
def setUp(self):
- super(BannedDBSchemaOperations, self).setUp()
+ super().setUp()
explode_lambda = {
'Table.create': lambda *a, **k: self._explode(
'Table.create', self._migration_repo),
@@ -91,7 +88,9 @@ class TestBannedDBSchemaOperations(testtools.TestCase):
"""Test column operations raise DBOperationNotAllowed."""
column = sqlalchemy.Column()
with BannedDBSchemaOperations(
- banned_ops={'Column': ['create', 'alter', 'drop']}):
+ banned_ops={'Column': ['create', 'alter', 'drop']},
+ migration_repo=expand_repo.__file__,
+ ):
self.assertRaises(DBOperationNotAllowed, column.drop)
self.assertRaises(DBOperationNotAllowed, column.alter)
self.assertRaises(DBOperationNotAllowed, column.create)
@@ -100,8 +99,10 @@ class TestBannedDBSchemaOperations(testtools.TestCase):
"""Test table operations raise DBOperationNotAllowed."""
table = sqlalchemy.Table()
with BannedDBSchemaOperations(
- banned_ops={'Table': ['create', 'alter', 'drop',
- 'insert', 'update', 'delete']}):
+ banned_ops={'Table': ['create', 'alter', 'drop',
+ 'insert', 'update', 'delete']},
+ migration_repo=expand_repo.__file__,
+ ):
self.assertRaises(DBOperationNotAllowed, table.drop)
self.assertRaises(DBOperationNotAllowed, table.alter)
self.assertRaises(DBOperationNotAllowed, table.create)
@@ -113,35 +114,19 @@ class TestBannedDBSchemaOperations(testtools.TestCase):
class KeystoneMigrationsCheckers(test_migrations.WalkVersionsMixin):
"""Walk over and test all sqlalchemy-migrate migrations."""
- # NOTE(xek): We start requiring things be additive in Newton, so
- # ignore all migrations before the first version in Newton.
- migrate_file = migrate_repo.__file__
- first_version = 101
- # NOTE(henry-nash): We don't ban data modification in the legacy repo,
- # since there are already migrations that do this for Newton (and these
- # do not cause us issues, or are already worked around).
- banned_ops = {'Table': ['alter', 'drop'],
- 'Column': ['alter', 'drop']}
+ migrate_file = None
+ first_version = 1
+ # A mapping of entity (Table, Column, ...) to banned operations
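+ # e.g. {'Table': ['alter', 'drop'], 'Column': ['alter', 'drop']}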
+ banned_ops = {}
exceptions = [
# NOTE(xek): Reviewers: DO NOT ALLOW THINGS TO BE ADDED HERE UNLESS
# JUSTIFICATION CAN BE PROVIDED AS TO WHY THIS WILL NOT CAUSE
# PROBLEMS FOR ROLLING UPGRADES.
-
- # Migration 102 drops the domain table in the Newton release. All
- # code that referenced the domain table was removed in the Mitaka
- # release, hence this migration will not cause problems when
- # running a mixture of Mitaka and Newton versions of keystone.
- 102,
-
- # Migration 106 simply allows the password column to be nullable.
- # This change would not impact a rolling upgrade.
- 106
]
@property
def INIT_VERSION(self):
- return upgrades.get_init_version(
- abs_path=os.path.abspath(os.path.dirname(self.migrate_file)))
+ return upgrades.INITIAL_VERSION
@property
def REPOSITORY(self):
@@ -190,8 +175,7 @@ class KeystoneMigrationsCheckers(test_migrations.WalkVersionsMixin):
else:
banned_ops = None
with BannedDBSchemaOperations(banned_ops, self.migrate_file):
- super(KeystoneMigrationsCheckers,
- self).migrate_up(version, with_data)
+ super().migrate_up(version, with_data)
snake_walk = False
downgrade = False
@@ -200,43 +184,7 @@ class KeystoneMigrationsCheckers(test_migrations.WalkVersionsMixin):
self.walk_versions(self.snake_walk, self.downgrade)
-class TestKeystoneMigrationsMySQL(
- KeystoneMigrationsCheckers,
- db_fixtures.OpportunisticDBTestMixin,
- test_base.BaseTestCase):
- FIXTURE = db_fixtures.MySQLOpportunisticFixture
-
- def setUp(self):
- super(TestKeystoneMigrationsMySQL, self).setUp()
- self.engine = enginefacade.writer.get_engine()
- self.sessionmaker = enginefacade.writer.get_sessionmaker()
-
-
-class TestKeystoneMigrationsPostgreSQL(
- KeystoneMigrationsCheckers,
- db_fixtures.OpportunisticDBTestMixin,
- test_base.BaseTestCase):
- FIXTURE = db_fixtures.PostgresqlOpportunisticFixture
-
- def setUp(self):
- super(TestKeystoneMigrationsPostgreSQL, self).setUp()
- self.engine = enginefacade.writer.get_engine()
- self.sessionmaker = enginefacade.writer.get_sessionmaker()
-
-
-class TestKeystoneMigrationsSQLite(
- KeystoneMigrationsCheckers,
- db_fixtures.OpportunisticDBTestMixin,
- test_base.BaseTestCase):
-
- def setUp(self):
- super(TestKeystoneMigrationsSQLite, self).setUp()
- self.engine = enginefacade.writer.get_engine()
- self.sessionmaker = enginefacade.writer.get_sessionmaker()
-
-
-class TestKeystoneExpandSchemaMigrations(
- KeystoneMigrationsCheckers):
+class TestKeystoneExpandSchemaMigrations(KeystoneMigrationsCheckers):
migrate_file = expand_repo.__file__
first_version = 1
@@ -263,7 +211,12 @@ class TestKeystoneExpandSchemaMigrations(
# timestamp to datetime and updates the initial value in the contract
# phase. Adding an exception here to pass expand banned tests,
# otherwise fails.
- 4
+ 4,
+
+ # Migration 79 changes a varchar column length; it doesn't
+ # convert the data within that column/table and doesn't rebuild
+ # indexes.
+ 79
]
def setUp(self):
@@ -280,7 +233,6 @@ class TestKeystoneExpandSchemaMigrationsMySQL(
super(TestKeystoneExpandSchemaMigrationsMySQL, self).setUp()
self.engine = enginefacade.writer.get_engine()
self.sessionmaker = enginefacade.writer.get_sessionmaker()
- self.migrate_fully(migrate_repo.__file__)
class TestKeystoneExpandSchemaMigrationsPostgreSQL(
@@ -293,7 +245,6 @@ class TestKeystoneExpandSchemaMigrationsPostgreSQL(
super(TestKeystoneExpandSchemaMigrationsPostgreSQL, self).setUp()
self.engine = enginefacade.writer.get_engine()
self.sessionmaker = enginefacade.writer.get_sessionmaker()
- self.migrate_fully(migrate_repo.__file__)
class TestKeystoneDataMigrations(
@@ -321,7 +272,6 @@ class TestKeystoneDataMigrations(
def setUp(self):
super(TestKeystoneDataMigrations, self).setUp()
- self.migrate_fully(migrate_repo.__file__)
self.migrate_fully(expand_repo.__file__)
@@ -382,7 +332,6 @@ class TestKeystoneContractSchemaMigrations(
def setUp(self):
super(TestKeystoneContractSchemaMigrations, self).setUp()
- self.migrate_fully(migrate_repo.__file__)
self.migrate_fully(expand_repo.__file__)
self.migrate_fully(data_migration_repo.__file__)
diff --git a/keystone/tests/unit/test_sql_upgrade.py b/keystone/tests/unit/test_sql_upgrade.py
index 50c28707a..78f644977 100644
--- a/keystone/tests/unit/test_sql_upgrade.py
+++ b/keystone/tests/unit/test_sql_upgrade.py
@@ -11,6 +11,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+
"""
Test for SQL migration extensions.
@@ -25,12 +26,12 @@ To run these tests against a live database:
3. Run the tests using::
- tox -e py27 -- keystone.tests.unit.test_sql_upgrade
+ tox -e py39 -- keystone.tests.unit.test_sql_upgrade
For further information, see `oslo.db documentation
<https://docs.openstack.org/oslo.db/latest/contributor/index.html#how-to-run-unit-tests>`_.
-WARNING::
+.. warning::
Your database will be wiped.
@@ -38,35 +39,25 @@ WARNING::
all data will be lost.
"""
-import datetime
import glob
-import json
import os
-from unittest import mock
-import uuid
import fixtures
-import migrate
-from migrate.versioning import repository
+from migrate.versioning import api as migrate_api
from migrate.versioning import script
from oslo_db import exception as db_exception
from oslo_db.sqlalchemy import enginefacade
from oslo_db.sqlalchemy import test_fixtures as db_fixtures
+from oslo_log import fixture as log_fixture
from oslo_log import log
-from oslo_serialization import jsonutils
from oslotest import base as test_base
-import pytz
import sqlalchemy.exc
-from sqlalchemy import inspect
-from testtools import matchers
from keystone.cmd import cli
from keystone.common import sql
from keystone.common.sql import upgrades
from keystone.credential.providers import fernet as credential_fernet
-from keystone.resource.backends import base as resource_base
from keystone.tests import unit
-from keystone.tests.unit import default_fixtures
from keystone.tests.unit import ksfixtures
from keystone.tests.unit.ksfixtures import database
@@ -75,11 +66,12 @@ from keystone.tests.unit.ksfixtures import database
# is done to mirror the expected structure of the DB in the format of
# { <DB_TABLE_NAME>: [<COLUMN>, <COLUMN>, ...], ... }
INITIAL_TABLE_STRUCTURE = {
- 'credential': [
- 'id', 'user_id', 'project_id', 'blob', 'type', 'extra',
+ 'config_register': [
+ 'type', 'domain_id',
],
- 'domain': [
- 'id', 'name', 'enabled', 'extra',
+ 'credential': [
+ 'id', 'user_id', 'project_id', 'type', 'extra', 'key_hash',
+ 'encrypted_blob',
],
'endpoint': [
'id', 'legacy_endpoint_id', 'interface', 'region_id', 'service_id',
@@ -93,10 +85,19 @@ INITIAL_TABLE_STRUCTURE = {
],
'project': [
'id', 'name', 'extra', 'description', 'enabled', 'domain_id',
- 'parent_id',
+ 'parent_id', 'is_domain',
+ ],
+ 'project_option': [
+ 'project_id', 'option_id', 'option_value',
+ ],
+ 'project_tag': [
+ 'project_id', 'name',
],
'role': [
- 'id', 'name', 'extra',
+ 'id', 'name', 'extra', 'domain_id', 'description',
+ ],
+ 'role_option': [
+ 'role_id', 'option_id', 'option_value',
],
'service': [
'id', 'type', 'extra', 'enabled',
@@ -107,13 +108,17 @@ INITIAL_TABLE_STRUCTURE = {
'trust': [
'id', 'trustor_user_id', 'trustee_user_id', 'project_id',
'impersonation', 'deleted_at', 'expires_at', 'remaining_uses', 'extra',
+ 'expires_at_int', 'redelegated_trust_id', 'redelegation_count',
],
'trust_role': [
'trust_id', 'role_id',
],
'user': [
- 'id', 'name', 'extra', 'password', 'enabled', 'domain_id',
- 'default_project_id',
+ 'id', 'extra', 'enabled', 'default_project_id', 'created_at',
+ 'last_active_at', 'domain_id',
+ ],
+ 'user_option': [
+ 'user_id', 'option_id', 'option_value',
],
'user_group_membership': [
'user_id', 'group_id',
@@ -133,77 +138,152 @@ INITIAL_TABLE_STRUCTURE = {
'sensitive_config': [
'domain_id', 'group', 'option', 'value',
],
+ 'policy_association': [
+ 'id', 'policy_id', 'endpoint_id', 'service_id', 'region_id',
+ ],
+ 'identity_provider': [
+ 'id', 'enabled', 'description', 'domain_id', 'authorization_ttl',
+ ],
+ 'federation_protocol': [
+ 'id', 'idp_id', 'mapping_id', 'remote_id_attribute',
+ ],
+ 'mapping': [
+ 'id', 'rules',
+ ],
+ 'service_provider': [
+ 'auth_url', 'id', 'enabled', 'description', 'sp_url',
+ 'relay_state_prefix',
+ ],
+ 'idp_remote_ids': [
+ 'idp_id', 'remote_id',
+ ],
+ 'consumer': [
+ 'id', 'description', 'secret', 'extra',
+ ],
+ 'request_token': [
+ 'id', 'request_secret', 'verifier', 'authorizing_user_id',
+ 'requested_project_id', 'role_ids', 'consumer_id', 'expires_at',
+ ],
+ 'access_token': [
+ 'id', 'access_secret', 'authorizing_user_id', 'project_id', 'role_ids',
+ 'consumer_id', 'expires_at',
+ ],
+ 'revocation_event': [
+ 'id', 'domain_id', 'project_id', 'user_id', 'role_id', 'trust_id',
+ 'consumer_id', 'access_token_id', 'issued_before', 'expires_at',
+ 'revoked_at', 'audit_id', 'audit_chain_id',
+ ],
+ 'project_endpoint': [
+ 'endpoint_id', 'project_id'
+ ],
+ 'endpoint_group': [
+ 'id', 'name', 'description', 'filters',
+ ],
+ 'project_endpoint_group': [
+ 'endpoint_group_id', 'project_id',
+ ],
+ 'implied_role': [
+ 'prior_role_id', 'implied_role_id',
+ ],
+ 'local_user': [
+ 'id', 'user_id', 'domain_id', 'name', 'failed_auth_count',
+ 'failed_auth_at',
+ ],
+ 'password': [
+ 'id', 'local_user_id', 'created_at', 'expires_at',
+ 'self_service', 'password_hash', 'created_at_int', 'expires_at_int',
+ ],
+ 'federated_user': [
+ 'id', 'user_id', 'idp_id', 'protocol_id', 'unique_id', 'display_name',
+ ],
+ 'nonlocal_user': [
+ 'domain_id', 'name', 'user_id',
+ ],
+ 'system_assignment': [
+ 'type', 'actor_id', 'target_id', 'role_id', 'inherited',
+ ],
+ 'registered_limit': [
+ 'internal_id', 'id', 'service_id', 'region_id', 'resource_name',
+ 'default_limit', 'description',
+ ],
+ 'limit': [
+ 'internal_id', 'id', 'project_id', 'resource_limit', 'description',
+ 'registered_limit_id', 'domain_id',
+ ],
+ 'application_credential': [
+ 'internal_id', 'id', 'name', 'secret_hash', 'description', 'user_id',
+ 'project_id', 'expires_at', 'system', 'unrestricted',
+ ],
+ 'application_credential_role': [
+ 'application_credential_id', 'role_id',
+ ],
+ 'access_rule': [
+ 'id', 'service', 'path', 'method', 'external_id', 'user_id',
+ ],
+ 'application_credential_access_rule': [
+ 'application_credential_id', 'access_rule_id',
+ ],
+ 'expiring_user_group_membership': [
+ 'user_id', 'group_id', 'idp_id', 'last_verified',
+ ],
}
-LEGACY_REPO = 'migrate_repo'
-EXPAND_REPO = 'expand_repo'
-DATA_MIGRATION_REPO = 'data_migration_repo'
-CONTRACT_REPO = 'contract_repo'
-
-
-# Test upgrades.get_init_version separately to ensure it works before
-# using in the SqlUpgrade tests.
-class SqlUpgradeGetInitVersionTests(unit.TestCase):
- @mock.patch.object(repository, 'Repository')
- def test_get_init_version_no_path(self, repo):
- migrate_versions = mock.MagicMock()
- # make a version list starting with zero. `get_init_version` will
- # return None for this value.
- migrate_versions.versions.versions = list(range(0, 5))
- repo.return_value = migrate_versions
-
- # os.path.isdir() is called by `find_repo()`. Mock it to avoid
- # an exception.
- with mock.patch('os.path.isdir', return_value=True):
- # since 0 is the smallest version expect None
- version = upgrades.get_init_version()
- self.assertIsNone(version)
-
- # check that the default path was used as the first argument to the
- # first invocation of repo. Cannot match the full path because it is
- # based on where the test is run.
- param = repo.call_args_list[0][0][0]
- self.assertTrue(param.endswith('/sql/' + LEGACY_REPO))
-
- @mock.patch.object(repository, 'Repository')
- def test_get_init_version_with_path_initial_version_0(self, repo):
- migrate_versions = mock.MagicMock()
- # make a version list starting with zero. `get_init_version` will
- # return None for this value.
- migrate_versions.versions.versions = list(range(0, 5))
- repo.return_value = migrate_versions
-
- # os.path.isdir() is called by `find_repo()`. Mock it to avoid
- # an exception.
- with mock.patch('os.path.isdir', return_value=True):
- path = '/keystone/' + LEGACY_REPO + '/'
-
- # since 0 is the smallest version expect None
- version = upgrades.get_init_version(abs_path=path)
- self.assertIsNone(version)
-
- @mock.patch.object(repository, 'Repository')
- def test_get_init_version_with_path(self, repo):
- initial_version = 10
-
- migrate_versions = mock.MagicMock()
- migrate_versions.versions.versions = list(range(initial_version + 1,
- initial_version + 5))
- repo.return_value = migrate_versions
-
- # os.path.isdir() is called by `find_repo()`. Mock it to avoid
- # an exception.
- with mock.patch('os.path.isdir', return_value=True):
- path = '/keystone/' + LEGACY_REPO + '/'
-
- version = upgrades.get_init_version(abs_path=path)
- self.assertEqual(initial_version, version)
-
-
-class SqlMigrateBase(db_fixtures.OpportunisticDBTestMixin,
- test_base.BaseTestCase):
+
+class Repository:
+
+ def __init__(self, engine, repo_name):
+ self.repo_name = repo_name
+
+ self.repo_path = upgrades._get_migrate_repo_path(self.repo_name)
+ self.min_version = upgrades.INITIAL_VERSION
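+ # Place the database under version control for this repository,
+ # starting from the initial (minimum) version.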
+ self.schema_ = migrate_api.ControlledSchema.create(
+ engine, self.repo_path, self.min_version,
+ )
+ self.max_version = self.schema_.repository.version().version
+
+ def upgrade(self, version=None, current_schema=None):
+ version = version or self.max_version
+ err = ''
+ upgrade = True
+ version = migrate_api._migrate_version(
+ self.schema_, version, upgrade, err,
+ )
+ upgrades._validate_upgrade_order(
+ self.repo_name, target_repo_version=version,
+ )
+ if not current_schema:
+ current_schema = self.schema_
+ changeset = current_schema.changeset(version)
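+ # Run each change in the changeset, stepping the schema forward
+ # until the target version is reached.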
+ for ver, change in changeset:
+ self.schema_.runchange(ver, change, changeset.step)
+
+ if self.schema_.version != version:
+ raise Exception(
+ 'Actual version (%s) of %s does not equal expected '
+ 'version (%s)' % (
+ self.schema_.version, self.repo_name, version,
+ ),
+ )
+
+ @property
+ def version(self):
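+ # Return the version the database is currently at for this repo.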
+ with sql.session_for_read() as session:
+ return upgrades._migrate_db_version(
+ session.get_bind(), self.repo_path, self.min_version,
+ )
+
+
+class MigrateBase(
+ db_fixtures.OpportunisticDBTestMixin,
+ test_base.BaseTestCase,
+):
def setUp(self):
- super(SqlMigrateBase, self).setUp()
+ super().setUp()
+
+ self.useFixture(log_fixture.get_logging_handle_error_fixture())
+ self.stdlog = self.useFixture(ksfixtures.StandardLogging())
+ self.useFixture(ksfixtures.WarningsFixture())
+
self.engine = enginefacade.writer.get_engine()
self.sessionmaker = enginefacade.writer.get_sessionmaker()
@@ -225,27 +305,28 @@ class SqlMigrateBase(db_fixtures.OpportunisticDBTestMixin,
self.addCleanup(sql.cleanup)
self.repos = {
- LEGACY_REPO: upgrades.Repository(self.engine, LEGACY_REPO),
- EXPAND_REPO: upgrades.Repository(self.engine, EXPAND_REPO),
- DATA_MIGRATION_REPO: upgrades.Repository(
- self.engine, DATA_MIGRATION_REPO),
- CONTRACT_REPO: upgrades.Repository(self.engine, CONTRACT_REPO)}
-
- def upgrade(self, *args, **kwargs):
- """Upgrade the legacy migration repository."""
- self.repos[LEGACY_REPO].upgrade(*args, **kwargs)
+ upgrades.EXPAND_BRANCH: Repository(
+ self.engine, upgrades.EXPAND_BRANCH,
+ ),
+ upgrades.DATA_MIGRATION_BRANCH: Repository(
+ self.engine, upgrades.DATA_MIGRATION_BRANCH,
+ ),
+ upgrades.CONTRACT_BRANCH: Repository(
+ self.engine, upgrades.CONTRACT_BRANCH,
+ ),
+ }
def expand(self, *args, **kwargs):
"""Expand database schema."""
- self.repos[EXPAND_REPO].upgrade(*args, **kwargs)
+ self.repos[upgrades.EXPAND_BRANCH].upgrade(*args, **kwargs)
def migrate(self, *args, **kwargs):
"""Migrate data."""
- self.repos[DATA_MIGRATION_REPO].upgrade(*args, **kwargs)
+ self.repos[upgrades.DATA_MIGRATION_BRANCH].upgrade(*args, **kwargs)
def contract(self, *args, **kwargs):
"""Contract database schema."""
- self.repos[CONTRACT_REPO].upgrade(*args, **kwargs)
+ self.repos[upgrades.CONTRACT_BRANCH].upgrade(*args, **kwargs)
@property
def metadata(self):
@@ -253,17 +334,9 @@ class SqlMigrateBase(db_fixtures.OpportunisticDBTestMixin,
return sqlalchemy.MetaData(self.engine)
def load_table(self, name):
- table = sqlalchemy.Table(name,
- self.metadata,
- autoload=True)
+ table = sqlalchemy.Table(name, self.metadata, autoload=True)
return table
- def assertTableExists(self, table_name):
- try:
- self.load_table(table_name)
- except sqlalchemy.exc.NoSuchTableError:
- raise AssertionError('Table "%s" does not exist' % table_name)
-
def assertTableDoesNotExist(self, table_name):
"""Assert that a given table exists cannot be selected by name."""
# Switch to a different metadata otherwise you might still
@@ -275,1301 +348,81 @@ class SqlMigrateBase(db_fixtures.OpportunisticDBTestMixin,
else:
raise AssertionError('Table "%s" already exists' % table_name)
- def calc_table_row_count(self, table_name):
- """Return the number of rows in the table."""
- t = sqlalchemy.Table(table_name, self.metadata, autoload=True)
- session = self.sessionmaker()
- row_count = session.query(
- sqlalchemy.func.count('*')).select_from(t).scalar()
- return row_count
-
- def assertTableCountsMatch(self, table1_name, table2_name):
- table1_count = self.calc_table_row_count(table1_name)
- table2_count = self.calc_table_row_count(table2_name)
- if table1_count != table2_count:
- raise AssertionError('Table counts do not match: {0} ({1}), {2} '
- '({3})'.format(table1_name, table1_count,
- table2_name, table2_count))
-
def assertTableColumns(self, table_name, expected_cols):
"""Assert that the table contains the expected set of columns."""
table = self.load_table(table_name)
actual_cols = [col.name for col in table.columns]
# Check if the columns are equal, but allow for a different order,
# which might occur after an upgrade followed by a downgrade
- self.assertItemsEqual(expected_cols, actual_cols,
+ self.assertCountEqual(expected_cols, actual_cols,
'%s table' % table_name)
- def insert_dict(self, session, table_name, d, table=None):
- """Naively inserts key-value pairs into a table, given a dictionary."""
- if table is None:
- this_table = sqlalchemy.Table(table_name, self.metadata,
- autoload=True)
- else:
- this_table = table
- insert = this_table.insert().values(**d)
- session.execute(insert)
-
- def does_pk_exist(self, table, pk_column):
- """Check whether a column is primary key on a table."""
- inspector = inspect(self.engine)
- pk_columns = inspector.get_pk_constraint(table)['constrained_columns']
-
- return pk_column in pk_columns
-
- def does_fk_exist(self, table, fk_column):
- inspector = inspect(self.engine)
- for fk in inspector.get_foreign_keys(table):
- if fk_column in fk['constrained_columns']:
- return True
- return False
-
- def does_constraint_exist(self, table_name, constraint_name):
- table = sqlalchemy.Table(table_name, self.metadata, autoload=True)
- return constraint_name in [con.name for con in table.constraints]
-
- def does_index_exist(self, table_name, index_name):
- table = sqlalchemy.Table(table_name, self.metadata, autoload=True)
- return index_name in [idx.name for idx in table.indexes]
-
- def does_unique_constraint_exist(self, table_name, column_names):
- inspector = inspect(self.engine)
- constraints = inspector.get_unique_constraints(table_name)
- for c in constraints:
- if (len(c['column_names']) == 1 and
- column_names in c['column_names']):
- return True
- if (len(c['column_names'])) > 1 and isinstance(column_names, list):
- return set(c['column_names']) == set(column_names)
- return False
-
-
-class SqlLegacyRepoUpgradeTests(SqlMigrateBase):
- def test_blank_db_to_start(self):
- self.assertTableDoesNotExist('user')
+
+class ExpandSchemaUpgradeTests(MigrateBase):
def test_start_version_db_init_version(self):
self.assertEqual(
- self.repos[LEGACY_REPO].min_version,
- self.repos[LEGACY_REPO].version,
- 'DB is not at version %s' % (
- self.repos[LEGACY_REPO].min_version)
- )
+ self.repos[upgrades.EXPAND_BRANCH].min_version,
+ self.repos[upgrades.EXPAND_BRANCH].version)
+
+ def test_blank_db_to_start(self):
+ self.assertTableDoesNotExist('user')
def test_upgrade_add_initial_tables(self):
- self.upgrade(self.repos[LEGACY_REPO].min_version + 1)
+ self.expand(upgrades.INITIAL_VERSION + 1)
self.check_initial_table_structure()
def check_initial_table_structure(self):
for table in INITIAL_TABLE_STRUCTURE:
self.assertTableColumns(table, INITIAL_TABLE_STRUCTURE[table])
- def test_kilo_squash(self):
- self.upgrade(67)
-
- # In 053 the size of ID and parent region ID columns were changed
- table = sqlalchemy.Table('region', self.metadata, autoload=True)
- self.assertEqual(255, table.c.id.type.length)
- self.assertEqual(255, table.c.parent_region_id.type.length)
- table = sqlalchemy.Table('endpoint', self.metadata, autoload=True)
- self.assertEqual(255, table.c.region_id.type.length)
-
- # In 054 an index was created for the actor_id of the assignment table
- table = sqlalchemy.Table('assignment', self.metadata, autoload=True)
- index_data = [(idx.name, list(idx.columns.keys()))
- for idx in table.indexes]
- self.assertIn(('ix_actor_id', ['actor_id']), index_data)
-
- # In 055 indexes were created for user and trust IDs in the token table
- table = sqlalchemy.Table('token', self.metadata, autoload=True)
- index_data = [(idx.name, list(idx.columns.keys()))
- for idx in table.indexes]
- self.assertIn(('ix_token_user_id', ['user_id']), index_data)
- self.assertIn(('ix_token_trust_id', ['trust_id']), index_data)
-
- # In 062 the role ID foreign key was removed from the assignment table
- if self.engine.name == "mysql":
- self.assertFalse(self.does_fk_exist('assignment', 'role_id'))
-
- # In 064 the domain ID FK was removed from the group and user tables
- if self.engine.name != 'sqlite':
- # sqlite does not support FK deletions (or enforcement)
- self.assertFalse(self.does_fk_exist('group', 'domain_id'))
- self.assertFalse(self.does_fk_exist('user', 'domain_id'))
-
- # In 067 the role ID index was removed from the assignment table
- if self.engine.name == "mysql":
- self.assertFalse(self.does_index_exist('assignment',
- 'assignment_role_id_fkey'))
-
- def test_insert_assignment_inherited_pk(self):
- ASSIGNMENT_TABLE_NAME = 'assignment'
- INHERITED_COLUMN_NAME = 'inherited'
- ROLE_TABLE_NAME = 'role'
-
- self.upgrade(72)
-
- # Check that the 'inherited' column is not part of the PK
- self.assertFalse(self.does_pk_exist(ASSIGNMENT_TABLE_NAME,
- INHERITED_COLUMN_NAME))
-
- session = self.sessionmaker()
-
- role = {'id': uuid.uuid4().hex,
- 'name': uuid.uuid4().hex}
- self.insert_dict(session, ROLE_TABLE_NAME, role)
-
- # Create both inherited and noninherited role assignments
- inherited = {'type': 'UserProject',
- 'actor_id': uuid.uuid4().hex,
- 'target_id': uuid.uuid4().hex,
- 'role_id': role['id'],
- 'inherited': True}
-
- noninherited = inherited.copy()
- noninherited['inherited'] = False
-
- # Create another inherited role assignment as a spoiler
- spoiler = inherited.copy()
- spoiler['actor_id'] = uuid.uuid4().hex
-
- self.insert_dict(session, ASSIGNMENT_TABLE_NAME, inherited)
- self.insert_dict(session, ASSIGNMENT_TABLE_NAME, spoiler)
-
- # Since 'inherited' is not part of the PK, we can't insert noninherited
- self.assertRaises(db_exception.DBDuplicateEntry,
- self.insert_dict,
- session,
- ASSIGNMENT_TABLE_NAME,
- noninherited)
-
- session.close()
-
- self.upgrade(73)
-
- session = self.sessionmaker()
-
- # Check that the 'inherited' column is now part of the PK
- self.assertTrue(self.does_pk_exist(ASSIGNMENT_TABLE_NAME,
- INHERITED_COLUMN_NAME))
-
- # The noninherited role assignment can now be inserted
- self.insert_dict(session, ASSIGNMENT_TABLE_NAME, noninherited)
-
- assignment_table = sqlalchemy.Table(ASSIGNMENT_TABLE_NAME,
- self.metadata,
- autoload=True)
-
- assignments = session.query(assignment_table).all()
- for assignment in (inherited, spoiler, noninherited):
- self.assertIn((assignment['type'], assignment['actor_id'],
- assignment['target_id'], assignment['role_id'],
- assignment['inherited']),
- assignments)
-
- def test_endpoint_policy_upgrade(self):
- self.assertTableDoesNotExist('policy_association')
- self.upgrade(81)
- self.assertTableColumns('policy_association',
- ['id', 'policy_id', 'endpoint_id',
- 'service_id', 'region_id'])
-
- @mock.patch.object(upgrades, 'get_db_version', return_value=1)
- def test_endpoint_policy_already_migrated(self, mock_ep):
-
- # By setting the return value to 1, the migration has already been
- # run, and there's no need to create the table again
-
- self.upgrade(81)
-
- mock_ep.assert_called_once_with(extension='endpoint_policy',
- engine=mock.ANY)
-
- # It won't exist because we are mocking it, but we can verify
- # that 081 did not create the table
- self.assertTableDoesNotExist('policy_association')
-
- def test_create_federation_tables(self):
- self.identity_provider = 'identity_provider'
- self.federation_protocol = 'federation_protocol'
- self.service_provider = 'service_provider'
- self.mapping = 'mapping'
- self.remote_ids = 'idp_remote_ids'
-
- self.assertTableDoesNotExist(self.identity_provider)
- self.assertTableDoesNotExist(self.federation_protocol)
- self.assertTableDoesNotExist(self.service_provider)
- self.assertTableDoesNotExist(self.mapping)
- self.assertTableDoesNotExist(self.remote_ids)
-
- self.upgrade(82)
- self.assertTableColumns(self.identity_provider,
- ['id', 'description', 'enabled'])
-
- self.assertTableColumns(self.federation_protocol,
- ['id', 'idp_id', 'mapping_id'])
-
- self.assertTableColumns(self.mapping,
- ['id', 'rules'])
-
- self.assertTableColumns(self.service_provider,
- ['id', 'description', 'enabled', 'auth_url',
- 'relay_state_prefix', 'sp_url'])
-
- self.assertTableColumns(self.remote_ids, ['idp_id', 'remote_id'])
-
- federation_protocol = sqlalchemy.Table(self.federation_protocol,
- self.metadata,
- autoload=True)
- self.assertFalse(federation_protocol.c.mapping_id.nullable)
-
- sp_table = sqlalchemy.Table(self.service_provider,
- self.metadata,
- autoload=True)
- self.assertFalse(sp_table.c.auth_url.nullable)
- self.assertFalse(sp_table.c.sp_url.nullable)
-
- @mock.patch.object(upgrades, 'get_db_version', return_value=8)
- def test_federation_already_migrated(self, mock_federation):
-
- # By setting the return value to 8, the migration has already been
- # run, and there's no need to create the table again.
- self.upgrade(82)
-
- mock_federation.assert_any_call(extension='federation',
- engine=mock.ANY)
-
- # It won't exist because we are mocking it, but we can verify
- # that 082 did not create the table.
- self.assertTableDoesNotExist('identity_provider')
- self.assertTableDoesNotExist('federation_protocol')
- self.assertTableDoesNotExist('mapping')
- self.assertTableDoesNotExist('service_provider')
- self.assertTableDoesNotExist('idp_remote_ids')
-
- def test_create_oauth_tables(self):
- consumer = 'consumer'
- request_token = 'request_token'
- access_token = 'access_token'
- self.assertTableDoesNotExist(consumer)
- self.assertTableDoesNotExist(request_token)
- self.assertTableDoesNotExist(access_token)
- self.upgrade(83)
- self.assertTableColumns(consumer,
- ['id',
- 'description',
- 'secret',
- 'extra'])
- self.assertTableColumns(request_token,
- ['id',
- 'request_secret',
- 'verifier',
- 'authorizing_user_id',
- 'requested_project_id',
- 'role_ids',
- 'consumer_id',
- 'expires_at'])
- self.assertTableColumns(access_token,
- ['id',
- 'access_secret',
- 'authorizing_user_id',
- 'project_id',
- 'role_ids',
- 'consumer_id',
- 'expires_at'])
-
- @mock.patch.object(upgrades, 'get_db_version', return_value=5)
- def test_oauth1_already_migrated(self, mock_oauth1):
-
- # By setting the return value to 5, the migration has already been
- # run, and there's no need to create the table again.
- self.upgrade(83)
-
- mock_oauth1.assert_any_call(extension='oauth1', engine=mock.ANY)
-
- # It won't exist because we are mocking it, but we can verify
- # that 083 did not create the table.
- self.assertTableDoesNotExist('consumer')
- self.assertTableDoesNotExist('request_token')
- self.assertTableDoesNotExist('access_token')
-
- def test_create_revoke_table(self):
- self.assertTableDoesNotExist('revocation_event')
- self.upgrade(84)
- self.assertTableColumns('revocation_event',
- ['id', 'domain_id', 'project_id', 'user_id',
- 'role_id', 'trust_id', 'consumer_id',
- 'access_token_id', 'issued_before',
- 'expires_at', 'revoked_at',
- 'audit_chain_id', 'audit_id'])
-
- @mock.patch.object(upgrades, 'get_db_version', return_value=2)
- def test_revoke_already_migrated(self, mock_revoke):
-
- # By setting the return value to 2, the migration has already been
- # run, and there's no need to create the table again.
- self.upgrade(84)
-
- mock_revoke.assert_any_call(extension='revoke', engine=mock.ANY)
-
- # It won't exist because we are mocking it, but we can verify
- # that 084 did not create the table.
- self.assertTableDoesNotExist('revocation_event')
-
- def test_project_is_domain_upgrade(self):
- self.upgrade(74)
- self.assertTableColumns('project',
- ['id', 'name', 'extra', 'description',
- 'enabled', 'domain_id', 'parent_id',
- 'is_domain'])
-
- def test_implied_roles_upgrade(self):
- self.upgrade(87)
- self.assertTableColumns('implied_role',
- ['prior_role_id', 'implied_role_id'])
- self.assertTrue(self.does_fk_exist('implied_role', 'prior_role_id'))
- self.assertTrue(self.does_fk_exist('implied_role', 'implied_role_id'))
-
- def test_add_config_registration(self):
- config_registration = 'config_register'
- self.upgrade(74)
- self.assertTableDoesNotExist(config_registration)
- self.upgrade(75)
- self.assertTableColumns(config_registration, ['type', 'domain_id'])
-
- def test_endpoint_filter_upgrade(self):
- def assert_tables_columns_exist():
- self.assertTableColumns('project_endpoint',
- ['endpoint_id', 'project_id'])
- self.assertTableColumns('endpoint_group',
- ['id', 'name', 'description', 'filters'])
- self.assertTableColumns('project_endpoint_group',
- ['endpoint_group_id', 'project_id'])
-
- self.assertTableDoesNotExist('project_endpoint')
- self.upgrade(85)
- assert_tables_columns_exist()
-
- @mock.patch.object(upgrades, 'get_db_version', return_value=2)
- def test_endpoint_filter_already_migrated(self, mock_endpoint_filter):
-
- # By setting the return value to 2, the migration has already been
- # run, and there's no need to create the table again.
- self.upgrade(85)
-
- mock_endpoint_filter.assert_any_call(extension='endpoint_filter',
- engine=mock.ANY)
-
- # It won't exist because we are mocking it, but we can verify
- # that 085 did not create the table.
- self.assertTableDoesNotExist('project_endpoint')
- self.assertTableDoesNotExist('endpoint_group')
- self.assertTableDoesNotExist('project_endpoint_group')
-
- def test_add_trust_unique_constraint_upgrade(self):
- self.upgrade(86)
- inspector = inspect(self.engine)
- constraints = inspector.get_unique_constraints('trust')
- constraint_names = [constraint['name'] for constraint in constraints]
- self.assertIn('duplicate_trust_constraint', constraint_names)
-
- def test_add_domain_specific_roles(self):
- """Check database upgraded successfully for domain specific roles.
-
- The following items need to be checked:
-
- - The domain_id column has been added
- - That it has been added to the uniqueness constraints
- - Existing roles have their domain_id columns set to the specific
- string of '<<null>>'
-
- """
- NULL_DOMAIN_ID = '<<null>>'
-
- self.upgrade(87)
- session = self.sessionmaker()
- role_table = sqlalchemy.Table('role', self.metadata, autoload=True)
- # Add a role before we upgrade, so we can check that its new domain_id
- # attribute is handled correctly
- role_id = uuid.uuid4().hex
- self.insert_dict(session, 'role',
- {'id': role_id, 'name': uuid.uuid4().hex})
- session.close()
-
- self.upgrade(88)
-
- session = self.sessionmaker()
- self.assertTableColumns('role', ['id', 'name', 'domain_id', 'extra'])
- # Check the domain_id has been added to the uniqueness constraint
- inspector = inspect(self.engine)
- constraints = inspector.get_unique_constraints('role')
- constraint_columns = [
- constraint['column_names'] for constraint in constraints
- if constraint['name'] == 'ixu_role_name_domain_id']
- self.assertIn('domain_id', constraint_columns[0])
-
- # Now check our role has its domain_id attribute set correctly
- role_table = sqlalchemy.Table('role', self.metadata, autoload=True)
- cols = [role_table.c.domain_id]
- filter = role_table.c.id == role_id
- statement = sqlalchemy.select(cols).where(filter)
- role_entry = session.execute(statement).fetchone()
- self.assertEqual(NULL_DOMAIN_ID, role_entry[0])
-
- def test_add_root_of_all_domains(self):
- NULL_DOMAIN_ID = '<<keystone.domain.root>>'
- self.upgrade(89)
- session = self.sessionmaker()
-
- domain_table = sqlalchemy.Table(
- 'domain', self.metadata, autoload=True)
- query = session.query(domain_table).filter_by(id=NULL_DOMAIN_ID)
- domain_from_db = query.one()
- self.assertIn(NULL_DOMAIN_ID, domain_from_db)
-
- project_table = sqlalchemy.Table(
- 'project', self.metadata, autoload=True)
- query = session.query(project_table).filter_by(id=NULL_DOMAIN_ID)
- project_from_db = query.one()
- self.assertIn(NULL_DOMAIN_ID, project_from_db)
-
- session.close()
-
- def test_add_local_user_and_password_tables(self):
- local_user_table = 'local_user'
- password_table = 'password'
- self.upgrade(89)
- self.assertTableDoesNotExist(local_user_table)
- self.assertTableDoesNotExist(password_table)
- self.upgrade(90)
- self.assertTableColumns(local_user_table,
- ['id',
- 'user_id',
- 'domain_id',
- 'name'])
- self.assertTableColumns(password_table,
- ['id',
- 'local_user_id',
- 'password'])
-
- def test_migrate_data_to_local_user_and_password_tables(self):
- def get_expected_users():
- expected_users = []
- for test_user in default_fixtures.USERS:
- user = {}
- user['id'] = uuid.uuid4().hex
- user['name'] = test_user['name']
- user['domain_id'] = test_user['domain_id']
- user['password'] = test_user['password']
- user['enabled'] = True
- user['extra'] = json.dumps(uuid.uuid4().hex)
- user['default_project_id'] = uuid.uuid4().hex
- expected_users.append(user)
- return expected_users
-
- def add_users_to_db(expected_users, user_table):
- for user in expected_users:
- ins = user_table.insert().values(
- {'id': user['id'],
- 'name': user['name'],
- 'domain_id': user['domain_id'],
- 'password': user['password'],
- 'enabled': user['enabled'],
- 'extra': user['extra'],
- 'default_project_id': user['default_project_id']})
- ins.execute()
-
- def get_users_from_db(user_table, local_user_table, password_table):
- sel = (
- sqlalchemy.select([user_table.c.id,
- user_table.c.enabled,
- user_table.c.extra,
- user_table.c.default_project_id,
- local_user_table.c.name,
- local_user_table.c.domain_id,
- password_table.c.password])
- .select_from(user_table.join(local_user_table,
- user_table.c.id ==
- local_user_table.c.user_id)
- .join(password_table,
- local_user_table.c.id ==
- password_table.c.local_user_id))
- )
- user_rows = sel.execute()
- users = []
- for row in user_rows:
- users.append(
- {'id': row['id'],
- 'name': row['name'],
- 'domain_id': row['domain_id'],
- 'password': row['password'],
- 'enabled': row['enabled'],
- 'extra': row['extra'],
- 'default_project_id': row['default_project_id']})
- return users
-
- user_table_name = 'user'
- local_user_table_name = 'local_user'
- password_table_name = 'password'
-
- # populate current user table
- self.upgrade(90)
- user_table = sqlalchemy.Table(
- user_table_name, self.metadata, autoload=True)
- expected_users = get_expected_users()
- add_users_to_db(expected_users, user_table)
-
- # upgrade to migration and test
- self.upgrade(91)
- self.assertTableCountsMatch(user_table_name, local_user_table_name)
- self.assertTableCountsMatch(local_user_table_name, password_table_name)
- user_table = sqlalchemy.Table(
- user_table_name, self.metadata, autoload=True)
- local_user_table = sqlalchemy.Table(
- local_user_table_name, self.metadata, autoload=True)
- password_table = sqlalchemy.Table(
- password_table_name, self.metadata, autoload=True)
- actual_users = get_users_from_db(user_table, local_user_table,
- password_table)
- self.assertItemsEqual(expected_users, actual_users)
-
- def test_migrate_user_with_null_password_to_password_tables(self):
- USER_TABLE_NAME = 'user'
- LOCAL_USER_TABLE_NAME = 'local_user'
- PASSWORD_TABLE_NAME = 'password'
- self.upgrade(90)
- user_ref = unit.new_user_ref(uuid.uuid4().hex)
- user_ref.pop('password')
- # pop extra attribute which doesn't recognized by SQL expression
- # layer.
- user_ref.pop('email')
- session = self.sessionmaker()
- self.insert_dict(session, USER_TABLE_NAME, user_ref)
- self.upgrade(91)
- # migration should be successful.
- self.assertTableCountsMatch(USER_TABLE_NAME, LOCAL_USER_TABLE_NAME)
- # no new entry was added to the password table because the
- # user doesn't have a password.
- rows = self.calc_table_row_count(PASSWORD_TABLE_NAME)
- self.assertEqual(0, rows)
-
- def test_migrate_user_skip_user_already_exist_in_local_user(self):
- USER_TABLE_NAME = 'user'
- LOCAL_USER_TABLE_NAME = 'local_user'
- self.upgrade(90)
- user1_ref = unit.new_user_ref(uuid.uuid4().hex)
- # pop extra attribute which doesn't recognized by SQL expression
- # layer.
- user1_ref.pop('email')
- user2_ref = unit.new_user_ref(uuid.uuid4().hex)
- user2_ref.pop('email')
- session = self.sessionmaker()
- self.insert_dict(session, USER_TABLE_NAME, user1_ref)
- self.insert_dict(session, USER_TABLE_NAME, user2_ref)
- user_id = user1_ref.pop('id')
- user_name = user1_ref.pop('name')
- domain_id = user1_ref.pop('domain_id')
- local_user_ref = {'user_id': user_id, 'name': user_name,
- 'domain_id': domain_id}
- self.insert_dict(session, LOCAL_USER_TABLE_NAME, local_user_ref)
- self.upgrade(91)
- # migration should be successful and user2_ref has been migrated to
- # `local_user` table.
- self.assertTableCountsMatch(USER_TABLE_NAME, LOCAL_USER_TABLE_NAME)
-
- def test_implied_roles_fk_on_delete_cascade(self):
- if self.engine.name == 'sqlite':
- self.skipTest('sqlite backend does not support foreign keys')
-
- self.upgrade(92)
-
- session = self.sessionmaker()
-
- ROLE_TABLE_NAME = 'role'
- role_table = sqlalchemy.Table(ROLE_TABLE_NAME, self.metadata,
- autoload=True)
- IMPLIED_ROLE_TABLE_NAME = 'implied_role'
- implied_role_table = sqlalchemy.Table(
- IMPLIED_ROLE_TABLE_NAME, self.metadata, autoload=True)
-
- def _create_three_roles():
- id_list = []
- for _ in range(3):
- new_role_fields = {
- 'id': uuid.uuid4().hex,
- 'name': uuid.uuid4().hex,
- }
- self.insert_dict(session, ROLE_TABLE_NAME, new_role_fields,
- table=role_table)
- id_list.append(new_role_fields['id'])
- return id_list
-
- role_id_list = _create_three_roles()
- implied_role_fields = {
- 'prior_role_id': role_id_list[0],
- 'implied_role_id': role_id_list[1],
- }
- self.insert_dict(session, IMPLIED_ROLE_TABLE_NAME, implied_role_fields,
- table=implied_role_table)
-
- implied_role_fields = {
- 'prior_role_id': role_id_list[0],
- 'implied_role_id': role_id_list[2],
- }
- self.insert_dict(session, IMPLIED_ROLE_TABLE_NAME, implied_role_fields,
- table=implied_role_table)
-
- # assert that there are two roles implied by role 0.
- implied_roles = session.query(implied_role_table).filter_by(
- prior_role_id=role_id_list[0]).all()
- self.assertThat(implied_roles, matchers.HasLength(2))
-
- session.execute(
- role_table.delete().where(role_table.c.id == role_id_list[0]))
- # assert the cascade deletion is effective.
- implied_roles = session.query(implied_role_table).filter_by(
- prior_role_id=role_id_list[0]).all()
- self.assertThat(implied_roles, matchers.HasLength(0))
-
- def test_domain_as_project_upgrade(self):
- self.skipTest('Domain as Project Upgrade Test is no longer needed and '
- 'unfortunately broken by the resource options code.')
-
- def _populate_domain_and_project_tables(session):
- # Three domains, with various different attributes
- self.domains = [{'id': uuid.uuid4().hex,
- 'name': uuid.uuid4().hex,
- 'enabled': True,
- 'extra': {'description': uuid.uuid4().hex,
- 'another_attribute': True}},
- {'id': uuid.uuid4().hex,
- 'name': uuid.uuid4().hex,
- 'enabled': True,
- 'extra': {'description': uuid.uuid4().hex}},
- {'id': uuid.uuid4().hex,
- 'name': uuid.uuid4().hex,
- 'enabled': False}]
- # Four projects, two top level, two children
- self.projects = []
- self.projects.append(unit.new_project_ref(
- domain_id=self.domains[0]['id'],
- parent_id=None))
- self.projects.append(unit.new_project_ref(
- domain_id=self.domains[0]['id'],
- parent_id=self.projects[0]['id']))
- self.projects.append(unit.new_project_ref(
- domain_id=self.domains[1]['id'],
- parent_id=None))
- self.projects.append(unit.new_project_ref(
- domain_id=self.domains[1]['id'],
- parent_id=self.projects[2]['id']))
-
- for domain in self.domains:
- this_domain = domain.copy()
- if 'extra' in this_domain:
- this_domain['extra'] = json.dumps(this_domain['extra'])
- self.insert_dict(session, 'domain', this_domain)
- for project in self.projects:
- # Tags are done via relationship, not column
- project.pop('tags', None)
- self.insert_dict(session, 'project', project)
-
- def _check_projects(projects):
-
- def _assert_domain_matches_project(project):
- for domain in self.domains:
- if project.id == domain['id']:
- self.assertEqual(domain['name'], project.name)
- self.assertEqual(domain['enabled'], project.enabled)
- if domain['id'] == self.domains[0]['id']:
- self.assertEqual(domain['extra']['description'],
- project.description)
- self.assertEqual({'another_attribute': True},
- json.loads(project.extra))
- elif domain['id'] == self.domains[1]['id']:
- self.assertEqual(domain['extra']['description'],
- project.description)
- self.assertEqual({}, json.loads(project.extra))
-
- # We had domains 3 we created, which should now be projects acting
- # as domains, To this we add the 4 original projects, plus the root
- # of all domains row.
- self.assertEqual(8, projects.count())
-
- project_ids = []
- for project in projects:
- if project.is_domain:
- self.assertEqual(NULL_DOMAIN_ID, project.domain_id)
- self.assertIsNone(project.parent_id)
- else:
- self.assertIsNotNone(project.domain_id)
- self.assertIsNotNone(project.parent_id)
- project_ids.append(project.id)
-
- for domain in self.domains:
- self.assertIn(domain['id'], project_ids)
- for project in self.projects:
- self.assertIn(project['id'], project_ids)
-
- # Now check the attributes of the domains came across OK
- for project in projects:
- _assert_domain_matches_project(project)
-
- NULL_DOMAIN_ID = '<<keystone.domain.root>>'
- self.upgrade(92)
-
- session = self.sessionmaker()
-
- _populate_domain_and_project_tables(session)
-
- self.upgrade(93)
- proj_table = sqlalchemy.Table('project', self.metadata, autoload=True)
-
- projects = session.query(proj_table)
- _check_projects(projects)
-
- def test_add_federated_user_table(self):
- federated_user_table = 'federated_user'
- self.upgrade(93)
- self.assertTableDoesNotExist(federated_user_table)
- self.upgrade(94)
- self.assertTableColumns(federated_user_table,
- ['id',
- 'user_id',
- 'idp_id',
- 'protocol_id',
- 'unique_id',
- 'display_name'])
-
- def test_add_int_pkey_to_revocation_event_table(self):
- REVOCATION_EVENT_TABLE_NAME = 'revocation_event'
- self.upgrade(94)
- revocation_event_table = sqlalchemy.Table(REVOCATION_EVENT_TABLE_NAME,
- self.metadata, autoload=True)
- # assert id column is a string (before)
- self.assertEqual('VARCHAR(64)', str(revocation_event_table.c.id.type))
- self.upgrade(95)
- revocation_event_table = sqlalchemy.Table(REVOCATION_EVENT_TABLE_NAME,
- self.metadata, autoload=True)
- # assert id column is an integer (after)
- self.assertIsInstance(revocation_event_table.c.id.type, sql.Integer)
-
- def _add_unique_constraint_to_role_name(self,
- constraint_name='ixu_role_name'):
- role_table = sqlalchemy.Table('role', self.metadata, autoload=True)
- migrate.UniqueConstraint(role_table.c.name,
- name=constraint_name).create()
-
- def _drop_unique_constraint_to_role_name(self,
- constraint_name='ixu_role_name'):
- role_table = sqlalchemy.Table('role', self.metadata, autoload=True)
- migrate.UniqueConstraint(role_table.c.name,
- name=constraint_name).drop()
-
- def _add_unique_constraint_to_user_name_domainid(
- self,
- constraint_name='ixu_role_name'):
- user_table = sqlalchemy.Table('user', self.metadata, autoload=True)
- migrate.UniqueConstraint(user_table.c.name, user_table.c.domain_id,
- name=constraint_name).create()
-
- def _add_name_domain_id_columns_to_user(self):
- user_table = sqlalchemy.Table('user', self.metadata, autoload=True)
- column_name = sqlalchemy.Column('name', sql.String(255))
- column_domain_id = sqlalchemy.Column('domain_id', sql.String(64))
- user_table.create_column(column_name)
- user_table.create_column(column_domain_id)
-
- def _drop_unique_constraint_to_user_name_domainid(
- self,
- constraint_name='ixu_user_name_domain_id'):
- user_table = sqlalchemy.Table('user', self.metadata, autoload=True)
- migrate.UniqueConstraint(user_table.c.name, user_table.c.domain_id,
- name=constraint_name).drop()
-
- def test_migration_88_drops_unique_constraint(self):
- self.upgrade(87)
- if self.engine.name == 'mysql':
- self.assertTrue(self.does_index_exist('role', 'ixu_role_name'))
- else:
- self.assertTrue(self.does_constraint_exist('role',
- 'ixu_role_name'))
- self.upgrade(88)
- if self.engine.name == 'mysql':
- self.assertFalse(self.does_index_exist('role', 'ixu_role_name'))
- else:
- self.assertFalse(self.does_constraint_exist('role',
- 'ixu_role_name'))
-
- def test_migration_88_inconsistent_constraint_name(self):
- self.upgrade(87)
- self._drop_unique_constraint_to_role_name()
-
- constraint_name = uuid.uuid4().hex
- self._add_unique_constraint_to_role_name(
- constraint_name=constraint_name)
-
- if self.engine.name == 'mysql':
- self.assertTrue(self.does_index_exist('role', constraint_name))
- self.assertFalse(self.does_index_exist('role', 'ixu_role_name'))
- else:
- self.assertTrue(self.does_constraint_exist('role',
- constraint_name))
- self.assertFalse(self.does_constraint_exist('role',
- 'ixu_role_name'))
-
- self.upgrade(88)
- if self.engine.name == 'mysql':
- self.assertFalse(self.does_index_exist('role', constraint_name))
- self.assertFalse(self.does_index_exist('role', 'ixu_role_name'))
- else:
- self.assertFalse(self.does_constraint_exist('role',
- constraint_name))
- self.assertFalse(self.does_constraint_exist('role',
- 'ixu_role_name'))
-
- def test_migration_91_drops_unique_constraint(self):
- self.upgrade(90)
- if self.engine.name == 'mysql':
- self.assertTrue(self.does_index_exist('user',
- 'ixu_user_name_domain_id'))
- else:
- self.assertTrue(self.does_constraint_exist(
- 'user',
- 'ixu_user_name_domain_id'))
- self.upgrade(91)
- if self.engine.name == 'mysql':
- self.assertFalse(self.does_index_exist(
- 'user',
- 'ixu_user_name_domain_id'))
- else:
- self.assertFalse(self.does_constraint_exist(
- 'user',
- 'ixu_user_name_domain_id'))
-
- def test_migration_91_inconsistent_constraint_name(self):
- self.upgrade(90)
- self._drop_unique_constraint_to_user_name_domainid()
-
- constraint_name = uuid.uuid4().hex
- self._add_unique_constraint_to_user_name_domainid(
- constraint_name=constraint_name)
-
- if self.engine.name == 'mysql':
- self.assertTrue(self.does_index_exist('user', constraint_name))
- self.assertFalse(self.does_index_exist(
- 'user',
- 'ixu_user_name_domain_id'))
- else:
- self.assertTrue(self.does_constraint_exist('user',
- constraint_name))
- self.assertFalse(self.does_constraint_exist(
- 'user',
- 'ixu_user_name_domain_id'))
-
- self.upgrade(91)
- if self.engine.name == 'mysql':
- self.assertFalse(self.does_index_exist('user', constraint_name))
- self.assertFalse(self.does_index_exist(
- 'user',
- 'ixu_user_name_domain_id'))
- else:
- self.assertFalse(self.does_constraint_exist('user',
- constraint_name))
- self.assertFalse(self.does_constraint_exist(
- 'user',
- 'ixu_user_name_domain_id'))
-
- def test_migration_96(self):
- self.upgrade(95)
- if self.engine.name == 'mysql':
- self.assertFalse(self.does_index_exist('role', 'ixu_role_name'))
- else:
- self.assertFalse(self.does_constraint_exist('role',
- 'ixu_role_name'))
-
- self.upgrade(96)
- if self.engine.name == 'mysql':
- self.assertFalse(self.does_index_exist('role', 'ixu_role_name'))
- else:
- self.assertFalse(self.does_constraint_exist('role',
- 'ixu_role_name'))
-
- def test_migration_96_constraint_exists(self):
- self.upgrade(95)
- self._add_unique_constraint_to_role_name()
-
- if self.engine.name == 'mysql':
- self.assertTrue(self.does_index_exist('role', 'ixu_role_name'))
- else:
- self.assertTrue(self.does_constraint_exist('role',
- 'ixu_role_name'))
-
- self.upgrade(96)
- if self.engine.name == 'mysql':
- self.assertFalse(self.does_index_exist('role', 'ixu_role_name'))
- else:
- self.assertFalse(self.does_constraint_exist('role',
- 'ixu_role_name'))
-
- def test_migration_97(self):
- self.upgrade(96)
- if self.engine.name == 'mysql':
- self.assertFalse(self.does_index_exist(
- 'user',
- 'ixu_user_name_domain_id'))
- else:
- self.assertFalse(self.does_constraint_exist(
- 'user',
- 'ixu_user_name_domain_id'))
-
- self.upgrade(97)
- if self.engine.name == 'mysql':
- self.assertFalse(self.does_index_exist(
- 'user',
- 'ixu_user_name_domain_id'))
- else:
- self.assertFalse(self.does_constraint_exist(
- 'user',
- 'ixu_user_name_domain_id'))
-
- def test_migration_97_constraint_exists(self):
- self.upgrade(96)
- self._add_name_domain_id_columns_to_user()
- self._add_unique_constraint_to_user_name_domainid(
- constraint_name='ixu_user_name_domain_id')
-
- if self.engine.name == 'mysql':
- self.assertTrue(self.does_index_exist(
- 'user',
- 'ixu_user_name_domain_id'))
- else:
- self.assertTrue(self.does_constraint_exist(
- 'user',
- 'ixu_user_name_domain_id'))
-
- self.upgrade(97)
- if self.engine.name == 'mysql':
- self.assertFalse(self.does_index_exist(
- 'user',
- 'ixu_user_name_domain_id'))
- else:
- self.assertFalse(self.does_constraint_exist(
- 'user',
- 'ixu_user_name_domain_id'))
-
- def test_migration_97_inconsistent_constraint_exists(self):
- self.upgrade(96)
- constraint_name = uuid.uuid4().hex
- self._add_name_domain_id_columns_to_user()
- self._add_unique_constraint_to_user_name_domainid(
- constraint_name=constraint_name)
-
- if self.engine.name == 'mysql':
- self.assertTrue(self.does_index_exist('user', constraint_name))
- else:
- self.assertTrue(self.does_constraint_exist('user',
- constraint_name))
-
- self.upgrade(97)
- if self.engine.name == 'mysql':
- self.assertFalse(self.does_index_exist('user', constraint_name))
- else:
- self.assertFalse(self.does_constraint_exist('user',
- constraint_name))
-
- def test_migration_101(self):
- self.upgrade(100)
- if self.engine.name == 'mysql':
- self.assertFalse(self.does_index_exist('role', 'ixu_role_name'))
- else:
- self.assertFalse(self.does_constraint_exist('role',
- 'ixu_role_name'))
- self.upgrade(101)
- if self.engine.name == 'mysql':
- self.assertFalse(self.does_index_exist('role', 'ixu_role_name'))
- else:
- self.assertFalse(self.does_constraint_exist('role',
- 'ixu_role_name'))
-
- def test_migration_101_constraint_exists(self):
- self.upgrade(100)
- self._add_unique_constraint_to_role_name()
-
- if self.engine.name == 'mysql':
- self.assertTrue(self.does_index_exist('role', 'ixu_role_name'))
- else:
- self.assertTrue(self.does_constraint_exist('role',
- 'ixu_role_name'))
- self.upgrade(101)
- if self.engine.name == 'mysql':
- self.assertFalse(self.does_index_exist('role', 'ixu_role_name'))
- else:
- self.assertFalse(self.does_constraint_exist('role',
- 'ixu_role_name'))
-
- def test_drop_domain_table(self):
- self.upgrade(101)
- self.assertTableExists('domain')
- self.upgrade(102)
- self.assertTableDoesNotExist('domain')
-
- def test_add_nonlocal_user_table(self):
- nonlocal_user_table = 'nonlocal_user'
- self.upgrade(102)
- self.assertTableDoesNotExist(nonlocal_user_table)
- self.upgrade(103)
- self.assertTableColumns(nonlocal_user_table,
- ['domain_id',
- 'name',
- 'user_id'])
-
- def test_migration_104(self):
- self.upgrade(103)
- if self.engine.name == 'mysql':
- self.assertFalse(self.does_index_exist(
- 'user',
- 'ixu_user_name_domain_id'))
- else:
- self.assertFalse(self.does_constraint_exist(
- 'user',
- 'ixu_user_name_domain_id'))
-
- self.upgrade(104)
- if self.engine.name == 'mysql':
- self.assertFalse(self.does_index_exist(
- 'user',
- 'ixu_user_name_domain_id'))
- else:
- self.assertFalse(self.does_constraint_exist(
- 'user',
- 'ixu_user_name_domain_id'))
-
- def test_migration_104_constraint_exists(self):
- self.upgrade(103)
- self._add_name_domain_id_columns_to_user()
- self._add_unique_constraint_to_user_name_domainid(
- constraint_name='ixu_user_name_domain_id')
-
- if self.engine.name == 'mysql':
- self.assertTrue(self.does_index_exist(
- 'user',
- 'ixu_user_name_domain_id'))
- else:
- self.assertTrue(self.does_constraint_exist(
- 'user',
- 'ixu_user_name_domain_id'))
-
- self.upgrade(104)
- if self.engine.name == 'mysql':
- self.assertFalse(self.does_index_exist(
- 'user',
- 'ixu_user_name_domain_id'))
- else:
- self.assertFalse(self.does_constraint_exist(
- 'user',
- 'ixu_user_name_domain_id'))
-
- def test_migration_104_inconsistent_constraint_exists(self):
- self.upgrade(103)
- constraint_name = uuid.uuid4().hex
- self._add_name_domain_id_columns_to_user()
- self._add_unique_constraint_to_user_name_domainid(
- constraint_name=constraint_name)
-
- if self.engine.name == 'mysql':
- self.assertTrue(self.does_index_exist('user', constraint_name))
- else:
- self.assertTrue(self.does_constraint_exist('user',
- constraint_name))
-
- self.upgrade(104)
- if self.engine.name == 'mysql':
- self.assertFalse(self.does_index_exist('user', constraint_name))
- else:
- self.assertFalse(self.does_constraint_exist('user',
- constraint_name))
-
- def test_migration_105_add_password_date_columns(self):
- def add_user_model_record(session):
- # add a user
- user = {'id': uuid.uuid4().hex}
- self.insert_dict(session, 'user', user)
- # add a local user
- local_user = {
- 'id': 1,
- 'user_id': user['id'],
- 'domain_id': 'default',
- 'name': uuid.uuid4().hex
- }
- self.insert_dict(session, 'local_user', local_user)
- # add a password
- password = {
- 'local_user_id': local_user['id'],
- 'password': uuid.uuid4().hex
- }
- self.insert_dict(session, 'password', password)
- self.upgrade(104)
- session = self.sessionmaker()
- password_name = 'password'
- # columns before
- self.assertTableColumns(password_name,
- ['id',
- 'local_user_id',
- 'password'])
- # add record and verify table count is greater than zero
- add_user_model_record(session)
- password_table = sqlalchemy.Table(password_name, self.metadata,
- autoload=True)
- cnt = session.query(password_table).count()
- self.assertGreater(cnt, 0)
- self.upgrade(105)
- # columns after
- self.assertTableColumns(password_name,
- ['id',
- 'local_user_id',
- 'password',
- 'created_at',
- 'expires_at'])
- password_table = sqlalchemy.Table(password_name, self.metadata,
- autoload=True)
- # verify created_at is not null
- null_created_at_cnt = (
- session.query(password_table).filter_by(created_at=None).count())
- self.assertEqual(null_created_at_cnt, 0)
- # verify expires_at is null
- null_expires_at_cnt = (
- session.query(password_table).filter_by(expires_at=None).count())
- self.assertGreater(null_expires_at_cnt, 0)
-
- def test_migration_106_allow_password_column_to_be_nullable(self):
- password_table_name = 'password'
- self.upgrade(105)
- password_table = sqlalchemy.Table(password_table_name, self.metadata,
- autoload=True)
- self.assertFalse(password_table.c.password.nullable)
- self.upgrade(106)
- password_table = sqlalchemy.Table(password_table_name, self.metadata,
- autoload=True)
- self.assertTrue(password_table.c.password.nullable)
-
- def test_migration_107_add_user_date_columns(self):
- user_table = 'user'
- self.upgrade(106)
- self.assertTableColumns(user_table,
- ['id',
- 'extra',
- 'enabled',
- 'default_project_id'])
- self.upgrade(107)
- self.assertTableColumns(user_table,
- ['id',
- 'extra',
- 'enabled',
- 'default_project_id',
- 'created_at',
- 'last_active_at'])
-
- def test_migration_108_add_failed_auth_columns(self):
- self.upgrade(107)
- table_name = 'local_user'
- self.assertTableColumns(table_name,
- ['id',
- 'user_id',
- 'domain_id',
- 'name'])
- self.upgrade(108)
- self.assertTableColumns(table_name,
- ['id',
- 'user_id',
- 'domain_id',
- 'name',
- 'failed_auth_count',
- 'failed_auth_at'])
-
- def test_migration_109_add_password_self_service_column(self):
- password_table = 'password'
- self.upgrade(108)
- self.assertTableColumns(password_table,
- ['id',
- 'local_user_id',
- 'password',
- 'created_at',
- 'expires_at'])
- self.upgrade(109)
- self.assertTableColumns(password_table,
- ['id',
- 'local_user_id',
- 'password',
- 'created_at',
- 'expires_at',
- 'self_service'])
-
-
-class MySQLOpportunisticUpgradeTestCase(SqlLegacyRepoUpgradeTests):
- FIXTURE = db_fixtures.MySQLOpportunisticFixture
-
-
-class PostgreSQLOpportunisticUpgradeTestCase(SqlLegacyRepoUpgradeTests):
- FIXTURE = db_fixtures.PostgresqlOpportunisticFixture
-
-
-class SqlExpandSchemaUpgradeTests(SqlMigrateBase):
-
- def setUp(self):
- # Make sure the main repo is fully upgraded for this release since the
- # expand phase is only run after such an upgrade
- super(SqlExpandSchemaUpgradeTests, self).setUp()
- self.upgrade()
-
- def test_start_version_db_init_version(self):
- self.assertEqual(
- self.repos[EXPAND_REPO].min_version,
- self.repos[EXPAND_REPO].version)
-
class MySQLOpportunisticExpandSchemaUpgradeTestCase(
- SqlExpandSchemaUpgradeTests):
+ ExpandSchemaUpgradeTests,
+):
FIXTURE = db_fixtures.MySQLOpportunisticFixture
class PostgreSQLOpportunisticExpandSchemaUpgradeTestCase(
- SqlExpandSchemaUpgradeTests):
+ ExpandSchemaUpgradeTests,
+):
FIXTURE = db_fixtures.PostgresqlOpportunisticFixture
-class SqlDataMigrationUpgradeTests(SqlMigrateBase):
+class DataMigrationUpgradeTests(MigrateBase):
def setUp(self):
- # Make sure the legacy and expand repos are fully upgraded, since the
- # data migration phase is only run after these are upgraded
- super(SqlDataMigrationUpgradeTests, self).setUp()
- self.upgrade()
+ # Make sure the expand repo is fully upgraded, since the data migration
+ # phase is only run after this is upgraded
+ super().setUp()
self.expand()
def test_start_version_db_init_version(self):
self.assertEqual(
- self.repos[DATA_MIGRATION_REPO].min_version,
- self.repos[DATA_MIGRATION_REPO].version)
+ self.repos[upgrades.DATA_MIGRATION_BRANCH].min_version,
+ self.repos[upgrades.DATA_MIGRATION_BRANCH].version,
+ )
class MySQLOpportunisticDataMigrationUpgradeTestCase(
- SqlDataMigrationUpgradeTests):
+ DataMigrationUpgradeTests,
+):
FIXTURE = db_fixtures.MySQLOpportunisticFixture
class PostgreSQLOpportunisticDataMigrationUpgradeTestCase(
- SqlDataMigrationUpgradeTests):
+ DataMigrationUpgradeTests,
+):
FIXTURE = db_fixtures.PostgresqlOpportunisticFixture
-class SqlContractSchemaUpgradeTests(SqlMigrateBase, unit.TestCase):
+class ContractSchemaUpgradeTests(MigrateBase, unit.TestCase):
def setUp(self):
- # Make sure the legacy, expand and data migration repos are fully
+ # Make sure the expand and data migration repos are fully
# upgraded, since the contract phase is only run after these are
# upgraded.
- super(SqlContractSchemaUpgradeTests, self).setUp()
+ super().setUp()
self.useFixture(
ksfixtures.KeyRepository(
self.config_fixture,
@@ -1577,64 +430,29 @@ class SqlContractSchemaUpgradeTests(SqlMigrateBase, unit.TestCase):
credential_fernet.MAX_ACTIVE_KEYS
)
)
- self.upgrade()
self.expand()
self.migrate()
def test_start_version_db_init_version(self):
self.assertEqual(
- self.repos[CONTRACT_REPO].min_version,
- self.repos[CONTRACT_REPO].version)
+ self.repos[upgrades.CONTRACT_BRANCH].min_version,
+ self.repos[upgrades.CONTRACT_BRANCH].version,
+ )
class MySQLOpportunisticContractSchemaUpgradeTestCase(
- SqlContractSchemaUpgradeTests):
+ ContractSchemaUpgradeTests,
+):
FIXTURE = db_fixtures.MySQLOpportunisticFixture
class PostgreSQLOpportunisticContractSchemaUpgradeTestCase(
- SqlContractSchemaUpgradeTests):
+ ContractSchemaUpgradeTests,
+):
FIXTURE = db_fixtures.PostgresqlOpportunisticFixture
-class VersionTests(SqlMigrateBase):
- def test_core_initial(self):
- """Get the version before migrated, it's the initial DB version."""
- self.assertEqual(
- self.repos[LEGACY_REPO].min_version,
- self.repos[LEGACY_REPO].version)
-
- def test_core_max(self):
- """When get the version after upgrading, it's the new version."""
- self.upgrade()
- self.assertEqual(
- self.repos[LEGACY_REPO].max_version,
- self.repos[LEGACY_REPO].version)
-
- def test_assert_not_schema_downgrade(self):
- self.upgrade()
- self.assertRaises(
- db_exception.DBMigrationError,
- upgrades._sync_common_repo,
- self.repos[LEGACY_REPO].max_version - 1)
-
- def test_these_are_not_the_migrations_you_are_looking_for(self):
- """Keystone has shifted to rolling upgrades.
-
- New database migrations should no longer land in the legacy migration
- repository. Instead, new database migrations should be divided into
- three discrete steps: schema expansion, data migration, and schema
- contraction. These migrations live in a new set of database migration
- repositories, called ``expand_repo``, ``data_migration_repo``, and
- ``contract_repo``.
-
- For more information, see "Database Migrations" here:
-
- https://docs.openstack.org/keystone/latest/contributor/database-migrations.html
-
- """
- # Note to reviewers: this version number should never change.
- self.assertEqual(109, self.repos[LEGACY_REPO].max_version)
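The docstring above describes splitting one logical schema change into expand, data-migration and contract phases. A minimal, self-contained sketch of that pattern (standard-library sqlite3 only; the table and column names are illustrative, not the real keystone repositories):

import datetime
import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE password (id INTEGER PRIMARY KEY, password TEXT)')
conn.execute("INSERT INTO password (password) VALUES ('hash')")

# expand: purely additive, backwards-compatible change (a new nullable column)
conn.execute('ALTER TABLE password ADD COLUMN created_at TEXT')

# data migration: backfill the new column for existing rows
now = datetime.datetime.utcnow().isoformat()
conn.execute('UPDATE password SET created_at = ? WHERE created_at IS NULL', (now,))

# contract: destructive changes (NOT NULL constraints, dropped columns) come
# last; sqlite cannot add NOT NULL via ALTER, so here we only verify that the
# backfill left nothing that would violate such a constraint.
assert conn.execute(
    'SELECT COUNT(*) FROM password WHERE created_at IS NULL').fetchone()[0] == 0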
+class VersionTests(MigrateBase):
def test_migrate_repos_stay_in_lockstep(self):
"""Rolling upgrade repositories should always stay in lockstep.
@@ -1658,11 +476,13 @@ class VersionTests(SqlMigrateBase):
"""
# Transitive comparison: expand == data migration == contract
self.assertEqual(
- self.repos[EXPAND_REPO].max_version,
- self.repos[DATA_MIGRATION_REPO].max_version)
+ self.repos[upgrades.EXPAND_BRANCH].max_version,
+ self.repos[upgrades.DATA_MIGRATION_BRANCH].max_version,
+ )
self.assertEqual(
- self.repos[DATA_MIGRATION_REPO].max_version,
- self.repos[CONTRACT_REPO].max_version)
+ self.repos[upgrades.DATA_MIGRATION_BRANCH].max_version,
+ self.repos[upgrades.CONTRACT_BRANCH].max_version,
+ )
def test_migrate_repos_file_names_have_prefix(self):
"""Migration files should be unique to avoid caching errors.
@@ -1674,17 +494,20 @@ class VersionTests(SqlMigrateBase):
"""
versions_path = '/versions'
+
# test for expand prefix, e.g. 001_expand_new_fk_constraint.py
- expand_list = glob.glob(
- self.repos[EXPAND_REPO].repo_path + versions_path + '/*.py')
+ repo_path = self.repos[upgrades.EXPAND_BRANCH].repo_path
+ expand_list = glob.glob(repo_path + versions_path + '/*.py')
self.assertRepoFileNamePrefix(expand_list, 'expand')
+
# test for migrate prefix, e.g. 001_migrate_new_fk_constraint.py
- repo_path = self.repos[DATA_MIGRATION_REPO].repo_path
+ repo_path = self.repos[upgrades.DATA_MIGRATION_BRANCH].repo_path
migrate_list = glob.glob(repo_path + versions_path + '/*.py')
self.assertRepoFileNamePrefix(migrate_list, 'migrate')
+
# test for contract prefix, e.g. 001_contract_new_fk_constraint.py
- contract_list = glob.glob(
- self.repos[CONTRACT_REPO].repo_path + versions_path + '/*.py')
+ repo_path = self.repos[upgrades.CONTRACT_BRANCH].repo_path
+ contract_list = glob.glob(repo_path + versions_path + '/*.py')
self.assertRepoFileNamePrefix(contract_list, 'contract')
def assertRepoFileNamePrefix(self, repo_list, prefix):
@@ -1699,58 +522,45 @@ class VersionTests(SqlMigrateBase):
self.assertRegex(file_name, pattern, msg)
-class MigrationValidation(SqlMigrateBase, unit.TestCase):
+class MigrationValidation(MigrateBase, unit.TestCase):
"""Test validation of database between database phases."""
def _set_db_sync_command_versions(self):
- self.expand(1)
- self.migrate(1)
- self.contract(1)
- self.assertEqual(upgrades.get_db_version('expand_repo'), 1)
- self.assertEqual(upgrades.get_db_version('data_migration_repo'), 1)
- self.assertEqual(upgrades.get_db_version('contract_repo'), 1)
-
- def test_running_db_sync_expand_without_up_to_date_legacy_fails(self):
- # Set Legacy version and then test that running expand fails if Legacy
- # isn't at the latest version.
- self.upgrade(67)
- latest_version = self.repos[EXPAND_REPO].max_version
- self.assertRaises(
- db_exception.DBMigrationError,
- self.expand,
- latest_version,
- "You are attempting to upgrade migrate ahead of expand")
+ self.expand(upgrades.INITIAL_VERSION + 1)
+ self.migrate(upgrades.INITIAL_VERSION + 1)
+ self.contract(upgrades.INITIAL_VERSION + 1)
+ for version in (
+ upgrades.get_db_version('expand'),
+ upgrades.get_db_version('data_migration'),
+ upgrades.get_db_version('contract'),
+ ):
+ self.assertEqual(upgrades.INITIAL_VERSION + 1, version)
def test_running_db_sync_migrate_ahead_of_expand_fails(self):
- self.upgrade()
self._set_db_sync_command_versions()
self.assertRaises(
db_exception.DBMigrationError,
self.migrate,
- 2,
- "You are attempting to upgrade migrate ahead of expand")
+ upgrades.INITIAL_VERSION + 2,
+ "You are attempting to upgrade migrate ahead of expand",
+ )
def test_running_db_sync_contract_ahead_of_migrate_fails(self):
- self.upgrade()
self._set_db_sync_command_versions()
self.assertRaises(
db_exception.DBMigrationError,
self.contract,
- 2,
- "You are attempting to upgrade contract ahead of migrate")
+ upgrades.INITIAL_VERSION + 2,
+ "You are attempting to upgrade contract ahead of migrate",
+ )
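A plain-Python sketch of the ordering invariant the two tests above assert (the real checks live in keystone's upgrade code and raise DBMigrationError; the function and exception here are illustrative):

def check_phase_order(expand_version, migrate_version, contract_version):
    # each phase may never advance past the phase that feeds it:
    # expand >= data migration >= contract
    if migrate_version > expand_version:
        raise RuntimeError(
            'You are attempting to upgrade migrate ahead of expand')
    if contract_version > migrate_version:
        raise RuntimeError(
            'You are attempting to upgrade contract ahead of migrate')

check_phase_order(expand_version=2, migrate_version=2, contract_version=1)  # ok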
-class FullMigration(SqlMigrateBase, unit.TestCase):
+class FullMigration(MigrateBase, unit.TestCase):
"""Test complete orchestration between all database phases."""
- def setUp(self):
- super(FullMigration, self).setUp()
- # Upgrade the legacy repository
- self.upgrade()
-
def test_db_sync_check(self):
checker = cli.DbSync()
- latest_version = self.repos[EXPAND_REPO].max_version
+ latest_version = self.repos[upgrades.EXPAND_BRANCH].max_version
# If the expand repository doesn't exist yet, then we need to make sure
# we advertise that `--expand` must be run first.
@@ -1761,7 +571,7 @@ class FullMigration(SqlMigrateBase, unit.TestCase):
# Assert the correct message is printed when expand is the first step
# that needs to run
- self.expand(1)
+ self.expand(upgrades.INITIAL_VERSION + 1)
log_info = self.useFixture(fixtures.FakeLogger(level=log.INFO))
status = checker.check_db_sync_status()
self.assertIn("keystone-manage db_sync --expand", log_info.output)
@@ -1795,1722 +605,37 @@ class FullMigration(SqlMigrateBase, unit.TestCase):
        # We shouldn't allow operators to accidentally run migrations out of
# order. This test ensures we fail if we attempt to upgrade the
# contract repository ahead of the expand or migrate repositories.
- self.expand(3)
- self.migrate(3)
- self.assertRaises(db_exception.DBMigrationError, self.contract, 4)
-
- def test_migration_002_password_created_at_not_nullable(self):
- # upgrade each repository to 001
- self.expand(1)
- self.migrate(1)
- self.contract(1)
-
- password = sqlalchemy.Table('password', self.metadata, autoload=True)
- self.assertTrue(password.c.created_at.nullable)
- # upgrade each repository to 002
- self.expand(2)
- self.migrate(2)
- self.contract(2)
- password = sqlalchemy.Table('password', self.metadata, autoload=True)
- if self.engine.name != 'sqlite':
- self.assertFalse(password.c.created_at.nullable)
-
- def test_migration_003_migrate_unencrypted_credentials(self):
- self.useFixture(
- ksfixtures.KeyRepository(
- self.config_fixture,
- 'credential',
- credential_fernet.MAX_ACTIVE_KEYS
- )
- )
-
- session = self.sessionmaker()
- credential_table_name = 'credential'
-
- # upgrade each repository to 002
- self.expand(2)
- self.migrate(2)
- self.contract(2)
-
- # populate the credential table with some sample credentials
- credentials = list()
- for i in range(5):
- credential = {'id': uuid.uuid4().hex,
- 'blob': uuid.uuid4().hex,
- 'user_id': uuid.uuid4().hex,
- 'type': 'cert'}
- credentials.append(credential)
- self.insert_dict(session, credential_table_name, credential)
-
- # verify the current schema
- self.assertTableColumns(
- credential_table_name,
- ['id', 'user_id', 'project_id', 'type', 'blob', 'extra']
- )
-
- # upgrade expand repo to 003 to add new columns
- self.expand(3)
-
- # verify encrypted_blob and key_hash columns have been added and verify
- # the original blob column is still there
- self.assertTableColumns(
- credential_table_name,
- ['id', 'user_id', 'project_id', 'type', 'blob', 'extra',
- 'key_hash', 'encrypted_blob']
- )
-
- # verify triggers by making sure we can't write to the credential table
- credential = {'id': uuid.uuid4().hex,
- 'blob': uuid.uuid4().hex,
- 'user_id': uuid.uuid4().hex,
- 'type': 'cert'}
- self.assertRaises(db_exception.DBError,
- self.insert_dict,
- session,
- credential_table_name,
- credential)
-
- # upgrade migrate repo to 003 to migrate existing credentials
- self.migrate(3)
-
- # make sure we've actually updated the credential with the
- # encrypted blob and the corresponding key hash
- credential_table = sqlalchemy.Table(
- credential_table_name,
- self.metadata,
- autoload=True
- )
- for credential in credentials:
- filter = credential_table.c.id == credential['id']
- cols = [credential_table.c.key_hash, credential_table.c.blob,
- credential_table.c.encrypted_blob]
- q = sqlalchemy.select(cols).where(filter)
- result = session.execute(q).fetchone()
-
- self.assertIsNotNone(result.encrypted_blob)
- self.assertIsNotNone(result.key_hash)
- # verify the original blob column is still populated
- self.assertEqual(result.blob, credential['blob'])
-
- # verify we can't make any writes to the credential table
- credential = {'id': uuid.uuid4().hex,
- 'blob': uuid.uuid4().hex,
- 'user_id': uuid.uuid4().hex,
- 'key_hash': uuid.uuid4().hex,
- 'type': 'cert'}
- self.assertRaises(db_exception.DBError,
- self.insert_dict,
- session,
- credential_table_name,
- credential)
-
- # upgrade contract repo to 003 to remove triggers and blob column
- self.contract(3)
-
- # verify the new schema doesn't have a blob column anymore
- self.assertTableColumns(
- credential_table_name,
- ['id', 'user_id', 'project_id', 'type', 'extra', 'key_hash',
- 'encrypted_blob']
- )
-
- # verify that the triggers are gone by writing to the database
- credential = {'id': uuid.uuid4().hex,
- 'encrypted_blob': uuid.uuid4().hex,
- 'key_hash': uuid.uuid4().hex,
- 'user_id': uuid.uuid4().hex,
- 'type': 'cert'}
- self.insert_dict(session, credential_table_name, credential)
-
- def test_migration_004_reset_password_created_at(self):
- # upgrade each repository to 003 and test
- self.expand(3)
- self.migrate(3)
- self.contract(3)
- password = sqlalchemy.Table('password', self.metadata, autoload=True)
- # postgresql returns 'TIMESTAMP WITHOUT TIME ZONE'
- self.assertTrue(
- str(password.c.created_at.type).startswith('TIMESTAMP'))
- # upgrade each repository to 004 and test
- self.expand(4)
- self.migrate(4)
- self.contract(4)
- password = sqlalchemy.Table('password', self.metadata, autoload=True)
- # type would still be TIMESTAMP with postgresql
- if self.engine.name == 'postgresql':
- self.assertTrue(
- str(password.c.created_at.type).startswith('TIMESTAMP'))
- else:
- self.assertEqual('DATETIME', str(password.c.created_at.type))
- self.assertFalse(password.c.created_at.nullable)
-
- def test_migration_010_add_revocation_event_indexes(self):
- self.expand(9)
- self.migrate(9)
- self.contract(9)
- self.assertFalse(self.does_index_exist(
- 'revocation_event',
- 'ix_revocation_event_issued_before'))
- self.assertFalse(self.does_index_exist(
- 'revocation_event',
- 'ix_revocation_event_project_id_issued_before'))
- self.assertFalse(self.does_index_exist(
- 'revocation_event',
- 'ix_revocation_event_user_id_issued_before'))
- self.assertFalse(self.does_index_exist(
- 'revocation_event',
- 'ix_revocation_event_audit_id_issued_before'))
- self.expand(10)
- self.migrate(10)
- self.contract(10)
- self.assertTrue(self.does_index_exist(
- 'revocation_event',
- 'ix_revocation_event_issued_before'))
- self.assertTrue(self.does_index_exist(
- 'revocation_event',
- 'ix_revocation_event_project_id_issued_before'))
- self.assertTrue(self.does_index_exist(
- 'revocation_event',
- 'ix_revocation_event_user_id_issued_before'))
- self.assertTrue(self.does_index_exist(
- 'revocation_event',
- 'ix_revocation_event_audit_id_issued_before'))
-
- def test_migration_011_user_id_unique_for_nonlocal_user(self):
- table_name = 'nonlocal_user'
- column = 'user_id'
- self.expand(10)
- self.migrate(10)
- self.contract(10)
- self.assertFalse(self.does_unique_constraint_exist(table_name, column))
- self.expand(11)
- self.migrate(11)
- self.contract(11)
- self.assertTrue(self.does_unique_constraint_exist(table_name, column))
-
- def test_migration_012_add_domain_id_to_idp(self):
- def _create_domain():
- domain_id = uuid.uuid4().hex
- domain = {
- 'id': domain_id,
- 'name': domain_id,
- 'enabled': True,
- 'description': uuid.uuid4().hex,
- 'domain_id': resource_base.NULL_DOMAIN_ID,
- 'is_domain': True,
- 'parent_id': None,
- 'extra': '{}'
- }
- self.insert_dict(session, 'project', domain)
- return domain_id
-
- def _get_new_idp(domain_id):
- new_idp = {'id': uuid.uuid4().hex,
- 'domain_id': domain_id,
- 'enabled': True,
- 'description': uuid.uuid4().hex}
- return new_idp
-
- session = self.sessionmaker()
- idp_name = 'identity_provider'
- self.expand(11)
- self.migrate(11)
- self.contract(11)
- self.assertTableColumns(idp_name,
- ['id',
- 'enabled',
- 'description'])
- # add some data
- for i in range(5):
- idp = {'id': uuid.uuid4().hex,
- 'enabled': True,
- 'description': uuid.uuid4().hex}
- self.insert_dict(session, idp_name, idp)
-
- # upgrade
- self.expand(12)
- self.assertTableColumns(idp_name,
- ['id',
- 'domain_id',
- 'enabled',
- 'description'])
-
- # confirm we cannot insert an idp during expand
- domain_id = _create_domain()
- new_idp = _get_new_idp(domain_id)
- self.assertRaises(db_exception.DBError, self.insert_dict, session,
- idp_name, new_idp)
-
- # confirm we cannot insert an idp during migrate
- self.migrate(12)
- self.assertRaises(db_exception.DBError, self.insert_dict, session,
- idp_name, new_idp)
-
- # confirm we can insert a new idp after contract
- self.contract(12)
- self.insert_dict(session, idp_name, new_idp)
-
- # confirm domain_id column is not null
- idp_table = sqlalchemy.Table(idp_name, self.metadata, autoload=True)
- self.assertFalse(idp_table.c.domain_id.nullable)
-
- def test_migration_013_protocol_cascade_delete_for_federated_user(self):
- if self.engine.name == 'sqlite':
- self.skipTest('sqlite backend does not support foreign keys')
-
- self.expand(12)
- self.migrate(12)
- self.contract(12)
-
- # This test requires a bit of setup to work properly: first we create
- # an identity provider, a mapping and a protocol. Then we create a
- # federated user and delete the protocol. We expect the federated user
- # to be deleted as well.
-
- session = self.sessionmaker()
-
- def _create_protocol():
- domain = {
- 'id': uuid.uuid4().hex,
- 'name': uuid.uuid4().hex,
- 'domain_id': resource_base.NULL_DOMAIN_ID,
- 'is_domain': True,
- 'parent_id': None
- }
- self.insert_dict(session, 'project', domain)
-
- idp = {'id': uuid.uuid4().hex, 'enabled': True,
- 'domain_id': domain['id']}
- self.insert_dict(session, 'identity_provider', idp)
-
- mapping = {'id': uuid.uuid4().hex, 'rules': json.dumps([])}
- self.insert_dict(session, 'mapping', mapping)
-
- protocol = {'id': uuid.uuid4().hex, 'idp_id': idp['id'],
- 'mapping_id': mapping['id']}
- protocol_table = sqlalchemy.Table(
- 'federation_protocol', self.metadata, autoload=True)
- self.insert_dict(session, 'federation_protocol', protocol,
- table=protocol_table)
-
- return protocol, protocol_table
-
- def _create_federated_user(idp_id, protocol_id):
- user = {'id': uuid.uuid4().hex}
- self.insert_dict(session, 'user', user)
-
- # NOTE(rodrigods): do not set the ID; the engine will do that
- # for us, and we won't need it later.
- federated_user = {
- 'user_id': user['id'], 'idp_id': idp_id,
- 'protocol_id': protocol_id, 'unique_id': uuid.uuid4().hex}
- federated_table = sqlalchemy.Table(
- 'federated_user', self.metadata, autoload=True)
- self.insert_dict(session, 'federated_user', federated_user,
- table=federated_table)
-
- return federated_user, federated_table
-
- protocol, protocol_table = _create_protocol()
- federated_user, federated_table = _create_federated_user(
- protocol['idp_id'], protocol['id'])
-
- # before updating the foreign key, we won't be able to delete the
- # protocol
- self.assertRaises(db_exception.DBError,
- session.execute,
- protocol_table.delete().where(
- protocol_table.c.id == protocol['id']))
-
- self.expand(13)
- self.migrate(13)
- self.contract(13)
-
- # now we are able to delete the protocol
- session.execute(
- protocol_table.delete().where(
- protocol_table.c.id == protocol['id']))
-
- # assert the cascade deletion worked
- federated_users = session.query(federated_table).filter_by(
- protocol_id=federated_user['protocol_id']).all()
- self.assertThat(federated_users, matchers.HasLength(0))
-
- def test_migration_014_add_domain_id_to_user_table(self):
- def create_domain():
- table = sqlalchemy.Table('project', self.metadata, autoload=True)
- domain_id = uuid.uuid4().hex
- domain = {
- 'id': domain_id,
- 'name': domain_id,
- 'enabled': True,
- 'description': uuid.uuid4().hex,
- 'domain_id': resource_base.NULL_DOMAIN_ID,
- 'is_domain': True,
- 'parent_id': None,
- 'extra': '{}'
- }
- table.insert().values(domain).execute()
- return domain_id
-
- def create_user(table):
- user_id = uuid.uuid4().hex
- user = {'id': user_id, 'enabled': True}
- table.insert().values(user).execute()
- return user_id
-
- # insert local_user or nonlocal_user
- def create_child_user(table, user_id, domain_id):
- child_user = {
- 'user_id': user_id,
- 'domain_id': domain_id,
- 'name': uuid.uuid4().hex
- }
- table.insert().values(child_user).execute()
-
- # update local_user or nonlocal_user
- def update_child_user(table, user_id, new_domain_id):
- table.update().where(table.c.user_id == user_id).values(
- domain_id=new_domain_id).execute()
-
- def assertUserDomain(user_id, domain_id):
- user = sqlalchemy.Table('user', self.metadata, autoload=True)
- cols = [user.c.domain_id]
- filter = user.c.id == user_id
- sel = sqlalchemy.select(cols).where(filter)
- domains = sel.execute().fetchone()
- self.assertEqual(domain_id, domains[0])
-
- user_table_name = 'user'
- self.expand(13)
- self.migrate(13)
- self.contract(13)
- self.assertTableColumns(
- user_table_name, ['id', 'extra', 'enabled', 'default_project_id',
- 'created_at', 'last_active_at'])
- self.expand(14)
- self.assertTableColumns(
- user_table_name, ['id', 'extra', 'enabled', 'default_project_id',
- 'created_at', 'last_active_at', 'domain_id'])
- user_table = sqlalchemy.Table(user_table_name, self.metadata,
- autoload=True)
- local_user_table = sqlalchemy.Table('local_user', self.metadata,
- autoload=True)
- nonlocal_user_table = sqlalchemy.Table('nonlocal_user', self.metadata,
- autoload=True)
-
- # add users before migrate to test that the user.domain_id gets updated
- # after migrate
- user_ids = []
- expected_domain_id = create_domain()
- user_id = create_user(user_table)
- create_child_user(local_user_table, user_id, expected_domain_id)
- user_ids.append(user_id)
- user_id = create_user(user_table)
- create_child_user(nonlocal_user_table, user_id, expected_domain_id)
- user_ids.append(user_id)
-
- self.migrate(14)
- # test local_user insert trigger updates user.domain_id
- user_id = create_user(user_table)
- domain_id = create_domain()
- create_child_user(local_user_table, user_id, domain_id)
- assertUserDomain(user_id, domain_id)
-
- # test local_user update trigger updates user.domain_id
- new_domain_id = create_domain()
- update_child_user(local_user_table, user_id, new_domain_id)
- assertUserDomain(user_id, new_domain_id)
-
- # test nonlocal_user insert trigger updates user.domain_id
- user_id = create_user(user_table)
- create_child_user(nonlocal_user_table, user_id, domain_id)
- assertUserDomain(user_id, domain_id)
-
- # test nonlocal_user update trigger updates user.domain_id
- update_child_user(nonlocal_user_table, user_id, new_domain_id)
- assertUserDomain(user_id, new_domain_id)
-
- self.contract(14)
- # test migrate updated the user.domain_id
- for user_id in user_ids:
- assertUserDomain(user_id, expected_domain_id)
-
- # test unique and fk constraints
- if self.engine.name == 'mysql':
- self.assertTrue(
- self.does_index_exist('user', 'ixu_user_id_domain_id'))
- else:
- self.assertTrue(
- self.does_constraint_exist('user', 'ixu_user_id_domain_id'))
- self.assertTrue(self.does_fk_exist('local_user', 'user_id'))
- self.assertTrue(self.does_fk_exist('local_user', 'domain_id'))
- self.assertTrue(self.does_fk_exist('nonlocal_user', 'user_id'))
- self.assertTrue(self.does_fk_exist('nonlocal_user', 'domain_id'))
-
- def test_migration_015_update_federated_user_domain(self):
- def create_domain():
- table = sqlalchemy.Table('project', self.metadata, autoload=True)
- domain_id = uuid.uuid4().hex
- domain = {
- 'id': domain_id,
- 'name': domain_id,
- 'enabled': True,
- 'description': uuid.uuid4().hex,
- 'domain_id': resource_base.NULL_DOMAIN_ID,
- 'is_domain': True,
- 'parent_id': None,
- 'extra': '{}'
- }
- table.insert().values(domain).execute()
- return domain_id
-
- def create_idp(domain_id):
- table = sqlalchemy.Table('identity_provider', self.metadata,
- autoload=True)
- idp_id = uuid.uuid4().hex
- idp = {
- 'id': idp_id,
- 'domain_id': domain_id,
- 'enabled': True,
- 'description': uuid.uuid4().hex
- }
- table.insert().values(idp).execute()
- return idp_id
-
- def create_protocol(idp_id):
- table = sqlalchemy.Table('federation_protocol', self.metadata,
- autoload=True)
- protocol_id = uuid.uuid4().hex
- protocol = {
- 'id': protocol_id,
- 'idp_id': idp_id,
- 'mapping_id': uuid.uuid4().hex
- }
- table.insert().values(protocol).execute()
- return protocol_id
-
- def create_user():
- table = sqlalchemy.Table('user', self.metadata, autoload=True)
- user_id = uuid.uuid4().hex
- user = {'id': user_id, 'enabled': True}
- table.insert().values(user).execute()
- return user_id
-
- def create_federated_user(user_id, idp_id, protocol_id):
- table = sqlalchemy.Table('federated_user', self.metadata,
- autoload=True)
- federated_user = {
- 'user_id': user_id,
- 'idp_id': idp_id,
- 'protocol_id': protocol_id,
- 'unique_id': uuid.uuid4().hex,
- 'display_name': uuid.uuid4().hex
- }
- table.insert().values(federated_user).execute()
-
- def assertUserDomain(user_id, domain_id):
- table = sqlalchemy.Table('user', self.metadata, autoload=True)
- where = table.c.id == user_id
- stmt = sqlalchemy.select([table.c.domain_id]).where(where)
- domains = stmt.execute().fetchone()
- self.assertEqual(domain_id, domains[0])
-
- def assertUserDomainIsNone(user_id):
- table = sqlalchemy.Table('user', self.metadata, autoload=True)
- where = table.c.id == user_id
- stmt = sqlalchemy.select([table.c.domain_id]).where(where)
- domains = stmt.execute().fetchone()
- self.assertIsNone(domains[0])
-
- self.expand(14)
- self.migrate(14)
- self.contract(14)
-
- domain_id = create_domain()
- idp_id = create_idp(domain_id)
- protocol_id = create_protocol(idp_id)
-
- # create user before expand to test data migration
- user_id_before_expand = create_user()
- create_federated_user(user_id_before_expand, idp_id, protocol_id)
- assertUserDomainIsNone(user_id_before_expand)
-
- self.expand(15)
- # create user before migrate to test insert trigger
- user_id_before_migrate = create_user()
- create_federated_user(user_id_before_migrate, idp_id, protocol_id)
- assertUserDomain(user_id_before_migrate, domain_id)
-
- self.migrate(15)
- # test insert trigger after migrate
- user_id = create_user()
- create_federated_user(user_id, idp_id, protocol_id)
- assertUserDomain(user_id, domain_id)
-
- self.contract(15)
- # test migrate updated the user.domain_id
- assertUserDomain(user_id_before_expand, domain_id)
-
- # verify that the user.domain_id is now not nullable
- user_table = sqlalchemy.Table('user', self.metadata, autoload=True)
- self.assertFalse(user_table.c.domain_id.nullable)
-
- def test_migration_016_add_user_options(self):
- self.expand(15)
- self.migrate(15)
- self.contract(15)
-
- user_option = 'user_option'
- self.assertTableDoesNotExist(user_option)
- self.expand(16)
- self.migrate(16)
- self.contract(16)
- self.assertTableColumns(user_option,
- ['user_id', 'option_id', 'option_value'])
-
- def test_migration_024_add_created_expires_at_int_columns_password(self):
-
- self.expand(23)
- self.migrate(23)
- self.contract(23)
-
- password_table_name = 'password'
-
- self.assertTableColumns(
- password_table_name,
- ['id', 'local_user_id', 'password', 'password_hash', 'created_at',
- 'expires_at', 'self_service']
- )
-
- self.expand(24)
-
- self.assertTableColumns(
- password_table_name,
- ['id', 'local_user_id', 'password', 'password_hash', 'created_at',
- 'expires_at', 'created_at_int', 'expires_at_int', 'self_service']
- )
-
- # Create User and Local User
- project_table = sqlalchemy.Table('project', self.metadata,
- autoload=True)
- domain_data = {'id': '_domain', 'domain_id': '_domain',
- 'enabled': True, 'name': '_domain', 'is_domain': True}
- project_table.insert().values(domain_data).execute()
- user_table = sqlalchemy.Table('user', self.metadata, autoload=True)
- user_id = uuid.uuid4().hex
- user = {'id': user_id, 'enabled': True, 'domain_id': domain_data['id']}
- user_table.insert().values(user).execute()
- local_user_table = sqlalchemy.Table('local_user', self.metadata,
- autoload=True)
- local_user = {
- 'id': 1, 'user_id': user_id, 'domain_id': user['domain_id'],
- 'name': 'name'}
-
- local_user_table.insert().values(local_user).execute()
-
- password_table = sqlalchemy.Table('password',
- self.metadata, autoload=True)
- password_data = {
- 'local_user_id': local_user['id'],
- 'created_at': datetime.datetime.utcnow(),
- 'expires_at': datetime.datetime.utcnow()}
- password_table.insert().values(password_data).execute()
-
- self.migrate(24)
- self.contract(24)
- passwords = list(password_table.select().execute())
-
- epoch = datetime.datetime.fromtimestamp(0, tz=pytz.UTC)
-
- for p in passwords:
- c = (p.created_at.replace(tzinfo=pytz.UTC) - epoch).total_seconds()
- e = (p.expires_at.replace(tzinfo=pytz.UTC) - epoch).total_seconds()
- self.assertEqual(p.created_at_int, int(c * 1000000))
- self.assertEqual(p.expires_at_int, int(e * 1000000))
-
- # Test the contract phase and ensure the data cannot be null
- self.contract(24)
- meta = sqlalchemy.MetaData(self.engine)
- pw_table = sqlalchemy.Table('password', meta, autoload=True)
- self.assertFalse(pw_table.c.created_at_int.nullable)
-
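The loop above checks the integer columns against a hand-computed value; a standalone sketch of the same conversion (microseconds since the Unix epoch, computed from a timezone-aware datetime):

import datetime
import pytz

epoch = datetime.datetime.fromtimestamp(0, tz=pytz.UTC)
created_at = datetime.datetime(2018, 1, 1, tzinfo=pytz.UTC)
created_at_int = int((created_at - epoch).total_seconds() * 1000000)
assert created_at_int == 1514764800 * 1000000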
- def test_migration_30_expand_add_project_tags_table(self):
- self.expand(29)
- self.migrate(29)
- self.contract(29)
-
- table_name = 'project_tag'
- self.assertTableDoesNotExist(table_name)
-
- self.expand(30)
- self.migrate(30)
- self.contract(30)
-
- self.assertTableExists(table_name)
- self.assertTableColumns(
- table_name,
- ['project_id', 'name'])
-
- def test_migration_030_project_tags_works_correctly_after_migration(self):
- if self.engine.name == 'sqlite':
- self.skipTest('sqlite backend does not support foreign keys')
-
- self.expand(30)
- self.migrate(30)
- self.contract(30)
-
- project_table = sqlalchemy.Table(
- 'project', self.metadata, autoload=True)
- tag_table = sqlalchemy.Table(
- 'project_tag', self.metadata, autoload=True)
-
- session = self.sessionmaker()
- project_id = uuid.uuid4().hex
-
- project = {
- 'id': project_id,
- 'name': uuid.uuid4().hex,
- 'enabled': True,
- 'domain_id': resource_base.NULL_DOMAIN_ID,
- 'is_domain': False
- }
-
- tag = {
- 'project_id': project_id,
- 'name': uuid.uuid4().hex
- }
-
- self.insert_dict(session, 'project', project)
- self.insert_dict(session, 'project_tag', tag)
-
- tags_query = session.query(tag_table).filter_by(
- project_id=project_id).all()
- self.assertThat(tags_query, matchers.HasLength(1))
-
- # Adding duplicate tags should cause an error.
- self.assertRaises(db_exception.DBDuplicateEntry,
- self.insert_dict,
- session, 'project_tag', tag)
-
- session.execute(
- project_table.delete().where(project_table.c.id == project_id)
- )
-
- tags_query = session.query(tag_table).filter_by(
- project_id=project_id).all()
- self.assertThat(tags_query, matchers.HasLength(0))
-
- session.close()
-
- def test_migration_031_adds_system_assignment_table(self):
- self.expand(30)
- self.migrate(30)
- self.contract(30)
-
- system_assignment_table_name = 'system_assignment'
- self.assertTableDoesNotExist(system_assignment_table_name)
-
- self.expand(31)
- self.migrate(31)
- self.contract(31)
-
- self.assertTableExists(system_assignment_table_name)
- self.assertTableColumns(
- system_assignment_table_name,
- ['type', 'actor_id', 'target_id', 'role_id', 'inherited']
- )
-
- system_assignment_table = sqlalchemy.Table(
- system_assignment_table_name, self.metadata, autoload=True
- )
-
- system_user = {
- 'type': 'UserSystem',
- 'target_id': uuid.uuid4().hex,
- 'actor_id': uuid.uuid4().hex,
- 'role_id': uuid.uuid4().hex,
- 'inherited': False
- }
- system_assignment_table.insert().values(system_user).execute()
-
- system_group = {
- 'type': 'GroupSystem',
- 'target_id': uuid.uuid4().hex,
- 'actor_id': uuid.uuid4().hex,
- 'role_id': uuid.uuid4().hex,
- 'inherited': False
- }
- system_assignment_table.insert().values(system_group).execute()
-
- def test_migration_032_add_expires_at_int_column_trust(self):
-
- self.expand(31)
- self.migrate(31)
- self.contract(31)
-
- trust_table_name = 'trust'
-
- self.assertTableColumns(
- trust_table_name,
- ['id', 'trustor_user_id', 'trustee_user_id', 'project_id',
- 'impersonation', 'deleted_at', 'expires_at', 'remaining_uses',
- 'extra'],
- )
-
- self.expand(32)
-
- self.assertTableColumns(
- trust_table_name,
- ['id', 'trustor_user_id', 'trustee_user_id', 'project_id',
- 'impersonation', 'deleted_at', 'expires_at', 'expires_at_int',
- 'remaining_uses', 'extra'],
- )
-
- # Create Trust
- trust_table = sqlalchemy.Table('trust', self.metadata,
- autoload=True)
- trust_1_data = {
- 'id': uuid.uuid4().hex,
- 'trustor_user_id': uuid.uuid4().hex,
- 'trustee_user_id': uuid.uuid4().hex,
- 'project_id': uuid.uuid4().hex,
- 'impersonation': False,
- 'expires_at': datetime.datetime.utcnow()
- }
- trust_2_data = {
- 'id': uuid.uuid4().hex,
- 'trustor_user_id': uuid.uuid4().hex,
- 'trustee_user_id': uuid.uuid4().hex,
- 'project_id': uuid.uuid4().hex,
- 'impersonation': False,
- 'expires_at': None
- }
- trust_table.insert().values(trust_1_data).execute()
- trust_table.insert().values(trust_2_data).execute()
-
- self.migrate(32)
- self.contract(32)
- trusts = list(trust_table.select().execute())
-
- epoch = datetime.datetime.fromtimestamp(0, tz=pytz.UTC)
-
- for t in trusts:
- if t.expires_at:
- e = t.expires_at.replace(tzinfo=pytz.UTC) - epoch
- e = e.total_seconds()
- self.assertEqual(t.expires_at_int, int(e * 1000000))
-
- def test_migration_033_adds_limits_table(self):
- self.expand(32)
- self.migrate(32)
- self.contract(32)
-
- registered_limit_table_name = 'registered_limit'
- limit_table_name = 'limit'
- self.assertTableDoesNotExist(registered_limit_table_name)
- self.assertTableDoesNotExist(limit_table_name)
-
- self.expand(33)
- self.migrate(33)
- self.contract(33)
-
- self.assertTableExists(registered_limit_table_name)
- self.assertTableColumns(
- registered_limit_table_name,
- ['id', 'service_id', 'resource_name', 'region_id', 'default_limit']
- )
- self.assertTableExists(limit_table_name)
- self.assertTableColumns(
- limit_table_name,
- ['id', 'project_id', 'service_id', 'resource_name', 'region_id',
- 'resource_limit']
- )
-
- session = self.sessionmaker()
- service_id = uuid.uuid4().hex
- service = {
- 'id': service_id,
- 'type': 'compute',
- 'enabled': True
- }
- region = {
- 'id': 'RegionOne',
- 'description': 'test'
- }
- project_id = uuid.uuid4().hex
- project = {
- 'id': project_id,
- 'name': 'nova',
- 'enabled': True,
- 'domain_id': resource_base.NULL_DOMAIN_ID,
- 'is_domain': False
- }
- self.insert_dict(session, 'service', service)
- self.insert_dict(session, 'region', region)
- self.insert_dict(session, 'project', project)
-
- # Insert one registered limit
- registered_limit_table = sqlalchemy.Table(
- registered_limit_table_name, self.metadata, autoload=True)
- registered_limit = {
- 'id': uuid.uuid4().hex,
- 'service_id': service_id,
- 'region_id': 'RegionOne',
- 'resource_name': 'cores',
- 'default_limit': 10
- }
- registered_limit_table.insert().values(registered_limit).execute()
-
- # Inserting another one with the same service_id, region_id and
- # resource_name will raise an error.
- registered_limit['id'] = uuid.uuid4().hex
- registered_limit['default_limit'] = 20
- self.assertRaises(db_exception.DBDuplicateEntry,
- registered_limit_table.insert().values(
- registered_limit).execute)
-
- # Insert one without region_id
- registered_limit_without_region = {
- 'id': uuid.uuid4().hex,
- 'service_id': service_id,
- 'resource_name': 'cores',
- 'default_limit': 10
- }
- registered_limit_table.insert().values(
- registered_limit_without_region).execute()
-
- # Inserting another one with the same service_id and resource_name
- # but a None region_id will not raise an error, because the
- # UniqueConstraint does not apply when one of the columns is None.
- # Forbidding this should be controlled at the Manager layer (see the
- # standalone sketch after this test).
- registered_limit_without_region['id'] = uuid.uuid4().hex
- registered_limit_table.insert().values(
- registered_limit_without_region).execute()
-
- # Insert one limit
- limit_table = sqlalchemy.Table(
- limit_table_name, self.metadata, autoload=True)
- limit = {
- 'id': uuid.uuid4().hex,
- 'project_id': project_id,
- 'service_id': service_id,
- 'region_id': 'RegionOne',
- 'resource_name': 'cores',
- 'resource_limit': 5
- }
- limit_table.insert().values(limit).execute()
-
- # Inserting another one with the same project_id, service_id,
- # region_id and resource_name raises an error.
- limit['id'] = uuid.uuid4().hex
- limit['resource_limit'] = 10
- self.assertRaises(db_exception.DBDuplicateEntry,
- limit_table.insert().values(limit).execute)
-
- # Insert one without region_id
- limit_without_region = {
- 'id': uuid.uuid4().hex,
- 'project_id': project_id,
- 'service_id': service_id,
- 'resource_name': 'cores',
- 'resource_limit': 5
- }
- limit_table.insert().values(limit_without_region).execute()
-
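A self-contained sqlite3 illustration (not the keystone schema) of the NULL behaviour noted in the comment above: a unique constraint over (service_id, region_id, resource_name) does not reject duplicates when region_id is NULL, because SQL NULLs never compare equal, so duplicates have to be policed at the manager layer:

import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute(
    'CREATE TABLE registered_limit ('
    ' id TEXT PRIMARY KEY, service_id TEXT, region_id TEXT,'
    ' resource_name TEXT, default_limit INTEGER,'
    ' UNIQUE (service_id, region_id, resource_name))')
row = ('s1', None, 'cores', 10)
conn.execute('INSERT INTO registered_limit VALUES (?, ?, ?, ?, ?)', ('a',) + row)
conn.execute('INSERT INTO registered_limit VALUES (?, ?, ?, ?, ?)', ('b',) + row)
# both inserts succeed even though everything except the id is identical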
- def test_migration_034_adds_application_credential_table(self):
- self.expand(33)
- self.migrate(33)
- self.contract(33)
-
- application_credential_table_name = 'application_credential'
- self.assertTableDoesNotExist(application_credential_table_name)
- application_credential_role_table_name = 'application_credential_role'
- self.assertTableDoesNotExist(application_credential_role_table_name)
-
- self.expand(34)
- self.migrate(34)
- self.contract(34)
-
- self.assertTableExists(application_credential_table_name)
- self.assertTableColumns(
- application_credential_table_name,
- ['internal_id', 'id', 'name', 'secret_hash',
- 'description', 'user_id', 'project_id', 'expires_at',
- 'allow_application_credential_creation']
- )
- if self.engine.name == 'mysql':
- self.assertTrue(self.does_index_exist(
- 'application_credential', 'duplicate_app_cred_constraint'))
- else:
- self.assertTrue(self.does_constraint_exist(
- 'application_credential', 'duplicate_app_cred_constraint'))
- self.assertTableExists(application_credential_role_table_name)
- self.assertTableColumns(
- application_credential_role_table_name,
- ['application_credential_id', 'role_id']
- )
-
- app_cred_table = sqlalchemy.Table(
- application_credential_table_name, self.metadata, autoload=True
- )
- app_cred_role_table = sqlalchemy.Table(
- application_credential_role_table_name,
- self.metadata, autoload=True
- )
- self.assertTrue(self.does_fk_exist('application_credential_role',
- 'application_credential_id'))
-
- expires_at = datetime.datetime.utcnow().replace(tzinfo=pytz.UTC)
- epoch = datetime.datetime.fromtimestamp(0, tz=pytz.UTC)
- expires_at_int = (expires_at - epoch).total_seconds()
- app_cred = {
- 'internal_id': 1,
- 'id': uuid.uuid4().hex,
- 'name': uuid.uuid4().hex,
- 'secret_hash': uuid.uuid4().hex,
- 'description': uuid.uuid4().hex,
- 'user_id': uuid.uuid4().hex,
- 'project_id': uuid.uuid4().hex,
- 'expires_at': expires_at_int,
- 'allow_application_credential_creation': False
- }
- app_cred_table.insert().values(app_cred).execute()
-
- # Exercise unique constraint
- dup_app_cred = {
- 'internal_id': 2,
- 'id': uuid.uuid4().hex,
- 'name': app_cred['name'],
- 'secret_hash': uuid.uuid4().hex,
- 'user_id': app_cred['user_id'],
- 'project_id': uuid.uuid4().hex
- }
- insert = app_cred_table.insert().values(dup_app_cred)
- self.assertRaises(db_exception.DBDuplicateEntry,
- insert.execute)
-
- role_rel = {
- 'application_credential_id': app_cred['internal_id'],
- 'role_id': uuid.uuid4().hex
- }
- app_cred_role_table.insert().values(role_rel).execute()
-
- # Exercise role table primary keys
- insert = app_cred_role_table.insert().values(role_rel)
- self.assertRaises(db_exception.DBDuplicateEntry, insert.execute)
-
- def test_migration_035_add_system_column_to_credential_table(self):
- self.expand(34)
- self.migrate(34)
- self.contract(34)
-
- application_credential_table_name = 'application_credential'
- self.assertTableExists(application_credential_table_name)
- self.assertTableColumns(
- application_credential_table_name,
- ['internal_id', 'id', 'name', 'secret_hash',
- 'description', 'user_id', 'project_id', 'expires_at',
- 'allow_application_credential_creation']
- )
-
- self.expand(35)
- self.migrate(35)
- self.contract(35)
-
- self.assertTableColumns(
- application_credential_table_name,
- ['internal_id', 'id', 'name', 'secret_hash',
- 'description', 'user_id', 'project_id', 'system', 'expires_at',
- 'allow_application_credential_creation']
- )
-
- application_credential_table = sqlalchemy.Table(
- application_credential_table_name, self.metadata, autoload=True
- )
-
- # Test that we can insert an application credential without project_id
- # defined.
- expires_at = datetime.datetime.utcnow().replace(tzinfo=pytz.UTC)
- epoch = datetime.datetime.fromtimestamp(0, tz=pytz.UTC)
- expires_at_int = (expires_at - epoch).total_seconds()
- app_cred = {
- 'internal_id': 1,
- 'id': uuid.uuid4().hex,
- 'name': uuid.uuid4().hex,
- 'secret_hash': uuid.uuid4().hex,
- 'description': uuid.uuid4().hex,
- 'user_id': uuid.uuid4().hex,
- 'system': uuid.uuid4().hex,
- 'expires_at': expires_at_int,
- 'allow_application_credential_creation': False
- }
- application_credential_table.insert().values(app_cred).execute()
-
- # Test that we can insert an application credential with a project_id
- # and without system defined.
- app_cred = {
- 'internal_id': 2,
- 'id': uuid.uuid4().hex,
- 'name': uuid.uuid4().hex,
- 'secret_hash': uuid.uuid4().hex,
- 'description': uuid.uuid4().hex,
- 'user_id': uuid.uuid4().hex,
- 'project_id': uuid.uuid4().hex,
- 'expires_at': expires_at_int,
- 'allow_application_credential_creation': False
- }
- application_credential_table.insert().values(app_cred).execute()
-
- # Test that we can create an application credential without a project
- # or a system defined. Technically, project_id and system should be
- # mutually exclusive, which will be handled by the application and not
- # the data layer.
- app_cred = {
- 'internal_id': 3,
- 'id': uuid.uuid4().hex,
- 'name': uuid.uuid4().hex,
- 'secret_hash': uuid.uuid4().hex,
- 'description': uuid.uuid4().hex,
- 'user_id': uuid.uuid4().hex,
- 'expires_at': expires_at_int,
- 'allow_application_credential_creation': False
- }
- application_credential_table.insert().values(app_cred).execute()
-
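The comment in the test above notes that project_id and system are meant to be mutually exclusive but that the schema does not enforce it; a tiny sketch of the kind of application-level check implied (hypothetical helper, not keystone code):

def validate_app_cred_target(project_id, system):
    # the schema happily stores both or neither; the application refuses both
    if project_id and system:
        raise ValueError('project_id and system are mutually exclusive')

validate_app_cred_target(project_id='abc123', system=None)  # ok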
- def test_migration_036_rename_application_credentials_column(self):
- self.expand(35)
- self.migrate(35)
- self.contract(35)
-
- application_credential_table_name = 'application_credential'
- application_credential_role_table_name = 'application_credential_role'
-
- self.expand(36)
- self.migrate(36)
- self.contract(36)
-
- self.assertTableColumns(
- application_credential_table_name,
- ['internal_id', 'id', 'name', 'secret_hash',
- 'description', 'user_id', 'project_id', 'system', 'expires_at',
- 'unrestricted']
- )
-
- application_credential_table = sqlalchemy.Table(
- application_credential_table_name, self.metadata, autoload=True
- )
- app_cred_role_table = sqlalchemy.Table(
- application_credential_role_table_name,
- self.metadata, autoload=True
- )
-
- # Test that the new column works
- app_cred = {
- 'internal_id': 1,
- 'id': uuid.uuid4().hex,
- 'name': uuid.uuid4().hex,
- 'secret_hash': uuid.uuid4().hex,
- 'description': uuid.uuid4().hex,
- 'user_id': uuid.uuid4().hex,
- 'system': uuid.uuid4().hex,
- 'expires_at': None,
- 'unrestricted': False
- }
- application_credential_table.insert().values(app_cred).execute()
- role_rel = {
- 'application_credential_id': app_cred['internal_id'],
- 'role_id': uuid.uuid4().hex
- }
- app_cred_role_table.insert().values(role_rel).execute()
-
- def test_migration_037_remove_service_and_region_fk_for_registered_limit(
- self):
- self.expand(37)
- self.migrate(37)
- self.contract(37)
-
- registered_limit_table_name = 'registered_limit'
- registered_limit_table = sqlalchemy.Table(registered_limit_table_name,
- self.metadata, autoload=True)
- self.assertEqual(set([]), registered_limit_table.foreign_keys)
-
- def test_migration_045_add_description_to_limit(self):
-
- self.expand(44)
- self.migrate(44)
- self.contract(44)
-
- registered_limit_table_name = 'registered_limit'
- limit_table_name = 'limit'
-
- self.assertTableExists(registered_limit_table_name)
- self.assertTableExists(limit_table_name)
- self.assertTableColumns(
- registered_limit_table_name,
- ['id', 'service_id', 'region_id', 'resource_name', 'default_limit']
- )
- self.assertTableColumns(
- limit_table_name,
- ['id', 'project_id', 'service_id', 'region_id', 'resource_name',
- 'resource_limit']
- )
-
- self.expand(45)
- self.migrate(45)
- self.contract(45)
-
- registered_limit_table = sqlalchemy.Table(registered_limit_table_name,
- self.metadata, autoload=True)
- limit_table = sqlalchemy.Table(limit_table_name,
- self.metadata, autoload=True)
- self.assertTableColumns(
- registered_limit_table_name,
- ['id', 'service_id', 'region_id', 'resource_name', 'default_limit',
- 'description']
- )
- self.assertTableColumns(
- limit_table_name,
- ['id', 'project_id', 'service_id', 'region_id', 'resource_name',
- 'resource_limit', 'description']
- )
-
- session = self.sessionmaker()
- service_id = uuid.uuid4().hex
- service = {
- 'id': service_id,
- 'type': 'compute',
- 'enabled': True
- }
- region = {
- 'id': 'RegionOne',
- 'description': 'test'
- }
- project_id = uuid.uuid4().hex
- project = {
- 'id': project_id,
- 'name': 'nova',
- 'enabled': True,
- 'domain_id': resource_base.NULL_DOMAIN_ID,
- 'is_domain': False
- }
- self.insert_dict(session, 'service', service)
- self.insert_dict(session, 'region', region)
- self.insert_dict(session, 'project', project)
-
- # with description
- registered_limit = {
- 'id': uuid.uuid4().hex,
- 'service_id': service_id,
- 'region_id': 'RegionOne',
- 'resource_name': 'cores',
- 'default_limit': 10,
- 'description': 'this is a description'
- }
- registered_limit_table.insert().values(registered_limit).execute()
-
- # without description
- limit = {
- 'id': uuid.uuid4().hex,
- 'project_id': project_id,
- 'service_id': service_id,
- 'region_id': 'RegionOne',
- 'resource_name': 'cores',
- 'resource_limit': 5
- }
- limit_table.insert().values(limit).execute()
-
- def test_migration_046_copies_data_from_password_to_password_hash(self):
- self.expand(46)
- self.migrate(45)
- self.contract(45)
- # Create User and Local User
- project_table = sqlalchemy.Table('project', self.metadata,
- autoload=True)
- domain_data = {'id': '_domain', 'domain_id': '_domain',
- 'enabled': True, 'name': '_domain', 'is_domain': True}
- project_table.insert().values(domain_data).execute()
- user_table = sqlalchemy.Table('user', self.metadata, autoload=True)
- user_id = uuid.uuid4().hex
- user = {'id': user_id, 'enabled': True, 'domain_id': domain_data['id']}
- user_table.insert().values(user).execute()
- local_user_table = sqlalchemy.Table('local_user', self.metadata,
- autoload=True)
- local_user = {
- 'id': 1, 'user_id': user_id, 'domain_id': user['domain_id'],
- 'name': 'name'}
-
- local_user_table.insert().values(local_user).execute()
-
- password_table = sqlalchemy.Table('password',
- self.metadata, autoload=True)
- password_data = {
- 'local_user_id': local_user['id'],
- 'created_at': datetime.datetime.utcnow(),
- 'expires_at': datetime.datetime.utcnow(),
- 'password': uuid.uuid4().hex}
- password_data1 = {
- 'local_user_id': local_user['id'],
- 'created_at': datetime.datetime.utcnow(),
- 'expires_at': datetime.datetime.utcnow(),
- 'password_hash': uuid.uuid4().hex}
- password_data2 = {
- 'local_user_id': local_user['id'],
- 'created_at': datetime.datetime.utcnow(),
- 'expires_at': datetime.datetime.utcnow(),
- 'password': uuid.uuid4().hex,
- 'password_hash': uuid.uuid4().hex}
- password_table.insert().values(password_data).execute()
- password_table.insert().values(password_data1).execute()
- password_table.insert().values(password_data2).execute()
- self.migrate(46)
- passwords = list(password_table.select().execute())
- for p in passwords:
- if p.password == password_data['password']:
- self.assertEqual(p.password_hash, p.password)
- self.assertIsNotNone(p.password)
- self.assertIsNotNone(p.password_hash)
- elif p.password_hash == password_data1['password_hash']:
- self.assertIsNone(p.password)
- self.assertIsNotNone(p.password_hash)
- elif p.password_hash == password_data2['password_hash']:
- self.assertIsNotNone(p.password)
- self.assertIsNotNone(p.password_hash)
- self.assertNotEqual(p.password, p.password_hash)
- else:
- raise ValueError('Too Many Passwords Found')
-
- def test_migration_047_add_auto_increment_pk_column_to_unified_limit(self):
- self.expand(46)
- self.migrate(46)
- self.contract(46)
- registered_limit_table_name = 'registered_limit'
- limit_table_name = 'limit'
- self.assertTableColumns(
- registered_limit_table_name,
- ['id', 'service_id', 'region_id', 'resource_name', 'default_limit',
- 'description']
- )
- self.assertTableColumns(
- limit_table_name,
- ['id', 'project_id', 'service_id', 'region_id', 'resource_name',
- 'resource_limit', 'description']
- )
- self.assertTrue(self.does_pk_exist('registered_limit', 'id'))
- self.assertTrue(self.does_pk_exist('limit', 'id'))
- self.assertTrue(self.does_fk_exist('limit', 'project_id'))
-
- self.expand(47)
- self.migrate(47)
- self.contract(47)
- self.assertTableColumns(
- registered_limit_table_name,
- ['id', 'service_id', 'region_id', 'resource_name', 'default_limit',
- 'description', 'internal_id']
- )
- self.assertTableColumns(
- limit_table_name,
- ['id', 'project_id', 'service_id', 'region_id', 'resource_name',
- 'resource_limit', 'description', 'internal_id']
- )
- self.assertFalse(self.does_pk_exist('registered_limit', 'id'))
- self.assertTrue(self.does_pk_exist('registered_limit', 'internal_id'))
- self.assertFalse(self.does_pk_exist('limit', 'id'))
- self.assertTrue(self.does_pk_exist('limit', 'internal_id'))
- limit_table = sqlalchemy.Table(limit_table_name,
- self.metadata, autoload=True)
- self.assertEqual(set([]), limit_table.foreign_keys)
-
- def test_migration_048_add_registered_limit_id_column_for_limit(self):
- self.expand(47)
- self.migrate(47)
- self.contract(47)
-
- limit_table_name = 'limit'
- self.assertTableColumns(
- limit_table_name,
- ['id', 'project_id', 'service_id', 'region_id', 'resource_name',
- 'resource_limit', 'description', 'internal_id']
- )
-
- self.expand(48)
- self.migrate(48)
- self.contract(48)
-
- self.assertTableColumns(
- limit_table_name,
- ['id', 'project_id', 'service_id', 'region_id', 'resource_name',
- 'resource_limit', 'description', 'internal_id',
- 'registered_limit_id']
- )
- self.assertTrue(self.does_fk_exist('limit', 'registered_limit_id'))
-
- def test_migration_053_adds_description_to_role(self):
- self.expand(52)
- self.migrate(52)
- self.contract(52)
-
- role_table_name = 'role'
- self.assertTableColumns(
- role_table_name,
- ['id', 'name', 'domain_id', 'extra']
- )
-
- self.expand(53)
- self.migrate(53)
- self.contract(53)
-
- self.assertTableColumns(
- role_table_name,
- ['id', 'name', 'domain_id', 'extra', 'description']
- )
-
- role_table = sqlalchemy.Table(
- role_table_name, self.metadata, autoload=True
- )
-
- role = {
- 'id': uuid.uuid4().hex,
- 'name': "test",
- 'domain_id': resource_base.NULL_DOMAIN_ID,
- 'description': "This is a string"
- }
- role_table.insert().values(role).execute()
-
- role_without_description = {
- 'id': uuid.uuid4().hex,
- 'name': "test1",
- 'domain_id': resource_base.NULL_DOMAIN_ID
- }
- role_table.insert().values(role_without_description).execute()
-
- def test_migration_054_drop_old_password_column(self):
- self.expand(53)
- self.migrate(53)
- self.contract(53)
-
- password_table = 'password'
- self.assertTableColumns(
- password_table,
- ['id', 'local_user_id', 'password', 'password_hash',
- 'self_service', 'created_at_int', 'created_at', 'expires_at_int',
- 'expires_at']
- )
-
- self.expand(54)
- self.migrate(54)
- self.contract(54)
-
- self.assertTableColumns(
- password_table,
- ['id', 'local_user_id', 'password_hash', 'self_service',
- 'created_at_int', 'created_at', 'expires_at_int', 'expires_at']
- )
-
- def test_migration_055_add_domain_to_limit(self):
- self.expand(54)
- self.migrate(54)
- self.contract(54)
-
- limit_table_name = 'limit'
- limit_table = sqlalchemy.Table(limit_table_name, self.metadata,
- autoload=True)
- self.assertFalse(hasattr(limit_table.c, 'domain_id'))
-
- self.expand(55)
- self.migrate(55)
- self.contract(55)
-
- self.assertTableColumns(
- limit_table_name,
- ['id', 'project_id', 'service_id', 'region_id', 'resource_name',
- 'resource_limit', 'description', 'internal_id',
- 'registered_limit_id', 'domain_id'])
- self.assertTrue(limit_table.c.project_id.nullable)
-
- def test_migration_056_add_application_credential_access_rules(self):
- self.expand(55)
- self.migrate(55)
- self.contract(55)
-
- self.assertTableDoesNotExist('access_rule')
- self.assertTableDoesNotExist('application_credential_access_rule')
-
- self.expand(56)
- self.migrate(56)
- self.contract(56)
-
- self.assertTableExists('access_rule')
- self.assertTableExists('application_credential_access_rule')
- self.assertTableColumns(
- 'access_rule',
- ['id', 'service', 'path', 'method']
- )
- self.assertTableColumns(
- 'application_credential_access_rule',
- ['application_credential_id', 'access_rule_id']
- )
- self.assertTrue(self.does_fk_exist(
- 'application_credential_access_rule', 'application_credential_id'))
- self.assertTrue(self.does_fk_exist(
- 'application_credential_access_rule', 'access_rule_id'))
-
- app_cred_table = sqlalchemy.Table(
- 'application_credential', self.metadata, autoload=True
- )
- access_rule_table = sqlalchemy.Table(
- 'access_rule', self.metadata, autoload=True
- )
- app_cred_access_rule_table = sqlalchemy.Table(
- 'application_credential_access_rule',
- self.metadata, autoload=True
- )
- app_cred = {
- 'internal_id': 1,
- 'id': uuid.uuid4().hex,
- 'name': uuid.uuid4().hex,
- 'secret_hash': uuid.uuid4().hex,
- 'user_id': uuid.uuid4().hex,
- 'project_id': uuid.uuid4().hex
- }
- app_cred_table.insert().values(app_cred).execute()
- access_rule = {
- 'id': 1,
- 'service': uuid.uuid4().hex,
- 'path': '/v2.1/servers',
- 'method': 'GET'
- }
- access_rule_table.insert().values(access_rule).execute()
- app_cred_access_rule_rel = {
- 'application_credential_id': app_cred['internal_id'],
- 'access_rule_id': access_rule['id']
- }
- app_cred_access_rule_table.insert().values(
- app_cred_access_rule_rel).execute()
-
- def test_migration_062_add_trust_redelegation(self):
- # ensure initial schema
- self.expand(61)
- self.migrate(61)
- self.contract(61)
- self.assertTableColumns('trust', ['id',
- 'trustor_user_id',
- 'trustee_user_id',
- 'project_id',
- 'impersonation',
- 'expires_at',
- 'expires_at_int',
- 'remaining_uses',
- 'deleted_at',
- 'extra'])
-
- # fixture
- trust = {
- 'id': uuid.uuid4().hex,
- 'trustor_user_id': uuid.uuid4().hex,
- 'trustee_user_id': uuid.uuid4().hex,
- 'project_id': uuid.uuid4().hex,
- 'impersonation': True,
- 'expires_at': datetime.datetime.now(),
- 'remaining_uses': 10,
- 'deleted_at': datetime.datetime.now(),
- 'redelegated_trust_id': uuid.uuid4().hex,
- 'redelegation_count': 3,
- 'other': uuid.uuid4().hex
- }
- old_trust = trust.copy()
- old_extra = {
- 'redelegated_trust_id': old_trust.pop('redelegated_trust_id'),
- 'redelegation_count': old_trust.pop('redelegation_count'),
- 'other': old_trust.pop('other')
- }
- old_trust['extra'] = jsonutils.dumps(old_extra)
- # load fixture
- session = self.sessionmaker()
- self.insert_dict(session, 'trust', old_trust)
-
- # ensure redelegation data is in extra
- stored_trust = list(
- session.execute(self.load_table('trust').select())
- )[0]
- self.assertDictEqual({
- 'redelegated_trust_id': trust['redelegated_trust_id'],
- 'redelegation_count': trust['redelegation_count'],
- 'other': trust['other']},
- jsonutils.loads(stored_trust.extra))
-
- # upgrade and ensure expected schema
- self.expand(62)
- self.migrate(62)
- self.contract(62)
- self.assertTableColumns('trust', ['id',
- 'trustor_user_id',
- 'trustee_user_id',
- 'project_id',
- 'impersonation',
- 'expires_at',
- 'expires_at_int',
- 'remaining_uses',
- 'deleted_at',
- 'redelegated_trust_id',
- 'redelegation_count',
- 'extra'])
-
- trust_table = sqlalchemy.Table('trust', self.metadata, autoload=True)
- self.assertTrue(trust_table.c.redelegated_trust_id.nullable)
- self.assertTrue(trust_table.c.redelegation_count.nullable)
-
- # test target data layout
- upgraded_trust = list(
- session.execute(self.load_table('trust').select())
- )[0]
- self.assertDictEqual({'other': trust['other']},
- jsonutils.loads(upgraded_trust.extra))
- self.assertEqual(trust['redelegated_trust_id'],
- upgraded_trust.redelegated_trust_id)
- self.assertEqual(trust['redelegation_count'],
- upgraded_trust.redelegation_count)
-
- def test_migration_063_drop_limit_columns(self):
- self.expand(62)
- self.migrate(62)
- self.contract(62)
-
- limit_table = 'limit'
- self.assertTableColumns(
- limit_table,
- ['id', 'project_id', 'service_id', 'region_id', 'resource_name',
- 'resource_limit', 'description', 'internal_id',
- 'registered_limit_id', 'domain_id'])
-
- self.expand(63)
- self.migrate(63)
- self.contract(63)
-
- self.assertTableColumns(
- limit_table,
- ['id', 'project_id', 'resource_limit', 'description',
- 'internal_id', 'registered_limit_id', 'domain_id'])
-
- def test_migration_064_add_remote_id_attribute_federation_protocol(self):
- self.expand(63)
- self.migrate(63)
- self.contract(63)
-
- federation_protocol_table_name = 'federation_protocol'
- self.assertTableColumns(
- federation_protocol_table_name,
- ['id', 'idp_id', 'mapping_id']
- )
-
- self.expand(64)
- self.migrate(64)
- self.contract(64)
-
- self.assertTableColumns(
- federation_protocol_table_name,
- ['id', 'idp_id', 'mapping_id', 'remote_id_attribute']
+ self.expand(upgrades.INITIAL_VERSION + 1)
+ self.migrate(upgrades.INITIAL_VERSION + 1)
+ self.assertRaises(
+ db_exception.DBMigrationError,
+ self.contract,
+ upgrades.INITIAL_VERSION + 2,
)
- def test_migration_065_add_user_external_id_to_access_rule(self):
- self.expand(64)
- self.migrate(64)
- self.contract(64)
+ def test_migration_079_expand_update_local_id_limit(self):
+ self.expand(78)
+ self.migrate(78)
+ self.contract(78)
- self.assertTableColumns(
- 'access_rule',
- ['id', 'service', 'path', 'method']
- )
+ id_mapping_table = sqlalchemy.Table('id_mapping',
+ self.metadata, autoload=True)
+ # assert local_id column is a string of 64 characters (before)
+ self.assertEqual('VARCHAR(64)', str(id_mapping_table.c.local_id.type))
- self.expand(65)
- self.migrate(65)
- self.contract(65)
+ self.expand(79)
+ self.migrate(79)
+ self.contract(79)
- self.assertTableColumns(
- 'access_rule',
- ['id', 'external_id', 'user_id', 'service', 'path', 'method']
- )
- self.assertTrue(self.does_index_exist('access_rule', 'external_id'))
- self.assertTrue(self.does_index_exist('access_rule', 'user_id'))
- self.assertTrue(self.does_unique_constraint_exist(
- 'access_rule', 'external_id'))
- self.assertTrue(self.does_unique_constraint_exist(
- 'access_rule', ['user_id', 'service', 'path', 'method']))
-
- def test_migration_066_add_role_and_project_options_tables(self):
- self.expand(65)
- self.migrate(65)
- self.contract(65)
-
- role_option = 'role_option'
- project_option = 'project_option'
- self.assertTableDoesNotExist(role_option)
- self.assertTableDoesNotExist(project_option)
-
- self.expand(66)
- self.migrate(66)
- self.contract(66)
-
- self.assertTableColumns(
- project_option,
- ['project_id', 'option_id', 'option_value'])
-
- self.assertTableColumns(
- role_option,
- ['role_id', 'option_id', 'option_value'])
-
- def test_migration_072_drop_domain_id_fk(self):
- self.expand(71)
- self.migrate(71)
- self.contract(71)
-
- self.assertTrue(self.does_fk_exist('user', 'domain_id'))
- self.assertTrue(self.does_fk_exist('identity_provider', 'domain_id'))
-
- self.expand(72)
- self.migrate(72)
- self.contract(72)
-
- self.assertFalse(self.does_fk_exist('user', 'domain_id'))
- self.assertFalse(self.does_fk_exist('identity_provider', 'domain_id'))
-
- def test_migration_073_contract_expiring_group_membership(self):
- self.expand(72)
- self.migrate(72)
- self.contract(72)
-
- membership_table = 'expiring_user_group_membership'
- self.assertTableDoesNotExist(membership_table)
-
- idp_table = 'identity_provider'
- self.assertTableColumns(
- idp_table,
- ['id', 'domain_id', 'enabled', 'description'])
-
- self.expand(73)
- self.migrate(73)
- self.contract(73)
-
- self.assertTableColumns(
- membership_table,
- ['user_id', 'group_id', 'idp_id', 'last_verified'])
- self.assertTableColumns(
- idp_table,
- ['id', 'domain_id', 'enabled', 'description',
- 'authorization_ttl'])
+ id_mapping_table = sqlalchemy.Table('id_mapping',
+ self.metadata, autoload=True)
+ # assert local_id column is a string of 255 characters (after)
+ self.assertEqual('VARCHAR(255)', str(id_mapping_table.c.local_id.type))
class MySQLOpportunisticFullMigration(FullMigration):
FIXTURE = db_fixtures.MySQLOpportunisticFixture
- def test_migration_003_migrate_unencrypted_credentials(self):
- self.skip_test_overrides('skipped to update u-c for PyMySql version'
- 'to 0.10.0')
-
- def test_migration_012_add_domain_id_to_idp(self):
- self.skip_test_overrides('skipped to update u-c for PyMySql version'
- 'to 0.10.0')
-
class PostgreSQLOpportunisticFullMigration(FullMigration):
FIXTURE = db_fixtures.PostgresqlOpportunisticFixture
diff --git a/keystone/tests/unit/test_v3.py b/keystone/tests/unit/test_v3.py
index 7d6c6b11f..951a8f83f 100644
--- a/keystone/tests/unit/test_v3.py
+++ b/keystone/tests/unit/test_v3.py
@@ -1101,7 +1101,7 @@ class RestfulTestCase(unit.SQLDriverOverrides, rest.RestfulTestCase,
if ref:
links = ref.pop('links')
try:
- self.assertDictContainsSubset(ref, entity)
+ self.assertLessEqual(ref.items(), entity.items())
self.assertIn(links['assignment'],
entity['links']['assignment'])
finally:
diff --git a/keystone/tests/unit/test_v3_assignment.py b/keystone/tests/unit/test_v3_assignment.py
index d9e438991..605db5029 100644
--- a/keystone/tests/unit/test_v3_assignment.py
+++ b/keystone/tests/unit/test_v3_assignment.py
@@ -2866,7 +2866,7 @@ class ImpliedRolesTests(test_v3.RestfulTestCase, test_v3.AssignmentTestMixin,
actual_implied_ids = [implied['id']
for implied in role_inference['implies']]
- self.assertItemsEqual(expected_implied_ids, actual_implied_ids)
+ self.assertCountEqual(expected_implied_ids, actual_implied_ids)
self.assertIsNotNone(role_inference['prior_role']['links']['self'])
for implied in role_inference['implies']:
diff --git a/keystone/tests/unit/test_v3_federation.py b/keystone/tests/unit/test_v3_federation.py
index b0cd5581b..4f09ea9d5 100644
--- a/keystone/tests/unit/test_v3_federation.py
+++ b/keystone/tests/unit/test_v3_federation.py
@@ -1304,7 +1304,7 @@ class FederatedIdentityProviderTests(test_v3.RestfulTestCase):
entities = self._fetch_attribute_from_response(resp,
'identity_providers')
entities_ids = [e['id'] for e in entities]
- self.assertItemsEqual(entities_ids, [idp1_id, idp2_id])
+ self.assertCountEqual(entities_ids, [idp1_id, idp2_id])
# filter the IdP by ID.
url = self.base_url() + '?id=' + idp1_id
@@ -1333,7 +1333,7 @@ class FederatedIdentityProviderTests(test_v3.RestfulTestCase):
entities = self._fetch_attribute_from_response(resp,
'identity_providers')
entities_ids = [e['id'] for e in entities]
- self.assertItemsEqual(entities_ids, [idp1_id, idp2_id])
+ self.assertCountEqual(entities_ids, [idp1_id, idp2_id])
# filter the IdP by 'enabled'.
url = self.base_url() + '?enabled=True'
@@ -3332,7 +3332,7 @@ class FederatedUserTests(test_v3.RestfulTestCase, FederatedSetupMixin):
fed_projects = r.result['projects']
# compare
- self.assertItemsEqual(auth_projects, fed_projects)
+ self.assertCountEqual(auth_projects, fed_projects)
def test_auth_projects_matches_federation_projects_with_group_assign(self):
# create project, role, group
@@ -3367,7 +3367,7 @@ class FederatedUserTests(test_v3.RestfulTestCase, FederatedSetupMixin):
fed_projects = r.result['projects']
# compare
- self.assertItemsEqual(auth_projects, fed_projects)
+ self.assertCountEqual(auth_projects, fed_projects)
def test_auth_domains_matches_federation_domains(self):
# create domain and role
@@ -3393,7 +3393,7 @@ class FederatedUserTests(test_v3.RestfulTestCase, FederatedSetupMixin):
fed_domains = r.result['domains']
# compare
- self.assertItemsEqual(auth_domains, fed_domains)
+ self.assertCountEqual(auth_domains, fed_domains)
def test_auth_domains_matches_federation_domains_with_group_assign(self):
# create role, group, and domain
@@ -3427,7 +3427,7 @@ class FederatedUserTests(test_v3.RestfulTestCase, FederatedSetupMixin):
fed_domains = r.result['domains']
# compare
- self.assertItemsEqual(auth_domains, fed_domains)
+ self.assertCountEqual(auth_domains, fed_domains)
def test_list_head_domains_for_user_duplicates(self):
# create role
diff --git a/keystone/trust/backends/base.py b/keystone/trust/backends/base.py
index f37b62f5d..314eace87 100644
--- a/keystone/trust/backends/base.py
+++ b/keystone/trust/backends/base.py
@@ -48,7 +48,7 @@ class TrustDriverBase(object, metaclass=abc.ABCMeta):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
- def list_trusts_for_trustor(self, trustor):
+ def list_trusts_for_trustor(self, trustor, redelegated_trust_id=None):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
diff --git a/keystone/trust/backends/sql.py b/keystone/trust/backends/sql.py
index 8bb067423..cb4e79737 100644
--- a/keystone/trust/backends/sql.py
+++ b/keystone/trust/backends/sql.py
@@ -171,11 +171,15 @@ class Trust(base.TrustDriverBase):
filter_by(trustee_user_id=trustee_user_id))
return [trust_ref.to_dict() for trust_ref in trusts]
- def list_trusts_for_trustor(self, trustor_user_id):
+ def list_trusts_for_trustor(self, trustor_user_id,
+ redelegated_trust_id=None):
with sql.session_for_read() as session:
trusts = (session.query(TrustModel).
filter_by(deleted_at=None).
filter_by(trustor_user_id=trustor_user_id))
+ if redelegated_trust_id:
+ trusts = trusts.filter_by(
+ redelegated_trust_id=redelegated_trust_id)
return [trust_ref.to_dict() for trust_ref in trusts]
@sql.handle_conflicts(conflict_type='trust')
diff --git a/keystone/trust/core.py b/keystone/trust/core.py
index a4d91da49..9afa22465 100644
--- a/keystone/trust/core.py
+++ b/keystone/trust/core.py
@@ -194,17 +194,17 @@ class Manager(manager.Manager):
"""
trust = self.driver.get_trust(trust_id)
trusts = self.driver.list_trusts_for_trustor(
- trust['trustor_user_id'])
+ trust['trustor_user_id'],
+ redelegated_trust_id=trust_id)
for t in trusts:
- if t.get('redelegated_trust_id') == trust_id:
- # recursive call to make sure all notifications are sent
- try:
- self.delete_trust(t['id'])
- except exception.TrustNotFound: # nosec
- # if trust was deleted by concurrent process
- # consistency must not suffer
- pass
+ # recursive call to make sure all notifications are sent
+ try:
+ self.delete_trust(t['id'])
+ except exception.TrustNotFound: # nosec
+ # if trust was deleted by concurrent process
+ # consistency must not suffer
+ pass
# end recursion
self.driver.delete_trust(trust_id)
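The hunk above pushes the redelegation filter into the driver query instead of scanning every trust of the trustor in Python. A minimal sketch of the resulting call pattern, using hypothetical identifiers and a `driver` handle standing in for `self.driver`:

    # Hypothetical identifiers, for illustration only.
    trustor_id = 'trustor-user-id'
    parent_trust_id = 'parent-trust-id'

    # Before: fetch every trust for the trustor, then filter in Python.
    children = [t for t in driver.list_trusts_for_trustor(trustor_id)
                if t.get('redelegated_trust_id') == parent_trust_id]

    # After: let the backend filter on redelegated_trust_id directly.
    children = driver.list_trusts_for_trustor(
        trustor_id, redelegated_trust_id=parent_trust_id)

Because the new parameter defaults to None, existing callers and out-of-tree drivers that ignore it keep their old behaviour.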
diff --git a/lower-constraints.txt b/lower-constraints.txt
deleted file mode 100644
index 431589d02..000000000
--- a/lower-constraints.txt
+++ /dev/null
@@ -1,68 +0,0 @@
-amqp==5.0.0
-Babel==2.3.4
-bashate==0.5.1
-bcrypt==3.1.3
-coverage==4.0
-cryptography==2.7
-docutils==0.14
-dogpile.cache==1.0.2
-fixtures==3.0.0
-flake8-docstrings==0.2.1.post1
-flake8==2.6.0
-Flask===1.0.2
-Flask-RESTful===0.3.5
-freezegun==0.3.6
-hacking==1.1.0
-iso8601==0.1.12
-jsonschema==3.2.0
-keystoneauth1==3.4.0
-keystonemiddleware==7.0.0
-ldappool===2.3.1
-lxml==4.5.0
-mock==2.0.0
-msgpack==0.5.0
-oauthlib==0.6.2
-os-api-ref==1.4.0
-oslo.cache==1.26.0
-oslo.concurrency==3.26.0
-oslo.config==6.8.0
-oslo.context==2.22.0
-oslo.db==6.0.0
-oslo.i18n==3.15.3
-oslo.log==3.44.0
-oslo.messaging==5.29.0
-oslo.middleware==3.31.0
-oslo.policy==3.6.0
-oslo.serialization==2.18.0
-oslo.upgradecheck==1.3.0
-oslo.utils==3.33.0
-oslotest==3.2.0
-osprofiler==1.4.0
-passlib==1.7.0
-pbr==2.0.0
-pep257==0.7.0
-pika==0.10.0
-pycadf==1.1.0
-pycodestyle==2.0.0
-python-ldap===3.0.0
-pymongo===3.0.2
-pysaml2==5.0.0
-PyJWT==1.6.1
-PyMySQL==0.8.0
-python-keystoneclient==3.8.0
-python-memcached===1.56
-pytz==2013.6
-requests==2.14.2
-scrypt==0.8.0
-six==1.10.0
-sqlalchemy-migrate==0.13.0
-SQLAlchemy==1.3.0
-stestr==1.0.0
-stevedore==1.20.0
-tempest==17.1.0
-testtools==2.2.0
-urllib3==1.22
-vine==1.3.0
-WebOb==1.7.1
-WebTest==2.0.27
-Werkzeug==0.14.1
diff --git a/playbooks/enable-fips.yaml b/playbooks/enable-fips.yaml
new file mode 100644
index 000000000..c8f042dba
--- /dev/null
+++ b/playbooks/enable-fips.yaml
@@ -0,0 +1,4 @@
+- hosts: all
+ tasks:
+ - include_role:
+ name: enable-fips
diff --git a/releasenotes/notes/bug-1688137-e4203c9a728690a7.yaml b/releasenotes/notes/bug-1688137-e4203c9a728690a7.yaml
new file mode 100644
index 000000000..bd7a06069
--- /dev/null
+++ b/releasenotes/notes/bug-1688137-e4203c9a728690a7.yaml
@@ -0,0 +1,8 @@
+---
+fixes:
+ - |
+ [`bug 1688137 <https://bugs.launchpad.net/keystone/+bug/1688137>`_]
+ Fixed the AccountLocked exception being shown to the end user since
+ it provides some information that could be exploited by a
+ malicious user. The end user will now see Unauthorized instead of
+ AccountLocked, preventing user info oracle exploitation.
diff --git a/releasenotes/notes/bug-1897280-e7065c4368a325ad.yaml b/releasenotes/notes/bug-1897280-e7065c4368a325ad.yaml
new file mode 100644
index 000000000..b6a6163b1
--- /dev/null
+++ b/releasenotes/notes/bug-1897280-e7065c4368a325ad.yaml
@@ -0,0 +1,7 @@
+---
+fixes:
+ - |
+ [`Bug 1897280 <https://launchpad.net/bugs/1897280>`_]
+ Allows s3 tokens with the service types sts and iam to authenticate. This
+ is necessary when using the assumed-role features of Ceph object storage
+ while keystone provides the authentication service for the Rados Gateway.
diff --git a/releasenotes/notes/bug-1929066-6e741c9182620a37.yaml b/releasenotes/notes/bug-1929066-6e741c9182620a37.yaml
new file mode 100644
index 000000000..0acd1abc9
--- /dev/null
+++ b/releasenotes/notes/bug-1929066-6e741c9182620a37.yaml
@@ -0,0 +1,7 @@
+---
+upgrade:
+ - |
+ [`bug 1929066 <https://bugs.launchpad.net/keystone/+bug/1929066>`_]
+ Increase the length of the ``local_id`` column in the ``id_mapping`` table
+ to accommodate LDAP group names longer than 64 characters.
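For context, widening a column like this is a small schema change; a hedged Alembic-style sketch of the kind of operation involved (not keystone's actual migration file, whose name and contents are not shown here):

    import sqlalchemy as sa
    from alembic import op

    def upgrade():
        # Widen id_mapping.local_id from 64 to 255 characters so that long
        # LDAP group names no longer overflow the column.
        op.alter_column(
            'id_mapping', 'local_id',
            existing_type=sa.String(64),
            type_=sa.String(255),
        )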
diff --git a/releasenotes/notes/bug-1941020-f694395a9bcea72f.yaml b/releasenotes/notes/bug-1941020-f694395a9bcea72f.yaml
new file mode 100644
index 000000000..179455225
--- /dev/null
+++ b/releasenotes/notes/bug-1941020-f694395a9bcea72f.yaml
@@ -0,0 +1,11 @@
+---
+deprecations:
+ - |
+ The following options in the ``[memcache]`` section have been deprecated
+ because these options have had no effect since Pike. Please use
+ ``memcache_*`` options in the ``[cache]`` section instead.
+
+ - ``dead_retry``
+ - ``pool_maxsize``
+ - ``pool_unused_timeout``
+ - ``pool_connection_get_timeout``
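In practice, anything still set under ``[memcache]`` should move to the corresponding ``memcache_*`` options that oslo.cache exposes under ``[cache]``. A hedged keystone.conf sketch of that move (values are placeholders):

    [memcache]
    # Deprecated and ignored since Pike -- remove these:
    # dead_retry = 300
    # pool_maxsize = 10

    [cache]
    # oslo.cache equivalents of the deprecated options above:
    memcache_dead_retry = 300
    memcache_pool_maxsize = 10
    memcache_pool_unused_timeout = 60
    memcache_pool_connection_get_timeout = 10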
diff --git a/releasenotes/notes/change_min_pool_retry_max-f5e7c8d315401426.yaml b/releasenotes/notes/change_min_pool_retry_max-f5e7c8d315401426.yaml
new file mode 100644
index 000000000..44109b144
--- /dev/null
+++ b/releasenotes/notes/change_min_pool_retry_max-f5e7c8d315401426.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - |
+ Change the min value of pool_retry_max to 1. Setting this value to 0
+ caused the pool to fail before connecting to ldap, always raising
+ MaxConnectionReachedError.
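With the new minimum in place, a pooled LDAP configuration would look roughly like the following sketch (values are illustrative, not recommendations):

    [ldap]
    use_pool = true
    # 0 is no longer accepted; the pool needs at least one connection attempt.
    pool_retry_max = 1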
diff --git a/releasenotes/notes/drop-python-3-6-and-3-7-dc90b86cedced92b.yaml b/releasenotes/notes/drop-python-3-6-and-3-7-dc90b86cedced92b.yaml
new file mode 100644
index 000000000..db420d739
--- /dev/null
+++ b/releasenotes/notes/drop-python-3-6-and-3-7-dc90b86cedced92b.yaml
@@ -0,0 +1,5 @@
+---
+upgrade:
+ - |
+ Python 3.6 & 3.7 support has been dropped. The minimum version of Python now
+ supported is Python 3.8.
diff --git a/releasenotes/notes/remove-db_sync-extension-opt-2ab1f29340281215.yaml b/releasenotes/notes/remove-db_sync-extension-opt-2ab1f29340281215.yaml
new file mode 100644
index 000000000..249a16ec3
--- /dev/null
+++ b/releasenotes/notes/remove-db_sync-extension-opt-2ab1f29340281215.yaml
@@ -0,0 +1,6 @@
+---
+upgrade:
+ - |
+ The ``--extension`` option of ``keystone-manage db_sync`` had been
+ deprecated since 10.0.0 (Newton) and already raised an error when
+ provided. It has now been removed entirely.
diff --git a/releasenotes/notes/remove-legacy-migrations-647f60019c8dd9e8.yaml b/releasenotes/notes/remove-legacy-migrations-647f60019c8dd9e8.yaml
new file mode 100644
index 000000000..11f33886b
--- /dev/null
+++ b/releasenotes/notes/remove-legacy-migrations-647f60019c8dd9e8.yaml
@@ -0,0 +1,7 @@
+---
+upgrade:
+ - |
+ The legacy migrations that existed before the split into separate expand
+ schema, data migration, and contract schema migrations have now been
+ removed. These have been deprecated since 10.0.0 (Newton). This should
+ have no user-facing impact.
diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst
index 99a138ab0..620554687 100644
--- a/releasenotes/source/index.rst
+++ b/releasenotes/source/index.rst
@@ -19,6 +19,10 @@
:maxdepth: 1
unreleased
+ yoga
+ xena
+ wallaby
+ victoria
ussuri
train
stein
diff --git a/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po b/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po
index 600d9e0b0..713bd7089 100644
--- a/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po
+++ b/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po
@@ -1,15 +1,16 @@
# Andi Chandler <andi@gowling.com>, 2017. #zanata
# Andi Chandler <andi@gowling.com>, 2018. #zanata
# Andi Chandler <andi@gowling.com>, 2020. #zanata
+# Andi Chandler <andi@gowling.com>, 2022. #zanata
msgid ""
msgstr ""
"Project-Id-Version: Keystone Release Notes\n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2021-01-08 19:54+0000\n"
+"POT-Creation-Date: 2022-07-01 18:09+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"PO-Revision-Date: 2020-12-19 01:35+0000\n"
+"PO-Revision-Date: 2022-06-20 11:10+0000\n"
"Last-Translator: Andi Chandler <andi@gowling.com>\n"
"Language-Team: English (United Kingdom)\n"
"Language: en_GB\n"
@@ -81,6 +82,9 @@ msgstr "13.0.2"
msgid "13.0.3"
msgstr "13.0.3"
+msgid "13.0.4-9"
+msgstr "13.0.4-9"
+
msgid "14.0.0"
msgstr "14.0.0"
@@ -93,8 +97,8 @@ msgstr "14.1.0"
msgid "14.2.0"
msgstr "14.2.0"
-msgid "14.2.0-4"
-msgstr "14.2.0-4"
+msgid "14.2.0-7"
+msgstr "14.2.0-7"
msgid "15.0.0"
msgstr "15.0.0"
@@ -102,17 +106,41 @@ msgstr "15.0.0"
msgid "15.0.1"
msgstr "15.0.1"
+msgid "15.0.1-9"
+msgstr "15.0.1-9"
+
msgid "16.0.0"
msgstr "16.0.0"
msgid "16.0.1"
msgstr "16.0.1"
+msgid "16.0.2"
+msgstr "16.0.2"
+
msgid "17.0.0"
msgstr "17.0.0"
-msgid "17.0.0-6"
-msgstr "17.0.0-6"
+msgid "17.0.1"
+msgstr "17.0.1"
+
+msgid "18.0.0"
+msgstr "18.0.0"
+
+msgid "18.1.0"
+msgstr "18.1.0"
+
+msgid "19.0.0"
+msgstr "19.0.0"
+
+msgid "19.0.0-8"
+msgstr "19.0.0-8"
+
+msgid "20.0.0"
+msgstr "20.0.0"
+
+msgid "21.0.0"
+msgstr "21.0.0"
msgid "8.0.1"
msgstr "8.0.1"
@@ -297,6 +325,15 @@ msgstr ""
"Certain variables in ``keystone.conf`` now have options, which determine if "
"the user's setting is valid."
+msgid ""
+"Change the min value of pool_retry_max to 1. Setting this value to 0 caused "
+"the pool to fail before connecting to ldap, always raising "
+"MaxConnectionReachedError."
+msgstr ""
+"Change the min value of pool_retry_max to 1. Setting this value to 0 caused "
+"the pool to fail before connecting to ldap, always raising "
+"MaxConnectionReachedError."
+
msgid "Configuring per-Identity Provider WebSSO is now supported."
msgstr "Configuring per-Identity Provider WebSSO is now supported."
@@ -463,6 +500,24 @@ msgstr ""
"this option is set back to `False`."
msgid ""
+"If you are affected by this bug, a fix in the keystone database will be "
+"needed so we recommend to dump the users' tables before doing this process:"
+msgstr ""
+"If you are affected by this bug, a fix in the keystone database will be "
+"needed so we recommend to dump the users' tables before doing this process:"
+
+msgid ""
+"If you are affected by this bug, you must remove stale role assignments "
+"manually. The following is an example SQL statement you can use to fix the "
+"issue, but you should verify it's applicability to your deployment's SQL "
+"implementation and version."
+msgstr ""
+"If you are affected by this bug, you must remove stale role assignments "
+"manually. The following is an example SQL statement you can use to fix the "
+"issue, but you should verify it's applicability to your deployment's SQL "
+"implementation and version."
+
+msgid ""
"In ``keystone-paste.ini``, using ``paste.filter_factory`` is deprecated in "
"favor of the \"use\" directive, specifying an entrypoint."
msgstr ""
@@ -681,6 +736,9 @@ msgstr ""
msgid "Queens Series Release Notes"
msgstr "Queens Series Release Notes"
+msgid "Rocky Series Release Notes"
+msgstr "Rocky Series Release Notes"
+
msgid ""
"Routes and SQL backends for the contrib extensions have been removed, they "
"have been incorporated into keystone and are no longer optional. This "
@@ -772,6 +830,9 @@ msgstr ""
"``validate_token(self, token_ref)``. If using a custom token provider, "
"update the custom provider accordingly."
+msgid "Stein Series Release Notes"
+msgstr "Stein Series Release Notes"
+
msgid ""
"Support for writing to LDAP has been removed. See ``Other Notes`` for more "
"details."
@@ -1398,6 +1459,9 @@ msgstr ""
msgid "Tokens can now be cached when issued."
msgstr "Tokens can now be cached when issued."
+msgid "Train Series Release Notes"
+msgstr "Train Series Release Notes"
+
msgid ""
"UUID token provider ``[token] provider=uuid`` has been deprecated in favor "
"of Fernet tokens ``[token] provider=fernet``. With Fernet tokens becoming "
@@ -1433,6 +1497,15 @@ msgstr ""
"Using the full path to the driver class is deprecated in favour of using the "
"entrypoint. In the Mitaka release, the entrypoint must be used."
+msgid "Ussuri Series Release Notes"
+msgstr "Ussuri Series Release Notes"
+
+msgid "Victoria Series Release Notes"
+msgstr "Victoria Series Release Notes"
+
+msgid "Wallaby Series Release Notes"
+msgstr "Wallaby Series Release Notes"
+
msgid ""
"We have added the ``password_expires_at`` attribute to the user response "
"object."
@@ -1454,6 +1527,12 @@ msgstr ""
"Write support for the LDAP has been removed in favour of read-only support. "
"The following operations are no longer supported for LDAP:"
+msgid "Xena Series Release Notes"
+msgstr "Xena Series Release Notes"
+
+msgid "Yoga Series Release Notes"
+msgstr "Yoga Series Release Notes"
+
msgid ""
"[`Bug 1645487 <https://bugs.launchpad.net/keystone/+bug/1645487>`_] Added a "
"new PCI-DSS feature that will require users to immediately change their "
@@ -1792,6 +1871,13 @@ msgstr "lt - password expires before the timestamp"
msgid "lte - password expires at or before timestamp"
msgstr "lte - password expires at or before timestamp"
+msgid ""
+"mysqldump -h <mysql host> -p -P <mysql port> -u keystone keystone "
+"federated_user local_user user > user_tables.sql"
+msgstr ""
+"mysqldump -h <mysql host> -p -P <mysql port> -u keystone keystone "
+"federated_user local_user user > user_tables.sql"
+
msgid "neq - password expires not at the timestamp"
msgstr "neq - password expires not at the timestamp"
diff --git a/releasenotes/source/victoria.rst b/releasenotes/source/victoria.rst
new file mode 100644
index 000000000..4efc7b6f3
--- /dev/null
+++ b/releasenotes/source/victoria.rst
@@ -0,0 +1,6 @@
+=============================
+Victoria Series Release Notes
+=============================
+
+.. release-notes::
+ :branch: stable/victoria
diff --git a/releasenotes/source/wallaby.rst b/releasenotes/source/wallaby.rst
new file mode 100644
index 000000000..d77b56599
--- /dev/null
+++ b/releasenotes/source/wallaby.rst
@@ -0,0 +1,6 @@
+============================
+Wallaby Series Release Notes
+============================
+
+.. release-notes::
+ :branch: stable/wallaby
diff --git a/releasenotes/source/xena.rst b/releasenotes/source/xena.rst
new file mode 100644
index 000000000..1be85be3e
--- /dev/null
+++ b/releasenotes/source/xena.rst
@@ -0,0 +1,6 @@
+=========================
+Xena Series Release Notes
+=========================
+
+.. release-notes::
+ :branch: stable/xena
diff --git a/releasenotes/source/yoga.rst b/releasenotes/source/yoga.rst
new file mode 100644
index 000000000..7cd5e908a
--- /dev/null
+++ b/releasenotes/source/yoga.rst
@@ -0,0 +1,6 @@
+=========================
+Yoga Series Release Notes
+=========================
+
+.. release-notes::
+ :branch: stable/yoga
diff --git a/requirements.txt b/requirements.txt
index 3464fb3fc..5688af2ff 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,3 +1,7 @@
+# The requirements lower bounds listed here are a best effort to keep them up
+# to date, but they are not tested, so there is no guarantee that they are all
+# correct. If you find any incorrect lower bounds, let us know or propose a fix.
+
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
@@ -23,7 +27,7 @@ oslo.db>=6.0.0 # Apache-2.0
oslo.i18n>=3.15.3 # Apache-2.0
oslo.log>=3.44.0 # Apache-2.0
oslo.middleware>=3.31.0 # Apache-2.0
-oslo.policy>=3.6.0 # Apache-2.0
+oslo.policy>=3.10.0 # Apache-2.0
oslo.serialization!=2.19.1,>=2.18.0 # Apache-2.0
oslo.upgradecheck>=1.3.0 # Apache-2.0
oslo.utils>=3.33.0 # Apache-2.0
diff --git a/setup.cfg b/setup.cfg
index 0155d201d..be6b602f7 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,12 +1,12 @@
[metadata]
name = keystone
summary = OpenStack Identity
-description-file =
+description_file =
README.rst
author = OpenStack
-author-email = openstack-discuss@lists.openstack.org
-home-page = https://docs.openstack.org/keystone/latest
-python-requires = >=3.6
+author_email = openstack-discuss@lists.openstack.org
+home_page = https://docs.openstack.org/keystone/latest
+python_requires = >=3.8
classifier =
Environment :: OpenStack
Intended Audience :: Information Technology
@@ -17,8 +17,8 @@ classifier =
Programming Language :: Python :: Implementation :: CPython
Programming Language :: Python :: 3 :: Only
Programming Language :: Python :: 3
- Programming Language :: Python :: 3.6
- Programming Language :: Python :: 3.7
+ Programming Language :: Python :: 3.8
+ Programming Language :: Python :: 3.9
[files]
data_files =
diff --git a/tools/generate-schemas b/tools/generate-schemas
new file mode 100755
index 000000000..59fbb226c
--- /dev/null
+++ b/tools/generate-schemas
@@ -0,0 +1,134 @@
+#!/usr/bin/env bash
+#
+# Script to generate schemas for the various versions.
+#
+# Some setup is required, similar to the opportunistic tests.
+#
+# MySQL ->
+#
+# $ mysql -uroot
+# MariaDB [(none)]> CREATE DATABASE keystone;
+# MariaDB [(none)]> GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' IDENTIFIED BY 'password';
+# MariaDB [(none)]> quit;
+#
+# Postgres ->
+#
+# $ sudo -u postgres psql
+# postgres=# create user keystone with createdb login password 'password';
+# postgres=# create database keystone with owner keystone;
+# postgres=# quit;
+#
+# Note that you may also have to configure 'pg_hba.conf' to use password-based
+# auth instead of "ident", if you haven't done so already. You can locate this
+# with 'locate pg_hba.conf'. More details at
+# https://ubuntu.com/server/docs/databases-postgresql
+
+set -o xtrace
+set -e
+
+source .tox/py38/bin/activate
+
+INIT_VERSION=$(ls -1 keystone/common/sql/legacy_migrations/expand_repo/versions/ | head -1 | awk -F_ '{print $1}' | sed 's/^0*//')
+INIT_VERSION=$(($INIT_VERSION-1))
+
+echo "Detected init version of $INIT_VERSION"
+
+mkdir -p /tmp/keystone-schemas
+rm -f "/tmp/keystone-schemas/$INIT_VERSION-*.sql"
+
+#
+# functions
+#
+
+function sync () {
+ DB_URL=$1
+
+ python keystone/common/sql/legacy_migrations/expand_repo/manage.py version_control \
+ --database "$DB_URL" \
+ --version "$INIT_VERSION" \
+ --repository keystone/common/sql/legacy_migrations/expand_repo/
+ python keystone/common/sql/legacy_migrations/data_migration_repo/manage.py version_control \
+ --database "$DB_URL" \
+ --version "$INIT_VERSION" \
+ --repository keystone/common/sql/legacy_migrations/data_migration_repo/
+ python keystone/common/sql/legacy_migrations/contract_repo/manage.py version_control \
+ --database "$DB_URL" \
+ --version "$INIT_VERSION" \
+ --repository keystone/common/sql/legacy_migrations/contract_repo/
+
+ python keystone/common/sql/legacy_migrations/expand_repo/manage.py upgrade \
+ --database "$DB_URL" \
+ --repository keystone/common/sql/legacy_migrations/expand_repo/
+ python keystone/common/sql/legacy_migrations/data_migration_repo/manage.py upgrade \
+ --database "$DB_URL" \
+ --repository keystone/common/sql/legacy_migrations/data_migration_repo/
+ python keystone/common/sql/legacy_migrations/contract_repo/manage.py upgrade \
+ --database "$DB_URL" \
+ --repository keystone/common/sql/legacy_migrations/contract_repo/
+}
+
+#
+# sqlite
+#
+
+# cleanup from previous runs
+
+rm -f /tmp/keystone.db
+
+# sync schema
+
+sync 'sqlite:////tmp/keystone.db'
+
+# dump the schema
+
+sqlite3 /tmp/keystone.db << EOF
+.output "/tmp/keystone-schemas/${INIT_VERSION}-sqlite.sql"
+.schema
+.quit
+EOF
+
+rm -f /tmp/keystone.db
+
+#
+# mysql
+#
+
+# cleanup from previous runs
+
+mysql -u keystone -ppassword << EOF
+DROP DATABASE IF EXISTS keystone;
+CREATE DATABASE keystone;
+EOF
+
+# sync schema
+
+sync 'mysql+pymysql://keystone:password@localhost/keystone'
+
+# dump the schema
+
+mysqldump --no-data --skip-comments -u keystone -ppassword \
+ keystone > "/tmp/keystone-schemas/${INIT_VERSION}-mysql.sql"
+
+mysql -u keystone -ppassword << EOF
+DROP DATABASE IF EXISTS keystone;
+EOF
+
+#
+# postgres
+#
+
+# cleanup from previous runs
+
+sudo -u postgres dropdb --if-exists keystone
+sudo -u postgres createdb --owner=keystone keystone
+
+# sync to initial version
+
+sync 'postgresql://keystone:password@localhost/keystone'
+
+# dump the schema
+
+pg_dump postgresql://keystone:password@localhost/keystone \
+ --schema-only > "/tmp/keystone-schemas/${INIT_VERSION}-postgres.sql"
+
+sudo -u postgres dropdb --if-exists keystone
diff --git a/tox.ini b/tox.ini
index a0ef4070d..b1b1fad4c 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,34 +1,29 @@
[tox]
-minversion = 3.2.0
-skipsdist = True
-envlist = py37,pep8,api-ref,docs,genconfig,genpolicy,releasenotes,protection
+minversion = 3.18.0
+envlist = py39,pep8,api-ref,docs,genconfig,genpolicy,releasenotes,protection
+ignore_basepython_conflict = true
[testenv]
-usedevelop = True
basepython = python3
-setenv = VIRTUAL_ENV={envdir}
-deps = -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
- -r{toxinidir}/test-requirements.txt
- -r{toxinidir}/requirements.txt
- .[ldap,memcache,mongodb]
+usedevelop = True
+setenv =
+ PYTHONDONTWRITEBYTECODE=1
+deps =
+ -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
+ -r{toxinidir}/test-requirements.txt
+ .[ldap,memcache,mongodb]
commands =
find keystone -type f -name "*.pyc" -delete
stestr run {posargs}
-whitelist_externals =
+allowlist_externals =
bash
find
passenv = http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY PBR_VERSION
-[testenv:api-ref]
-deps = -r{toxinidir}/doc/requirements.txt
-commands =
- bash -c "rm -rf api-ref/build"
- sphinx-build -W -b html -d api-ref/build/doctrees api-ref/source api-ref/build/html
-
[testenv:pep8]
deps =
- .[bandit]
- {[testenv]deps}
+ .[bandit]
+ {[testenv]deps}
commands =
flake8 --ignore=D100,D101,D102,D103,D104,E305,E402,W503,W504,W605
# Run bash8 during pep8 runs to ensure violations are caught by
@@ -39,6 +34,7 @@ commands =
[testenv:fast8]
envdir = {toxworkdir}/pep8
+deps = {[testenv:pep8]deps}
commands =
{toxinidir}/tools/fast8.sh
passenv = FAST8_NUM_COMMITS
@@ -46,9 +42,10 @@ passenv = FAST8_NUM_COMMITS
[testenv:bandit]
# NOTE(browne): This is required for the integration test job of the bandit
# project. Please do not remove.
-deps = -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
- -r{toxinidir}/requirements.txt
- .[bandit]
+deps =
+ -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
+ -r{toxinidir}/requirements.txt
+ .[bandit]
commands = bandit -r keystone -x 'keystone/tests/*'
[testenv:cover]
@@ -87,7 +84,9 @@ passenv =
KSTEST_PROJECT_ID
[testenv:functional]
-deps = -r{toxinidir}/test-requirements.txt
+deps =
+ -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
+ -r{toxinidir}/test-requirements.txt
setenv = OS_TEST_PATH=./keystone/tests/functional
commands =
find keystone -type f -name "*.pyc" -delete
@@ -108,7 +107,6 @@ passenv =
filename= *.py,keystone-manage
show-source = true
enable-extensions = H203,H904
-
# D100: Missing docstring in public module
# D101: Missing docstring in public class
# D102: Missing docstring in public method
@@ -120,7 +118,6 @@ enable-extensions = H203,H904
# W503: line break before binary operator
# W504 line break after binary operator
ignore = D100,D101,D102,D103,D104,D203,E402,W503,W504
-
exclude=.venv,.git,.tox,build,dist,*lib/python*,*egg,tools,vendor,.update-venv,*.ini,*.po,*.pot
max-complexity=24
@@ -143,7 +140,7 @@ commands=
[testenv:pdf-docs]
envdir = {toxworkdir}/docs
deps = {[testenv:docs]deps}
-whitelist_externals =
+allowlist_externals =
make
mkdir
rm
@@ -154,9 +151,17 @@ commands =
make -C doc/build/pdf
[testenv:releasenotes]
-deps = -r{toxinidir}/doc/requirements.txt
+envdir = {toxworkdir}/docs
+deps = {[testenv:docs]deps}
commands = sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html
+[testenv:api-ref]
+envdir = {toxworkdir}/docs
+deps = {[testenv:docs]deps}
+commands =
+ bash -c "rm -rf api-ref/build"
+ sphinx-build -W -b html -d api-ref/build/doctrees api-ref/source api-ref/build/html
+
[testenv:genconfig]
commands = oslo-config-generator --config-file=config-generator/keystone.conf
@@ -184,12 +189,6 @@ paths = ./keystone/tests/hacking
deps = bindep
commands = bindep test
-[testenv:lower-constraints]
-deps =
- -c{toxinidir}/lower-constraints.txt
- -r{toxinidir}/test-requirements.txt
- .[ldap,memcache,mongodb]
-
[testenv:protection]
commands =
find keystone -type f -name "*.pyc" -delete