-rw-r--r-- .mailmap | 2
-rw-r--r-- README.rst | 2
-rwxr-xr-x bin/keystone-all | 6
-rwxr-xr-x bin/keystone-manage | 1
-rw-r--r-- doc/ext/apidoc.py | 2
-rw-r--r-- doc/source/architecture.rst | 8
-rw-r--r-- doc/source/conf.py | 2
-rw-r--r-- doc/source/configuration.rst | 218
-rw-r--r-- doc/source/developing.rst | 117
-rw-r--r-- doc/source/event_notifications.rst | 4
-rw-r--r-- doc/source/extension_development.rst | 253
-rw-r--r-- doc/source/external-auth.rst | 4
-rw-r--r-- doc/source/man/keystone-all.rst | 22
-rw-r--r-- doc/source/man/keystone-manage.rst | 34
-rw-r--r-- etc/default_catalog.templates | 2
-rw-r--r-- etc/keystone.conf.sample | 1504
-rw-r--r-- httpd/keystone.py | 3
-rw-r--r-- keystone/assignment/__init__.py | 1
-rw-r--r-- keystone/assignment/backends/kvs.py | 223
-rw-r--r-- keystone/assignment/backends/ldap.py | 44
-rw-r--r-- keystone/assignment/backends/sql.py | 652
-rw-r--r-- keystone/assignment/controllers.py | 16
-rw-r--r-- keystone/assignment/core.py | 39
-rw-r--r-- keystone/auth/__init__.py | 1
-rw-r--r-- keystone/auth/controllers.py | 35
-rw-r--r-- keystone/auth/core.py | 2
-rw-r--r-- keystone/auth/plugins/external.py | 37
-rw-r--r-- keystone/auth/plugins/oauth1.py | 2
-rw-r--r-- keystone/auth/plugins/password.py | 3
-rw-r--r-- keystone/auth/plugins/token.py | 2
-rw-r--r-- keystone/auth/routers.py | 2
-rw-r--r-- keystone/catalog/__init__.py | 1
-rw-r--r-- keystone/catalog/backends/kvs.py | 9
-rw-r--r-- keystone/catalog/backends/sql.py | 22
-rw-r--r-- keystone/catalog/backends/templated.py | 18
-rw-r--r-- keystone/catalog/controllers.py | 18
-rw-r--r-- keystone/catalog/core.py | 20
-rw-r--r-- keystone/catalog/routers.py | 13
-rw-r--r-- keystone/clean.py | 2
-rw-r--r-- keystone/cli.py | 47
-rw-r--r-- keystone/common/authorization.py | 2
-rw-r--r-- keystone/common/base64utils.py | 2
-rw-r--r-- keystone/common/cache/__init__.py | 2
-rw-r--r-- keystone/common/cache/backends/__init__.py | 15
-rw-r--r-- keystone/common/cache/backends/noop.py | 2
-rw-r--r-- keystone/common/cache/core.py | 2
-rw-r--r-- keystone/common/cms.py | 2
-rw-r--r-- keystone/common/config.py | 451
-rw-r--r-- keystone/common/controller.py | 95
-rw-r--r-- keystone/common/dependency.py | 2
-rw-r--r-- keystone/common/driver_hints.py | 28
-rw-r--r-- keystone/common/environment/__init__.py | 2
-rw-r--r-- keystone/common/environment/eventlet_server.py | 18
-rw-r--r-- keystone/common/extension.py | 2
-rw-r--r-- keystone/common/kvs/backends/memcached.py | 94
-rw-r--r-- keystone/common/kvs/core.py | 22
-rw-r--r-- keystone/common/kvs/legacy.py | 2
-rw-r--r-- keystone/common/ldap/__init__.py | 1
-rw-r--r-- keystone/common/ldap/core.py | 37
-rw-r--r-- keystone/common/manager.py | 38
-rw-r--r-- keystone/common/models.py | 2
-rw-r--r-- keystone/common/openssl.py | 51
-rwxr-xr-x keystone/common/pemutils.py | 4
-rw-r--r-- keystone/common/router.py | 2
-rw-r--r-- keystone/common/serializer.py | 32
-rw-r--r-- keystone/common/sql/__init__.py | 1
-rw-r--r-- keystone/common/sql/core.py | 276
-rw-r--r-- keystone/common/sql/migrate_repo/versions/001_add_initial_tables.py | 2
-rw-r--r-- keystone/common/sql/migrate_repo/versions/002_token_id_hash.py | 2
-rw-r--r-- keystone/common/sql/migrate_repo/versions/003_token_valid.py | 2
-rw-r--r-- keystone/common/sql/migrate_repo/versions/004_undo_token_id_hash.py | 2
-rw-r--r-- keystone/common/sql/migrate_repo/versions/005_set_utf8_character_set.py | 2
-rw-r--r-- keystone/common/sql/migrate_repo/versions/006_add_policy_table.py | 2
-rw-r--r-- keystone/common/sql/migrate_repo/versions/007_add_domain_tables.py | 2
-rw-r--r-- keystone/common/sql/migrate_repo/versions/008_create_default_domain.py | 2
-rw-r--r-- keystone/common/sql/migrate_repo/versions/009_normalize_identity.py | 2
-rw-r--r-- keystone/common/sql/migrate_repo/versions/010_normalize_identity_migration.py | 2
-rw-r--r-- keystone/common/sql/migrate_repo/versions/011_endpoints_v3.py | 2
-rw-r--r-- keystone/common/sql/migrate_repo/versions/012_populate_endpoint_type.py | 2
-rw-r--r-- keystone/common/sql/migrate_repo/versions/013_drop_legacy_endpoints.py | 2
-rw-r--r-- keystone/common/sql/migrate_repo/versions/014_add_group_tables.py | 2
-rw-r--r-- keystone/common/sql/migrate_repo/versions/015_tenant_to_project.py | 2
-rw-r--r-- keystone/common/sql/migrate_repo/versions/016_normalize_domain_ids.py | 2
-rw-r--r-- keystone/common/sql/migrate_repo/versions/017_membership_role.py | 2
-rw-r--r-- keystone/common/sql/migrate_repo/versions/018_add_trust_tables.py | 2
-rw-r--r-- keystone/common/sql/migrate_repo/versions/019_fixup_role.py | 2
-rw-r--r-- keystone/common/sql/migrate_repo/versions/020_migrate_metadata_table_roles.py | 2
-rw-r--r-- keystone/common/sql/migrate_repo/versions/021_add_trust_to_token.py | 2
-rw-r--r-- keystone/common/sql/migrate_repo/versions/022_move_legacy_endpoint_id.py | 2
-rw-r--r-- keystone/common/sql/migrate_repo/versions/023_drop_credential_constraints.py | 2
-rw-r--r-- keystone/common/sql/migrate_repo/versions/024_add_index_to_expires.py | 2
-rw-r--r-- keystone/common/sql/migrate_repo/versions/025_add_index_to_valid.py | 2
-rw-r--r-- keystone/common/sql/migrate_repo/versions/026_drop_user_group_constraints.py | 2
-rw-r--r-- keystone/common/sql/migrate_repo/versions/027_set_engine_mysql_innodb.py | 2
-rw-r--r-- keystone/common/sql/migrate_repo/versions/028_fixup_group_metadata.py | 2
-rw-r--r-- keystone/common/sql/migrate_repo/versions/029_update_assignment_metadata.py | 2
-rw-r--r-- keystone/common/sql/migrate_repo/versions/030_drop_credential_constraint_sqlite.py | 3
-rw-r--r-- keystone/common/sql/migrate_repo/versions/031_drop_credential_indexes.py | 2
-rw-r--r-- keystone/common/sql/migrate_repo/versions/032_username_length.py | 2
-rw-r--r-- keystone/common/sql/migrate_repo/versions/033_migrate_ec2credentials_table_credentials.py | 2
-rw-r--r-- keystone/common/sql/migrate_repo/versions/034_add_default_project_id_column_to_user.py | 2
-rw-r--r-- keystone/common/sql/migrate_repo/versions/035_add_compound_revoked_token_index.py | 2
-rw-r--r-- keystone/common/sql/migrate_repo/versions/036_token_drop_valid_index.py | 2
-rw-r--r-- keystone/common/sql/migrate_repo/versions/037_add_region_table.py | 2
-rw-r--r-- keystone/common/sql/migrate_repo/versions/038_add_assignment_table.py | 51
-rw-r--r-- keystone/common/sql/migrate_repo/versions/039_grant_to_assignment.py | 231
-rw-r--r-- keystone/common/sql/migrate_repo/versions/040_drop_grant_tables.py | 106
-rw-r--r-- keystone/common/sql/migration.py | 35
-rw-r--r-- keystone/common/sql/migration_helpers.py | 17
-rw-r--r-- keystone/common/systemd.py | 2
-rw-r--r-- keystone/common/utils.py | 2
-rw-r--r-- keystone/common/wsgi.py | 47
-rw-r--r-- keystone/config.py | 27
-rw-r--r-- keystone/contrib/access/__init__.py | 1
-rw-r--r-- keystone/contrib/access/core.py | 2
-rw-r--r-- keystone/contrib/admin_crud/__init__.py | 1
-rw-r--r-- keystone/contrib/admin_crud/core.py | 2
-rw-r--r-- keystone/contrib/ec2/__init__.py | 1
-rw-r--r-- keystone/contrib/ec2/controllers.py | 6
-rw-r--r-- keystone/contrib/ec2/core.py | 2
-rw-r--r-- keystone/contrib/ec2/routers.py | 2
-rw-r--r-- keystone/contrib/endpoint_filter/__init__.py | 1
-rw-r--r-- keystone/contrib/endpoint_filter/backends/catalog_sql.py | 2
-rw-r--r-- keystone/contrib/endpoint_filter/backends/sql.py | 11
-rw-r--r-- keystone/contrib/endpoint_filter/controllers.py | 2
-rw-r--r-- keystone/contrib/endpoint_filter/core.py | 6
-rw-r--r-- keystone/contrib/endpoint_filter/migrate_repo/versions/001_add_endpoint_filtering_table.py | 2
-rw-r--r-- keystone/contrib/endpoint_filter/routers.py | 2
-rw-r--r-- keystone/contrib/example/controllers.py | 2
-rw-r--r-- keystone/contrib/example/core.py | 2
-rw-r--r-- keystone/contrib/example/migrate_repo/versions/001_example_table.py | 2
-rw-r--r-- keystone/contrib/example/routers.py | 2
-rw-r--r-- keystone/contrib/federation/__init__.py | 2
-rw-r--r-- keystone/contrib/federation/backends/sql.py | 17
-rw-r--r-- keystone/contrib/federation/controllers.py | 3
-rw-r--r-- keystone/contrib/federation/core.py | 15
-rw-r--r-- keystone/contrib/federation/migrate_repo/versions/001_add_identity_provider_table.py | 3
-rw-r--r-- keystone/contrib/federation/migrate_repo/versions/002_add_mapping_tables.py | 3
-rw-r--r-- keystone/contrib/federation/routers.py | 3
-rw-r--r-- keystone/contrib/federation/utils.py | 279
-rw-r--r-- keystone/contrib/kds/__init__.py | 11
-rw-r--r-- keystone/contrib/kds/api/__init__.py | 13
-rw-r--r-- keystone/contrib/kds/api/app.py | 2
-rw-r--r-- keystone/contrib/kds/api/config.py | 2
-rw-r--r-- keystone/contrib/kds/api/hooks.py | 2
-rw-r--r-- keystone/contrib/kds/api/root.py | 13
-rw-r--r-- keystone/contrib/kds/api/v1/__init__.py | 0
-rw-r--r-- keystone/contrib/kds/api/v1/controllers/__init__.py | 19
-rw-r--r-- keystone/contrib/kds/api/v1/controllers/controller.py | 29
-rw-r--r-- keystone/contrib/kds/cli/__init__.py | 13
-rw-r--r-- keystone/contrib/kds/cli/api.py | 2
-rw-r--r-- keystone/contrib/kds/cli/manage.py | 2
-rw-r--r-- keystone/contrib/kds/common/__init__.py | 13
-rw-r--r-- keystone/contrib/kds/common/exception.py | 2
-rw-r--r-- keystone/contrib/kds/common/service.py | 29
-rw-r--r-- keystone/contrib/kds/common/utils.py | 2
-rw-r--r-- keystone/contrib/kds/db/api.py | 2
-rw-r--r-- keystone/contrib/kds/db/connection.py | 13
-rw-r--r-- keystone/contrib/kds/db/kvs/api.py | 2
-rw-r--r-- keystone/contrib/kds/db/migration.py | 2
-rw-r--r-- keystone/contrib/kds/db/sqlalchemy/api.py | 2
-rw-r--r-- keystone/contrib/kds/db/sqlalchemy/migrate_repo/manage.py | 2
-rw-r--r-- keystone/contrib/kds/db/sqlalchemy/migrate_repo/versions/001_kds_table.py | 2
-rw-r--r-- keystone/contrib/kds/db/sqlalchemy/migration.py | 2
-rw-r--r-- keystone/contrib/kds/db/sqlalchemy/models.py | 2
-rw-r--r-- keystone/contrib/oauth1/__init__.py | 2
-rw-r--r-- keystone/contrib/oauth1/backends/__init__.py | 15
-rw-r--r-- keystone/contrib/oauth1/backends/sql.py | 16
-rw-r--r-- keystone/contrib/oauth1/controllers.py | 6
-rw-r--r-- keystone/contrib/oauth1/core.py | 33
-rw-r--r-- keystone/contrib/oauth1/migrate_repo/__init__.py | 15
-rw-r--r-- keystone/contrib/oauth1/migrate_repo/versions/001_add_oauth_tables.py | 2
-rw-r--r-- keystone/contrib/oauth1/migrate_repo/versions/002_fix_oauth_tables_fk.py | 2
-rw-r--r-- keystone/contrib/oauth1/migrate_repo/versions/003_consumer_description_nullalbe.py | 2
-rw-r--r-- keystone/contrib/oauth1/migrate_repo/versions/004_request_token_roles_nullable.py | 2
-rw-r--r-- keystone/contrib/oauth1/migrate_repo/versions/__init__.py | 15
-rw-r--r-- keystone/contrib/oauth1/routers.py | 2
-rw-r--r-- keystone/contrib/oauth1/validator.py | 2
-rw-r--r-- keystone/contrib/s3/__init__.py | 1
-rw-r--r-- keystone/contrib/s3/core.py | 2
-rw-r--r-- keystone/contrib/simple_cert/__init__.py | 2
-rw-r--r-- keystone/contrib/simple_cert/controllers.py | 2
-rw-r--r-- keystone/contrib/simple_cert/core.py | 2
-rw-r--r-- keystone/contrib/simple_cert/routers.py | 2
-rw-r--r-- keystone/contrib/stats/__init__.py | 1
-rw-r--r-- keystone/contrib/stats/backends/kvs.py | 2
-rw-r--r-- keystone/contrib/stats/core.py | 12
-rw-r--r-- keystone/contrib/user_crud/__init__.py | 1
-rw-r--r-- keystone/contrib/user_crud/core.py | 3
-rw-r--r-- keystone/controllers.py | 2
-rw-r--r-- keystone/credential/__init__.py | 1
-rw-r--r-- keystone/credential/backends/sql.py | 10
-rw-r--r-- keystone/credential/controllers.py | 9
-rw-r--r-- keystone/credential/core.py | 2
-rw-r--r-- keystone/credential/routers.py | 2
-rw-r--r-- keystone/exception.py | 10
-rw-r--r-- keystone/identity/__init__.py | 1
-rw-r--r-- keystone/identity/backends/kvs.py | 2
-rw-r--r-- keystone/identity/backends/ldap.py | 8
-rw-r--r-- keystone/identity/backends/pam.py | 2
-rw-r--r-- keystone/identity/backends/sql.py | 20
-rw-r--r-- keystone/identity/controllers.py | 22
-rw-r--r-- keystone/identity/core.py | 100
-rw-r--r-- keystone/identity/routers.py | 2
-rw-r--r-- keystone/middleware/__init__.py | 1
-rw-r--r-- keystone/middleware/core.py | 4
-rw-r--r-- keystone/middleware/ec2_token.py | 4
-rw-r--r-- keystone/middleware/s3_token.py | 241
-rw-r--r-- keystone/notifications.py | 184
-rw-r--r-- keystone/openstack/common/config/__init__.py | 0
-rw-r--r-- keystone/openstack/common/config/generator.py | 293
-rw-r--r-- keystone/openstack/common/db/exception.py | 6
-rw-r--r-- keystone/openstack/common/db/sqlalchemy/session.py | 154
-rw-r--r-- keystone/openstack/common/fixture/config.py | 56
-rw-r--r-- keystone/openstack/common/fixture/lockutils.py | 4
-rw-r--r-- keystone/openstack/common/fixture/mockpatch.py | 10
-rw-r--r-- keystone/openstack/common/log.py | 14
-rw-r--r-- keystone/openstack/common/log_handler.py | 29
-rw-r--r-- keystone/openstack/common/notifier/__init__.py | 14
-rw-r--r-- keystone/openstack/common/notifier/api.py | 173
-rw-r--r-- keystone/openstack/common/notifier/log_notifier.py | 37
-rw-r--r-- keystone/openstack/common/notifier/no_op_notifier.py | 19
-rw-r--r-- keystone/openstack/common/notifier/rpc_notifier.py | 47
-rw-r--r-- keystone/openstack/common/notifier/rpc_notifier2.py | 53
-rw-r--r-- keystone/openstack/common/notifier/test_notifier.py | 22
-rw-r--r-- keystone/openstack/common/policy.py | 22
-rw-r--r-- keystone/openstack/common/rpc/__init__.py | 304
-rw-r--r-- keystone/openstack/common/rpc/amqp.py | 637
-rw-r--r-- keystone/openstack/common/rpc/common.py | 504
-rw-r--r-- keystone/openstack/common/rpc/dispatcher.py | 178
-rw-r--r-- keystone/openstack/common/rpc/impl_fake.py | 193
-rw-r--r-- keystone/openstack/common/rpc/impl_kombu.py | 855
-rw-r--r-- keystone/openstack/common/rpc/impl_qpid.py | 821
-rw-r--r-- keystone/openstack/common/rpc/impl_zmq.py | 818
-rw-r--r-- keystone/openstack/common/rpc/matchmaker.py | 322
-rw-r--r-- keystone/openstack/common/rpc/matchmaker_redis.py | 143
-rw-r--r-- keystone/openstack/common/rpc/matchmaker_ring.py | 106
-rw-r--r-- keystone/openstack/common/rpc/proxy.py | 225
-rw-r--r-- keystone/openstack/common/rpc/serializer.py | 54
-rw-r--r-- keystone/openstack/common/rpc/service.py | 76
-rw-r--r-- keystone/openstack/common/rpc/zmq_receiver.py | 38
-rw-r--r-- keystone/policy/__init__.py | 1
-rw-r--r-- keystone/policy/backends/rules.py | 2
-rw-r--r-- keystone/policy/backends/sql.py | 10
-rw-r--r-- keystone/policy/controllers.py | 7
-rw-r--r-- keystone/policy/core.py | 12
-rw-r--r-- keystone/policy/routers.py | 2
-rw-r--r-- keystone/routers.py | 2
-rw-r--r-- keystone/service.py | 6
-rw-r--r-- keystone/tests/__init__.py | 2
-rw-r--r-- keystone/tests/_ldap_livetest.py | 12
-rw-r--r-- keystone/tests/_ldap_tls_livetest.py | 2
-rw-r--r-- keystone/tests/_sql_livetest.py | 2
-rw-r--r-- keystone/tests/backend_liveldap.conf | 2
-rw-r--r-- keystone/tests/contrib/__init__.py | 13
-rw-r--r-- keystone/tests/contrib/kds/__init__.py | 13
-rw-r--r-- keystone/tests/contrib/kds/api/__init__.py | 13
-rw-r--r-- keystone/tests/contrib/kds/api/base.py | 2
-rw-r--r-- keystone/tests/contrib/kds/api/test.py | 16
-rw-r--r-- keystone/tests/contrib/kds/api/v1/__init__.py | 0
-rw-r--r-- keystone/tests/contrib/kds/api/v1/base.py | 29
-rw-r--r-- keystone/tests/contrib/kds/api/v1/test.py | 28
-rw-r--r-- keystone/tests/contrib/kds/base.py | 2
-rw-r--r-- keystone/tests/contrib/kds/db/base.py | 2
-rw-r--r-- keystone/tests/contrib/kds/db/test_host_key.py | 2
-rw-r--r-- keystone/tests/contrib/kds/fixture/__init__.py | 4
-rw-r--r-- keystone/tests/contrib/kds/fixture/kvsdb.py | 2
-rw-r--r-- keystone/tests/contrib/kds/fixture/sqlitedb.py | 2
-rw-r--r-- keystone/tests/contrib/kds/paths.py | 2
-rw-r--r-- keystone/tests/core.py | 94
-rw-r--r-- keystone/tests/default_catalog.templates | 2
-rw-r--r-- keystone/tests/default_fixtures.py | 2
-rw-r--r-- keystone/tests/fakeldap.py | 17
-rw-r--r-- keystone/tests/filtering.py | 2
-rw-r--r-- keystone/tests/fixtures/__init__.py | 16
-rw-r--r-- keystone/tests/fixtures/appserver.py | 2
-rw-r--r-- keystone/tests/fixtures/cache.py | 38
-rw-r--r-- keystone/tests/legacy_d5.mysql | 281
-rw-r--r-- keystone/tests/legacy_d5.sqlite | 277
-rw-r--r-- keystone/tests/legacy_diablo.mysql | 281
-rw-r--r-- keystone/tests/legacy_diablo.sqlite | 283
-rw-r--r-- keystone/tests/legacy_essex.mysql | 309
-rw-r--r-- keystone/tests/legacy_essex.sqlite | 313
-rw-r--r-- keystone/tests/mapping_fixtures.py | 119
-rw-r--r-- keystone/tests/matchers.py | 2
-rw-r--r-- keystone/tests/rest.py | 2
-rw-r--r-- keystone/tests/test_associate_project_endpoint_extension.py | 77
-rw-r--r-- keystone/tests/test_auth.py | 57
-rw-r--r-- keystone/tests/test_auth_plugin.py | 2
-rw-r--r-- keystone/tests/test_backend.py | 124
-rw-r--r-- keystone/tests/test_backend_federation_sql.py | 2
-rw-r--r-- keystone/tests/test_backend_kvs.py | 14
-rw-r--r-- keystone/tests/test_backend_ldap.py | 51
-rw-r--r-- keystone/tests/test_backend_memcache.py | 238
-rw-r--r-- keystone/tests/test_backend_pam.py | 2
-rw-r--r-- keystone/tests/test_backend_sql.py | 48
-rw-r--r-- keystone/tests/test_backend_templated.py | 2
-rw-r--r-- keystone/tests/test_base64utils.py | 2
-rw-r--r-- keystone/tests/test_cache.py | 8
-rw-r--r-- keystone/tests/test_catalog.py | 2
-rw-r--r-- keystone/tests/test_cert_setup.py | 43
-rw-r--r-- keystone/tests/test_config.py | 2
-rw-r--r-- keystone/tests/test_content_types.py | 11
-rw-r--r-- keystone/tests/test_contrib_s3_core.py | 2
-rw-r--r-- keystone/tests/test_contrib_simple_cert.py | 2
-rw-r--r-- keystone/tests/test_contrib_stats_core.py | 2
-rw-r--r-- keystone/tests/test_driver_hints.py | 15
-rw-r--r-- keystone/tests/test_exception.py | 2
-rw-r--r-- keystone/tests/test_injection.py | 2
-rw-r--r-- keystone/tests/test_ipv6.py | 2
-rw-r--r-- keystone/tests/test_keystoneclient.py | 26
-rw-r--r-- keystone/tests/test_keystoneclient_sql.py | 5
-rw-r--r-- keystone/tests/test_kvs.py | 261
-rw-r--r-- keystone/tests/test_matchers.py | 2
-rw-r--r-- keystone/tests/test_middleware.py | 2
-rw-r--r-- keystone/tests/test_no_admin_token_auth.py | 2
-rw-r--r-- keystone/tests/test_notifications.py | 212
-rw-r--r-- keystone/tests/test_overrides.conf | 9
-rw-r--r-- keystone/tests/test_pemutils.py | 2
-rw-r--r-- keystone/tests/test_policy.py | 2
-rw-r--r-- keystone/tests/test_s3_token_middleware.py | 231
-rw-r--r-- keystone/tests/test_serializer.py | 2
-rw-r--r-- keystone/tests/test_singular_plural.py | 2
-rw-r--r-- keystone/tests/test_sizelimit.py | 2
-rw-r--r-- keystone/tests/test_sql_migrate_extensions.py | 6
-rw-r--r-- keystone/tests/test_sql_upgrade.py | 348
-rw-r--r-- keystone/tests/test_ssl.py | 18
-rw-r--r-- keystone/tests/test_token_provider.py | 42
-rw-r--r-- keystone/tests/test_url_middleware.py | 2
-rw-r--r-- keystone/tests/test_utils.py | 13
-rw-r--r-- keystone/tests/test_v2_controller.py | 61
-rw-r--r-- keystone/tests/test_v3.py | 7
-rw-r--r-- keystone/tests/test_v3_auth.py | 4
-rw-r--r-- keystone/tests/test_v3_catalog.py | 22
-rw-r--r-- keystone/tests/test_v3_credential.py | 2
-rw-r--r-- keystone/tests/test_v3_federation.py | 155
-rw-r--r-- keystone/tests/test_v3_filters.py | 141
-rw-r--r-- keystone/tests/test_v3_identity.py | 122
-rw-r--r-- keystone/tests/test_v3_oauth1.py | 206
-rw-r--r-- keystone/tests/test_v3_policy.py | 2
-rw-r--r-- keystone/tests/test_v3_protection.py | 2
-rw-r--r-- keystone/tests/test_versions.py | 2
-rw-r--r-- keystone/tests/test_wsgi.py | 79
-rw-r--r-- keystone/tests/unit/__init__.py | 0
-rw-r--r-- keystone/tests/unit/common/__init__.py | 0
-rw-r--r-- keystone/tests/unit/common/test_sql_core.py | 52
-rw-r--r-- keystone/tests/unit/identity/__init__.py | 0
-rw-r--r-- keystone/tests/unit/identity/test_core.py | 61
-rw-r--r-- keystone/tests/utils.py | 20
-rw-r--r-- keystone/token/__init__.py | 1
-rw-r--r-- keystone/token/backends/memcache.py | 222
-rw-r--r-- keystone/token/backends/sql.py | 4
-rw-r--r-- keystone/token/controllers.py | 28
-rw-r--r-- keystone/token/core.py | 2
-rw-r--r-- keystone/token/provider.py | 11
-rw-r--r-- keystone/token/providers/common.py | 4
-rw-r--r-- keystone/token/providers/pki.py | 2
-rw-r--r-- keystone/token/providers/uuid.py | 2
-rw-r--r-- keystone/token/routers.py | 2
-rw-r--r-- keystone/trust/__init__.py | 1
-rw-r--r-- keystone/trust/backends/kvs.py | 2
-rw-r--r-- keystone/trust/backends/sql.py | 4
-rw-r--r-- keystone/trust/controllers.py | 49
-rw-r--r-- keystone/trust/core.py | 7
-rw-r--r-- keystone/trust/routers.py | 2
-rw-r--r-- openstack-common.conf | 1
-rw-r--r-- requirements.txt | 14
-rw-r--r-- setup.cfg | 6
-rw-r--r-- test-requirements.txt | 13
-rw-r--r-- tools/config/README | 38
-rwxr-xr-x tools/config/check_uptodate.sh | 25
-rwxr-xr-x tools/config/generate_sample.sh | 119
-rw-r--r-- tools/config/oslo.config.generator.rc | 4
-rw-r--r-- tools/install_venv.py | 2
-rw-r--r-- tox.ini | 15
375 files changed, 7020 insertions(+), 11361 deletions(-)
diff --git a/.mailmap b/.mailmap
index 2feef7682..a1614cb65 100644
--- a/.mailmap
+++ b/.mailmap
@@ -4,7 +4,7 @@
<adam.gandelman@canonical.com> <adamg@canonical.com>
<brian.waldon@rackspace.com> <bcwaldon@gmail.com>
<brian.lamar@rackspace.com> <brian.lamar@gmail.com>
-<dolph.mathews@rackspace.com> <dolph.mathews@gmail.com>
+<dolph.mathews@gmail.com> <dolph.mathews@rackspace.com>
<jeblair@hp.com> <james.blair@rackspace.com>
<jeblair@hp.com> <corvus@gnu.org>
<jaypipes@gmail.com> <jpipes@uberbox.gateway.2wire.net>
diff --git a/README.rst b/README.rst
index 7787fb840..ed1c6e32f 100644
--- a/README.rst
+++ b/README.rst
@@ -44,7 +44,7 @@ Future design work is tracked at:
https://blueprints.launchpad.net/keystone
-Contributors are encouraged to join IRC (``#openstack-dev`` on freenode):
+Contributors are encouraged to join IRC (``#openstack-keystone`` on freenode):
https://wiki.openstack.org/wiki/IRC
diff --git a/bin/keystone-all b/bin/keystone-all
index 3be54b830..ffe68678a 100755
--- a/bin/keystone-all
+++ b/bin/keystone-all
@@ -1,5 +1,4 @@
#!/usr/bin/env python
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 OpenStack Foundation
#
@@ -56,7 +55,9 @@ CONF = config.CONF
def create_server(conf, name, host, port):
app = deploy.loadapp('config:%s' % conf, name=name)
- server = environment.Server(app, host=host, port=port)
+ server = environment.Server(app, host=host, port=port,
+ keepalive=CONF.tcp_keepalive,
+ keepidle=CONF.tcp_keepidle)
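+    # TCP keepalive behavior on accepted connections is controlled by the
+    # tcp_keepalive and tcp_keepidle options in keystone.conf.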
if CONF.ssl.enable:
server.set_ssl(CONF.ssl.certfile, CONF.ssl.keyfile,
CONF.ssl.ca_certs, CONF.ssl.cert_required)
@@ -105,6 +106,7 @@ if __name__ == '__main__':
config.configure()
sql.initialize()
+ config.set_default_for_default_log_levels()
CONF(project='keystone',
version=pbr.version.VersionInfo('keystone').version_string(),
diff --git a/bin/keystone-manage b/bin/keystone-manage
index 9df13578b..e11e706e1 100755
--- a/bin/keystone-manage
+++ b/bin/keystone-manage
@@ -1,5 +1,4 @@
#!/usr/bin/env python
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 OpenStack Foundation
#
diff --git a/doc/ext/apidoc.py b/doc/ext/apidoc.py
index 20f208f69..435d388f1 100644
--- a/doc/ext/apidoc.py
+++ b/doc/ext/apidoc.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/doc/source/architecture.rst b/doc/source/architecture.rst
index 02e601134..5af8c6c54 100644
--- a/doc/source/architecture.rst
+++ b/doc/source/architecture.rst
@@ -231,7 +231,9 @@ Rules
-----
Given a list of matches to check for, simply verify that the credentials
-contain the matches. For example::
+contain the matches. For example:
+
+.. code:: python
credentials = {'user_id': 'foo', 'is_admin': 1, 'roles': ['nova:netadmin']}
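+
+    # Illustration only (not Keystone code): a match list is satisfied when
+    # every 'attribute:value' pair appears in the credentials.
+    def matches_credentials(matches, credentials):
+        for match in matches:
+            attribute, _, expected = match.partition(':')
+            actual = credentials.get(attribute)
+            if isinstance(actual, list):
+                if expected not in actual:
+                    return False
+            elif str(actual) != expected:
+                return False
+        return True
+
+    matches_credentials(('is_admin:1',), credentials)   # True
+    matches_credentials(('user_id:bar',), credentials)  # False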
@@ -255,7 +257,9 @@ Capability RBAC
(Not yet implemented.)
Another approach to authorization can be action-based, with a mapping of roles
-to which capabilities are allowed for that role. For example::
+to which capabilities are allowed for that role. For example:
+
+.. code:: python
credentials = {'user_id': 'foo', 'is_admin': 1, 'roles': ['nova:netadmin']}
diff --git a/doc/source/conf.py b/doc/source/conf.py
index b25e67770..ab534108d 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -50,7 +50,7 @@ extensions = ['sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
- 'oslo.sphinx',
+ 'oslosphinx',
# NOTE(dstanek): Uncomment the [pbr] section in setup.cfg and
# remove this Sphinx extension when
# https://launchpad.net/bugs/1260495 is fixed.
diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst
index 899cfceac..a19f41e97 100644
--- a/doc/source/configuration.rst
+++ b/doc/source/configuration.rst
@@ -243,7 +243,7 @@ behavior is that subsystem caching is enabled, but the global toggle is set to d
* ``enabled`` - enables/disables caching across all of keystone
* ``debug_cache_backend`` - enables more in-depth logging from the cache backend (get, set, delete, etc)
-* ``backend`` - the caching backend module to use e.g. ``dogpile.cache.memcache``
+* ``backend`` - the caching backend module to use e.g. ``dogpile.cache.memcached``
.. NOTE::
A given ``backend`` must be registered with ``dogpile.cache`` before it
@@ -468,8 +468,8 @@ To build your service catalog using this driver, see the built-in help::
You can also refer to `an example in Keystone (tools/sample_data.sh)
<https://github.com/openstack/keystone/blob/master/tools/sample_data.sh>`_.
-File-based Service Catalog (``templated.TemplatedCatalog``)
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+File-based Service Catalog (``templated.Catalog``)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The templated catalog is an in-memory backend initialized from a read-only
``template_file``. Choose this option only if you know that your
@@ -486,7 +486,7 @@ service catalog will not change very much over time.
``keystone.conf`` example::
[catalog]
- driver = keystone.catalog.backends.templated.TemplatedCatalog
+ driver = keystone.catalog.backends.templated.Catalog
template_file = /opt/stack/keystone/etc/default_catalog.templates
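+
+For illustration, entries in the template file follow a
+``catalog.$region.$service.$key = $value`` pattern, along the lines of::
+
+    catalog.RegionOne.identity.publicURL = http://localhost:$(public_port)s/v2.0
+    catalog.RegionOne.identity.adminURL = http://localhost:$(admin_port)s/v2.0
+    catalog.RegionOne.identity.internalURL = http://localhost:$(public_port)s/v2.0
+    catalog.RegionOne.identity.name = Identity Service
+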
The value of ``template_file`` is expected to be an absolute path to your
@@ -693,6 +693,22 @@ should be set to one of the following modes:
*Do not* set ``enforce_token_bind = named`` as there is not an authentication
mechanism called ``named``.
+Limiting the number of entities returned in a collection
+--------------------------------------------------------
+
+Keystone provides a method of setting a limit to the number of entities
+returned in a collection, which is useful to prevent overly long response times
+for list queries that have not specified a sufficiently narrow filter. This
+limit can be set globally by setting ``list_limit`` in the default section of
+``keystone.conf``, with no limit set by default. Individual driver sections
+may override this global value with a specific limit, for example::
+
+ [assignment]
+ list_limit = 100
+
+If a response to a ``list_{entity}`` call has been truncated, then the response
+status code will still be 200 (OK), but the ``truncated`` attribute in the
+collection will be set to ``true``.
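+
+For illustration, a truncated collection response might look like (abridged,
+hypothetical values)::
+
+    {
+        "users": [...],
+        "truncated": true,
+        "links": {...}
+    }
+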
Sample Configuration Files
--------------------------
@@ -705,6 +721,110 @@ files for each Server application.
* ``etc/logging.conf.sample``
* ``etc/default_catalog.templates``
+.. _`API protection with RBAC`:
+
+Keystone API protection with Role Based Access Control (RBAC)
+=============================================================
+
+Like most OpenStack projects, Keystone supports the protection of its APIs
+by defining policy rules based on an RBAC approach. These are stored in a
+JSON policy file, the name and location of which is set in the main Keystone
+configuration file.
+
+Each keystone v3 API has a line in the policy file which dictates what level
+of protection is applied to it, where each line is of the form:
+
+<api name>: <rule statement> or <match statement>
+
+where
+
+<rule statement> can contain <rule statement> or <match statement>
+
+<match statement> is a set of identifiers that must match between the token
+provided by the caller of the API and the parameters or target entities of
+the API call in question. For example:
+
+ "identity:create_user": [["role:admin", "domain_id:%(user.domain_id)s"]]
+
+indicates that to create a user you must have the admin role in your token and
+in addition the domain_id in your token (which implies this must be a domain
+scoped token) must match the domain_id in the user object you are trying to
+create. In other words, you must have the admin role on the domain in which
+you are creating the user, and the token you are using must be scoped to that
+domain.
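+
+A rule statement may also reference other named rules, allowing common checks
+to be defined once and reused. For illustration (a hypothetical policy
+fragment)::
+
+    "admin_required": [["role:admin"]],
+    "identity:list_projects": [["rule:admin_required"]]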
+
+Each component of a match statement is of the form:
+
+<attribute from token>:<constant> or <attribute related to API call>
+
+The following attributes are available:
+
+* Attributes from token: user_id, the domain_id or project_id depending on
+ the scope, and the list of roles you have within that scope
+
+* Attributes related to API call: Any parameters that are passed into the
+ API call are available, along with any filters specified in the query
+ string. Attributes of objects passed can be referenced using an
+ object.attribute syntax (e.g. user.domain_id). The target objects of an
+ API are also available using a target.object.attribute syntax. For instance:
+
+ "identity:delete_user": [["role:admin", "domain_id:%(target.user.domain_id)s"]]
+
+ would ensure that the user object that is being deleted is in the same
+ domain as the token provided.
+
+Every target object has an ``id`` and a ``name`` available as
+``target.<object>.id`` and ``target.<object>.name``. Other attributes are
+retrieved from the database and vary between object types. Moreover,
+some database fields are filtered out (e.g. user passwords).
+
+List of object attributes:
+
+* role:
+ * target.role.id
+ * target.role.name
+
+* user:
+ * target.user.default_project_id
+ * target.user.description
+ * target.user.domain_id
+ * target.user.enabled
+ * target.user.id
+ * target.user.name
+
+* group:
+ * target.group.description
+ * target.group.domain_id
+ * target.group.id
+ * target.group.name
+
+* domain:
+ * target.domain.enabled
+ * target.domain.id
+ * target.domain.name
+
+* project:
+ * target.project.description
+ * target.project.domain_id
+ * target.project.enabled
+ * target.project.id
+ * target.project.name
+
+The default policy.json file supplied provides a somewhat basic example of
+API protection, and does not assume any particular use of domains. For
+multi-domain configuration installations where, for example, a cloud
+provider wishes to allow administration of the contents of a domain to
+be delegated, it is recommended that the supplied policy.v3cloudsample.json
+is used as a basis for creating a suitable production policy file. This
+example policy file also shows the use of an admin_domain to allow a cloud
+provider to enable cloud administrators to have wider access across the APIs.
+
+A clean installation would likely need to start with the standard policy
+file, to allow creation of the admin_domain with the first users within
+it. The domain_id of the admin domain would then be obtained and could be
+pasted into a modified version of policy.v3cloudsample.json, which could
+then be enabled as the main policy file.
+
.. _`adding extensions`:
Adding Extensions
@@ -851,71 +971,6 @@ to be passed as arguments each time::
$ export OS_PASSWORD=my_password
$ export OS_TENANT_NAME=my_tenant
-Keystone API protection with Role Based Access Control (RBAC)
--------------------------------------------------------------
-
-Like most OpenStack projects, Keystone supports the protection of its APIs
-by defining policy rules based on an RBAC approach. These are stored in a
-JSON policy file, the name and location of which is set in the main Keystone
-configuration file.
-
-Each keystone v3 API has a line in the policy file which dictates what level
-of protection is applied to it, where each line is of the form:
-
-<api name>: <rule statement> or <match statement>
-
-where
-
-<rule statement> can be contain <rule statement> or <match statement>
-
-<match statement> is a set of identifiers that must match between the token
-provided by the caller of the API and the parameters or target entities of
-the API call in question. For example:
-
- "identity:create_user": [["role:admin", "domain_id:%(user.domain_id)s"]]
-
-indicates that to create a user you must have the admin role in your token and
-in addition the domain_id in your token (which implies this must be a domain
-scoped token) must match the domain_id in the user object you are trying to
-create. In other words, you must have the admin role on the domain in which
-you are creating the user, and the token you are using must be scoped to that
-domain.
-
-Each component of a match statement is of the form:
-
-<attribute from token>:<constant> or <attribute related to API call>
-
-The following attributes are available
-
-* Attributes from token: user_id, the domain_id or project_id depending on
- the scope, and the list of roles you have within that scope
-
-* Attributes related to API call: Any parameters that are passed into the
- API call are available, along with any filters specified in the query
- string. Attributes of objects passed can be refererenced using an
- object.attribute syntax (e.g. user.domain_id). The target objects of an
- API are also available using a target.object.attribute syntax. For instance:
-
- "identity:delete_user": [["role:admin", "domain_id:%(target.user.domain_id)s"]]
-
- would ensure that the user object that is being deleted is in the same
- domain as the token provided.
-
-The default policy.json file supplied provides a somewhat basic example of
-API protection, and does not assume any particular use of domains. For
-multi-domain configuration installations where, for example, a cloud
-provider wishes to allow adminsistration of the contents of a domain to
-be delegated, it is recommended that the supplied policy.v3cloudsample.json
-is used as a basis for creating a suitable production policy file. This
-example policy file also shows the use of an admin_domain to allow a cloud
-provider to enable cloud adminstrators to have wider access across the APIs.
-
-A clean installation would need to perhaps start with the standard policy
-file, to allow creation of the admin_domain with the first users within
-it. The domain_id of the admin domain would then be obtained and could be
-pasted into a modifed version of policy.v3cloudsample.json which could then
-be enabled as the main policy file.
-
Example usage
-------------
@@ -1332,6 +1387,31 @@ specified classes in the LDAP module so you can configure them like::
role_member_attribute = roleOccupant
role_attribute_ignore =
+
+Enabled Emulation
+-----------------
+
+Some directory servers do not provide any enabled attribute. For these
+servers, the ``user_enabled_emulation`` and ``tenant_enabled_emulation``
+attributes have been created. They are enabled by setting their respective
+flags to True. Then the attributes ``user_enabled_emulation_dn`` and
+``tenant_enabled_emulation_dn`` may be set to specify how the enabled users
+and projects (tenants) are selected. These attributes work by using a
+``groupOfNames`` and adding whichever users or projects (tenants) that
+you want enabled to the respective group. For example, this will
+mark any user who is a member of ``enabled_users`` as enabled::
+
+ [ldap]
+ user_enabled_emulation = True
+ user_enabled_emulation_dn = cn=enabled_users,cn=groups,dc=openstack,dc=org
+
+The default values for the user and project (tenant) enabled emulation DNs are
+``cn=enabled_users,$user_tree_dn`` and ``cn=enabled_tenants,$tenant_tree_dn``
+respectively.
+
+Secure Connection
+-----------------
+
If you are using a directory server to provide the Identity service,
it is strongly recommended that you utilize a secure connection from
Keystone to the directory server. In addition to supporting ldaps, Keystone
diff --git a/doc/source/developing.rst b/doc/source/developing.rst
index 7d39fc884..2cfcf2fcc 100644
--- a/doc/source/developing.rst
+++ b/doc/source/developing.rst
@@ -100,8 +100,9 @@ The directory ``keystone/contrib/example`` contains a sample extension
migration.
Migrations must be explicitly run for each extension individually. To run a
-migration for a specific extension, run ``keystone-manage --extension <name>
-db_sync``.
+migration for a specific extension, simply run::
+
+ $ keystone-manage db_sync --extension <name>
Initial Sample Data
-------------------
@@ -144,6 +145,21 @@ The contract for a driver for ``list_{entity}`` methods is therefore:
list by filtering for one or more of the specified filters in the passed
Hints reference, and removing any such satisfied filters.
+Entity list truncation by drivers
+---------------------------------
+
+Keystone supports the ability for a deployment to restrict the number of
+entries returned from ``list_{entity}`` methods, typically to prevent poorly
+formed searches (e.g. without sufficient filters) from becoming a performance
+issue.
+
+These limits are set in the configuration file, either for a specific driver or
+across all drivers. These limits are read at the Manager level and passed into
+individual drivers as part of the Hints list object. A driver should try to
+honor any such limit if possible, but if it is unable to do so then it may
+ignore it (and the truncation of the returned list of entities will happen at
+the controller level).
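+
+A minimal sketch of driver-side limit handling (the method name, data shapes,
+and the ``hints.limit`` layout are assumptions for illustration; the real
+interface lives in ``keystone.common.driver_hints``):
+
+.. code:: python
+
+    def list_widgets(self, hints):
+        widgets = self._fetch_all_widgets()
+        if hints is not None and hints.limit:
+            limit = hints.limit['limit']  # assumed dict shape
+            if len(widgets) > limit:
+                widgets = widgets[:limit]
+                hints.limit['truncated'] = True  # assumed flag
+        return widgets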
+
Testing
-------
@@ -293,6 +309,26 @@ installed devstack with a different LDAP password, modify the file
``keystone/tests/backend_liveldap.conf`` to reflect your password.
+Generating Updated Sample Config File
+-------------------------------------
+
+Keystone's sample configuration file ``etc/keystone.conf.sample`` is automatically
+generated based upon all of the options available within Keystone. These options
+are sourced from the many files around Keystone as well as some external libraries.
+
+If new options are added (primarily in ``keystone.common.config``), a new
+sample configuration file needs to be generated. To generate a sample
+configuration file for inclusion in a commit, run::
+
+ $ tox -esample_config -r
+
+The tox command will place an updated sample config in ``etc/keystone.conf.sample``.
+
+If there is a new external library (e.g. ``oslo.messaging``) that utilizes the
+``oslo.config`` package for configuration, it can be added to the list of libraries
+found in ``tools/config/oslo.config.generator.rc``.
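+
+For illustration, such an rc file typically exports an environment variable
+consumed by the generator script (the exact variable name may differ)::
+
+    export KEYSTONE_CONFIG_GENERATOR_EXTRA_LIBRARIES='oslo.messaging'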
+
+
Translated responses
--------------------
@@ -346,7 +382,9 @@ which will provide a reference, to a function, that will consult the global cach
calling ``should_cache_fn``, the returned function reference will default to enabling
caching for that ``manager``.
-Example use of cache and ``should_cache_fn`` (in this example, ``token`` is the manager)::
+Example use of cache and ``should_cache_fn`` (in this example, ``token`` is the manager):
+
+.. code:: python
from keystone.common import cache
SHOULD_CACHE = cache.should_cache_fn('token')
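+
+    # Completing the sketch (function name illustrative; ``on_arguments``
+    # is the decorator discussed below):
+    @cache.on_arguments(should_cache_fn=SHOULD_CACHE)
+    def cacheable_function(arg1, arg2, arg3):
+        ...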
@@ -372,7 +410,9 @@ If the ``expiration_time`` argument passed to the decorator is set to ``None``,
time will be set to the global default (``expiration_time`` option in the ``[cache]``
configuration section).
-Example of using a section specific ``cache_time`` (in this example, ``identity`` is the manager)::
+Example of using a section specific ``cache_time`` (in this example, ``identity`` is the manager):
+
+.. code:: python
from keystone.common import cache
SHOULD_CACHE = cache.should_cache_fn('identity')
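+
+    # Completing the sketch with a section-specific expiration time
+    # (CONF is Keystone's global config object; option name assumed):
+    @cache.on_arguments(should_cache_fn=SHOULD_CACHE,
+                        expiration_time=CONF.identity.cache_time)
+    def cacheable_function(arg1, arg2, arg3):
+        ...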
@@ -387,7 +427,9 @@ For cache invalidation, the ``on_arguments`` decorator will add an ``invalidate`
(attribute) to your decorated function. To invalidate the cache, you pass the same arguments
to the ``invalidate`` method as you would the normal function.
-Example (using the above cacheable_function)::
+Example (using the above cacheable_function):
+
+.. code:: python
def invalidate_cache(arg1, arg2, arg3):
cacheable_function.invalidate(arg1, arg2, arg3)
@@ -420,27 +462,78 @@ be configured before use. The KVS object will only be retrievable with the
Once all references have been removed the object is gone (the registry uses a ``weakref`` to
match the object to the name).
-Example Instantiation and Configuration::
+Example Instantiation and Configuration:
+
+.. code:: python
kvs_store = kvs.get_key_value_store('TestKVSRegion')
kvs_store.configure('openstack.kvs.Memory', ...)
Any keyword arguments passed to the configure method that are not defined as part of the
KeyValueStore object configuration are passed to the backend for further configuration (e.g.
-memcache servers, lock_timeout, etc).
+memcached servers, lock_timeout, etc).
The memcached backend uses the Keystone manager mechanism to support the use of any of the
-provided dogpile.cache memcached backends (``BMemcached``, ``pylibmc``, and basic ``Memcached``).
-By default the standard Memcache backend is used. Currently the Memcache URLs come from the
+provided memcached backends (``bmemcached``, ``pylibmc``, and basic ``memcached``).
+By default the ``memcached`` backend is used. Currently the Memcache URLs come from the
``servers`` option in the ``[memcache]`` configuration section of the Keystone config.
-Example configuring the KVS system to use memcached and a specific dogpile.cache memcached backend::
+The following is an example showing how to configure the KVS system to use a
+KeyValueStore object named "TestKVSRegion" and a specific Memcached driver:
+
+.. code:: python
kvs_store = kvs.get_key_value_store('TestKVSRegion')
- kvs_store.configure('openstack.kvs.Memcached', dogpile_cache_backend='MemcachedBackend')
+ kvs_store.configure('openstack.kvs.Memcached', memcached_backend='Memcached')
+
+The memcached backend supports a mechanism to supply an explicit TTL (in seconds) to all keys
+set via the KVS object. This is accomplished by passing the argument ``memcached_expire_time``
+as a keyword argument to the ``configure`` method. Passing the ``memcached_expire_time`` argument
+will cause the ``time`` argument to be added to all ``set`` and ``set_multi`` calls performed by
+the memcached client. ``memcached_expire_time`` is an argument exclusive to the memcached dogpile
+backend, and will be ignored if passed to another backend:
+
+.. code:: python
+
+ kvs_store.configure('openstack.kvs.Memcached', memcached_backend='Memcached',
+ memcached_expire_time=86400)
+
+If an explicit TTL is configured via the ``memcached_expire_time`` argument, it is possible to
+exempt specific keys from receiving the TTL by passing the argument ``no_expiry_keys`` (list)
+as a keyword argument to the ``configure`` method. ``no_expiry_keys`` should be supported by
+all OpenStack-specific dogpile backends (memcached) that have the ability to set an explicit TTL:
+
+.. code:: python
+
+ kvs_store.configure('openstack.kvs.Memcached', memcached_backend='Memcached',
+ memcached_expire_time=86400, no_expiry_keys=['key', 'second_key', ...])
+
+
+.. NOTE::
+ For the non-expiring keys functionality to work, the backend must support the ability for
+ the region to set the key_mangler on it and have the attribute ``raw_no_expiry_keys``.
+ In most cases, support for setting the key_mangler on the backend is handled by allowing
+ the region object to set the ``key_mangler`` attribute on the backend.
+
+ The ``raw_no_expiry_keys`` attribute is expected to be used to hold the values of the
+ keyword argument ``no_expiry_keys`` prior to hashing. It is the responsibility of the
+ backend to use these raw values to determine if a key should be exempt from expiring
+ and not set the TTL on the non-expiring keys when the ``set`` or ``set_multi`` methods are
+ called.
+
+ Typically the key will be hashed by the region using its key_mangler method
+ before being passed to the backend to set the value in the KeyValueStore. This
+ means that in most cases, the backend will need to either pre-compute the hashed versions
+ of the keys (when the key_mangler is set) and store a cached copy, or hash each item in
+ the ``raw_no_expiry_keys`` attribute on each call to ``.set()`` and ``.set_multi()``. The
+ ``memcached`` backend handles this hashing and caching of the keys by utilizing an
+ ``@property`` method for the ``.key_mangler`` attribute on the backend and utilizing the
+ associated ``.setter`` method to front-load the hashing work at attribute set time.
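+
+A skeletal illustration of the pattern described in the note above (class and
+attribute names are hypothetical; the real implementation lives in
+``keystone.common.kvs.backends.memcached``):
+
+.. code:: python
+
+    class IllustrativeBackend(object):
+        def __init__(self, arguments):
+            self._client = arguments.get('client')  # hypothetical client
+            self._ttl = arguments.get('memcached_expire_time', 0)
+            self.raw_no_expiry_keys = set(arguments.get('no_expiry_keys', []))
+            self._key_mangler = None
+            self._no_expiry_hashed_keys = set()
+
+        @property
+        def key_mangler(self):
+            return self._key_mangler
+
+        @key_mangler.setter
+        def key_mangler(self, mangler):
+            # Front-load the hashing work when the region sets the mangler.
+            self._key_mangler = mangler
+            if mangler is not None:
+                self._no_expiry_hashed_keys = set(
+                    mangler(key) for key in self.raw_no_expiry_keys)
+
+        def set(self, key, value):
+            # Only apply the TTL to keys that are not exempt from expiry.
+            if key in self._no_expiry_hashed_keys:
+                self._client.set(key, value)
+            else:
+                self._client.set(key, value, time=self._ttl)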
Once a KVS object has been instantiated the method of interacting is the same as most memcache
-implementations::
+implementations:
+
+.. code:: python
kvs_store = kvs.get_key_value_store('TestKVSRegion')
kvs_store.configure(...)
diff --git a/doc/source/event_notifications.rst b/doc/source/event_notifications.rst
index 5e2110511..44e6b6d41 100644
--- a/doc/source/event_notifications.rst
+++ b/doc/source/event_notifications.rst
@@ -57,6 +57,10 @@ Notifications for create, update and delete events are all similar to each
other, where either ``created``, ``updated`` or ``deleted`` is inserted as the
``<operation>`` in the above notification's ``event_type``.
+The ``priority`` of the notification being sent is not configurable through
+the Keystone configuration file; it defaults to ``INFO`` for all
+notifications sent by Keystone.
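+
+For illustration, a ``created`` notification takes roughly the following form
+(field values abridged and hypothetical)::
+
+    {
+        "event_type": "identity.project.created",
+        "message_id": "0156ee79-b35f-4cef-ac37-d4a85f231c69",
+        "payload": {"resource_info": "671da331c47d4e29bb6ea1d270154ec3"},
+        "priority": "INFO",
+        "publisher_id": "identity.host1234",
+        "timestamp": "2013-08-29 19:03:45.960280"
+    }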
+
If the operation fails, the notification won't be sent, and no special error
notification will be sent. Information about the error is handled through
normal exception paths.
diff --git a/doc/source/extension_development.rst b/doc/source/extension_development.rst
index 06c76028c..dcf9917ba 100644
--- a/doc/source/extension_development.rst
+++ b/doc/source/extension_development.rst
@@ -1,68 +1,80 @@
+..
+ Licensed under the Apache License, Version 2.0 (the "License"); you may
+ not use this file except in compliance with the License. You may obtain
+ a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ License for the specific language governing permissions and limitations
+ under the License.
+
+=====================================
Keystone Extensions Development Guide
=====================================
-
General
--------
+=======
This Extension Development Guide provides some mocked code to use as an
-Extension code base in the `keystone/contrib/example` folder.
-
-
-- All Extensions must be created in the `keystone/contrib` folder.
-- The new Extension code must be contained in a new folder under `contrib`.
-- Whenever possible an Extension should follow the following structure
- convention:
-
-
- keystone
- \\\ contrib
- \\\ my_extension
- \\\ backends (optional)
-
- \\\ migrate_repo (optional)
-
- \\\ __init__.py (mandatory)
-
- \\\ configuration.rst (mandatory)
-
- \\\ core.py (mandatory)
-
- \\\ controllers.py (mandatory for API Extension)
-
- \\\ routers.py (mandatory for API Extension)
-
-
-- If the Extension implements an API Extension the `controllers.py` and
- `routers.py` must be present and correctly handle the API Extension requests
- and responses.
-- If the Extension implements backends a `backends` folder should exist.
+Extension code base in the ``keystone/contrib/example`` folder.
+
+- All Extensions must be created in the ``keystone/contrib`` folder.
+- The new Extension code must be contained in a new folder under ``contrib``.
+- Whenever possible an Extension should follow the following directory
+ structure convention::
+
+    keystone/contrib/
+    └── my_extension
+        ├── backends (optional)
+        │   ├── __init__.py (mandatory)
+        │   ├── sql.py (optional)
+        │   └── kvs.py (optional)
+        ├── migrate_repo (optional)
+        │   ├── __init__.py (mandatory)
+        │   ├── migrate.cfg (mandatory)
+        │   └── versions (mandatory)
+        │       ├── 001_create_tables.py (mandatory)
+        │       └── __init__.py (mandatory)
+        ├── __init__.py (mandatory)
+        ├── core.py (mandatory)
+        ├── controllers.py (mandatory for API Extension)
+        └── routers.py (mandatory for API Extension)
+
+- If the Extension implements an API Extension the ``controllers.py`` and
+ ``routers.py`` must be present and correctly handle the API Extension
+ requests and responses.
+- If the Extension implements backends a ``backends`` folder should exist.
Backends are defined to store data persistently and can use a variety of
technologies. Please see the Backends section in this document for more info.
-- If the Extension adds data structures a `migrate_repo` folder should exist.
-- If configuration changes are required/introduced in the `keystone.conf.sample`
- file, these should be kept disabled as default and have their own element.
-- If configuration changes are required/introduced in the `keystone-paste.ini`,
- the new filter must be declared.
+- If the Extension adds data structures, then a ``migrate_repo`` folder should
+ exist.
+- If configuration changes are required/introduced in the
+ ``keystone.conf.sample`` file, these should be kept disabled as default and
+ have their own section.
+- If configuration changes are required/introduced in the
+ ``keystone-paste.ini``, the new filter must be declared.
- The module may register to listen to events by declaring the corresponding
callbacks in the ``core.py`` file.
+- The new extension should be disabled by default (it should not affect the
+ default application pipelines).
-`keystone.conf.sample` File
----------------------------
+Modifying the ``keystone.conf.sample`` File
+===========================================
-In the case an Extension needs to change the `keystone.conf.sample` file, it
-must follow the config file conventions and introduce a dedicated entry.
+In the case an Extension needs to change the ``keystone.conf.sample`` file, it
+must follow the config file conventions and introduce a dedicated section.
Example::
[example]
driver = keystone.contrib.example.backends.sql.mySQLClass
-
- [myOtherExtension]
+ [my_other_extension]
extension_flag = False
-
The Extension parameters expressed should be commented out since, by default,
extensions are disabled.
@@ -71,23 +83,21 @@ Example::
[example]
#driver = keystone.contrib.example.backends.sql.mySQLClass
-
- [myOtherExtension]
+ [my_other_extension]
#extension_flag = False
-
In case the Extension is overriding or re-implementing an existing portion of
-Keystone the required change should be commented in the `configuration.rst` but
-not placed in the `keystone.conf.sample` file to avoid unnecessary confusion.
+Keystone, the required change should be commented in the ``configuration.rst``
+but not placed in the ``keystone.conf.sample`` file to avoid unnecessary
+confusion.
+Modifying the ``keystone-paste.ini`` File
+=========================================
-`keystone-paste.ini` File
---------------------------
-
-In the case an Extension is augmenting a pipeline introducing a new `filter`
-and/or APIs in the `OS` namespace, a corresponding `filter:` section is
-necessary to be introduced in the `keystone-paste.ini` file.
-The Extension should declare the filter factory constructor in the `ini` file.
+If an Extension augments a pipeline by introducing a new ``filter`` and/or
+APIs in the ``OS`` namespace, a corresponding ``filter:`` section must be
+added to the ``keystone-paste.ini`` file. The Extension should declare the
+filter factory constructor in the ``ini`` file.
Example::
@@ -95,16 +105,15 @@ Example::
paste.filter_factory = keystone.contrib.example.routers:ExampleRouter.factory
-The `filter` must not be placed in the `pipeline` and treated as optional.
+The ``filter`` must not be placed in the ``pipeline`` and treated as optional.
How to add the extension in the pipeline should be specified in detail in the
-`configuration.rst` file.
-
+``configuration.rst`` file.
Package Constructor File
-------------------------
+========================
-The `__init__.py` file represents the package constructor. Extension needs to
-import what is necessary from the `core.py` module.
+The ``__init__.py`` file represents the package constructor. The Extension
+needs to import what is necessary from the ``core.py`` module.
Example:
@@ -112,18 +121,18 @@ Example:
from keystone.contrib.example.core import *
-
Core
-----
-
-The `core.py` file represents the main module defining the data structure and
-interface. In the `Model View Control` (MVC) model it represents the `Model`
-part and it delegates to the `Backends` the data layer implementation.
+====
-In case the `core.py` file contains a `Manager` and a `Driver` it must provide
-the dependency injections for the `Controllers` and/or other modules using the
-`Manager`. A good practice is to call the dependency `extension_name_api`.
+The ``core.py`` file represents the main module defining the data structure and
+interface. In the ``Model View Controller`` (MVC) pattern it represents the
+``Model`` part and it delegates to the ``Backends`` the data layer
+implementation.
+In case the ``core.py`` file contains a ``Manager`` and a ``Driver`` it must
+provide the dependency injections for the ``Controllers`` and/or other modules
+using the ``Manager``. A good practice is to call the dependency
+``extension_name_api``.
Example:
@@ -132,14 +141,12 @@ Example:
@dependency.provider('example_api')
class Manager(manager.Manager):
-
Routers
--------
-
-`routers.py` have the objective of routing the HTTP requests and direct them to
-the right method within the `Controllers`. Extension routers are extending the
-`wsgi.ExtensionRouter`.
+=======
+``routers.py`` has the objective of routing HTTP requests and directing them
+to the correct method within the ``Controllers``. Extension routers extend
+the ``wsgi.ExtensionRouter``.
Example:
@@ -159,18 +166,14 @@ Example:
controller=example_controller,
action='do_something',
conditions=dict(method=['GET']))
- ...
-
-
Controllers
------------
+===========
-`controllers.py` have the objective of handing requests and implement the
+``controllers.py`` has the objective of handling requests and implementing the
Extension logic. Controllers are consumers of 'Managers' API and must have all
-the dependency injections required. `Controllers` are extending the
-`V3Controller` class.
-
+the dependency injections required. ``Controllers`` extend the
+``V3Controller`` class.
Example:
@@ -178,37 +181,22 @@ Example:
@dependency.requires('identity_api', 'example_api')
class ExampleV3Controller(controller.V3Controller):
- ...
-
+ pass
Backends
---------
-
-The `backends` folder provides the model implementations for the different
-backends supported by the Extension.
-The folder structure must be the following:
-
-
- keystone
- \\\ contrib
- \\\ my_extension
- \\\ backends
- \\\ __init__.py (required)
+========
- \\\ sql.py (optional)
+The ``backends`` folder provides the model implementations for the different
+backends supported by the Extension. See General above for an example directory
+structure.
- \\\ kvs.py (optional)
-
-
-If a SQL backend is provided, in the `sql.py` backend implementation it is
+If a SQL backend is provided, in the ``sql.py`` backend implementation it is
mandatory to define the new table(s) that the Extension introduces and the
attributes they are composed of.
-
-For more information on Backends please consult the Keystone Architecture
-documentation:
-(http://docs.openstack.org/developer/keystone/architecture.html)
-
+For more information on backends, refer to the `Keystone Architecture
+<http://docs.openstack.org/developer/keystone/architecture.html>`_
+documentation.
Example:
@@ -224,38 +212,33 @@ Example:
nullable=False)
...
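+
+A fuller illustration of such a table definition (table and attribute names
+hypothetical, using the ``sql`` helpers from ``keystone.common.sql``):
+
+.. code:: python
+
+    from keystone.common import sql
+
+    class ExampleModel(sql.ModelBase, sql.DictBase):
+        __tablename__ = 'example'
+        attributes = ['id', 'type', 'extra']
+        id = sql.Column(sql.String(64), primary_key=True)
+        type = sql.Column(sql.String(255), nullable=False)
+        extra = sql.Column(sql.JsonBlob(), nullable=True)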
+SQL Migration Repository
+========================
-
-Migrate Repository
-------------------
-
-In case the Extension is adding data structures, these must be stored in
-separate tables and must not be included in the `migrate_repo` of the core
-Keystone. Please refere to the 'migrate.cfg' file to configure the Extension
+In case the Extension is adding SQL data structures, these must be stored in
+separate tables and must not be included in the ``migrate_repo`` of the core
+Keystone. Please refer to the ``migrate.cfg`` file to configure the Extension
repository.
-
-In order to create the Extension tables and its attributes, a db_sync command
-must be executed.
-
+In order to create the Extension tables and their attributes, a ``db_sync``
+command must be executed.
Example::
./bin/keystone-manage db_sync --extension example
-
Event Callbacks
------------
+---------------
Extensions may provide callbacks to Keystone (Identity) events.
Extensions must provide the list of events of interest and the corresponding
callbacks. Events are issued upon successful creation, modification, and
deletion of the following Keystone resources:
-* ``group``
-* ``project``
-* ``role``
-* ``user``
+- ``group``
+- ``project``
+- ``role``
+- ``user``
The extension's ``Manager`` class must contain the
``event_callbacks`` attribute. It is a dictionary listing as keys
@@ -299,20 +282,20 @@ Example:
A callback must accept the following parameters:
-* ``service`` - the service information (e.g. identity)
-* ``resource_type`` - the resource type (e.g. project)
-* ``operation`` - the operation (updated, created, deleted)
-* ``payload`` - the actual payload info of the resource that was acted on
+- ``service`` - the service information (e.g. identity)
+- ``resource_type`` - the resource type (e.g. project)
+- ``operation`` - the operation (updated, created, deleted)
+- ``payload`` - the actual payload info of the resource that was acted on
Current callback operations:
-* ``created``
-* ``deleted``
-* ``updated``
+- ``created``
+- ``deleted``
+- ``updated``
Example:
.. code:: python
+
def project_deleted_callback(self, service, resource_type, operation,
payload):
-
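+
+For illustration, registering such a callback in the extension's ``Manager``
+might look like (dictionary structure assumed from the description above):
+
+.. code:: python
+
+    class Manager(manager.Manager):
+        def __init__(self):
+            self.event_callbacks = {
+                # operation -> resource type -> list of callbacks
+                'deleted': {'project': [self.project_deleted_callback]},
+            }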
diff --git a/doc/source/external-auth.rst b/doc/source/external-auth.rst
index 7d18672d6..7012884e8 100644
--- a/doc/source/external-auth.rst
+++ b/doc/source/external-auth.rst
@@ -95,7 +95,9 @@ user must exist in advance in the identity backend so that a proper token can
be issued.
Your code should set the ``REMOTE_USER`` if the user is properly authenticated,
-following the semantics below::
+following the semantics below:
+
+.. code:: python
from keystone.common import wsgi
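+
+# What follows is a hedged sketch, not the full upstream example: the
+# authentication helper is hypothetical, but the REMOTE_USER semantics
+# match the description above.
+class ExternalAuthMiddleware(wsgi.Middleware):
+    def process_request(self, request):
+        # Authenticate by whatever external means apply (client
+        # certificate, SSO header, ...); on success, expose the user
+        # to keystone via the WSGI environment.
+        username = self._authenticate(request)
+        if username is not None:
+            request.environ['REMOTE_USER'] = username
+
+    def _authenticate(self, request):
+        # Placeholder: a real deployment would verify credentials here.
+        return request.headers.get('X-Forwarded-User')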
diff --git a/doc/source/man/keystone-all.rst b/doc/source/man/keystone-all.rst
index ea983d27d..a5eef44f0 100644
--- a/doc/source/man/keystone-all.rst
+++ b/doc/source/man/keystone-all.rst
@@ -7,9 +7,9 @@ Keystone Startup Command
------------------------
:Author: openstack@lists.openstack.org
-:Date: 2013-10-17
+:Date: 2014-02-14
:Copyright: OpenStack Foundation
-:Version: 2013.2
+:Version: 2014.1
:Manual section: 1
:Manual group: cloud computing
@@ -19,7 +19,7 @@ SYNOPSIS
::
keystone-all [-h] [--config-dir DIR] [--config-file PATH] [--debug]
- [--log-config PATH] [--log-date-format DATE_FORMAT]
+ [--log-config-append PATH] [--log-date-format DATE_FORMAT]
[--log-dir LOG_DIR] [--log-file PATH]
[--log-format FORMAT] [--nodebug] [--nostandard-threads]
[--nouse-syslog] [--noverbose]
@@ -50,17 +50,19 @@ OPTIONS
precedence. The default files used are: None
--debug, -d Print debugging output (set logging level to DEBUG
instead of default WARNING level).
- --log-config PATH If this option is specified, the logging configuration
- file specified is used and overrides any other logging
- options specified. Please see the Python logging
- module documentation for details on logging
- configuration files.
+ --log-config-append PATH, --log_config PATH
+ The name of logging configuration file. It does not
+ disable existing loggers, but just appends specified
+ logging configuration to any other existing logging
+ options. Please see the Python logging module
+ documentation for details on logging configuration
+ files.
--log-date-format DATE_FORMAT
Format string for %(asctime)s in log records. Default:
None
--log-dir LOG_DIR, --logdir LOG_DIR
- (Optional) The base directory used for relative
- --log-file paths
+ (Optional) The base directory used for relative --log-
+ file paths
--log-file PATH, --logfile PATH
(Optional) Name of log file to output to. If no
default is set, logging will go to stdout.
diff --git a/doc/source/man/keystone-manage.rst b/doc/source/man/keystone-manage.rst
index 84515e9cb..a47eba3d1 100644
--- a/doc/source/man/keystone-manage.rst
+++ b/doc/source/man/keystone-manage.rst
@@ -7,9 +7,9 @@ Keystone Management Utility
---------------------------
:Author: openstack@lists.openstack.org
-:Date: 2013-10-17
+:Date: 2014-02-14
:Copyright: OpenStack Foundation
-:Version: 2013.2
+:Version: 2014.1
:Manual section: 1
:Manual group: cloud computing
@@ -21,25 +21,22 @@ SYNOPSIS
DESCRIPTION
===========
-keystone-manage is the command line tool that interacts with the Keystone
-service to initialize and update data within Keystone. Generally,
-keystone-manage is only used for operations that cannot be accomplished
-with the Keystone REST api, such data import/export and schema
-migrations.
-
+``keystone-manage`` is the command line tool that interacts with the Keystone
+service to initialize and update data within Keystone. Generally,
+``keystone-manage`` is only used for operations that cannot be accomplished
+with the HTTP API, such as data import/export and database migrations.
USAGE
=====
``keystone-manage [options] action [additional args]``
-
General keystone-manage options:
--------------------------------
* ``--help`` : display verbose help output.
-Invoking keystone-manage by itself will give you some usage information.
+Invoking ``keystone-manage`` by itself will give you some usage information.
Available commands:
@@ -49,7 +46,6 @@ Available commands:
* ``ssl_setup``: Generate certificates for SSL.
* ``token_flush``: Purge expired tokens.
-
OPTIONS
=======
@@ -65,17 +61,19 @@ OPTIONS
precedence. The default files used are: None
--debug, -d Print debugging output (set logging level to DEBUG
instead of default WARNING level).
- --log-config PATH If this option is specified, the logging configuration
- file specified is used and overrides any other logging
- options specified. Please see the Python logging
- module documentation for details on logging
- configuration files.
+ --log-config-append PATH, --log_config PATH
+ The name of logging configuration file. It does not
+ disable existing loggers, but just appends specified
+ logging configuration to any other existing logging
+ options. Please see the Python logging module
+ documentation for details on logging configuration
+ files.
--log-date-format DATE_FORMAT
Format string for %(asctime)s in log records. Default:
None
--log-dir LOG_DIR, --logdir LOG_DIR
- (Optional) The base directory used for relative
- --log-file paths
+ (Optional) The base directory used for relative --log-
+ file paths
--log-file PATH, --logfile PATH
(Optional) Name of log file to output to. If no
default is set, logging will go to stdout.
diff --git a/etc/default_catalog.templates b/etc/default_catalog.templates
index eb1e04470..e7a3d407f 100644
--- a/etc/default_catalog.templates
+++ b/etc/default_catalog.templates
@@ -1,4 +1,4 @@
-# config for TemplatedCatalog, using camelCase because I don't want to do
+# config for templated.Catalog, using camelCase because I don't want to do
# translations for keystone compat
catalog.RegionOne.identity.publicURL = http://localhost:$(public_port)s/v2.0
catalog.RegionOne.identity.adminURL = http://localhost:$(admin_port)s/v2.0
diff --git a/etc/keystone.conf.sample b/etc/keystone.conf.sample
index 36e25e763..6d9b8c731 100644
--- a/etc/keystone.conf.sample
+++ b/etc/keystone.conf.sample
@@ -1,494 +1,1220 @@
[DEFAULT]
-# A "shared secret" that can be used to bootstrap Keystone. This "token" does
-# not represent a user, and carries no explicit authorization. To disable in
-# production (highly recommended), remove AdminTokenAuthMiddleware from your
-# paste application pipelines (for example, in keystone-paste.ini).
-# admin_token = ADMIN
-# The IP address of the network interface to listen on
-# public_bind_host = 0.0.0.0
-# admin_bind_host = 0.0.0.0
+#
+# Options defined in keystone
+#
+
+# A "shared secret" that can be used to bootstrap Keystone.
+# This "token" does not represent a user, and carries no
+# explicit authorization. To disable in production (highly
+# recommended), remove AdminTokenAuthMiddleware from your
+# paste application pipelines (for example, in keystone-
+# paste.ini). (string value)
+#admin_token=ADMIN
+
+# The IP address of the network interface for the public
+# service to listen on. (string value)
+# Deprecated group/name - [DEFAULT]/bind_host
+#public_bind_host=0.0.0.0
+
+# The IP address of the network interface for the admin
+# service to listen on. (string value)
+# Deprecated group/name - [DEFAULT]/bind_host
+#admin_bind_host=0.0.0.0
+
+# The port which the OpenStack Compute service listens on.
+# (integer value)
+#compute_port=8774
+
+# The port number which the admin service listens on. (integer
+# value)
+#admin_port=35357
+
+# The port number which the public service listens on.
+# (integer value)
+#public_port=5000
+
+# The base public endpoint URL for keystone that is
+# advertised to clients (NOTE: this does NOT affect how
+# keystone listens for connections) (string value)
+#public_endpoint=http://localhost:%(public_port)s/
+
+# The base admin endpoint URL for keystone that is advertised
+# to clients (NOTE: this does NOT affect how keystone listens
+# for connections) (string value)
+#admin_endpoint=http://localhost:%(admin_port)s/
+
+# onready allows you to send a notification when the process
+# is ready to serve. For example, to have it notify using
+# systemd, one could set the shell command: "onready = systemd-
+# notify --ready" or a module with a notify() method: "onready =
+# keystone.common.systemd" (string value)
+#onready=<None>
+
+# Maximum request body size, enforced by the optional sizelimit
+# middleware (keystone.middleware:RequestBodySizeLimiter). (integer
+# value)
+#max_request_body_size=114688
+
+# limit the sizes of user & tenant ID/names (integer value)
+#max_param_size=64
+
+# similar to max_param_size, but provides an exception for
+# token values (integer value)
+#max_token_size=8192
+
+# During a SQL upgrade member_role_id will be used to create a
+# new role that will replace records in the
+# user_tenant_membership table with explicit role grants.
+# After migration, the member_role_id will be used in the API
+# add_user_to_project. (string value)
+#member_role_id=9fe2ff9ee4384b1894a90878d3e92bab
+
+# During a SQL upgrade member_role_id will be used to create a
+# new role that will replace records in the
+# user_tenant_membership table with explicit role grants.
+# After migration, member_role_name will be ignored. (string
+# value)
+#member_role_name=_member_
+
+# The value passed as the keyword "rounds" to passlib encrypt
+# method. (integer value)
+#crypt_strength=40000
+
+# Set this to True if you want to enable TCP_KEEPALIVE on
+# server sockets i.e. sockets used by the keystone wsgi server
+# for client connections (boolean value)
+#tcp_keepalive=false
+
+# Sets the value of TCP_KEEPIDLE in seconds for each server
+# socket. Only applies if tcp_keepalive is True. Not supported
+# on OS X. (integer value)
+#tcp_keepidle=600
+
+# The maximum number of entities that will be returned in a
+# collection can be set with list_limit, with no limit set by
+# default. This global limit may then be overridden for a
+# specific driver by specifying a list_limit in the
+# appropriate section (e.g. [assignment]). (integer value)
+#list_limit=<None>
+
+
+#
+# Options defined in oslo.messaging
+#
+
+# Use durable queues in amqp. (boolean value)
+# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
+#amqp_durable_queues=false
+
+# Auto-delete queues in amqp. (boolean value)
+#amqp_auto_delete=false
+
+# Size of RPC connection pool (integer value)
+#rpc_conn_pool_size=30
+
+# Modules of exceptions that are permitted to be recreated upon
+# receiving exception data from an rpc call. (list value)
+#allowed_rpc_exception_modules=oslo.messaging.exceptions,nova.exception,cinder.exception,exceptions
+
+# Qpid broker hostname (string value)
+#qpid_hostname=localhost
+
+# Qpid broker port (integer value)
+#qpid_port=5672
+
+# Qpid HA cluster host:port pairs (list value)
+#qpid_hosts=$qpid_hostname:$qpid_port
+
+# Username for Qpid connection (string value)
+#qpid_username=
+
+# Password for Qpid connection (string value)
+#qpid_password=
+
+# Space separated list of SASL mechanisms to use for auth
+# (string value)
+#qpid_sasl_mechanisms=
+
+# Seconds between connection keepalive heartbeats (integer
+# value)
+#qpid_heartbeat=60
+
+# Transport to use, either 'tcp' or 'ssl' (string value)
+#qpid_protocol=tcp
+
+# Disable Nagle algorithm (boolean value)
+#qpid_tcp_nodelay=true
+
+# The qpid topology version to use. Version 1 is what was
+# originally used by impl_qpid. Version 2 includes some
+# backwards-incompatible changes that allow broker federation
+# to work. Users should update to version 2 when they are
+# able to take everything down, as it requires a clean break.
+# (integer value)
+#qpid_topology_version=1
+
+# SSL version to use (valid only if SSL enabled). Valid values
+# are TLSv1, SSLv23 and SSLv3. SSLv2 may be available on some
+# distributions (string value)
+#kombu_ssl_version=
-# The port number which the public service listens on
-# public_port = 5000
+# SSL key file (valid only if SSL enabled) (string value)
+#kombu_ssl_keyfile=
-# The port number which the public admin listens on
-# admin_port = 35357
+# SSL cert file (valid only if SSL enabled) (string value)
+#kombu_ssl_certfile=
-# The base endpoint URLs for keystone that are advertised to clients
-# (NOTE: this does NOT affect how keystone listens for connections)
-# public_endpoint = http://localhost:%(public_port)s/
-# admin_endpoint = http://localhost:%(admin_port)s/
+# SSL certification authority file (valid only if SSL enabled)
+# (string value)
+#kombu_ssl_ca_certs=
-# The port number which the OpenStack Compute service listens on
-# compute_port = 8774
+# The RabbitMQ broker address where a single node is used
+# (string value)
+#rabbit_host=localhost
-# Path to your policy definition containing identity actions
-# policy_file = policy.json
+# The RabbitMQ broker port where a single node is used
+# (integer value)
+#rabbit_port=5672
-# Rule to check if no matching policy definition is found
-# FIXME(dolph): This should really be defined as [policy] default_rule
-# policy_default_rule = admin_required
+# RabbitMQ HA cluster host:port pairs (list value)
+#rabbit_hosts=$rabbit_host:$rabbit_port
-# Role for migrating membership relationships
-# During a SQL upgrade, the following values will be used to create a new role
-# that will replace records in the user_tenant_membership table with explicit
-# role grants. After migration, the member_role_id will be used in the API
-# add_user_to_project, and member_role_name will be ignored.
-# member_role_id = 9fe2ff9ee4384b1894a90878d3e92bab
-# member_role_name = _member_
+# Connect over SSL for RabbitMQ (boolean value)
+#rabbit_use_ssl=false
-# enforced by optional sizelimit middleware (keystone.middleware:RequestBodySizeLimiter)
-# max_request_body_size = 114688
+# The RabbitMQ userid (string value)
+#rabbit_userid=guest
-# limit the sizes of user & tenant ID/names
-# max_param_size = 64
+# The RabbitMQ password (string value)
+#rabbit_password=guest
-# similar to max_param_size, but provides an exception for token values
-# max_token_size = 8192
+# The RabbitMQ virtual host (string value)
+#rabbit_virtual_host=/
-# the filename to use with sqlite
-# sqlite_db = keystone.db
+# How frequently to retry connecting with RabbitMQ (integer
+# value)
+#rabbit_retry_interval=1
-# If true, use synchronous mode for sqlite
-# sqlite_synchronous = True
+# How long to backoff for between retries when connecting to
+# RabbitMQ (integer value)
+#rabbit_retry_backoff=2
-# === Logging Options ===
-# Print debugging output
-# (includes plaintext request logging, potentially including passwords)
-# debug = False
+# Maximum number of RabbitMQ connection retries. Default is 0
+# (infinite retry count) (integer value)
+#rabbit_max_retries=0
-# Print more verbose output
-# verbose = False
+# Use HA queues in RabbitMQ (x-ha-policy: all). If you change
+# this option, you must wipe the RabbitMQ database. (boolean
+# value)
+#rabbit_ha_queues=false
-# Name of log file to output to. If not set, logging will go to stdout.
-# log_file = keystone.log
+# If passed, use a fake RabbitMQ provider (boolean value)
+#fake_rabbit=false
-# The directory to keep log files in (will be prepended to --logfile)
-# log_dir = /var/log/keystone
+# ZeroMQ bind address. Should be a wildcard (*), an ethernet
+# interface, or IP. The "host" option should point or resolve
+# to this address. (string value)
+#rpc_zmq_bind_address=*
-# Use syslog for logging.
-# use_syslog = False
+# MatchMaker driver (string value)
+#rpc_zmq_matchmaker=oslo.messaging._drivers.matchmaker.MatchMakerLocalhost
+
+# ZeroMQ receiver listening port (integer value)
+#rpc_zmq_port=9501
+
+# Number of ZeroMQ contexts, defaults to 1 (integer value)
+#rpc_zmq_contexts=1
+
+# Maximum number of ingress messages to locally buffer per
+# topic. Default is unlimited. (integer value)
+#rpc_zmq_topic_backlog=<None>
+
+# Directory for holding IPC sockets (string value)
+#rpc_zmq_ipc_dir=/var/run/openstack
+
+# Name of this node. Must be a valid hostname, FQDN, or IP
+# address. Must match "host" option, if running Nova. (string
+# value)
+#rpc_zmq_host=keystone
+
+# Seconds to wait before a cast expires (TTL). Only supported
+# by impl_zmq. (integer value)
+#rpc_cast_timeout=30
+
+# Heartbeat frequency (integer value)
+#matchmaker_heartbeat_freq=300
+
+# Heartbeat time-to-live. (integer value)
+#matchmaker_heartbeat_ttl=600
+
+# Host to locate redis (string value)
+#host=127.0.0.1
+
+# Use this port to connect to redis host. (integer value)
+#port=6379
+
+# Password for Redis server. (optional) (string value)
+#password=<None>
+
+# Size of RPC greenthread pool (integer value)
+#rpc_thread_pool_size=64
+
+# Driver or drivers to handle sending notifications (multi
+# valued)
+#notification_driver=
+
+# AMQP topic used for OpenStack notifications (list value)
+# Deprecated group/name - [rpc_notifier2]/topics
+#notification_topics=notifications
+
+# Seconds to wait for a response from a call (integer value)
+#rpc_response_timeout=60
+
+# A URL representing the messaging driver to use and its full
+# configuration. If not set, we fall back to the rpc_backend
+# option and driver specific configuration. (string value)
+#transport_url=<None>
+
+# The messaging driver to use, defaults to rabbit. Other
+# drivers include qpid and zmq. (string value)
+#rpc_backend=rabbit
+
+# The default exchange under which topics are scoped. May be
+# overridden by an exchange name specified in the
+# transport_url option. (string value)
+#control_exchange=openstack
+
+
+#
+# Options defined in keystone.notifications
+#
-# syslog facility to receive log lines
-# syslog_log_facility = LOG_USER
+# Default publisher_id for outgoing notifications (string
+# value)
+#default_publisher_id=<None>
+
+
+#
+# Options defined in keystone.middleware.ec2_token
+#
+
+# URL to get token from ec2 request. (string value)
+#keystone_ec2_url=http://localhost:5000/v2.0/ec2tokens
+
+
+#
+# Options defined in keystone.openstack.common.db.sqlalchemy.session
+#
+
+# The file name to use with SQLite (string value)
+#sqlite_db=keystone.sqlite
+
+# If True, SQLite uses synchronous mode (boolean value)
+#sqlite_synchronous=true
+
+
+#
+# Options defined in keystone.openstack.common.eventlet_backdoor
+#
+
+# Enable eventlet backdoor. Acceptable values are 0, <port>,
+# and <start>:<end>, where 0 results in listening on a random
+# tcp port number; <port> results in listening on the
+# specified port number (and not enabling backdoor if that
+# port is in use); and <start>:<end> results in listening on
+# the smallest unused port number within the specified range
+# of port numbers. The chosen port is displayed in the
+# service's log file. (string value)
+#backdoor_port=<None>
+
+
+#
+# Options defined in keystone.openstack.common.lockutils
+#
+
+# Whether to disable inter-process locks (boolean value)
+#disable_process_locking=false
+
+# Directory to use for lock files. (string value)
+#lock_path=<None>
+
+
+#
+# Options defined in keystone.openstack.common.log
+#
+
+# Print debugging output (set logging level to DEBUG instead
+# of default WARNING level). (boolean value)
+#debug=false
+
+# Print more verbose output (set logging level to INFO instead
+# of default WARNING level). (boolean value)
+#verbose=false
+
+# Log output to standard error (boolean value)
+#use_stderr=true
+
+# format string to use for log messages with context (string
+# value)
+#logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+# format string to use for log messages without context
+# (string value)
+#logging_default_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+# data to append to log format when level is DEBUG (string
+# value)
+#logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d
+
+# prefix each line of exception output with this format
+# (string value)
+#logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s %(instance)s
# list of logger=LEVEL pairs (list value)
-#default_log_levels=amqp=WARN,amqplib=WARN,boto=WARN,keystone=INFO,qpid=WARN,sqlalchemy=WARN,suds=INFO
+#default_log_levels=amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,iso8601=WARN
+
+# publish error events (boolean value)
+#publish_errors=false
+
+# make deprecations fatal (boolean value)
+#fatal_deprecations=false
+
+# If an instance is passed with the log message, format it
+# like this (string value)
+#instance_format="[instance: %(uuid)s] "
+
+# If an instance UUID is passed with the log message, format
+# it like this (string value)
+#instance_uuid_format="[instance: %(uuid)s] "
# The name of logging configuration file. It does not disable
# existing loggers, but just appends specified logging
# configuration to any other existing logging options. Please
# see the Python logging module documentation for details on
# logging configuration files. (string value)
+# Deprecated group/name - [DEFAULT]/log_config
#log_config_append=<None>
-# A logging.Formatter log message format string which may use any of the
-# available logging.LogRecord attributes.
-# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
+# DEPRECATED. A logging.Formatter log message format string
+# which may use any of the available logging.LogRecord
+# attributes. This option is deprecated. Please use
+# logging_context_format_string and
+# logging_default_format_string instead. (string value)
+#log_format=<None>
+
+# Format string for %%(asctime)s in log records. Default:
+# %(default)s (string value)
+#log_date_format=%Y-%m-%d %H:%M:%S
+
+# (Optional) Name of log file to output to. If no default is
+# set, logging will go to stdout. (string value)
+# Deprecated group/name - [DEFAULT]/logfile
+#log_file=<None>
+
+# (Optional) The base directory used for relative --log-file
+# paths (string value)
+# Deprecated group/name - [DEFAULT]/logdir
+#log_dir=<None>
+
+# Use syslog for logging. (boolean value)
+#use_syslog=false
+
+# syslog facility to receive log lines (string value)
+#syslog_log_facility=LOG_USER
+
-# Format string for %(asctime)s in log records.
-# log_date_format = %Y-%m-%d %H:%M:%S
+#
+# Options defined in keystone.openstack.common.policy
+#
+
+# JSON file containing policy (string value)
+#policy_file=policy.json
+
+# Rule enforced when requested rule is not found (string
+# value)
+#policy_default_rule=default
+
+
+[assignment]
+
+#
+# Options defined in keystone
+#
+
+# Keystone Assignment backend driver (string value)
+#driver=<None>
-# onready allows you to send a notification when the process is ready to serve
-# For example, to have it notify using systemd, one could set shell command:
-# onready = systemd-notify --ready
-# or a module with notify() method:
-# onready = keystone.common.systemd
+# Toggle for assignment caching. This has no effect unless
+# global caching is enabled. (boolean value)
+#caching=true
-# === Notification Options ===
+# TTL (in seconds) to cache assignment data. This has no
+# effect unless global caching is enabled. (integer value)
+#cache_time=<None>
-# Notifications can be sent when users or projects are created, updated or
-# deleted. There are three methods of sending notifications: logging (via the
-# log_file directive), rpc (via a message queue) and no_op (no notifications
-# sent, the default)
+# Maximum number of entities that will be returned in an
+# assignment collection (integer value)
+#list_limit=<None>
-# notification_driver can be defined multiple times
-# Do nothing driver (the default)
-# notification_driver = keystone.openstack.common.notifier.no_op_notifier
-# Logging driver example (not enabled by default)
-# notification_driver = keystone.openstack.common.notifier.log_notifier
-# RPC driver example (not enabled by default)
-# notification_driver = keystone.openstack.common.notifier.rpc_notifier
-# Default notification level for outgoing notifications
-# default_notification_level = INFO
+[auth]
+
+#
+# Options defined in keystone
+#
+
+# Default auth methods. (list value)
+#methods=external,password,token
+
+# The password auth plugin module (string value)
+#password=keystone.auth.plugins.password.Password
-# Default publisher_id for outgoing notifications; included in the payload.
-# default_publisher_id =
+# The token auth plugin module (string value)
+#token=keystone.auth.plugins.token.Token
-# AMQP topics to publish to when using the RPC notification driver.
-# Multiple values can be specified by separating with commas.
-# The actual topic names will be %s.%(default_notification_level)s
-# notification_topics = notifications
+# The external (REMOTE_USER) auth plugin module. (string
+# value)
+#external=keystone.auth.plugins.external.DefaultDomain
+
+
+[cache]
+
+#
+# Options defined in keystone
+#
-# === RPC Options ===
+# Prefix for building the configuration dictionary for the
+# cache region. This should not need to be changed unless
+# there is another dogpile.cache region with the same
+# configuration name (string value)
+#config_prefix=cache.keystone
+
+# Default TTL, in seconds, for any cached item in the
+# dogpile.cache region. This applies to any cached method that
+# doesn't have an explicit cache expiration time defined for
+# it. (integer value)
+#expiration_time=600
+
+# Dogpile.cache backend module. It is recommended that
+# Memcache (dogpile.cache.memcache) or Redis
+# (dogpile.cache.redis) be used in production deployments.
+# Small workloads (single process) like devstack can use the
+# dogpile.cache.memory backend. (string value)
+#backend=keystone.common.cache.noop
+
+# Use a key-mangling function (sha1) to ensure fixed length
+# cache-keys. This is toggle-able for debugging purposes; it
+# is highly recommended to always leave this set to True.
+# (boolean value)
+#use_key_mangler=true
+
+# Arguments supplied to the backend module. Specify this
+# option once per argument to be passed to the dogpile.cache
+# backend. Example format: <argname>:<value> (multi valued)
+#backend_argument=
+
+# Proxy Classes to import that will affect the way the
+# dogpile.cache backend functions. See the dogpile.cache
+# documentation on changing-backend-behavior. Comma delimited
+# list e.g. my.dogpile.proxy.Class, my.dogpile.proxyClass2
+# (list value)
+#proxies=
+
+# Global toggle for all caching using the should_cache_fn
+# mechanism (boolean value)
+#enabled=false
+
+# Extra debugging from the cache backend (cache keys,
+# get/set/delete/etc calls). This is only really useful if you
+# need to see the specific cache-backend get/set/delete calls
+# with the keys/values. Typically this should be left set to
+# False. (boolean value)
+#debug_cache_backend=false
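+
+# Example (hedged; values illustrative, not defaults): enabling global
+# caching backed by memcached, per the backend note above, might look
+# like:
+#   enabled=true
+#   backend=dogpile.cache.memcache
+#   backend_argument=url:127.0.0.1:11211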
-# For Keystone, these options apply only when the RPC notification driver is
-# used.
-# The messaging module to use, defaults to kombu.
-# rpc_backend = keystone.openstack.common.rpc.impl_kombu
+[catalog]
-# Size of RPC thread pool
-# rpc_thread_pool_size = 64
+#
+# Options defined in keystone
+#
-# Size of RPC connection pool
-# rpc_conn_pool_size = 30
+# Catalog template file name for use with the template catalog
+# backend. (string value)
+#template_file=default_catalog.templates
-# Seconds to wait for a response from call or multicall
-# rpc_response_timeout = 60
+# Keystone catalog backend driver (string value)
+#driver=keystone.catalog.backends.sql.Catalog
-# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
-# rpc_cast_timeout = 30
+# Maximum number of entities that will be returned in a
+# catalog collection (integer value)
+#list_limit=<None>
-# Modules of exceptions that are permitted to be recreated upon receiving
-# exception data from an rpc call.
-# allowed_rpc_exception_modules = keystone.openstack.common.exception,nova.exception,cinder.exception,exceptions
-# If True, use a fake RabbitMQ provider
-# fake_rabbit = False
+[credential]
-# AMQP exchange to connect to if using RabbitMQ or Qpid
-# control_exchange = openstack
+#
+# Options defined in keystone
+#
-[sql]
-# The SQLAlchemy connection string used to connect to the database
-# DEPRECATED: use connection in the [database] section instead.
-# connection = sqlite:///keystone.db
+# Keystone Credential backend driver (string value)
+#driver=keystone.credential.backends.sql.Credential
-# the timeout before idle sql connections are reaped
-# DEPRECATED: use idle_timeout in the [database] section instead.
-# idle_timeout = 200
[database]
-# The SQLAlchemy connection string used to connect to the database
-# connection = sqlite:///keystone.db
-# The SQLAlchemy connection string used to connect to the slave database
-# Note that Keystone does not use this option.
-# slave_connection =
+#
+# Options defined in keystone.openstack.common.db.api
+#
-# timeout before idle sql connections are reaped
-# idle_timeout = 3600
+# The backend to use for db (string value)
+# Deprecated group/name - [DEFAULT]/db_backend
+#backend=sqlalchemy
-# Minimum number of SQL connections to keep open in a pool
-# min_pool_size = 1
-# Maximum number of SQL connections to keep open in a pool
-# max_pool_size =
+#
+# Options defined in keystone.openstack.common.db.sqlalchemy.session
+#
+
+# The SQLAlchemy connection string used to connect to the
+# database (string value)
+# Deprecated group/name - [DEFAULT]/sql_connection
+# Deprecated group/name - [DATABASE]/sql_connection
+# Deprecated group/name - [sql]/connection
+#connection=sqlite:////keystone/openstack/common/db/$sqlite_db
-# maximum db connection retries during startup. (setting -1 implies an infinite retry count)
-# max_retries = 10
+# The SQLAlchemy connection string used to connect to the
+# slave database (string value)
+#slave_connection=
-# interval between retries of opening a sql connection
-# retry_interval = 10
+# Timeout before idle sql connections are reaped (integer
+# value)
+# Deprecated group/name - [DEFAULT]/sql_idle_timeout
+# Deprecated group/name - [DATABASE]/sql_idle_timeout
+# Deprecated group/name - [sql]/idle_timeout
+#idle_timeout=3600
+
+# Minimum number of SQL connections to keep open in a pool
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_min_pool_size
+# Deprecated group/name - [DATABASE]/sql_min_pool_size
+#min_pool_size=1
+
+# Maximum number of SQL connections to keep open in a pool
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_pool_size
+# Deprecated group/name - [DATABASE]/sql_max_pool_size
+#max_pool_size=<None>
+
+# Maximum db connection retries during startup. (setting -1
+# implies an infinite retry count) (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_retries
+# Deprecated group/name - [DATABASE]/sql_max_retries
+#max_retries=10
+
+# Interval between retries of opening a sql connection
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_retry_interval
+# Deprecated group/name - [DATABASE]/reconnect_interval
+#retry_interval=10
# If set, use this value for max_overflow with sqlalchemy
-# max_overflow =
+# (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_overflow
+# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow
+#max_overflow=<None>
-# Verbosity of SQL debugging information. 0=None, 100=Everything
-# connection_debug = 0
+# Verbosity of SQL debugging information. 0=None,
+# 100=Everything (integer value)
+# Deprecated group/name - [DEFAULT]/sql_connection_debug
+#connection_debug=0
-# Add python stack traces to SQL as comment strings
-# connection_trace = False
+# Add python stack traces to SQL as comment strings (boolean
+# value)
+# Deprecated group/name - [DEFAULT]/sql_connection_trace
+#connection_trace=false
# If set, use this value for pool_timeout with sqlalchemy
-# pool_timeout =
+# (integer value)
+# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
+#pool_timeout=<None>
+
+
+[ec2]
+
+#
+# Options defined in keystone
+#
+
+# Keystone EC2Credential backend driver (string value)
+#driver=keystone.contrib.ec2.backends.kvs.Ec2
+
+
+[endpoint_filter]
+
+#
+# Options defined in keystone
+#
+
+# Keystone Endpoint Filter backend driver (string value)
+#driver=keystone.contrib.endpoint_filter.backends.sql.EndpointFilter
+
+# Toggle to return all active endpoints if no filter exists.
+# (boolean value)
+#return_all_endpoints_if_no_filter=true
+
+
+[federation]
+
+#
+# Options defined in keystone
+#
+
+# Keystone Federation backend driver (string value)
+#driver=keystone.contrib.federation.backends.sql.Federation
+
[identity]
-# driver = keystone.identity.backends.sql.Identity
-
-# This references the domain to use for all Identity API v2 requests (which are
-# not aware of domains). A domain with this ID will be created for you by
-# keystone-manage db_sync in migration 008. The domain referenced by this ID
-# cannot be deleted on the v3 API, to prevent accidentally breaking the v2 API.
-# There is nothing special about this domain, other than the fact that it must
-# exist to order to maintain support for your v2 clients.
-# default_domain_id = default
-#
-# A subset (or all) of domains can have their own identity driver, each with
-# their own partial configuration file in a domain configuration directory.
-# Only values specific to the domain need to be placed in the domain specific
+
+#
+# Options defined in keystone
+#
+
+# This references the domain to use for all Identity API v2
+# requests (which are not aware of domains). A domain with
+# this ID will be created for you by keystone-manage db_sync
+# in migration 008. The domain referenced by this ID cannot
+# be deleted on the v3 API, to prevent accidentally breaking
+# the v2 API. There is nothing special about this domain,
+# other than the fact that it must exist in order to maintain
+# support for your v2 clients. (string value)
+#default_domain_id=default
+
+# A subset (or all) of domains can have their own identity
+# driver, each with their own partial configuration file in a
+# domain configuration directory. Only values specific to the
+# domain need to be placed in the domain specific
# configuration file. This feature is disabled by default; set
-# domain_specific_drivers_enabled to True to enable.
-# domain_specific_drivers_enabled = False
-# domain_config_dir = /etc/keystone/domains
+# to True to enable. (boolean value)
+#domain_specific_drivers_enabled=false
-# Maximum supported length for user passwords; decrease to improve performance.
-# max_password_length = 4096
+# Path for Keystone to locate the domain specific identity
+# configuration files if domain_specific_drivers_enabled is
+# set to true. (string value)
+#domain_config_dir=/etc/keystone/domains
-[credential]
-# driver = keystone.credential.backends.sql.Credential
+# Keystone Identity backend driver (string value)
+#driver=keystone.identity.backends.sql.Identity
-[trust]
-# driver = keystone.trust.backends.sql.Trust
+# Maximum supported length for user passwords; decrease to
+# improve performance. (integer value)
+#max_password_length=4096
-# delegation and impersonation features can be optionally disabled
-# enabled = True
+# Maximum number of entities that will be returned in an
+# identity collection (integer value)
+#list_limit=<None>
-[os_inherit]
-# role-assignment inheritance to projects from owning domain can be
-# optionally enabled
-# enabled = False
-[catalog]
-# dynamic, sql-based backend (supports API/CLI-based management commands)
-# driver = keystone.catalog.backends.sql.Catalog
+[kvs]
-# static, file-based backend (does *NOT* support any management commands)
-# driver = keystone.catalog.backends.templated.TemplatedCatalog
+#
+# Options defined in keystone
+#
-# template_file = default_catalog.templates
+# Extra dogpile.cache backend modules to register with the
+# dogpile.cache library (list value)
+#backends=
-[endpoint_filter]
-# extension for creating associations between project and endpoints in order to
-# provide a tailored catalog for project-scoped token requests.
-# driver = keystone.contrib.endpoint_filter.backends.sql.EndpointFilter
-# return_all_endpoints_if_no_filter = True
+# Prefix for building the configuration dictionary for the KVS
+# region. This should not need to be changed unless there is
+# another dogpile.cache region with the same configuration
+# name (string value)
+#config_prefix=keystone.kvs
-[token]
-# Provides token persistence.
-# driver = keystone.token.backends.sql.Token
+# Toggle to disable using a key-mangling function to ensure
+# fixed length keys. This is toggle-able for debugging
+# purposes; it is highly recommended to always leave this set
+# to True. (boolean value)
+#enable_key_mangler=true
-# Controls the token construction, validation, and revocation operations.
-# Core providers are keystone.token.providers.[pki|uuid].Provider
-# provider =
+# Default lock timeout for distributed locking. (integer
+# value)
+#default_lock_timeout=5
-# Amount of time a token should remain valid (in seconds)
-# expiration = 3600
-# External auth mechanisms that should add bind information to token.
-# eg kerberos, x509
-# bind =
+[ldap]
-# Enforcement policy on tokens presented to keystone with bind information.
-# One of disabled, permissive, strict, required or a specifically required bind
-# mode e.g. kerberos or x509 to require binding to that authentication.
-# enforce_token_bind = permissive
+#
+# Options defined in keystone
+#
-# Token specific caching toggle. This has no effect unless the global caching
-# option is set to True
-# caching = True
+# URL for connecting to the LDAP server (string value)
+#url=ldap://localhost
-# Token specific cache time-to-live (TTL) in seconds.
-# cache_time =
+# User BindDN to query the LDAP server (string value)
+#user=<None>
-# Revocation-List specific cache time-to-live (TTL) in seconds.
-# revocation_cache_time = 3600
+# Password for the BindDN to query the LDAP server (string
+# value)
+#password=<None>
-[cache]
-# Global cache functionality toggle.
-# enabled = False
-
-# Prefix for building the configuration dictionary for the cache region. This
-# should not need to be changed unless there is another dogpile.cache region
-# with the same configuration name
-# config_prefix = cache.keystone
-
-# Default TTL, in seconds, for any cached item in the dogpile.cache region.
-# This applies to any cached method that doesn't have an explicit cache
-# expiration time defined for it.
-# expiration_time = 600
-
-# Dogpile.cache backend module. It is recommended that Memcache
-# (dogpile.cache.memcache) or Redis (dogpile.cache.redis) be used in production
-# deployments. Small workloads (single process) like devstack can use the
-# dogpile.cache.memory backend.
-# backend = keystone.common.cache.noop
-
-# Arguments supplied to the backend module. Specify this option once per
-# argument to be passed to the dogpile.cache backend.
-# Example format: <argname>:<value>
-# backend_argument =
-
-# Proxy Classes to import that will affect the way the dogpile.cache backend
-# functions. See the dogpile.cache documentation on changing-backend-behavior.
-# Comma delimited list e.g. my.dogpile.proxy.Class, my.dogpile.proxyClass2
-# proxies =
-
-# Use a key-mangling function (sha1) to ensure fixed length cache-keys. This
-# is toggle-able for debugging purposes, it is highly recommended to always
-# leave this set to True.
-# use_key_mangler = True
-
-# Extra debugging from the cache backend (cache keys, get/set/delete/etc calls)
-# This is only really useful if you need to see the specific cache-backend
-# get/set/delete calls with the keys/values. Typically this should be left
-# set to False.
-# debug_cache_backend = False
+# LDAP server suffix (string value)
+#suffix=cn=example,cn=com
-[policy]
-# driver = keystone.policy.backends.sql.Policy
+# (boolean value)
+#use_dumb_member=false
-[ec2]
-# driver = keystone.contrib.ec2.backends.kvs.Ec2
+# (string value)
+#dumb_member=cn=dumb,dc=nonexistent
-[assignment]
-# driver =
+# allow deleting subtrees (boolean value)
+#allow_subtree_delete=false
-# Assignment specific caching toggle. This has no effect unless the global
-# caching option is set to True
-# caching = True
+# The LDAP scope for queries; this can be either "one"
+# (onelevel/singleLevel) or "sub" (subtree/wholeSubtree)
+# (string value)
+#query_scope=one
-# Assignment specific cache time-to-live (TTL) in seconds.
-# cache_time =
+# Maximum results per page; a value of zero ("0") disables
+# paging (integer value)
+#page_size=0
-[oauth1]
-# driver = keystone.contrib.oauth1.backends.sql.OAuth1
+# The LDAP dereferencing option for queries. This can be
+# either "never", "searching", "always", "finding" or
+# "default". The "default" option falls back to using default
+# dereferencing configured by your ldap.conf. (string value)
+#alias_dereferencing=default
-# The Identity service may include expire attributes.
-# If no such attribute is included, then the token lasts indefinitely.
-# Specify how quickly the request token will expire (in seconds)
-# request_token_duration = 28800
-# Specify how quickly the access token will expire (in seconds)
-# access_token_duration = 86400
+# (string value)
+#user_tree_dn=<None>
-[federation]
-#driver = keystone.contrib.federation.backends.sql.Federation
+# (string value)
+#user_filter=<None>
-[ssl]
-#enable = True
-#certfile = /etc/keystone/ssl/certs/keystone.pem
-#keyfile = /etc/keystone/ssl/private/keystonekey.pem
-#ca_certs = /etc/keystone/ssl/certs/ca.pem
-#ca_key = /etc/keystone/ssl/private/cakey.pem
-#key_size = 1024
-#valid_days = 3650
-#cert_required = False
-#cert_subject = /C=US/ST=Unset/L=Unset/O=Unset/CN=localhost
+# (string value)
+#user_objectclass=inetOrgPerson
-[signing]
-# Deprecated in favor of provider in the [token] section
-# Allowed values are PKI or UUID
-#token_format =
+# (string value)
+#user_id_attribute=cn
-#certfile = /etc/keystone/ssl/certs/signing_cert.pem
-#keyfile = /etc/keystone/ssl/private/signing_key.pem
-#ca_certs = /etc/keystone/ssl/certs/ca.pem
-#ca_key = /etc/keystone/ssl/private/cakey.pem
-#key_size = 2048
-#valid_days = 3650
-#cert_subject = /C=US/ST=Unset/L=Unset/O=Unset/CN=www.example.com
+# (string value)
+#user_name_attribute=sn
+
+# (string value)
+#user_mail_attribute=email
+
+# (string value)
+#user_pass_attribute=userPassword
+
+# (string value)
+#user_enabled_attribute=enabled
+
+# (integer value)
+#user_enabled_mask=0
+
+# (string value)
+#user_enabled_default=True
+
+# (list value)
+#user_attribute_ignore=default_project_id,tenants
+
+# (string value)
+#user_default_project_id_attribute=<None>
+
+# (boolean value)
+#user_allow_create=true
+
+# (boolean value)
+#user_allow_update=true
+
+# (boolean value)
+#user_allow_delete=true
+
+# (boolean value)
+#user_enabled_emulation=false
+
+# (string value)
+#user_enabled_emulation_dn=<None>
+
+# (list value)
+#user_additional_attribute_mapping=
+
+# (string value)
+#tenant_tree_dn=<None>
+
+# (string value)
+#tenant_filter=<None>
+
+# (string value)
+#tenant_objectclass=groupOfNames
+
+# (string value)
+#tenant_id_attribute=cn
+
+# (string value)
+#tenant_member_attribute=member
+
+# (string value)
+#tenant_name_attribute=ou
+
+# (string value)
+#tenant_desc_attribute=description
+
+# (string value)
+#tenant_enabled_attribute=enabled
+
+# (string value)
+#tenant_domain_id_attribute=businessCategory
+
+# (list value)
+#tenant_attribute_ignore=
+
+# (boolean value)
+#tenant_allow_create=true
+
+# (boolean value)
+#tenant_allow_update=true
+
+# (boolean value)
+#tenant_allow_delete=true
+
+# (boolean value)
+#tenant_enabled_emulation=false
+
+# (string value)
+#tenant_enabled_emulation_dn=<None>
+
+# (list value)
+#tenant_additional_attribute_mapping=
+
+# (string value)
+#role_tree_dn=<None>
+
+# (string value)
+#role_filter=<None>
+
+# (string value)
+#role_objectclass=organizationalRole
+
+# (string value)
+#role_id_attribute=cn
+
+# (string value)
+#role_name_attribute=ou
+
+# (string value)
+#role_member_attribute=roleOccupant
+
+# (list value)
+#role_attribute_ignore=
+
+# (boolean value)
+#role_allow_create=true
+
+# (boolean value)
+#role_allow_update=true
+
+# (boolean value)
+#role_allow_delete=true
+
+# (list value)
+#role_additional_attribute_mapping=
+
+# (string value)
+#group_tree_dn=<None>
+
+# (string value)
+#group_filter=<None>
+
+# (string value)
+#group_objectclass=groupOfNames
+
+# (string value)
+#group_id_attribute=cn
+
+# (string value)
+#group_name_attribute=ou
+
+# (string value)
+#group_member_attribute=member
+
+# (string value)
+#group_desc_attribute=description
+
+# (list value)
+#group_attribute_ignore=
+
+# (boolean value)
+#group_allow_create=true
+
+# (boolean value)
+#group_allow_update=true
+
+# (boolean value)
+#group_allow_delete=true
+
+# (list value)
+#group_additional_attribute_mapping=
+
+# (string value)
+#tls_cacertfile=<None>
+
+# (string value)
+#tls_cacertdir=<None>
+
+# (boolean value)
+#use_tls=false
-[ldap]
-# url = ldap://localhost
-# user = dc=Manager,dc=example,dc=com
-# password = None
-# suffix = cn=example,cn=com
-# use_dumb_member = False
-# allow_subtree_delete = False
-# dumb_member = cn=dumb,dc=example,dc=com
-
-# Maximum results per page; a value of zero ('0') disables paging (default)
-# page_size = 0
-
-# The LDAP dereferencing option for queries. This can be either 'never',
-# 'searching', 'always', 'finding' or 'default'. The 'default' option falls
-# back to using default dereferencing configured by your ldap.conf.
-# alias_dereferencing = default
-
-# The LDAP scope for queries, this can be either 'one'
-# (onelevel/singleLevel) or 'sub' (subtree/wholeSubtree)
-# query_scope = one
-
-# user_tree_dn = ou=Users,dc=example,dc=com
-# user_filter =
-# user_objectclass = inetOrgPerson
-# user_id_attribute = cn
-# user_name_attribute = sn
-# user_mail_attribute = email
-# user_pass_attribute = userPassword
-# user_enabled_attribute = enabled
-# user_enabled_mask = 0
-# user_enabled_default = True
-# user_attribute_ignore = default_project_id,tenants
-# user_default_project_id_attribute =
-# user_allow_create = True
-# user_allow_update = True
-# user_allow_delete = True
-# user_enabled_emulation = False
-# user_enabled_emulation_dn =
-
-# tenant_tree_dn = ou=Projects,dc=example,dc=com
-# tenant_filter =
-# tenant_objectclass = groupOfNames
-# tenant_domain_id_attribute = businessCategory
-# tenant_id_attribute = cn
-# tenant_member_attribute = member
-# tenant_name_attribute = ou
-# tenant_desc_attribute = desc
-# tenant_enabled_attribute = enabled
-# tenant_attribute_ignore =
-# tenant_allow_create = True
-# tenant_allow_update = True
-# tenant_allow_delete = True
-# tenant_enabled_emulation = False
-# tenant_enabled_emulation_dn =
-
-# role_tree_dn = ou=Roles,dc=example,dc=com
-# role_filter =
-# role_objectclass = organizationalRole
-# role_id_attribute = cn
-# role_name_attribute = ou
-# role_member_attribute = roleOccupant
-# role_attribute_ignore =
-# role_allow_create = True
-# role_allow_update = True
-# role_allow_delete = True
-
-# group_tree_dn =
-# group_filter =
-# group_objectclass = groupOfNames
-# group_id_attribute = cn
-# group_name_attribute = ou
-# group_member_attribute = member
-# group_desc_attribute = desc
-# group_attribute_ignore =
-# group_allow_create = True
-# group_allow_update = True
-# group_allow_delete = True
-
-# ldap TLS options
-# if both tls_cacertfile and tls_cacertdir are set then
-# tls_cacertfile will be used and tls_cacertdir is ignored
# valid options for tls_req_cert are demand, never, and allow
-# use_tls = False
-# tls_cacertfile =
-# tls_cacertdir =
-# tls_req_cert = demand
-
-# Additional attribute mappings can be used to map ldap attributes to internal
-# keystone attributes. This allows keystone to fulfill ldap objectclass
-# requirements. An example to map the description and gecos attributes to a
-# user's name would be:
-# user_additional_attribute_mapping = description:name, gecos:name
-#
-# domain_additional_attribute_mapping =
-# group_additional_attribute_mapping =
-# role_additional_attribute_mapping =
-# project_additional_attribute_mapping =
-# user_additional_attribute_mapping =
+# (string value)
+#tls_req_cert=demand
+
+
+[matchmaker_ring]
+
+#
+# Options defined in oslo.messaging
+#
+
+# Matchmaker ring file (JSON) (string value)
+# Deprecated group/name - [DEFAULT]/matchmaker_ringfile
+#ringfile=/etc/oslo/matchmaker_ring.json
-[auth]
-methods = external,password,token,oauth1
-#external = keystone.auth.plugins.external.DefaultDomain
-password = keystone.auth.plugins.password.Password
-token = keystone.auth.plugins.token.Token
-oauth1 = keystone.auth.plugins.oauth1.OAuth
[memcache]
-# servers = localhost:11211
-# max_compare_and_set_retry = 16
-[kvs]
-# backends =
-# config_prefix = keystone.kvs
-# enable_key_mangler = True
#
-# KeyValueStore lock timeout in seconds
-# default_lock_timeout = 5
+# Options defined in keystone
+#
+
+# Memcache servers in the format of "host:port" (list value)
+#servers=localhost:11211
+
+# Number of compare-and-set attempts to make when using
+# compare-and-set in the token memcache back end (integer
+# value)
+#max_compare_and_set_retry=16
+
+
+[oauth1]
+
+#
+# Options defined in keystone
+#
+
+# Keystone OAuth backend driver (string value)
+#driver=keystone.contrib.oauth1.backends.sql.OAuth1
+
+# Duration (in seconds) for the OAuth Request Token (integer
+# value)
+#request_token_duration=28800
+
+# Duration (in seconds) for the OAuth Access Token (integer
+# value)
+#access_token_duration=86400
+
+
+[os_inherit]
+
+#
+# Options defined in keystone
+#
+
+# role-assignment inheritance to projects from owning domain
+# can be optionally enabled (boolean value)
+#enabled=false
+
+
+[pam]
+
+#
+# Options defined in keystone
+#
+
+# (string value)
+#userid=<None>
+
+# (string value)
+#password=<None>
+
[paste_deploy]
-# Name of the paste configuration file that defines the available pipelines
-config_file = keystone-paste.ini
+
+#
+# Options defined in keystone
+#
+
+# Name of the paste configuration file that defines the
+# available pipelines (string value)
+#config_file=keystone-paste.ini
+
+
+[policy]
+
+#
+# Options defined in keystone
+#
+
+# Keystone Policy backend driver (string value)
+#driver=keystone.policy.backends.sql.Policy
+
+# Maximum number of entities that will be returned in a policy
+# collection (integer value)
+#list_limit=<None>
+
+
+[signing]
+
+#
+# Options defined in keystone
+#
+
+# Deprecated in favor of provider in the [token] section
+# (string value)
+#token_format=<None>
+
+# Path of the certfile for token signing. (string value)
+#certfile=/etc/keystone/ssl/certs/signing_cert.pem
+
+# Path of the keyfile for token signing. (string value)
+#keyfile=/etc/keystone/ssl/private/signing_key.pem
+
+# Path of the CA for token signing. (string value)
+#ca_certs=/etc/keystone/ssl/certs/ca.pem
+
+# Path of the CA Key for token signing (string value)
+#ca_key=/etc/keystone/ssl/private/cakey.pem
+
+# Key Size (in bits) for token signing cert (auto generated
+# certificate) (integer value)
+#key_size=2048
+
+# Days the token signing cert is valid for (auto generated
+# certificate) (integer value)
+#valid_days=3650
+
+# Certificate Subject (auto generated certificate) for token
+# signing. (string value)
+#cert_subject=/C=US/ST=Unset/L=Unset/O=Unset/CN=www.example.com
+
+
+[ssl]
+
+#
+# Options defined in keystone
+#
+
+# Toggle for SSL support on the keystone eventlet servers.
+# (boolean value)
+#enable=false
+
+# Path of the certfile for SSL. (string value)
+#certfile=/etc/keystone/ssl/certs/keystone.pem
+
+# Path of the keyfile for SSL. (string value)
+#keyfile=/etc/keystone/ssl/private/keystonekey.pem
+
+# Path of the ca cert file for SSL. (string value)
+#ca_certs=/etc/keystone/ssl/certs/ca.pem
+
+# Path of the CA key file for SSL (string value)
+#ca_key=/etc/keystone/ssl/private/cakey.pem
+
+# (boolean value)
+#cert_required=false
+
+# SSL Key Length (in bits) (auto generated certificate)
+# (integer value)
+#key_size=1024
+
+# Days the certificate is valid for once signed (auto
+# generated certificate) (integer value)
+#valid_days=3650
+
+# SSL Certificate Subject (auto generated certificate) (string
+# value)
+#cert_subject=/C=US/ST=Unset/L=Unset/O=Unset/CN=localhost
+
+
+#
+# Options defined in keystone.openstack.common.sslutils
+#
+
+# CA certificate file to use to verify connecting clients
+# (string value)
+#ca_file=<None>
+
+# Certificate file to use when starting the server securely
+# (string value)
+#cert_file=<None>
+
+# Private key file to use when starting the server securely
+# (string value)
+#key_file=<None>
+
+
+[stats]
+
+#
+# Options defined in keystone
+#
+
+# Keystone stats backend driver (string value)
+#driver=keystone.contrib.stats.backends.kvs.Stats
+
+
+[token]
+
+#
+# Options defined in keystone
+#
+
+# External auth mechanisms that should add bind information to
+# the token, e.g. kerberos, x509 (list value)
+#bind=
+
+# Enforcement policy on tokens presented to keystone with bind
+# information. One of disabled, permissive, strict, required
+# or a specifically required bind mode e.g. kerberos or x509
+# to require binding to that authentication. (string value)
+#enforce_token_bind=permissive
+
+# Amount of time a token should remain valid (in seconds)
+# (integer value)
+#expiration=3600
+
+# Controls the token construction, validation, and revocation
+# operations. Core providers are
+# keystone.token.providers.[pki|uuid].Provider (string value)
+#provider=<None>
+
+# Keystone Token persistence backend driver (string value)
+#driver=keystone.token.backends.sql.Token
+
+# Toggle for token system caching. This has no effect unless
+# global caching is enabled. (boolean value)
+#caching=true
+
+# Time to cache the revocation list (in seconds). This has no
+# effect unless global and token caching are enabled. (integer
+# value)
+#revocation_cache_time=3600
+
+# Time to cache tokens (in seconds). This has no effect unless
+# global and token caching are enabled. (integer value)
+#cache_time=<None>
+
+
+[trust]
+
+#
+# Options defined in keystone
+#
+
+# delegation and impersonation features can be optionally
+# disabled (boolean value)
+#enabled=true
+
+# Keystone Trust backend driver (string value)
+#driver=keystone.trust.backends.sql.Trust
+
+
diff --git a/httpd/keystone.py b/httpd/keystone.py
index b216a59d4..df6c11d13 100644
--- a/httpd/keystone.py
+++ b/httpd/keystone.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -38,6 +36,7 @@ CONF = config.CONF
config.configure()
sql.initialize()
+config.set_default_for_default_log_levels()
CONF(project='keystone')
config.setup_logging()
diff --git a/keystone/assignment/__init__.py b/keystone/assignment/__init__.py
index 3c76d9660..0a3e4e997 100644
--- a/keystone/assignment/__init__.py
+++ b/keystone/assignment/__init__.py
@@ -1,4 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
# flake8: noqa
# Copyright 2013 OpenStack Foundation
diff --git a/keystone/assignment/backends/kvs.py b/keystone/assignment/backends/kvs.py
index 8a2ab8299..7c9cd2964 100644
--- a/keystone/assignment/backends/kvs.py
+++ b/keystone/assignment/backends/kvs.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -16,15 +14,39 @@
from keystone import assignment
from keystone import clean
-from keystone.common import dependency
from keystone.common import kvs
from keystone import exception
-@dependency.requires('identity_api')
class Assignment(kvs.Base, assignment.Driver):
- def __init__(self):
- super(Assignment, self).__init__()
+ """KVS Assignment backend.
+
+ This backend uses the following mappings to store data:
+
+ * Domains:
+
+ * domain_list -> [domain_id, ...]
+ * domain-{id} -> domain_ref
+ * domain_name-{name} -> domain_ref
+
+ * Projects:
+
+ * tenant-{id} -> project_ref
+ * tenant_name-{name} -> project_ref
+
+ * Roles:
+
+ * role_list -> [role_id, ...]
+ * role-{id} -> role_ref
+
+ * Role assignments:
+
+ * metadata_user-{target}-{user_id} ->
+ {'roles': [{'id': role-id, ...}, ...]}
+ * metadata_group-{target}-{group_id} ->
+ {'roles': [{'id': role-id, ...}, ...]}
+
+ """
# Public interface
@@ -57,34 +79,39 @@ class Assignment(kvs.Base, assignment.Driver):
def list_user_ids_for_project(self, tenant_id):
self.get_project(tenant_id)
- user_keys = filter(lambda x: x.startswith("user-"), self.db.keys())
- user_refs = [self.db.get(key) for key in user_keys]
- user_refs = filter(lambda x: tenant_id in x['tenants'], user_refs)
- return [user_ref['id'] for user_ref in user_refs]
- def _get_user(self, user_id):
- try:
- return self.db.get('user-%s' % user_id)
- except exception.NotFound:
- raise exception.UserNotFound(user_id=user_id)
+ user_ids = set()
+
+ metadata_keys = filter(lambda x: x.startswith("metadata_user-"),
+ self.db.keys())
+ for key in metadata_keys:
+ _, meta_project_or_domain_id, meta_user_id = key.split('-')
+
+ if meta_project_or_domain_id != tenant_id:
+ # target is not the project, so on to next metadata.
+ continue
+
+ user_ids.add(meta_user_id)
+
+ return list(user_ids)
def _get_metadata(self, user_id=None, tenant_id=None,
domain_id=None, group_id=None):
try:
if user_id:
if tenant_id:
- return self.db.get('metadata-%s-%s' % (tenant_id,
- user_id))
+ return self.db.get('metadata_user-%s-%s' % (tenant_id,
+ user_id))
else:
- return self.db.get('metadata-%s-%s' % (domain_id,
- user_id))
+ return self.db.get('metadata_user-%s-%s' % (domain_id,
+ user_id))
else:
if tenant_id:
- return self.db.get('metadata-%s-%s' % (tenant_id,
- group_id))
+ return self.db.get('metadata_group-%s-%s' % (tenant_id,
+ group_id))
else:
- return self.db.get('metadata-%s-%s' % (domain_id,
- group_id))
+ return self.db.get('metadata_group-%s-%s' % (domain_id,
+ group_id))
except exception.NotFound:
raise exception.MetadataNotFound()
@@ -105,11 +132,35 @@ class Assignment(kvs.Base, assignment.Driver):
# NOTE(henry-nash): The kvs backend is being deprecated, so no
# support is provided for projects that the user has a role on solely
# by virtue of group membership.
- user_ref = self._get_user(user_id)
- return [self.get_project(x) for x in user_ref.get('tenants', [])]
+
+ project_ids = set()
+
+ metadata_keys = filter(lambda x: x.startswith('metadata_user-'),
+ self.db.keys())
+ for key in metadata_keys:
+ _, meta_project_or_domain_id, meta_user_id = key.split('-')
+
+ if meta_user_id != user_id:
+                # Not the user, so move on to the next key.
+ continue
+
+ try:
+ self.get_project(meta_project_or_domain_id)
+ except exception.NotFound:
+                # Target is not a project, so move on to the next key.
+ continue
+
+ project_id = meta_project_or_domain_id
+ project_ids.add(project_id)
+
+ project_refs = []
+
+ for project_id in project_ids:
+ project_refs.append(self.get_project(project_id))
+
+ return project_refs
def add_role_to_user_and_project(self, user_id, tenant_id, role_id):
- self.identity_api.get_user(user_id)
self.get_project(tenant_id)
self.get_role(role_id)
try:
@@ -145,48 +196,50 @@ class Assignment(kvs.Base, assignment.Driver):
if metadata_ref['roles']:
self._update_metadata(user_id, tenant_id, metadata_ref)
else:
-
- self.db.delete('metadata-%s-%s' % (tenant_id, user_id))
- user_ref = self._get_user(user_id)
- tenants = set(user_ref.get('tenants', []))
- tenants.remove(tenant_id)
- user_ref['tenants'] = list(tenants)
- self.identity_api.update_user(user_id, user_ref)
+ self.db.delete('metadata_user-%s-%s' % (tenant_id, user_id))
def list_role_assignments(self):
"""List the role assignments.
- The kvs backend stores role assignments as key-values:
-
- "metadata-{target}-{actor}", with the value being a role list
-
- i.e. "metadata-MyProjectID-MyUserID" [{'id': role1}, {'id': role2}]
-
- ...so we enumerate the list and extract the targets, actors
- and roles.
+ We enumerate the metadata entries and extract the targets, actors, and
+ roles.
"""
assignment_list = []
- metadata_keys = filter(lambda x: x.startswith("metadata-"),
+ metadata_keys = filter(lambda x: x.startswith('metadata_user-'),
self.db.keys())
for key in metadata_keys:
template = {}
- meta_id1 = key.split('-')[1]
- meta_id2 = key.split('-')[2]
+ _, meta_project_or_domain_id, template['user_id'] = key.split('-')
try:
- self.get_project(meta_id1)
- template['project_id'] = meta_id1
+ self.get_project(meta_project_or_domain_id)
+ template['project_id'] = meta_project_or_domain_id
except exception.NotFound:
- template['domain_id'] = meta_id1
+ template['domain_id'] = meta_project_or_domain_id
+
+ entry = self.db.get(key)
+ inherited = False
+ for r in self._roles_from_role_dicts(entry.get('roles', {}),
+ inherited):
+ role_assignment = template.copy()
+ role_assignment['role_id'] = r
+ assignment_list.append(role_assignment)
+
+ metadata_keys = filter(lambda x: x.startswith('metadata_group-'),
+ self.db.keys())
+ for key in metadata_keys:
+ template = {}
+ _, meta_project_or_domain_id, template['group_id'] = key.split('-')
try:
- self._get_user(meta_id2)
- template['user_id'] = meta_id2
+ self.get_project(meta_project_or_domain_id)
+ template['project_id'] = meta_project_or_domain_id
except exception.NotFound:
- template['group_id'] = meta_id2
+ template['domain_id'] = meta_project_or_domain_id
entry = self.db.get(key)
+ inherited = False
for r in self._roles_from_role_dicts(entry.get('roles', {}),
- False):
+ inherited):
role_assignment = template.copy()
role_assignment['role_id'] = r
assignment_list.append(role_assignment)
@@ -257,30 +310,18 @@ class Assignment(kvs.Base, assignment.Driver):
domain_id=None, group_id=None):
if user_id:
if tenant_id:
- self.db.set('metadata-%s-%s' % (tenant_id, user_id), metadata)
- try:
- user_ref = self._get_user(user_id)
- # FIXME(morganfainberg): Setting the password does a number
- # of things including invalidating tokens. Simple solution
- # is to remove it from the ref before sending it on. The
- # correct solution is to remove the need to call the
- # identity_api from within the driver.
- user_ref.pop('password', None)
- tenants = set(user_ref.get('tenants', []))
- if tenant_id not in tenants:
- tenants.add(tenant_id)
- user_ref['tenants'] = list(tenants)
- self.identity_api.update_user(user_id, user_ref)
- except exception.UserNotFound:
- # It's acceptable for the user to not exist.
- pass
+ self.db.set('metadata_user-%s-%s' % (tenant_id, user_id),
+ metadata)
else:
- self.db.set('metadata-%s-%s' % (domain_id, user_id), metadata)
+ self.db.set('metadata_user-%s-%s' % (domain_id, user_id),
+ metadata)
else:
if tenant_id:
- self.db.set('metadata-%s-%s' % (tenant_id, group_id), metadata)
+ self.db.set('metadata_group-%s-%s' % (tenant_id, group_id),
+ metadata)
else:
- self.db.set('metadata-%s-%s' % (domain_id, group_id), metadata)
+ self.db.set('metadata_group-%s-%s' % (domain_id, group_id),
+ metadata)
return metadata
def create_role(self, role_id, role):
@@ -320,31 +361,39 @@ class Assignment(kvs.Base, assignment.Driver):
def delete_role(self, role_id):
self.get_role(role_id)
- metadata_keys = filter(lambda x: x.startswith("metadata-"),
+
+ metadata_keys = filter(lambda x: x.startswith('metadata_user-'),
self.db.keys())
for key in metadata_keys:
- meta_id1 = key.split('-')[1]
- meta_id2 = key.split('-')[2]
+ _, meta_project_or_domain_id, meta_user_id = key.split('-')
try:
- self.delete_grant(role_id, project_id=meta_id1,
- user_id=meta_id2)
+ self.delete_grant(role_id,
+ project_id=meta_project_or_domain_id,
+ user_id=meta_user_id)
except exception.NotFound:
pass
try:
- self.delete_grant(role_id, project_id=meta_id1,
- group_id=meta_id2)
+ self.delete_grant(role_id, domain_id=meta_project_or_domain_id,
+ user_id=meta_user_id)
except exception.NotFound:
pass
+
+ metadata_keys = filter(lambda x: x.startswith('metadata_group-'),
+ self.db.keys())
+ for key in metadata_keys:
+ _, meta_project_or_domain_id, meta_group_id = key.split('-')
try:
- self.delete_grant(role_id, domain_id=meta_id1,
- user_id=meta_id2)
+ self.delete_grant(role_id,
+ project_id=meta_project_or_domain_id,
+ group_id=meta_group_id)
except exception.NotFound:
pass
try:
- self.delete_grant(role_id, domain_id=meta_id1,
- group_id=meta_id2)
+ self.delete_grant(role_id, domain_id=meta_project_or_domain_id,
+ group_id=meta_group_id)
except exception.NotFound:
pass
+
self.db.delete('role-%s' % role_id)
role_list = set(self.db.get('role_list', []))
role_list.remove(role_id)
@@ -375,10 +424,6 @@ class Assignment(kvs.Base, assignment.Driver):
def list_grants(self, user_id=None, group_id=None,
domain_id=None, project_id=None,
inherited_to_projects=False):
- if user_id:
- self.identity_api.get_user(user_id)
- if group_id:
- self.identity_api.get_group(group_id)
if domain_id:
self.get_domain(domain_id)
if project_id:
@@ -398,8 +443,6 @@ class Assignment(kvs.Base, assignment.Driver):
domain_id=None, project_id=None,
inherited_to_projects=False):
self.get_role(role_id)
- if user_id:
- self.identity_api.get_user(user_id)
if group_id:
self.get_group(group_id)
if domain_id:
@@ -424,10 +467,6 @@ class Assignment(kvs.Base, assignment.Driver):
domain_id=None, project_id=None,
inherited_to_projects=False):
self.get_role(role_id)
- if user_id:
- self.identity_api.get_user(user_id)
- if group_id:
- self.identity_api.get_group(group_id)
if domain_id:
self.get_domain(domain_id)
if project_id:
diff --git a/keystone/assignment/backends/ldap.py b/keystone/assignment/backends/ldap.py
index 2c383856b..3f7bfb305 100644
--- a/keystone/assignment/backends/ldap.py
+++ b/keystone/assignment/backends/ldap.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012-2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -21,7 +19,6 @@ import ldap as ldap
from keystone import assignment
from keystone import clean
-from keystone.common import dependency
from keystone.common import driver_hints
from keystone.common import ldap as common_ldap
from keystone.common import models
@@ -35,7 +32,6 @@ CONF = config.CONF
LOG = log.getLogger(__name__)
-@dependency.requires('identity_api')
class Assignment(assignment.Driver):
def __init__(self):
super(Assignment, self).__init__()
@@ -69,6 +65,7 @@ class Assignment(assignment.Driver):
return self._set_default_domain(self.project.get_by_name(tenant_name))
def create_project(self, tenant_id, tenant):
+ self.project.check_allow_create()
tenant = self._validate_default_domain(tenant)
tenant['name'] = clean.project_name(tenant['name'])
data = tenant.copy()
@@ -79,6 +76,7 @@ class Assignment(assignment.Driver):
return self._set_default_domain(self.project.create(data))
def update_project(self, tenant_id, tenant):
+ self.project.check_allow_update()
tenant = self._validate_default_domain(tenant)
if 'name' in tenant:
tenant['name'] = clean.project_name(tenant['name'])
@@ -88,7 +86,6 @@ class Assignment(assignment.Driver):
domain_id=None, group_id=None):
def _get_roles_for_just_user_and_project(user_id, tenant_id):
- self.identity_api.get_user(user_id)
self.get_project(tenant_id)
return [self.role._dn_to_id(a.role_dn)
for a in self.role.get_role_assignments
@@ -96,7 +93,6 @@ class Assignment(assignment.Driver):
if self.user._dn_to_id(a.user_dn) == user_id]
def _get_roles_for_group_and_project(group_id, project_id):
- self.identity_api.get_group(group_id)
self.get_project(project_id)
group_dn = self.group._id_to_dn(group_id)
# NOTE(marcos-fermin-lobo): In Active Directory, for functions
@@ -111,7 +107,7 @@ class Assignment(assignment.Driver):
if a.user_dn.upper() == group_dn.upper()]
if domain_id is not None:
- msg = 'Domain metadata not supported by LDAP'
+ msg = _('Domain metadata not supported by LDAP')
raise exception.NotImplemented(message=msg)
if group_id is None and user_id is None:
return {}
@@ -138,7 +134,6 @@ class Assignment(assignment.Driver):
# NOTE(henry-nash): The LDAP backend is being deprecated, so no
# support is provided for projects that the user has a role on solely
# by virtue of group membership.
- self.identity_api.get_user(user_id)
user_dn = self.user._id_to_dn(user_id)
associations = (self.role.list_project_roles_for_user
(user_dn, self.project.tree_dn))
@@ -164,7 +159,6 @@ class Assignment(assignment.Driver):
self.project._id_to_dn(tenant_id))
def add_role_to_user_and_project(self, user_id, tenant_id, role_id):
- self.identity_api.get_user(user_id)
self.get_project(tenant_id)
self.get_role(role_id)
user_dn = self.user._id_to_dn(user_id)
@@ -176,7 +170,6 @@ class Assignment(assignment.Driver):
tenant_dn=tenant_dn)
def _add_role_to_group_and_project(self, group_id, tenant_id, role_id):
- self.identity_api.get_group(group_id)
self.get_project(tenant_id)
self.get_role(role_id)
group_dn = self.group._id_to_dn(group_id)
@@ -187,16 +180,14 @@ class Assignment(assignment.Driver):
role_dn=role_dn,
tenant_dn=tenant_dn)
- def _create_metadata(self, user_id, tenant_id, metadata):
- return {}
-
def create_role(self, role_id, role):
+ self.role.check_allow_create()
try:
self.get_role(role_id)
except exception.NotFound:
pass
else:
- msg = 'Duplicate ID, %s.' % role_id
+ msg = _('Duplicate ID, %s.') % role_id
raise exception.Conflict(type='role', details=msg)
try:
@@ -204,15 +195,17 @@ class Assignment(assignment.Driver):
except exception.NotFound:
pass
else:
- msg = 'Duplicate name, %s.' % role['name']
+ msg = _('Duplicate name, %s.') % role['name']
raise exception.Conflict(type='role', details=msg)
return self.role.create(role)
def delete_role(self, role_id):
+ self.role.check_allow_delete()
return self.role.delete(role_id, self.project.tree_dn)
def delete_project(self, tenant_id):
+ self.project.check_allow_delete()
if self.project.subtree_delete_enabled:
self.project.deleteTree(tenant_id)
else:
@@ -236,14 +229,15 @@ class Assignment(assignment.Driver):
group_id, role_id)
def update_role(self, role_id, role):
+ self.role.check_allow_update()
self.get_role(role_id)
return self.role.update(role_id, role)
def create_domain(self, domain_id, domain):
if domain_id == CONF.identity.default_domain_id:
- msg = 'Duplicate ID, %s.' % domain_id
+ msg = _('Duplicate ID, %s.') % domain_id
raise exception.Conflict(type='domain', details=msg)
- raise exception.Forbidden('Domains are read-only against LDAP')
+ raise exception.Forbidden(_('Domains are read-only against LDAP'))
def get_domain(self, domain_id):
self._validate_default_domain_id(domain_id)
@@ -251,11 +245,11 @@ class Assignment(assignment.Driver):
def update_domain(self, domain_id, domain):
self._validate_default_domain_id(domain_id)
- raise exception.Forbidden('Domains are read-only against LDAP')
+ raise exception.Forbidden(_('Domains are read-only against LDAP'))
def delete_domain(self, domain_id):
self._validate_default_domain_id(domain_id)
- raise exception.Forbidden('Domains are read-only against LDAP')
+ raise exception.Forbidden(_('Domains are read-only against LDAP'))
def list_domains(self, hints):
return [assignment.calc_default_domain()]
@@ -348,11 +342,6 @@ class Assignment(assignment.Driver):
def delete_grant(self, role_id, user_id=None, group_id=None,
domain_id=None, project_id=None,
inherited_to_projects=False):
- if user_id:
- self.identity_api.get_user(user_id)
- if group_id:
- self.identity_api.get_group(group_id)
-
self.get_role(role_id)
if domain_id:
@@ -549,8 +538,9 @@ class RoleApi(common_ldap.BaseLdap):
conn.modify_s(role_dn, [(ldap.MOD_ADD,
self.member_attribute, user_dn)])
except ldap.TYPE_OR_VALUE_EXISTS:
- msg = ('User %s already has role %s in tenant %s'
- % (user_id, role_id, tenant_id))
+ msg = (_('User %(user_id)s already has role %(role_id)s in '
+ 'tenant %(tenant_id)s') %
+ dict(user_id=user_id, role_id=role_id, tenant_id=tenant_id))
raise exception.Conflict(type='role grant', details=msg)
except ldap.NO_SUCH_OBJECT:
if tenant_id is None or self.get(role_id) is None:
@@ -661,7 +651,7 @@ class RoleApi(common_ldap.BaseLdap):
def update(self, role_id, role):
try:
old_name = self.get_by_name(role['name'])
- raise exception.Conflict('Cannot duplicate name %s' % old_name)
+ raise exception.Conflict(_('Cannot duplicate name %s') % old_name)
except exception.NotFound:
pass
return super(RoleApi, self).update(role_id, role)
diff --git a/keystone/assignment/backends/sql.py b/keystone/assignment/backends/sql.py
index 376ff55de..76b4f7b9c 100644
--- a/keystone/assignment/backends/sql.py
+++ b/keystone/assignment/backends/sql.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012-13 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -14,25 +12,34 @@
# License for the specific language governing permissions and limitations
# under the License.
+import six
+
from keystone import assignment
from keystone import clean
-from keystone.common import dependency
from keystone.common import sql
-from keystone.common.sql import migration
+from keystone.common.sql import migration_helpers
from keystone import config
from keystone import exception
+from keystone.openstack.common.db.sqlalchemy import migration
from keystone.openstack.common.db.sqlalchemy import session as db_session
CONF = config.CONF
-@dependency.requires('identity_api')
-class Assignment(sql.Base, assignment.Driver):
+class AssignmentType:
+ USER_PROJECT = 'UserProject'
+ GROUP_PROJECT = 'GroupProject'
+ USER_DOMAIN = 'UserDomain'
+ GROUP_DOMAIN = 'GroupDomain'
+
+
+class Assignment(assignment.Driver):
# Internal interface to manage the database
def db_sync(self, version=None):
- migration.db_sync(version=version)
+ migration.db_sync(
+ migration_helpers.find_migrate_repo(), version=version)
def _get_project(self, session, project_id):
project_ref = session.query(Project).get(project_id)
@@ -58,41 +65,64 @@ class Assignment(sql.Base, assignment.Driver):
def list_user_ids_for_project(self, tenant_id):
with sql.transaction() as session:
self._get_project(session, tenant_id)
- query = session.query(UserProjectGrant)
- query = query.filter(UserProjectGrant.project_id ==
- tenant_id)
- project_refs = query.all()
- return [project_ref.user_id for project_ref in project_refs]
+ query = session.query(RoleAssignment.actor_id)
+ query = query.filter_by(type=AssignmentType.USER_PROJECT)
+ query = query.filter_by(target_id=tenant_id)
+ assignments = query.all()
+ return [assignment.actor_id for assignment in assignments]
def _get_metadata(self, user_id=None, tenant_id=None,
domain_id=None, group_id=None, session=None):
+ # TODO(henry-nash): This method represents the last vestiges of the old
+ # metadata concept in this driver. Although we no longer need it here,
+ # since the Manager layer uses the metadata concept across all
+ # assignment drivers, we need to remove it from all of them in order to
+ # finally remove this method.
+
# We aren't given a session when called by the manager directly.
if session is None:
session = db_session.get_session()
- if user_id:
- if tenant_id:
- q = session.query(UserProjectGrant)
- q = q.filter_by(project_id=tenant_id)
- elif domain_id:
- q = session.query(UserDomainGrant)
- q = q.filter_by(domain_id=domain_id)
- q = q.filter_by(user_id=user_id)
- elif group_id:
- if tenant_id:
- q = session.query(GroupProjectGrant)
- q = q.filter_by(project_id=tenant_id)
- elif domain_id:
- q = session.query(GroupDomainGrant)
- q = q.filter_by(domain_id=domain_id)
- q = q.filter_by(group_id=group_id)
- try:
- return q.one().data
- except sql.NotFound:
+
+ q = session.query(RoleAssignment)
+ q = q.filter_by(actor_id=user_id or group_id)
+ q = q.filter_by(target_id=tenant_id or domain_id)
+ refs = q.all()
+ if not refs:
raise exception.MetadataNotFound()
+ metadata_ref = {}
+ metadata_ref['roles'] = []
+ for assignment in refs:
+ role_ref = {}
+ role_ref['id'] = assignment.role_id
+ if assignment.inherited and (
+ assignment.type == AssignmentType.USER_DOMAIN or
+ assignment.type == AssignmentType.GROUP_DOMAIN):
+ role_ref['inherited_to'] = 'projects'
+ metadata_ref['roles'].append(role_ref)
+
+ return metadata_ref
+
def create_grant(self, role_id, user_id=None, group_id=None,
domain_id=None, project_id=None,
inherited_to_projects=False):
+
+ def calculate_type(user_id, group_id, project_id, domain_id):
+ if user_id and project_id:
+ return AssignmentType.USER_PROJECT
+ elif user_id and domain_id:
+ return AssignmentType.USER_DOMAIN
+ elif group_id and project_id:
+ return AssignmentType.GROUP_PROJECT
+ elif group_id and domain_id:
+ return AssignmentType.GROUP_DOMAIN
+ else:
+ message_data = ', '.join(
+ [user_id, group_id, project_id, domain_id])
+ raise exception.Error(message=_(
+ 'Unexpected combination of grant attributes - '
+ 'User, Group, Project, Domain: %s') % message_data)
+
with sql.transaction() as session:
self._get_role(session, role_id)
@@ -105,24 +135,18 @@ class Assignment(sql.Base, assignment.Driver):
msg = _('Inherited roles can only be assigned to domains')
raise exception.Conflict(type='role grant', details=msg)
- try:
- metadata_ref = self._get_metadata(user_id, project_id,
- domain_id, group_id,
- session=session)
- is_new = False
- except exception.MetadataNotFound:
- metadata_ref = {}
- is_new = True
-
- metadata_ref['roles'] = self._add_role_to_role_dicts(
- role_id, inherited_to_projects, metadata_ref.get('roles', []))
-
- if is_new:
- self._create_metadata(session, user_id, project_id,
- metadata_ref, domain_id, group_id)
- else:
- self._update_metadata(session, user_id, project_id,
- metadata_ref, domain_id, group_id)
+ type = calculate_type(user_id, group_id, project_id, domain_id)
+ try:
+ with sql.transaction() as session:
+ session.add(RoleAssignment(
+ type=type,
+ actor_id=user_id or group_id,
+ target_id=project_id or domain_id,
+ role_id=role_id,
+ inherited=inherited_to_projects))
+ except sql.DBDuplicateEntry:
+ # The v3 grant APIs are silent if the assignment already exists
+ pass
def list_grants(self, user_id=None, group_id=None,
domain_id=None, project_id=None,
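
calculate_type above is the crux of the move to a single assignment table: every (user or group, project or domain) grant collapses to one enum value. The same dispatch, sketched as a lookup table; keystone itself uses the if/elif chain shown in the hunk:

    ASSIGNMENT_TYPES = {
        ('user', 'project'): 'UserProject',
        ('user', 'domain'): 'UserDomain',
        ('group', 'project'): 'GroupProject',
        ('group', 'domain'): 'GroupDomain',
    }

    def calculate_type(user_id, group_id, project_id, domain_id):
        actor = 'user' if user_id else 'group' if group_id else None
        target = 'project' if project_id else 'domain' if domain_id else None
        try:
            return ASSIGNMENT_TYPES[(actor, target)]
        except KeyError:
            raise ValueError('unexpected grant attributes: %r' %
                             ((user_id, group_id, project_id, domain_id),))

    print(calculate_type('u1', None, 'p1', None))  # UserProject
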
@@ -133,16 +157,21 @@ class Assignment(sql.Base, assignment.Driver):
if project_id:
self._get_project(session, project_id)
- try:
- metadata_ref = self._get_metadata(user_id, project_id,
- domain_id, group_id,
- session=session)
- except exception.MetadataNotFound:
- metadata_ref = {}
-
- return [self.get_role(x) for x in
- self._roles_from_role_dicts(metadata_ref.get('roles', []),
- inherited_to_projects)]
+ q = session.query(Role).join(RoleAssignment)
+ q = q.filter(RoleAssignment.actor_id == (user_id or group_id))
+ q = q.filter(RoleAssignment.target_id == (project_id or domain_id))
+ q = q.filter(RoleAssignment.inherited == inherited_to_projects)
+ q = q.filter(Role.id == RoleAssignment.role_id)
+ return [x.to_dict() for x in q.all()]
+
+ def _build_grant_filter(self, session, role_id, user_id, group_id,
+ domain_id, project_id, inherited_to_projects):
+ q = session.query(RoleAssignment)
+ q = q.filter_by(actor_id=user_id or group_id)
+ q = q.filter_by(target_id=project_id or domain_id)
+ q = q.filter_by(role_id=role_id)
+ q = q.filter_by(inherited=inherited_to_projects)
+ return q
def get_grant(self, role_id, user_id=None, group_id=None,
domain_id=None, project_id=None,
@@ -155,64 +184,36 @@ class Assignment(sql.Base, assignment.Driver):
self._get_project(session, project_id)
try:
- metadata_ref = self._get_metadata(user_id, project_id,
- domain_id, group_id,
- session=session)
- except exception.MetadataNotFound:
- metadata_ref = {}
- role_ids = set(self._roles_from_role_dicts(
- metadata_ref.get('roles', []), inherited_to_projects))
- if role_id not in role_ids:
+ q = self._build_grant_filter(
+ session, role_id, user_id, group_id, domain_id, project_id,
+ inherited_to_projects)
+ q.one()
+ except sql.NotFound:
raise exception.RoleNotFound(role_id=role_id)
+
return role_ref.to_dict()
def delete_grant(self, role_id, user_id=None, group_id=None,
domain_id=None, project_id=None,
inherited_to_projects=False):
with sql.transaction() as session:
- self._delete_grant(session=session, role_id=role_id,
- user_id=user_id, group_id=group_id,
- domain_id=domain_id, project_id=project_id,
- inherited_to_projects=inherited_to_projects)
-
- def _delete_grant(self, session, role_id, user_id=None, group_id=None,
- domain_id=None, project_id=None,
- inherited_to_projects=False):
- self._get_role(session, role_id)
- if domain_id:
- self._get_domain(session, domain_id)
- if project_id:
- self._get_project(session, project_id)
-
- try:
- metadata_ref = self._get_metadata(user_id, project_id,
- domain_id, group_id,
- session=session)
- is_new = False
- except exception.MetadataNotFound:
- metadata_ref = {}
- is_new = True
-
- try:
- metadata_ref['roles'] = self._remove_role_from_role_dicts(
- role_id, inherited_to_projects, metadata_ref.get('roles', []))
- except KeyError:
- raise exception.RoleNotFound(role_id=role_id)
+ self._get_role(session, role_id)
+ if domain_id:
+ self._get_domain(session, domain_id)
+ if project_id:
+ self._get_project(session, project_id)
- if is_new:
- # TODO(henry-nash) It seems odd that you would create a new
- # entry in response to trying to delete a role that was not
- # assigned. Although benign, this should probably be removed.
- self._create_metadata(session, user_id, project_id, metadata_ref,
- domain_id, group_id)
- else:
- self._update_metadata(session, user_id, project_id, metadata_ref,
- domain_id, group_id)
+ q = self._build_grant_filter(
+ session, role_id, user_id, group_id, domain_id, project_id,
+ inherited_to_projects)
+ if not q.delete(False):
+ raise exception.RoleNotFound(role_id=role_id)
+ @sql.truncated
def list_projects(self, hints):
with sql.transaction() as session:
query = session.query(Project)
- project_refs = self.filter_query(Project, query, hints)
+ project_refs = sql.filter_limit_query(Project, query, hints)
return [project_ref.to_dict() for project_ref in project_refs]
def list_projects_in_domain(self, domain_id):
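
@sql.truncated and sql.filter_limit_query replace the driver's old self.filter_query helper. A simplified sketch of how the decorator and helper cooperate; plain lists stand in for queries, and the hint bookkeeping approximates keystone's driver_hints rather than reproducing its API:

    CONF_LIST_LIMIT = 2  # stand-in for CONF.assignment.list_limit or CONF.list_limit

    class Hints(object):
        def __init__(self):
            self.limit = None

    def filter_limit_query(rows, hints):
        # Apply the limit recorded on the hints and flag truncation.
        if hints.limit is None:
            return rows
        limit = hints.limit['limit']
        hints.limit['truncated'] = len(rows) > limit
        return rows[:limit]

    def truncated(list_fn):
        def wrapper(hints):
            hints.limit = {'limit': CONF_LIST_LIMIT, 'truncated': False}
            return list_fn(hints)
        return wrapper

    @truncated
    def list_projects(hints):
        return filter_limit_query(['p1', 'p2', 'p3'], hints)

    hints = Hints()
    print(list_projects(hints), hints.limit['truncated'])  # ['p1', 'p2'] True
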
@@ -223,51 +224,35 @@ class Assignment(sql.Base, assignment.Driver):
return [project_ref.to_dict() for project_ref in project_refs]
def list_projects_for_user(self, user_id, group_ids, hints):
- # NOTE(henry-nash): This method is written as a series of code blocks,
- # rather than broken down into too many sub-functions, to prepare for
- # SQL optimization when we rationalize the grant tables in the
- # future.
+ # TODO(henry-nash): Now that we have a single assignment table, we
+ # should be able to honor the hints list that is provided.
- # TODO(henry-nash): Once we replace the existing grant tables with
- # a more normalized table structure, we will be able to implement
- # filtering here. Until then, we'll just ignore it and let the
- # controller do it.
+ def _project_ids_to_dicts(session, ids):
+ if not ids:
+ return []
+ else:
+ query = session.query(Project)
+ query = query.filter(Project.id.in_(ids))
+ project_refs = query.all()
+ return [project_ref.to_dict() for project_ref in project_refs]
- def _list_domains_with_inherited_grants(query):
- domain_ids = set()
- domain_grants = query.all()
- for domain_grant in domain_grants:
- for grant in domain_grant.data.get('roles', []):
- if 'inherited_to' in grant:
- domain_ids.add(domain_grant.domain_id)
- return domain_ids
+ with sql.transaction() as session:
+ # First get a list of the projects and domains for which the user
+ # has any kind of role assigned
- def _project_ids_to_dicts(session, ids):
- return [self._get_project(session, project_id).to_dict()
- for project_id in ids]
+ actor_list = [user_id]
+ if group_ids:
+ actor_list = actor_list + group_ids
- # NOTE(henry-nash): The metadata management code doesn't always clean
- # up table entries when the last role is deleted - so when checking
- # grant entries, only include this project if there are actually roles
- # present.
+ query = session.query(RoleAssignment)
+ query = query.filter(RoleAssignment.actor_id.in_(actor_list))
+ assignments = query.all()
- with sql.transaction() as session:
- # First get a list of the projects for which the user has a direct
- # role assigned
- query = session.query(UserProjectGrant)
- query = query.filter_by(user_id=user_id)
- project_grants_for_user = query.all()
- project_ids = set(x.project_id for x in project_grants_for_user
- if x.data.get('roles'))
-
- # Now find any projects with group roles and add them in
- for group_id in group_ids:
- query = session.query(GroupProjectGrant)
- query = query.filter_by(group_id=group_id)
- project_grants_for_group = query.all()
- for project_grant in project_grants_for_group:
- if project_grant.data.get('roles'):
- project_ids.add(project_grant.project_id)
+ project_ids = set()
+ for assignment in assignments:
+ if (assignment.type == AssignmentType.USER_PROJECT or
+ assignment.type == AssignmentType.GROUP_PROJECT):
+ project_ids.add(assignment.target_id)
if not CONF.os_inherit.enabled:
return _project_ids_to_dicts(session, project_ids)
@@ -277,137 +262,79 @@ class Assignment(sql.Base, assignment.Driver):
# add in all the projects in that domain.
domain_ids = set()
-
- # First check for user roles on any domains
- query = session.query(UserDomainGrant)
- query = query.filter_by(user_id=user_id)
- domain_ids.update(_list_domains_with_inherited_grants(query))
-
- # Now for group roles on any domains
- for group_id in group_ids:
- query = session.query(GroupDomainGrant)
- query = query.filter_by(group_id=group_id)
- domain_ids.update(_list_domains_with_inherited_grants(query))
-
- # For each domain on which the user has an inherited role, get the
- # list of projects in that domain and add them in to the
- # project id list
-
- for domain_id in domain_ids:
- query = session.query(Project)
- query = query.filter_by(domain_id=domain_id)
- project_refs = query.all()
- for project_ref in project_refs:
+ for assignment in assignments:
+ if ((assignment.type == AssignmentType.USER_DOMAIN or
+ assignment.type == AssignmentType.GROUP_DOMAIN) and
+ assignment.inherited):
+ domain_ids.add(assignment.target_id)
+
+ # Get the projects that are owned by all of these domains and
+ # add them in to the project id list
+
+ if domain_ids:
+ query = session.query(Project.id)
+ query = query.filter(Project.domain_id.in_(domain_ids))
+ for project_ref in query.all():
project_ids.add(project_ref.id)
return _project_ids_to_dicts(session, project_ids)
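
With a single table, list_projects_for_user becomes two passes over one assignment list: direct project grants first, then, when os_inherit is enabled, the projects owned by domains on which the actor holds an inherited role. A toy sketch of that expansion with plain data:

    assignments = [
        {'type': 'UserProject', 'target_id': 'p1', 'inherited': False},
        {'type': 'UserDomain', 'target_id': 'd1', 'inherited': True},
    ]
    projects_by_domain = {'d1': ['p2', 'p3']}  # illustrative fixture

    project_ids = set(a['target_id'] for a in assignments
                      if a['type'] in ('UserProject', 'GroupProject'))
    domain_ids = set(a['target_id'] for a in assignments
                     if a['type'] in ('UserDomain', 'GroupDomain')
                     and a['inherited'])
    for domain_id in domain_ids:
        project_ids.update(projects_by_domain.get(domain_id, []))
    print(sorted(project_ids))  # ['p1', 'p2', 'p3']
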
def add_role_to_user_and_project(self, user_id, tenant_id, role_id):
- self.identity_api.get_user(user_id)
-
with sql.transaction() as session:
self._get_project(session, tenant_id)
self._get_role(session, role_id)
- try:
- metadata_ref = self._get_metadata(user_id, tenant_id,
- session=session)
- is_new = False
- except exception.MetadataNotFound:
- metadata_ref = {}
- is_new = True
-
- try:
- metadata_ref['roles'] = self._add_role_to_role_dicts(
- role_id, False, metadata_ref.get('roles', []),
- allow_existing=False)
- except KeyError:
- msg = ('User %s already has role %s in tenant %s'
- % (user_id, role_id, tenant_id))
- raise exception.Conflict(type='role grant', details=msg)
- if is_new:
- self._create_metadata(session, user_id, tenant_id,
- metadata_ref)
- else:
- self._update_metadata(session, user_id, tenant_id,
- metadata_ref)
+ try:
+ with sql.transaction() as session:
+ session.add(RoleAssignment(
+ type=AssignmentType.USER_PROJECT,
+ actor_id=user_id, target_id=tenant_id,
+ role_id=role_id, inherited=False))
+ except sql.DBDuplicateEntry:
+ msg = ('User %s already has role %s in tenant %s'
+ % (user_id, role_id, tenant_id))
+ raise exception.Conflict(type='role grant', details=msg)
def remove_role_from_user_and_project(self, user_id, tenant_id, role_id):
with sql.transaction() as session:
- try:
- metadata_ref = self._get_metadata(user_id, tenant_id,
- session=session)
- except exception.MetadataNotFound:
- raise exception.RoleNotFound(message=_(
- 'Cannot remove role that has not been granted, %s') %
- role_id)
- try:
- metadata_ref['roles'] = self._remove_role_from_role_dicts(
- role_id, False, metadata_ref.get('roles', []))
- except KeyError:
+ q = session.query(RoleAssignment)
+ q = q.filter_by(actor_id=user_id)
+ q = q.filter_by(target_id=tenant_id)
+ q = q.filter_by(role_id=role_id)
+ if q.delete() == 0:
raise exception.RoleNotFound(message=_(
'Cannot remove role that has not been granted, %s') %
role_id)
- if metadata_ref['roles']:
- self._update_metadata(session, user_id, tenant_id,
- metadata_ref)
- else:
- q = session.query(UserProjectGrant)
- q = q.filter_by(user_id=user_id)
- q = q.filter_by(project_id=tenant_id)
- q.delete()
-
def list_role_assignments(self):
- # TODO(henry-nash): The current implementation is really simulating
- # us having a common role assignment table, rather than having the
- # four different grant tables we have today. When we move to role
- # assignment as a first class entity, we should create the single
- # assignment table, simplifying the logic of this (and many other)
- # functions.
+ def denormalize_role(ref):
+ assignment = {}
+ if ref.type == AssignmentType.USER_PROJECT:
+ assignment['user_id'] = ref.actor_id
+ assignment['project_id'] = ref.target_id
+ elif ref.type == AssignmentType.USER_DOMAIN:
+ assignment['user_id'] = ref.actor_id
+ assignment['domain_id'] = ref.target_id
+ elif ref.type == AssignmentType.GROUP_PROJECT:
+ assignment['group_id'] = ref.actor_id
+ assignment['project_id'] = ref.target_id
+ elif ref.type == AssignmentType.GROUP_DOMAIN:
+ assignment['group_id'] = ref.actor_id
+ assignment['domain_id'] = ref.target_id
+ else:
+ raise exception.Error(message=_(
+ 'Unexpected assignment type encountered, %s') %
+ ref.type)
+ assignment['role_id'] = ref.role_id
+ if ref.inherited and (ref.type == AssignmentType.USER_DOMAIN or
+ ref.type == AssignmentType.GROUP_DOMAIN):
+ assignment['inherited_to_projects'] = 'projects'
+ return assignment
with sql.transaction() as session:
- assignment_list = []
- refs = session.query(UserDomainGrant).all()
- for x in refs:
- roles = x.data.get('roles', {})
- for r in self._roles_from_role_dicts(roles, False):
- assignment_list.append({'user_id': x.user_id,
- 'domain_id': x.domain_id,
- 'role_id': r})
- for r in self._roles_from_role_dicts(roles, True):
- assignment_list.append({'user_id': x.user_id,
- 'domain_id': x.domain_id,
- 'role_id': r,
- 'inherited_to_projects': True})
- refs = session.query(UserProjectGrant).all()
- for x in refs:
- roles = x.data.get('roles', {})
- for r in self._roles_from_role_dicts(roles, False):
- assignment_list.append({'user_id': x.user_id,
- 'project_id': x.project_id,
- 'role_id': r})
- refs = session.query(GroupDomainGrant).all()
- for x in refs:
- roles = x.data.get('roles', {})
- for r in self._roles_from_role_dicts(roles, False):
- assignment_list.append({'group_id': x.group_id,
- 'domain_id': x.domain_id,
- 'role_id': r})
- for r in self._roles_from_role_dicts(roles, True):
- assignment_list.append({'group_id': x.group_id,
- 'domain_id': x.domain_id,
- 'role_id': r,
- 'inherited_to_projects': True})
- refs = session.query(GroupProjectGrant).all()
- for x in refs:
- roles = x.data.get('roles', {})
- for r in self._roles_from_role_dicts(roles, False):
- assignment_list.append({'group_id': x.group_id,
- 'project_id': x.project_id,
- 'role_id': r})
- return assignment_list
+ refs = session.query(RoleAssignment).all()
+ return [denormalize_role(ref) for ref in refs]
# CRUD
@sql.handle_conflicts(conflict_type='project')
@@ -440,85 +367,12 @@ class Assignment(sql.Base, assignment.Driver):
with sql.transaction() as session:
tenant_ref = self._get_project(session, tenant_id)
- q = session.query(UserProjectGrant)
- q = q.filter_by(project_id=tenant_id)
- q.delete(False)
-
- q = session.query(GroupProjectGrant)
- q = q.filter_by(project_id=tenant_id)
+ q = session.query(RoleAssignment)
+ q = q.filter_by(target_id=tenant_id)
q.delete(False)
session.delete(tenant_ref)
- @sql.handle_conflicts(conflict_type='metadata')
- def _create_metadata(self, session, user_id, tenant_id, metadata,
- domain_id=None, group_id=None):
- if user_id:
- if tenant_id:
- session.add(UserProjectGrant
- (user_id=user_id,
- project_id=tenant_id,
- data=metadata))
- elif domain_id:
- session.add(UserDomainGrant
- (user_id=user_id,
- domain_id=domain_id,
- data=metadata))
- elif group_id:
- if tenant_id:
- session.add(GroupProjectGrant
- (group_id=group_id,
- project_id=tenant_id,
- data=metadata))
- elif domain_id:
- session.add(GroupDomainGrant
- (group_id=group_id,
- domain_id=domain_id,
- data=metadata))
- session.flush()
- return metadata
-
- @sql.handle_conflicts(conflict_type='metadata')
- def _update_metadata(self, session, user_id, tenant_id, metadata,
- domain_id=None, group_id=None):
- if user_id:
- if tenant_id:
- q = session.query(UserProjectGrant)
- q = q.filter_by(user_id=user_id)
- q = q.filter_by(project_id=tenant_id)
- elif domain_id:
- q = session.query(UserDomainGrant)
- q = q.filter_by(user_id=user_id)
- q = q.filter_by(domain_id=domain_id)
- elif group_id:
- if tenant_id:
- q = session.query(GroupProjectGrant)
- q = q.filter_by(group_id=group_id)
- q = q.filter_by(project_id=tenant_id)
- elif domain_id:
- q = session.query(GroupDomainGrant)
- q = q.filter_by(group_id=group_id)
- q = q.filter_by(domain_id=domain_id)
- metadata_ref = q.first()
- metadata_ref.data.update(metadata)
-
- # NOTE(pete5): We manually mark metadata_ref.data as modified since
- # SQLAlchemy may not automatically detect the change. Why not? Well...
- # SQLAlchemy knows that an attribute has changed either if (1) somebody
- # has marked it as mutated, or (2) the attribute's value at load-time
- # != the flush-time value. Since we don't track mutations to JsonBlob
- # columns (see "Mutation Tracking" in SQLAlchemy's documentation at
- # http://docs.sqlalchemy.org/en/rel_0_7/orm/extensions/mutable.html),
- # we can't count on (1). Since metadata_ref.data is often the same
- # object as metadata (i.e., we return metadata_ref.data in
- # self._get_metadata, manipulate it, then pass it to
- # self._update_metadata), the check in (2) determines that the value
- # hasn't changed.
- sql.flag_modified(metadata_ref, 'data')
-
- session.flush()
- return metadata_ref
-
# domain crud
@sql.handle_conflicts(conflict_type='domain')
@@ -528,10 +382,11 @@ class Assignment(sql.Base, assignment.Driver):
session.add(ref)
return ref.to_dict()
+ @sql.truncated
def list_domains(self, hints):
with sql.transaction() as session:
query = session.query(Domain)
- refs = self.filter_query(Domain, query, hints)
+ refs = sql.filter_limit_query(Domain, query, hints)
return [ref.to_dict() for ref in refs]
def _get_domain(self, session, domain_id):
@@ -570,6 +425,13 @@ class Assignment(sql.Base, assignment.Driver):
def delete_domain(self, domain_id):
with sql.transaction() as session:
ref = self._get_domain(session, domain_id)
+
+ # TODO(henry-nash): Although the controller will ensure deletion of
+ # all users & groups within the domain (which will cause all
+ # assignments for those users/groups to also be deleted), there
+ # could still be assignments on this domain for users/groups in
+ # other domains - so we should delete these here (see Bug #1277847)
+
session.delete(ref)
# role crud
@@ -581,10 +443,11 @@ class Assignment(sql.Base, assignment.Driver):
session.add(ref)
return ref.to_dict()
+ @sql.truncated
def list_roles(self, hints):
with sql.transaction() as session:
query = session.query(Role)
- refs = self.filter_query(Role, query, hints)
+ refs = sql.filter_limit_query(Role, query, hints)
return [ref.to_dict() for ref in refs]
def _get_role(self, session, role_id):
@@ -614,55 +477,21 @@ class Assignment(sql.Base, assignment.Driver):
def delete_role(self, role_id):
with sql.transaction() as session:
ref = self._get_role(session, role_id)
- for metadata_ref in session.query(UserProjectGrant):
- try:
- self._delete_grant(session, role_id,
- user_id=metadata_ref.user_id,
- project_id=metadata_ref.project_id)
- except exception.RoleNotFound:
- pass
- for metadata_ref in session.query(UserDomainGrant):
- try:
- self._delete_grant(session, role_id,
- user_id=metadata_ref.user_id,
- domain_id=metadata_ref.domain_id)
- except exception.RoleNotFound:
- pass
- for metadata_ref in session.query(GroupProjectGrant):
- try:
- self._delete_grant(session, role_id,
- group_id=metadata_ref.group_id,
- project_id=metadata_ref.project_id)
- except exception.RoleNotFound:
- pass
- for metadata_ref in session.query(GroupDomainGrant):
- try:
- self._delete_grant(session, role_id,
- group_id=metadata_ref.group_id,
- domain_id=metadata_ref.domain_id)
- except exception.RoleNotFound:
- pass
-
+ q = session.query(RoleAssignment)
+ q = q.filter_by(role_id=role_id)
+ q.delete(False)
session.delete(ref)
def delete_user(self, user_id):
with sql.transaction() as session:
- q = session.query(UserProjectGrant)
- q = q.filter_by(user_id=user_id)
- q.delete(False)
-
- q = session.query(UserDomainGrant)
- q = q.filter_by(user_id=user_id)
+ q = session.query(RoleAssignment)
+ q = q.filter_by(actor_id=user_id)
q.delete(False)
def delete_group(self, group_id):
with sql.transaction() as session:
- q = session.query(GroupProjectGrant)
- q = q.filter_by(group_id=group_id)
- q.delete(False)
-
- q = session.query(GroupDomainGrant)
- q = q.filter_by(group_id=group_id)
+ q = session.query(RoleAssignment)
+ q = q.filter_by(actor_id=group_id)
q.delete(False)
@@ -700,66 +529,27 @@ class Role(sql.ModelBase, sql.DictBase):
__table_args__ = (sql.UniqueConstraint('name'), {})
-class BaseGrant(sql.DictBase):
- """Base Grant class.
-
- There are four grant tables in the current implementation, one for
- each type of grant:
-
- - User for Project
- - User for Domain
- - Group for Project
- - Group for Domain
-
- Each is a table with the two attributes above as a combined primary key,
- with the data field holding all roles for that combination. The data
- field is a list of dicts. For regular role assignments each dict in
- the list of of the form:
-
- {'id': role_id}
-
- If the OS-INHERIT extension is enabled and the role on a domain is an
- inherited role, the dict will be of the form:
+class RoleAssignment(sql.ModelBase, sql.DictBase):
+ __tablename__ = 'assignment'
+ attributes = ['type', 'actor_id', 'target_id', 'role_id', 'inherited']
+    # NOTE(henry-nash): Postgres requires a name to be defined for an Enum
+ type = sql.Column(
+ sql.Enum(AssignmentType.USER_PROJECT, AssignmentType.GROUP_PROJECT,
+ AssignmentType.USER_DOMAIN, AssignmentType.GROUP_DOMAIN,
+ name='type'),
+ nullable=False)
+ actor_id = sql.Column(sql.String(64), nullable=False)
+ target_id = sql.Column(sql.String(64), nullable=False)
+ role_id = sql.Column(sql.String(64), sql.ForeignKey('role.id'),
+ nullable=False)
+ inherited = sql.Column(sql.Boolean, default=False, nullable=False)
+ __table_args__ = (sql.PrimaryKeyConstraint('type', 'actor_id', 'target_id',
+ 'role_id'), {})
- {'id': role_id, 'inherited_to': 'projects'}
-
- """
def to_dict(self):
"""Override parent to_dict() method with a simpler implementation.
- Grant tables don't have non-indexed 'extra' attributes, so the
+ RoleAssignment doesn't have non-indexed 'extra' attributes, so the
parent implementation is not applicable.
"""
- return dict(self.iteritems())
-
-
-class UserProjectGrant(sql.ModelBase, BaseGrant):
- __tablename__ = 'user_project_metadata'
- user_id = sql.Column(sql.String(64), primary_key=True)
- project_id = sql.Column(sql.String(64), sql.ForeignKey('project.id'),
- primary_key=True)
- data = sql.Column(sql.JsonBlob())
-
-
-class UserDomainGrant(sql.ModelBase, BaseGrant):
- __tablename__ = 'user_domain_metadata'
- user_id = sql.Column(sql.String(64), primary_key=True)
- domain_id = sql.Column(sql.String(64), sql.ForeignKey('domain.id'),
- primary_key=True)
- data = sql.Column(sql.JsonBlob())
-
-
-class GroupProjectGrant(sql.ModelBase, BaseGrant):
- __tablename__ = 'group_project_metadata'
- group_id = sql.Column(sql.String(64), primary_key=True)
- project_id = sql.Column(sql.String(64), sql.ForeignKey('project.id'),
- primary_key=True)
- data = sql.Column(sql.JsonBlob())
-
-
-class GroupDomainGrant(sql.ModelBase, BaseGrant):
- __tablename__ = 'group_domain_metadata'
- group_id = sql.Column(sql.String(64), primary_key=True)
- domain_id = sql.Column(sql.String(64), sql.ForeignKey('domain.id'),
- primary_key=True)
- data = sql.Column(sql.JsonBlob())
+ return dict(six.iteritems(self))
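
The RoleAssignment model above is the single table that replaces the four *_metadata grant tables. A self-contained sketch of the same shape in plain declarative SQLAlchemy against SQLite; keystone wraps these types in keystone.common.sql, and the role foreign key is omitted here to keep the example standalone:

    from sqlalchemy import Boolean, Column, Enum, String, create_engine
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import sessionmaker

    Base = declarative_base()

    class RoleAssignment(Base):
        __tablename__ = 'assignment'
        # Postgres requires Enum types to be named, hence name='type'.
        type = Column(Enum('UserProject', 'GroupProject', 'UserDomain',
                           'GroupDomain', name='type'), primary_key=True)
        actor_id = Column(String(64), primary_key=True)   # user or group id
        target_id = Column(String(64), primary_key=True)  # project or domain id
        role_id = Column(String(64), primary_key=True)
        inherited = Column(Boolean, default=False, nullable=False)

    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)
    session = sessionmaker(bind=engine)()
    session.add(RoleAssignment(type='UserProject', actor_id='u1',
                               target_id='p1', role_id='admin'))
    session.commit()
    # One query now answers what previously needed up to four tables:
    print(session.query(RoleAssignment).filter_by(actor_id='u1').count())
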
diff --git a/keystone/assignment/controllers.py b/keystone/assignment/controllers.py
index 9f5dbfa12..cfef07e40 100644
--- a/keystone/assignment/controllers.py
+++ b/keystone/assignment/controllers.py
@@ -135,7 +135,7 @@ class Tenant(controller.V2Controller):
user_ids = self.assignment_api.list_user_ids_for_project(tenant_id)
for user_id in user_ids:
user_ref = self.identity_api.get_user(user_id)
- user_refs.append(self.identity_api.v3_to_v2_user(user_ref))
+ user_refs.append(self.v3_to_v2_user(user_ref))
return {'users': user_refs}
def _format_project_list(self, tenant_refs, **kwargs):
@@ -173,7 +173,7 @@ class Tenant(controller.V2Controller):
return o
-@dependency.requires('assignment_api', 'identity_api')
+@dependency.requires('assignment_api')
class Role(controller.V2Controller):
# COMPAT(essex-3)
@@ -274,8 +274,6 @@ class Role(controller.V2Controller):
"""
self.assert_admin(context)
- # Ensure user exists by getting it first.
- self.identity_api.get_user(user_id)
tenants = self.assignment_api.list_projects_for_user(user_id)
o = []
for tenant in tenants:
@@ -508,11 +506,6 @@ class RoleV3(controller.V3Controller):
self._require_domain_xor_project(domain_id, project_id)
self._require_user_xor_group(user_id, group_id)
- if user_id:
- self.identity_api.get_user(user_id)
- if group_id:
- self.identity_api.get_group(group_id)
-
self.assignment_api.create_grant(
role_id, user_id, group_id, domain_id, project_id,
self._check_if_inherited(context))
@@ -536,11 +529,6 @@ class RoleV3(controller.V3Controller):
self._require_domain_xor_project(domain_id, project_id)
self._require_user_xor_group(user_id, group_id)
- if user_id:
- self.identity_api.get_user(user_id)
- if group_id:
- self.identity_api.get_group(group_id)
-
self.assignment_api.get_grant(
role_id, user_id, group_id, domain_id, project_id,
self._check_if_inherited(context))
diff --git a/keystone/assignment/core.py b/keystone/assignment/core.py
index 3849003ab..8d2d5f4d7 100644
--- a/keystone/assignment/core.py
+++ b/keystone/assignment/core.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -59,6 +57,7 @@ class Manager(manager.Manager):
The late import works around this. The if block prevents creation of the
api object by both managers.
"""
+ _PROJECT = 'project'
def __init__(self):
assignment_driver = CONF.assignment.driver
@@ -69,7 +68,7 @@ class Manager(manager.Manager):
super(Manager, self).__init__(assignment_driver)
- @notifications.created('project')
+ @notifications.created(_PROJECT)
def create_project(self, tenant_id, tenant):
tenant = tenant.copy()
tenant.setdefault('enabled', True)
@@ -82,22 +81,26 @@ class Manager(manager.Manager):
ret['domain_id'])
return ret
- @notifications.updated('project')
+ @notifications.disabled(_PROJECT, public=False)
+ def _disable_project(self, tenant_id):
+ return self.token_api.delete_tokens_for_users(
+ self.list_user_ids_for_project(tenant_id),
+ project_id=tenant_id)
+
+ @notifications.updated(_PROJECT)
def update_project(self, tenant_id, tenant):
tenant = tenant.copy()
if 'enabled' in tenant:
tenant['enabled'] = clean.project_enabled(tenant['enabled'])
if not tenant.get('enabled', True):
- self.token_api.delete_tokens_for_users(
- self.list_user_ids_for_project(tenant_id),
- project_id=tenant_id)
+ self._disable_project(tenant_id)
ret = self.driver.update_project(tenant_id, tenant)
self.get_project.invalidate(self, tenant_id)
self.get_project_by_name.invalidate(self, ret['name'],
ret['domain_id'])
return ret
- @notifications.deleted('project')
+ @notifications.deleted(_PROJECT)
def delete_project(self, tenant_id):
project = self.driver.get_project(tenant_id)
user_ids = self.list_user_ids_for_project(tenant_id)
@@ -266,6 +269,8 @@ class Manager(manager.Manager):
"exist."),
role_id)
+ # TODO(henry-nash): We might want to consider list limiting this at some
+ # point in the future.
def list_projects_for_user(self, user_id, hints=None):
# NOTE(henry-nash): In order to get a complete list of user projects,
# the driver will need to look at group assignments. To avoid cross
@@ -289,6 +294,7 @@ class Manager(manager.Manager):
def get_domain_by_name(self, domain_name):
return self.driver.get_domain_by_name(domain_name)
+ @notifications.created('domain')
def create_domain(self, domain_id, domain):
ret = self.driver.create_domain(domain_id, domain)
if SHOULD_CACHE(ret):
@@ -296,19 +302,26 @@ class Manager(manager.Manager):
self.get_domain_by_name.set(ret, self, ret['name'])
return ret
+ @manager.response_truncated
def list_domains(self, hints=None):
return self.driver.list_domains(hints or driver_hints.Hints())
+ @notifications.disabled('domain', public=False)
+ def _disable_domain(self, domain_id):
+ self.token_api.delete_tokens_for_domain(domain_id)
+
+ @notifications.updated('domain')
def update_domain(self, domain_id, domain):
ret = self.driver.update_domain(domain_id, domain)
# disable owned users & projects when the API user specifically set
# enabled=False
if not domain.get('enabled', True):
- self.token_api.delete_tokens_for_domain(domain_id)
+ self._disable_domain(domain_id)
self.get_domain.invalidate(self, domain_id)
self.get_domain_by_name.invalidate(self, ret['name'])
return ret
+ @notifications.deleted('domain')
def delete_domain(self, domain_id):
# explicitly forbid deleting the default domain (this should be a
# carefully orchestrated manual process involving configuration
@@ -325,7 +338,8 @@ class Manager(manager.Manager):
# to get a valid token to issue this delete.
if domain['enabled']:
raise exception.ForbiddenAction(
- action=_('delete a domain that is not disabled'))
+                action=_('cannot delete a domain that is enabled; '
+ 'please disable it first.'))
self._delete_domain_contents(domain_id)
self.driver.delete_domain(domain_id)
@@ -392,6 +406,7 @@ class Manager(manager.Manager):
{'userid': user['id'],
'domainid': domain_id})
+ @manager.response_truncated
def list_projects(self, hints=None):
return self.driver.list_projects(hints or driver_hints.Hints())
@@ -427,6 +442,7 @@ class Manager(manager.Manager):
self.get_role.set(ret, self, role_id)
return ret
+ @manager.response_truncated
def list_roles(self, hints=None):
return self.driver.list_roles(hints or driver_hints.Hints())
@@ -590,6 +606,9 @@ class Driver(object):
inherited).items()))
return [dict(r) for r in role_set]
+ def _get_list_limit(self):
+ return CONF.assignment.list_limit or CONF.list_limit
+
@abc.abstractmethod
def get_project_by_name(self, tenant_name, domain_id):
"""Get a tenant by name.
diff --git a/keystone/auth/__init__.py b/keystone/auth/__init__.py
index 27e2c4f87..d5d3b90c1 100644
--- a/keystone/auth/__init__.py
+++ b/keystone/auth/__init__.py
@@ -1,4 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
# flake8: noqa
# Copyright 2013 OpenStack Foundation
diff --git a/keystone/auth/controllers.py b/keystone/auth/controllers.py
index acfabcd94..b9771c975 100644
--- a/keystone/auth/controllers.py
+++ b/keystone/auth/controllers.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -85,6 +83,10 @@ def get_auth_method(method_name):
return AUTH_METHODS[method_name]
+# TODO(blk-u): this class doesn't use identity_api directly, but makes it
+# available for consumers. Consumers should probably not be getting
+# identity_api from this since it's available in global registry, then
+# identity_api should be removed from this list.
@dependency.requires('assignment_api', 'identity_api', 'trust_api')
class AuthInfo(object):
"""Encapsulation of "auth" request."""
@@ -118,12 +120,6 @@ class AuthInfo(object):
LOG.warning(msg)
raise exception.Unauthorized(msg)
- def _assert_user_is_enabled(self, user_ref):
- if not user_ref.get('enabled', True):
- msg = _('User is disabled: %s') % (user_ref['id'])
- LOG.warning(msg)
- raise exception.Unauthorized(msg)
-
def _lookup_domain(self, domain_info):
domain_id = domain_info.get('id')
domain_name = domain_info.get('name')
@@ -176,29 +172,6 @@ class AuthInfo(object):
raise exception.TrustNotFound(trust_id=trust_id)
return trust
- def lookup_user(self, user_info):
- user_id = user_info.get('id')
- user_name = user_info.get('name')
- user_ref = None
- if not user_id and not user_name:
- raise exception.ValidationError(attribute='id or name',
- target='user')
- try:
- if user_name:
- if 'domain' not in user_info:
- raise exception.ValidationError(attribute='domain',
- target='user')
- domain_ref = self._lookup_domain(user_info['domain'])
- user_ref = self.identity_api.get_user_by_name(
- user_name, domain_ref['id'])
- else:
- user_ref = self.identity_api.get_user(user_id)
- except exception.UserNotFound as e:
- LOG.exception(e)
- raise exception.Unauthorized(e)
- self._assert_user_is_enabled(user_ref)
- return user_ref
-
def _validate_and_normalize_scope_data(self):
"""Validate and normalize scope data."""
if 'scope' not in self.auth:
diff --git a/keystone/auth/core.py b/keystone/auth/core.py
index b1c54cd10..3d6183acd 100644
--- a/keystone/auth/core.py
+++ b/keystone/auth/core.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/auth/plugins/external.py b/keystone/auth/plugins/external.py
index 452ca2585..b4d517cf7 100644
--- a/keystone/auth/plugins/external.py
+++ b/keystone/auth/plugins/external.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -22,6 +20,7 @@ import six
from keystone import auth
from keystone.common import config
+from keystone.common import dependency
from keystone import exception
from keystone.openstack.common import versionutils
@@ -46,7 +45,7 @@ class Base(auth.AuthMethodHandler):
msg = _('No authenticated user')
raise exception.Unauthorized(msg)
try:
- user_ref = self._authenticate(REMOTE_USER, context, auth_info)
+ user_ref = self._authenticate(REMOTE_USER, context)
auth_context['user_id'] = user_ref['id']
if ('kerberos' in CONF.token.bind and
(context['environment'].get('AUTH_TYPE', '').lower()
@@ -57,7 +56,7 @@ class Base(auth.AuthMethodHandler):
raise exception.Unauthorized(msg)
@abc.abstractmethod
- def _authenticate(self, remote_user, context, auth_info):
+ def _authenticate(self, remote_user, context):
"""Look up the user in the identity backend.
Return user_ref
@@ -65,17 +64,18 @@ class Base(auth.AuthMethodHandler):
pass
+@dependency.requires('identity_api')
class DefaultDomain(Base):
- def _authenticate(self, remote_user, context, auth_info):
+ def _authenticate(self, remote_user, context):
"""Use remote_user to look up the user in the identity backend."""
domain_id = CONF.identity.default_domain_id
- user_ref = auth_info.identity_api.get_user_by_name(remote_user,
- domain_id)
+ user_ref = self.identity_api.get_user_by_name(remote_user, domain_id)
return user_ref
+@dependency.requires('assignment_api', 'identity_api')
class Domain(Base):
- def _authenticate(self, remote_user, context, auth_info):
+ def _authenticate(self, remote_user, context):
"""Use remote_user to look up the user in the identity backend.
The domain will be extracted from the REMOTE_DOMAIN environment
@@ -88,12 +88,10 @@ class Domain(Base):
except KeyError:
domain_id = CONF.identity.default_domain_id
else:
- domain_ref = (auth_info.identity_api.
- get_domain_by_name(domain_name))
+ domain_ref = self.assignment_api.get_domain_by_name(domain_name)
domain_id = domain_ref['id']
- user_ref = auth_info.identity_api.get_user_by_name(username,
- domain_id)
+ user_ref = self.identity_api.get_user_by_name(username, domain_id)
return user_ref
@@ -119,6 +117,7 @@ class ExternalDomain(Domain):
super(ExternalDomain, self).__init__()
+@dependency.requires('identity_api')
class LegacyDefaultDomain(Base):
"""Deprecated. Please use keystone.auth.external.DefaultDomain instead.
@@ -134,17 +133,17 @@ class LegacyDefaultDomain(Base):
def __init__(self):
super(LegacyDefaultDomain, self).__init__()
- def _authenticate(self, remote_user, context, auth_info):
+ def _authenticate(self, remote_user, context):
"""Use remote_user to look up the user in the identity backend."""
# NOTE(dolph): this unintentionally discards half the REMOTE_USER value
names = remote_user.split('@')
username = names.pop(0)
domain_id = CONF.identity.default_domain_id
- user_ref = auth_info.identity_api.get_user_by_name(username,
- domain_id)
+ user_ref = self.identity_api.get_user_by_name(username, domain_id)
return user_ref
+@dependency.requires('assignment_api', 'identity_api')
class LegacyDomain(Base):
"""Deprecated. Please use keystone.auth.external.Domain instead."""
@@ -155,7 +154,7 @@ class LegacyDomain(Base):
def __init__(self):
super(LegacyDomain, self).__init__()
- def _authenticate(self, remote_user, context, auth_info):
+ def _authenticate(self, remote_user, context):
"""Use remote_user to look up the user in the identity backend.
If remote_user contains an `@` assume that the substring before the
@@ -166,11 +165,9 @@ class LegacyDomain(Base):
username = names.pop(0)
if names:
domain_name = names[0]
- domain_ref = (auth_info.assignment_api.
- get_domain_by_name(domain_name))
+ domain_ref = self.assignment_api.get_domain_by_name(domain_name)
domain_id = domain_ref['id']
else:
domain_id = CONF.identity.default_domain_id
- user_ref = auth_info.identity_api.get_user_by_name(username,
- domain_id)
+ user_ref = self.identity_api.get_user_by_name(username, domain_id)
return user_ref
diff --git a/keystone/auth/plugins/oauth1.py b/keystone/auth/plugins/oauth1.py
index c5c9785d7..ba1601b08 100644
--- a/keystone/auth/plugins/oauth1.py
+++ b/keystone/auth/plugins/oauth1.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/auth/plugins/password.py b/keystone/auth/plugins/password.py
index 45397292e..42a1fbb63 100644
--- a/keystone/auth/plugins/password.py
+++ b/keystone/auth/plugins/password.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -115,6 +113,7 @@ class Password(auth.AuthMethodHandler):
# all we care is password matches
try:
self.identity_api.authenticate(
+ context,
user_id=user_info.user_id,
password=user_info.password,
domain_scope=user_info.domain_id)
diff --git a/keystone/auth/plugins/token.py b/keystone/auth/plugins/token.py
index 615c708b0..6c39b6c7b 100644
--- a/keystone/auth/plugins/token.py
+++ b/keystone/auth/plugins/token.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/auth/routers.py b/keystone/auth/routers.py
index ba29b2f52..c15e5476f 100644
--- a/keystone/auth/routers.py
+++ b/keystone/auth/routers.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/catalog/__init__.py b/keystone/catalog/__init__.py
index 35fb00f69..9cef97fa3 100644
--- a/keystone/catalog/__init__.py
+++ b/keystone/catalog/__init__.py
@@ -1,4 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
# flake8: noqa
# Copyright 2012 OpenStack Foundation
diff --git a/keystone/catalog/backends/kvs.py b/keystone/catalog/backends/kvs.py
index bb9c758f4..2a9ae7a51 100644
--- a/keystone/catalog/backends/kvs.py
+++ b/keystone/catalog/backends/kvs.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -51,7 +49,8 @@ class Catalog(kvs.Base, catalog.Driver):
# which is the behavior we want.
self.get_region(parent_region_id)
- def create_region(self, region_id, region):
+ def create_region(self, region):
+ region_id = region['id']
region.setdefault('parent_region_id')
self._check_parent_region(region)
self.db.set('region-%s' % region_id, region)
@@ -88,7 +87,7 @@ class Catalog(kvs.Base, catalog.Driver):
self.db.set('service_list', list(service_list))
return service
- def list_services(self):
+ def list_services(self, hints):
return [self.get_service(x) for x in self.db.get('service_list', [])]
def get_service(self, service_id):
@@ -119,7 +118,7 @@ class Catalog(kvs.Base, catalog.Driver):
self.db.set('endpoint_list', list(endpoint_list))
return endpoint
- def list_endpoints(self):
+ def list_endpoints(self, hints):
return [self.get_endpoint(x) for x in self.db.get('endpoint_list', [])]
def get_endpoint(self, endpoint_id):
diff --git a/keystone/catalog/backends/sql.py b/keystone/catalog/backends/sql.py
index 60934654b..e4f57cf4f 100644
--- a/keystone/catalog/backends/sql.py
+++ b/keystone/catalog/backends/sql.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-#
# Copyright 2012 OpenStack Foundation
# Copyright 2012 Canonical Ltd.
#
@@ -20,9 +18,10 @@ import six
from keystone import catalog
from keystone.catalog import core
from keystone.common import sql
-from keystone.common.sql import migration
+from keystone.common.sql import migration_helpers
from keystone import config
from keystone import exception
+from keystone.openstack.common.db.sqlalchemy import migration
from keystone.openstack.common.db.sqlalchemy import session as db_session
@@ -78,9 +77,10 @@ class Endpoint(sql.ModelBase, sql.DictBase):
extra = sql.Column(sql.JsonBlob())
-class Catalog(sql.Base, catalog.Driver):
+class Catalog(catalog.Driver):
def db_sync(self, version=None):
- migration.db_sync(version=version)
+ migration.db_sync(
+ migration_helpers.find_migrate_repo(), version=version)
# Regions
def list_regions(self):
@@ -130,7 +130,7 @@ class Catalog(sql.Base, catalog.Driver):
session.delete(ref)
session.flush()
- def create_region(self, region_id, region_ref):
+ def create_region(self, region_ref):
session = db_session.get_session()
with session.begin():
self._check_parent_region(session, region_ref)
@@ -154,9 +154,11 @@ class Catalog(sql.Base, catalog.Driver):
return ref.to_dict()
# Services
- def list_services(self):
+ @sql.truncated
+ def list_services(self, hints):
session = db_session.get_session()
- services = session.query(Service).all()
+ services = session.query(Service)
+ services = sql.filter_limit_query(Service, services, hints)
return [s.to_dict() for s in list(services)]
def _get_service(self, session, service_id):
@@ -221,9 +223,11 @@ class Catalog(sql.Base, catalog.Driver):
session = db_session.get_session()
return self._get_endpoint(session, endpoint_id).to_dict()
- def list_endpoints(self):
+ @sql.truncated
+ def list_endpoints(self, hints):
session = db_session.get_session()
endpoints = session.query(Endpoint)
+ endpoints = sql.filter_limit_query(Endpoint, endpoints, hints)
return [e.to_dict() for e in list(endpoints)]
def update_endpoint(self, endpoint_id, endpoint_ref):
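
The `@sql.truncated` decorator and `sql.filter_limit_query` cooperate so the driver can cap result size at the database rather than in Python. A plausible sketch of how the helper pushes the limit hint into the SQLAlchemy query — the real helper lives in keystone.common.sql and this body is illustrative:

    def filter_limit_query(model, query, hints):
        if hints is None:
            return query
        limit = hints.get_limit()
        if limit is None:
            return query
        # Ask the database for at most 'limit' rows; the @sql.truncated
        # decorator is then responsible for flagging the result as truncated.
        return query.limit(limit['limit'])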
diff --git a/keystone/catalog/backends/templated.py b/keystone/catalog/backends/templated.py
index f1315741c..1ca80cb35 100644
--- a/keystone/catalog/backends/templated.py
+++ b/keystone/catalog/backends/templated.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -23,6 +21,7 @@ from keystone.catalog import core
from keystone import config
from keystone import exception
from keystone.openstack.common import log
+from keystone.openstack.common import versionutils
LOG = log.getLogger(__name__)
@@ -57,10 +56,7 @@ def parse_templates(template_lines):
return o
-# TODO(jaypipes): should be templated.Catalog,
-# not templated.TemplatedCatalog to be consistent with
-# other catalog backends
-class TemplatedCatalog(kvs.Catalog):
+class Catalog(kvs.Catalog):
"""A backend that generates endpoints for the Catalog based on templates.
It is usually configured via config entries that look like:
@@ -93,6 +89,7 @@ class TemplatedCatalog(kvs.Catalog):
"""
def __init__(self, templates=None):
+ super(Catalog, self).__init__()
if templates:
self.templates = templates
else:
@@ -100,7 +97,6 @@ class TemplatedCatalog(kvs.Catalog):
if not os.path.exists(template_file):
template_file = CONF.find_file(template_file)
self._load_templates(template_file)
- super(TemplatedCatalog, self).__init__()
def _load_templates(self, template_file):
try:
@@ -126,3 +122,11 @@ class TemplatedCatalog(kvs.Catalog):
def get_v3_catalog(self, user_id, tenant_id, metadata=None):
raise exception.NotImplemented()
+
+
+@versionutils.deprecated(
+ versionutils.deprecated.ICEHOUSE,
+ in_favor_of='keystone.catalog.backends.templated.Catalog',
+ remove_in=+2)
+class TemplatedCatalog(Catalog):
+ pass
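
The old class name survives only as a deprecated alias: instantiating it warns, and `remove_in=+2` signals the alias can be dropped two releases after Icehouse. A self-contained sketch of the pattern, in the spirit of openstack.common.versionutils (this decorator is illustrative):

    import warnings

    def deprecated(in_favor_of):
        def wrapper(cls):
            original_init = cls.__init__

            def __init__(self, *args, **kwargs):
                warnings.warn('%s is deprecated; use %s instead'
                              % (cls.__name__, in_favor_of),
                              DeprecationWarning)
                original_init(self, *args, **kwargs)

            cls.__init__ = __init__
            return cls
        return wrapper

    class Catalog(object):
        pass

    @deprecated(in_favor_of='keystone.catalog.backends.templated.Catalog')
    class TemplatedCatalog(Catalog):
        pass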
diff --git a/keystone/catalog/controllers.py b/keystone/catalog/controllers.py
index 2ab95d2b0..f289932b1 100644
--- a/keystone/catalog/controllers.py
+++ b/keystone/catalog/controllers.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
# Copyright 2012 Canonical Ltd.
#
@@ -143,15 +141,19 @@ class RegionV3(controller.V3Controller):
collection_name = 'regions'
member_name = 'region'
- def __init__(self):
- super(RegionV3, self).__init__()
- self.get_member_from_driver = self.catalog_api.get_region
+ @controller.protected()
+ def create_region_with_id(self, context, region_id, region):
+ """Specialized route target for PUT /regions/{region_id}."""
+ ref = self._normalize_dict(region)
+ ref['id'] = region_id
+ ref = self.catalog_api.create_region(ref)
+ return RegionV3.wrap_member(context, ref)
@controller.protected()
def create_region(self, context, region):
ref = self._assign_unique_id(self._normalize_dict(region))
- ref = self.catalog_api.create_region(ref['id'], ref)
+ ref = self.catalog_api.create_region(ref)
return RegionV3.wrap_member(context, ref)
def list_regions(self, context):
@@ -195,7 +197,7 @@ class ServiceV3(controller.V3Controller):
@controller.filterprotected('type')
def list_services(self, context, filters):
hints = ServiceV3.build_driver_hints(context, filters)
- refs = self.catalog_api.list_services()
+ refs = self.catalog_api.list_services(hints=hints)
return ServiceV3.wrap_collection(context, refs, hints=hints)
@controller.protected()
@@ -248,7 +250,7 @@ class EndpointV3(controller.V3Controller):
@controller.filterprotected('interface', 'service_id')
def list_endpoints(self, context, filters):
hints = EndpointV3.build_driver_hints(context, filters)
- refs = self.catalog_api.list_endpoints()
+ refs = self.catalog_api.list_endpoints(hints=hints)
return EndpointV3.wrap_collection(context, refs, hints=hints)
@controller.protected()
diff --git a/keystone/catalog/core.py b/keystone/catalog/core.py
index 02785a732..0dfffcc68 100644
--- a/keystone/catalog/core.py
+++ b/keystone/catalog/core.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
# Copyright 2012 Canonical Ltd.
#
@@ -22,6 +20,7 @@ import abc
import six
from keystone.common import dependency
+from keystone.common import driver_hints
from keystone.common import manager
from keystone import config
from keystone import exception
@@ -68,9 +67,9 @@ class Manager(manager.Manager):
def __init__(self):
super(Manager, self).__init__(CONF.catalog.driver)
- def create_region(self, region_id, region_ref):
+ def create_region(self, region_ref):
try:
- return self.driver.create_region(region_id, region_ref)
+ return self.driver.create_region(region_ref)
except exception.NotFound:
parent_region_id = region_ref.get('parent_region_id')
raise exception.RegionNotFound(region_id=parent_region_id)
@@ -99,6 +98,10 @@ class Manager(manager.Manager):
except exception.NotFound:
raise exception.ServiceNotFound(service_id=service_id)
+ @manager.response_truncated
+ def list_services(self, hints=None):
+ return self.driver.list_services(hints or driver_hints.Hints())
+
def create_endpoint(self, endpoint_id, endpoint_ref):
try:
return self.driver.create_endpoint(endpoint_id, endpoint_ref)
@@ -118,6 +121,10 @@ class Manager(manager.Manager):
except exception.NotFound:
raise exception.EndpointNotFound(endpoint_id=endpoint_id)
+ @manager.response_truncated
+ def list_endpoints(self, hints=None):
+ return self.driver.list_endpoints(hints or driver_hints.Hints())
+
def get_catalog(self, user_id, tenant_id, metadata=None):
try:
return self.driver.get_catalog(user_id, tenant_id, metadata)
@@ -129,8 +136,11 @@ class Manager(manager.Manager):
class Driver(object):
"""Interface description for an Catalog driver."""
+ def _get_list_limit(self):
+ return CONF.catalog.list_limit or CONF.list_limit
+
@abc.abstractmethod
- def create_region(self, region_id, region_ref):
+ def create_region(self, region_ref):
"""Creates a new region.
:raises: keystone.exception.Conflict
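
`_get_list_limit` prefers the driver-specific value and falls back to the global one, so operators can cap individual collections independently. A hypothetical keystone.conf excerpt (values are placeholders):

    [DEFAULT]
    # Global cap on collection sizes; applies wherever no section overrides it.
    list_limit = 100

    [catalog]
    # Tighter cap for catalog collections (services, endpoints).
    list_limit = 50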
diff --git a/keystone/catalog/routers.py b/keystone/catalog/routers.py
index 80740aa75..1efa9d78f 100644
--- a/keystone/catalog/routers.py
+++ b/keystone/catalog/routers.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -19,8 +17,17 @@ from keystone.common import router
def append_v3_routers(mapper, routers):
- routers.append(router.Router(controllers.RegionV3(),
+ regions_controller = controllers.RegionV3()
+ routers.append(router.Router(regions_controller,
'regions', 'region'))
+
+ # Need to add an additional route to support PUT /regions/{region_id}
+ mapper.connect(
+ '/regions/{region_id}',
+ controller=regions_controller,
+ action='create_region_with_id',
+ conditions=dict(method=['PUT']))
+
routers.append(router.Router(controllers.ServiceV3(),
'services', 'service'))
routers.append(router.Router(controllers.EndpointV3(),
diff --git a/keystone/clean.py b/keystone/clean.py
index eef20d4c9..208eebee1 100644
--- a/keystone/clean.py
+++ b/keystone/clean.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/cli.py b/keystone/cli.py
index 8e41f2264..257b4a517 100644
--- a/keystone/cli.py
+++ b/keystone/cli.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -25,10 +23,12 @@ import pbr.version
from keystone.common import openssl
from keystone.common import sql
-from keystone.common.sql import migration
+from keystone.common.sql import migration_helpers
from keystone.common import utils
from keystone import config
from keystone import contrib
+from keystone import exception
+from keystone.openstack.common.db.sqlalchemy import migration
from keystone.openstack.common import importutils
from keystone import token
@@ -71,23 +71,27 @@ class DbSync(BaseApp):
version = CONF.command.version
extension = CONF.command.extension
if not extension:
- migration.db_sync(version=version)
+ abs_path = migration_helpers.find_migrate_repo()
else:
- package_name = "%s.%s.migrate_repo" % (contrib.__name__, extension)
try:
+ package_name = '.'.join((contrib.__name__, extension))
package = importutils.import_module(package_name)
- repo_path = os.path.abspath(os.path.dirname(package.__file__))
except ImportError:
- print(_("This extension does not provide migrations."))
- exit(0)
+ raise ImportError(_("%s extension does not exist.")
+ % package_name)
try:
+ abs_path = migration_helpers.find_migrate_repo(package)
+ try:
+ migration.db_version_control(abs_path)
# Register the repo with the version control API
# If it already knows about the repo, it will throw
# an exception that we can safely ignore
- migration.db_version_control(version=None, repo_path=repo_path)
- except exceptions.DatabaseAlreadyControlledError:
- pass
- migration.db_sync(version=version, repo_path=repo_path)
+ except exceptions.DatabaseAlreadyControlledError:
+ pass
+ except exception.MigrationNotProvided as e:
+ print(e)
+ exit(0)
+ migration.db_sync(abs_path, version=version)
class DbVersion(BaseApp):
@@ -108,16 +112,20 @@ class DbVersion(BaseApp):
extension = CONF.command.extension
if extension:
try:
- package_name = ("%s.%s.migrate_repo" %
- (contrib.__name__, extension))
+ package_name = '.'.join((contrib.__name__, extension))
package = importutils.import_module(package_name)
- repo_path = os.path.abspath(os.path.dirname(package.__file__))
- print(migration.db_version(repo_path))
except ImportError:
- print(_("This extension does not provide migrations."))
- exit(1)
+ raise ImportError(_("%s extension does not exist.")
+ % package_name)
+ try:
+ print(migration.db_version(
+ migration_helpers.find_migrate_repo(package), 0))
+ except exception.MigrationNotProvided as e:
+ print(e)
+ exit(0)
else:
- print(migration.db_version())
+ print(migration.db_version(
+ migration_helpers.find_migrate_repo(), 0))
class BaseCertificateSetup(BaseApp):
@@ -214,6 +222,7 @@ def main(argv=None, config_files=None):
config.configure()
sql.initialize()
+ config.set_default_for_default_log_levels()
CONF(args=argv[1:],
project='keystone',
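
The new `DbSync` flow resolves an extension's migrate repo through migration_helpers and puts the database under version control before syncing. A condensed sketch of the same sequence, assuming the imports used in cli.py; 'endpoint_filter' is just an example extension name:

    from migrate import exceptions

    from keystone.common.sql import migration_helpers
    from keystone import contrib
    from keystone.openstack.common.db.sqlalchemy import migration
    from keystone.openstack.common import importutils

    package_name = '.'.join((contrib.__name__, 'endpoint_filter'))
    package = importutils.import_module(package_name)
    abs_path = migration_helpers.find_migrate_repo(package)
    try:
        # Registering the repo is a one-time operation; an already-controlled
        # database raises an error we can safely ignore.
        migration.db_version_control(abs_path)
    except exceptions.DatabaseAlreadyControlledError:
        pass
    migration.db_sync(abs_path, version=None)  # None syncs to the latest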
diff --git a/keystone/common/authorization.py b/keystone/common/authorization.py
index 74957e4fd..3d97b540b 100644
--- a/keystone/common/authorization.py
+++ b/keystone/common/authorization.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
diff --git a/keystone/common/base64utils.py b/keystone/common/base64utils.py
index 9ad3b5dd1..b944401b1 100644
--- a/keystone/common/base64utils.py
+++ b/keystone/common/base64utils.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/common/cache/__init__.py b/keystone/common/cache/__init__.py
index c556c80b8..ec7de293e 100644
--- a/keystone/common/cache/__init__.py
+++ b/keystone/common/cache/__init__.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 Metacloud
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/common/cache/backends/__init__.py b/keystone/common/cache/backends/__init__.py
index 42ad33836..e69de29bb 100644
--- a/keystone/common/cache/backends/__init__.py
+++ b/keystone/common/cache/backends/__init__.py
@@ -1,15 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2013 Metacloud
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
diff --git a/keystone/common/cache/backends/noop.py b/keystone/common/cache/backends/noop.py
index 646610a3a..38329c940 100644
--- a/keystone/common/cache/backends/noop.py
+++ b/keystone/common/cache/backends/noop.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 Metacloud
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/common/cache/core.py b/keystone/common/cache/core.py
index a8f3ba0e6..0ced079de 100644
--- a/keystone/common/cache/core.py
+++ b/keystone/common/cache/core.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 Metacloud
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/common/cms.py b/keystone/common/cms.py
index e3c835cee..917de3d86 100644
--- a/keystone/common/cms.py
+++ b/keystone/common/cms.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/common/config.py b/keystone/common/config.py
index 9ce993fa7..5b495a436 100644
--- a/keystone/common/config.py
+++ b/keystone/common/config.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -21,62 +19,185 @@ _DEFAULT_AUTH_METHODS = ['external', 'password', 'token']
FILE_OPTIONS = {
- '': [
- cfg.StrOpt('admin_token', secret=True, default='ADMIN'),
+ None: [
+ cfg.StrOpt('admin_token', secret=True, default='ADMIN',
+ help=('A "shared secret" that can be used to bootstrap '
+ 'Keystone. This "token" does not represent a user, '
+ 'and carries no explicit authorization. To disable '
+ 'in production (highly recommended), remove '
+ 'AdminTokenAuthMiddleware from your paste '
+ 'application pipelines (for example, in '
+ 'keystone-paste.ini).')),
cfg.StrOpt('public_bind_host',
default='0.0.0.0',
deprecated_opts=[cfg.DeprecatedOpt('bind_host',
- group='DEFAULT')]),
+ group='DEFAULT')],
+ help=('The IP address of the network interface for the '
+ 'public service to listen on.')),
cfg.StrOpt('admin_bind_host',
default='0.0.0.0',
deprecated_opts=[cfg.DeprecatedOpt('bind_host',
- group='DEFAULT')]),
- cfg.IntOpt('compute_port', default=8774),
- cfg.IntOpt('admin_port', default=35357),
- cfg.IntOpt('public_port', default=5000),
+ group='DEFAULT')],
+ help=('The IP address of the network interface for the '
+ 'admin service to listen on.')),
+ cfg.IntOpt('compute_port', default=8774,
+ help=('The port which the OpenStack Compute service '
+ 'listens on.')),
+ cfg.IntOpt('admin_port', default=35357,
+ help=('The port number which the admin service listens '
+ 'on.')),
+ cfg.IntOpt('public_port', default=5000,
+ help=('The port number which the public service listens '
+ 'on.')),
cfg.StrOpt('public_endpoint',
- default='http://localhost:%(public_port)s/'),
+ default='http://localhost:%(public_port)s/',
+ help=('The base public endpoint URL for keystone that is '
+ 'advertised to clients (NOTE: this does NOT affect '
+ 'how keystone listens for connections)')),
cfg.StrOpt('admin_endpoint',
- default='http://localhost:%(admin_port)s/'),
- cfg.StrOpt('onready'),
+ default='http://localhost:%(admin_port)s/',
+ help=('The base admin endpoint URL for keystone that is '
+ 'advertised to clients (NOTE: this does NOT affect '
+ 'how keystone listens for connections)')),
+ cfg.StrOpt('onready',
+ help=('onready allows you to send a notification when the '
+ 'process is ready to serve. For example, to have it '
+ 'notify using systemd, one could set the shell '
+ 'command: "onready = systemd-notify --ready" or a '
+ 'module with a notify() method: '
+ '"onready = keystone.common.systemd"')),
# default max request size is 112k
- cfg.IntOpt('max_request_body_size', default=114688),
- cfg.IntOpt('max_param_size', default=64),
+ cfg.IntOpt('max_request_body_size', default=114688,
+ help=('enforced by optional sizelimit middleware '
+ '(keystone.middleware:RequestBodySizeLimiter)')),
+ cfg.IntOpt('max_param_size', default=64,
+ help='limit the sizes of user & tenant ID/names'),
# we allow tokens to be a bit larger to accommodate PKI
- cfg.IntOpt('max_token_size', default=8192),
+ cfg.IntOpt('max_token_size', default=8192,
+ help=('similar to max_param_size, but provides an '
+ 'exception for token values')),
cfg.StrOpt('member_role_id',
- default='9fe2ff9ee4384b1894a90878d3e92bab'),
- cfg.StrOpt('member_role_name', default='_member_'),
- cfg.IntOpt('crypt_strength', default=40000)],
+ default='9fe2ff9ee4384b1894a90878d3e92bab',
+ help=('During a SQL upgrade member_role_id will be used '
+ 'to create a new role that will replace records in '
+ 'the user_tenant_membership table with explicit '
+ 'role grants. After migration, the member_role_id '
+ 'will be used in the API add_user_to_project.')),
+ cfg.StrOpt('member_role_name', default='_member_',
+ help=('During a SQL upgrade member_role_id will be used '
+ 'to create a new role that will replace records in '
+ 'the user_tenant_membership table with explicit '
+ 'role grants. After migration, member_role_name will '
+ 'be ignored.')),
+ cfg.IntOpt('crypt_strength', default=40000,
+ help=('The value passed as the keyword "rounds" to the '
+ 'passlib encrypt method.')),
+ cfg.BoolOpt('tcp_keepalive', default=False,
+ help=("Set this to True if you want to enable "
+ "TCP_KEEPALIVE on server sockets i.e. sockets used "
+ "by the keystone wsgi server for client "
+ "connections")),
+ cfg.IntOpt('tcp_keepidle',
+ default=600,
+ help=("Sets the value of TCP_KEEPIDLE in seconds for each "
+ "server socket. Only applies if tcp_keepalive is "
+ "True. Not supported on OS X.")),
+ cfg.IntOpt('list_limit', default=None,
+ help=('The maximum number of entities that will be '
+ 'returned in a collection can be set with '
+ 'list_limit, with no limit set by default. This '
+ 'global limit may then be overridden for a specific '
+ 'driver by specifying a list_limit in the '
+ 'appropriate section (e.g. [assignment]).'))],
'identity': [
- cfg.StrOpt('default_domain_id', default='default'),
+ cfg.StrOpt('default_domain_id', default='default',
+ help=('This references the domain to use for all '
+ 'Identity API v2 requests (which are not aware of '
+ 'domains). A domain with this ID will be created '
+ 'for you by keystone-manage db_sync in migration '
+ '008. The domain referenced by this ID cannot be '
+ 'deleted on the v3 API, to prevent accidentally '
+ 'breaking the v2 API. There is nothing special about '
+ 'this domain, other than the fact that it must '
+ 'exist in order to maintain support for your v2 '
+ 'clients.')),
cfg.BoolOpt('domain_specific_drivers_enabled',
- default=False),
+ default=False,
+ help=('A subset (or all) of domains can have their own '
+ 'identity driver, each with their own partial '
+ 'configuration file in a domain configuration '
+ 'directory. Only values specific to the domain '
+ 'need to be placed in the domain specific '
+ 'configuration file. This feature is disabled by '
+ 'default; set to True to enable.')),
cfg.StrOpt('domain_config_dir',
- default='/etc/keystone/domains'),
+ default='/etc/keystone/domains',
+ help=('Path for Keystone to locate the domain specific '
+ 'identity configuration files if '
+ 'domain_specific_drivers_enabled is set to true.')),
cfg.StrOpt('driver',
default=('keystone.identity.backends'
- '.sql.Identity')),
- cfg.IntOpt('max_password_length', default=4096)],
+ '.sql.Identity'),
+ help='Keystone Identity backend driver'),
+ cfg.IntOpt('max_password_length', default=4096,
+ help=('Maximum supported length for user passwords; '
+ 'decrease to improve performance.')),
+ cfg.IntOpt('list_limit', default=None,
+ help=('Maximum number of entities that will be returned in '
+ 'an identity collection'))],
'trust': [
- cfg.BoolOpt('enabled', default=True),
+ cfg.BoolOpt('enabled', default=True,
+ help=('delegation and impersonation features can be '
+ 'optionally disabled')),
cfg.StrOpt('driver',
- default='keystone.trust.backends.sql.Trust')],
+ default='keystone.trust.backends.sql.Trust',
+ help='Keystone Trust backend driver')],
'os_inherit': [
- cfg.BoolOpt('enabled', default=False)],
+ cfg.BoolOpt('enabled', default=False,
+ help=('role-assignment inheritance to projects from '
+ 'owning domain can be optionally enabled'))],
'token': [
- cfg.ListOpt('bind', default=[]),
- cfg.StrOpt('enforce_token_bind', default='permissive'),
- cfg.IntOpt('expiration', default=3600),
- cfg.StrOpt('provider', default=None),
+ cfg.ListOpt('bind', default=[],
+ help=('External auth mechanisms that should add bind '
+ 'information to token e.g. kerberos, x509')),
+ cfg.StrOpt('enforce_token_bind', default='permissive',
+ help=('Enforcement policy on tokens presented to keystone '
+ 'with bind information. One of disabled, permissive, '
+ 'strict, required or a specifically required bind '
+ 'mode e.g. kerberos or x509 to require binding to '
+ 'that authentication.')),
+ cfg.IntOpt('expiration', default=3600,
+ help=('Amount of time a token should remain valid '
+ '(in seconds)')),
+ cfg.StrOpt('provider', default=None,
+ help=('Controls the token construction, validation, and '
+ 'revocation operations. Core providers are '
+ 'keystone.token.providers.[pki|uuid].Provider')),
cfg.StrOpt('driver',
- default='keystone.token.backends.sql.Token'),
- cfg.BoolOpt('caching', default=True),
- cfg.IntOpt('revocation_cache_time', default=3600),
- cfg.IntOpt('cache_time', default=None)],
+ default='keystone.token.backends.sql.Token',
+ help='Keystone Token persistence backend driver'),
+ cfg.BoolOpt('caching', default=True,
+ help=('Toggle for token system caching. This has no '
+ 'effect unless global caching is enabled.')),
+ cfg.IntOpt('revocation_cache_time', default=3600,
+ help=('Time to cache the revocation list (in seconds). '
+ 'This has no effect unless global and token '
+ 'caching are enabled.')),
+ cfg.IntOpt('cache_time', default=None,
+ help=('Time to cache tokens (in seconds). This has no '
+ 'effect unless global and token caching are '
+ 'enabled.'))],
'cache': [
- cfg.StrOpt('config_prefix', default='cache.keystone'),
- cfg.IntOpt('expiration_time', default=600),
+ cfg.StrOpt('config_prefix', default='cache.keystone',
+ help=('Prefix for building the configuration dictionary '
+ 'for the cache region. This should not need to be '
+ 'changed unless there is another dogpile.cache '
+ 'region with the same configuration name')),
+ cfg.IntOpt('expiration_time', default=600,
+ help=('Default TTL, in seconds, for any cached item in '
+ 'the dogpile.cache region. This applies to any '
+ 'cached method that doesn\'t have an explicit '
+ 'cache expiration time defined for it.')),
# NOTE(morganfainberg): the dogpile.cache.memory acceptable in devstack
# and other such single-process/thread deployments. Running
# dogpile.cache.memory in any other configuration has the same pitfalls
@@ -85,92 +206,179 @@ FILE_OPTIONS = {
# prevent issues with the memory cache ending up in "production"
# unintentionally, we register a no-op as the keystone default caching
# backend.
- cfg.StrOpt('backend', default='keystone.common.cache.noop'),
- cfg.BoolOpt('use_key_mangler', default=True),
- cfg.MultiStrOpt('backend_argument', default=[]),
- cfg.ListOpt('proxies', default=[]),
- # Global toggle for all caching using the should_cache_fn mechanism.
- cfg.BoolOpt('enabled', default=False),
- # caching backend specific debugging.
- cfg.BoolOpt('debug_cache_backend', default=False)],
+ cfg.StrOpt('backend', default='keystone.common.cache.noop',
+ help=('Dogpile.cache backend module. It is recommended '
+ 'that Memcache (dogpile.cache.memcache) or Redis '
+ '(dogpile.cache.redis) be used in production '
+ 'deployments. Small workloads (single process) '
+ 'like devstack can use the dogpile.cache.memory '
+ 'backend.')),
+ cfg.BoolOpt('use_key_mangler', default=True,
+ help=('Use a key-mangling function (sha1) to ensure '
+ 'fixed length cache-keys. This is toggle-able for '
+ 'debugging purposes; it is highly recommended to '
+ 'always leave this set to True.')),
+ cfg.MultiStrOpt('backend_argument', default=[],
+ help=('Arguments supplied to the backend module. '
+ 'Specify this option once per argument to be '
+ 'passed to the dogpile.cache backend. Example '
+ 'format: <argname>:<value>')),
+ cfg.ListOpt('proxies', default=[],
+ help=('Proxy Classes to import that will affect the way '
+ 'the dogpile.cache backend functions. See the '
+ 'dogpile.cache documentation on '
+ 'changing-backend-behavior. Comma delimited '
+ 'list e.g. '
+ 'my.dogpile.proxy.Class, my.dogpile.proxyClass2')),
+ cfg.BoolOpt('enabled', default=False,
+ help=('Global toggle for all caching using the '
+ 'should_cache_fn mechanism')),
+ cfg.BoolOpt('debug_cache_backend', default=False,
+ help=('Extra debugging from the cache backend (cache '
+ 'keys, get/set/delete/etc calls). This is only '
+ 'really useful if you need to see the specific '
+ 'cache-backend get/set/delete calls with the '
+ 'keys/values. Typically this should be left set '
+ 'to False.'))],
'ssl': [
- cfg.BoolOpt('enable', default=False),
+ cfg.BoolOpt('enable', default=False,
+ help=('Toggle for SSL support on the keystone '
+ 'eventlet servers.')),
cfg.StrOpt('certfile',
- default="/etc/keystone/ssl/certs/keystone.pem"),
+ default="/etc/keystone/ssl/certs/keystone.pem",
+ help='Path of the certfile for SSL.'),
cfg.StrOpt('keyfile',
- default="/etc/keystone/ssl/private/keystonekey.pem"),
+ default='/etc/keystone/ssl/private/keystonekey.pem',
+ help='Path of the keyfile for SSL.'),
cfg.StrOpt('ca_certs',
- default="/etc/keystone/ssl/certs/ca.pem"),
+ default='/etc/keystone/ssl/certs/ca.pem',
+ help='Path of the ca cert file for SSL.'),
cfg.StrOpt('ca_key',
- default="/etc/keystone/ssl/private/cakey.pem"),
+ default='/etc/keystone/ssl/private/cakey.pem',
+ help='Path of the CA key file for SSL'),
cfg.BoolOpt('cert_required', default=False),
- cfg.IntOpt('key_size', default=1024),
- cfg.IntOpt('valid_days', default=3650),
+ cfg.IntOpt('key_size', default=1024,
+ help='SSL Key Length (in bits) (auto generated '
+ 'certificate)'),
+ cfg.IntOpt('valid_days', default=3650,
+ help='Days the certificate is valid for once signed '
+ '(auto generated certificate)'),
cfg.StrOpt('cert_subject',
- default='/C=US/ST=Unset/L=Unset/O=Unset/CN=localhost')],
+ default='/C=US/ST=Unset/L=Unset/O=Unset/CN=localhost',
+ help='SSL Certificate Subject (auto generated '
+ 'certificate)')],
'signing': [
- cfg.StrOpt('token_format', default=None),
+ cfg.StrOpt('token_format', default=None,
+ help=('Deprecated in favor of provider in the '
+ '[token] section')),
cfg.StrOpt('certfile',
- default="/etc/keystone/ssl/certs/signing_cert.pem"),
+ default='/etc/keystone/ssl/certs/signing_cert.pem',
+ help='Path of the certfile for token signing.'),
cfg.StrOpt('keyfile',
- default="/etc/keystone/ssl/private/signing_key.pem"),
+ default='/etc/keystone/ssl/private/signing_key.pem',
+ help='Path of the keyfile for token signing.'),
cfg.StrOpt('ca_certs',
- default="/etc/keystone/ssl/certs/ca.pem"),
+ default='/etc/keystone/ssl/certs/ca.pem',
+ help='Path of the CA for token signing.'),
cfg.StrOpt('ca_key',
- default="/etc/keystone/ssl/private/cakey.pem"),
- cfg.IntOpt('key_size', default=2048),
- cfg.IntOpt('valid_days', default=3650),
+ default='/etc/keystone/ssl/private/cakey.pem',
+ help='Path of the CA Key for token signing'),
+ cfg.IntOpt('key_size', default=2048,
+ help='Key Size (in bits) for token signing cert '
+ '(auto generated certificate)'),
+ cfg.IntOpt('valid_days', default=3650,
+ help='Days the token signing cert is valid for '
+ '(auto generated certificate)'),
cfg.StrOpt('cert_subject',
default=('/C=US/ST=Unset/L=Unset/O=Unset/'
- 'CN=www.example.com'))],
+ 'CN=www.example.com'),
+ help='Certificate Subject (auto generated certificate) for '
+ 'token signing.')],
'assignment': [
# assignment has no default for backward compatibility reasons.
# If assignment driver is not specified, the identity driver chooses
# the backend
- cfg.StrOpt('driver', default=None),
- cfg.BoolOpt('caching', default=True),
- cfg.IntOpt('cache_time', default=None)],
+ cfg.StrOpt('driver', default=None,
+ help='Keystone Assignment backend driver'),
+ cfg.BoolOpt('caching', default=True,
+ help=('Toggle for assignment caching. This has no effect '
+ 'unless global caching is enabled.')),
+ cfg.IntOpt('cache_time', default=None,
+ help='TTL (in seconds) to cache assignment data. This has '
+ 'no effect unless global caching is enabled.'),
+ cfg.IntOpt('list_limit', default=None,
+ help=('Maximum number of entities that will be returned '
+ 'in an assignment collection'))],
'credential': [
cfg.StrOpt('driver',
default=('keystone.credential.backends'
- '.sql.Credential'))],
+ '.sql.Credential'),
+ help='Keystone Credential backend driver')],
'oauth1': [
cfg.StrOpt('driver',
- default='keystone.contrib.oauth1.backends.sql.OAuth1'),
- cfg.IntOpt('request_token_duration', default=28800),
- cfg.IntOpt('access_token_duration', default=86400)],
+ default='keystone.contrib.oauth1.backends.sql.OAuth1',
+ help='Keystone OAuth1 backend driver'),
+ cfg.IntOpt('request_token_duration', default=28800,
+ help='Duration (in seconds) for the OAuth Request Token'),
+ cfg.IntOpt('access_token_duration', default=86400,
+ help='Duration (in seconds) for the OAuth Access Token')],
'federation': [
cfg.StrOpt('driver',
default='keystone.contrib.federation.'
- 'backends.sql.Federation')],
+ 'backends.sql.Federation',
+ help='Keystone Federation backend driver')],
'policy': [
cfg.StrOpt('driver',
- default='keystone.policy.backends.sql.Policy')],
+ default='keystone.policy.backends.sql.Policy',
+ help='Keystone Policy backend driver'),
+ cfg.IntOpt('list_limit', default=None,
+ help=('Maximum number of entities that will be returned '
+ 'in a policy collection'))],
'ec2': [
cfg.StrOpt('driver',
- default='keystone.contrib.ec2.backends.kvs.Ec2')],
+ default='keystone.contrib.ec2.backends.kvs.Ec2',
+ help='Keystone EC2Credential backend driver')],
'endpoint_filter': [
cfg.StrOpt('driver',
default='keystone.contrib.endpoint_filter.backends'
- '.sql.EndpointFilter'),
- cfg.BoolOpt('return_all_endpoints_if_no_filter', default=True)],
+ '.sql.EndpointFilter',
+ help='Keystone Endpoint Filter backend driver'),
+ cfg.BoolOpt('return_all_endpoints_if_no_filter', default=True,
+ help='Toggle to return all active endpoints if no filter '
+ 'exists.')],
'stats': [
cfg.StrOpt('driver',
default=('keystone.contrib.stats.backends'
- '.kvs.Stats'))],
+ '.kvs.Stats'),
+ help='Keystone stats backend driver')],
'ldap': [
- cfg.StrOpt('url', default='ldap://localhost'),
- cfg.StrOpt('user', default=None),
- cfg.StrOpt('password', secret=True, default=None),
- cfg.StrOpt('suffix', default='cn=example,cn=com'),
+ cfg.StrOpt('url', default='ldap://localhost',
+ help='URL for connecting to the LDAP server'),
+ cfg.StrOpt('user', default=None,
+ help='User BindDN to query the LDAP server'),
+ cfg.StrOpt('password', secret=True, default=None,
+ help='Password for the BindDN to query the LDAP server'),
+ cfg.StrOpt('suffix', default='cn=example,cn=com',
+ help='LDAP server suffix'),
cfg.BoolOpt('use_dumb_member', default=False),
cfg.StrOpt('dumb_member', default='cn=dumb,dc=nonexistent'),
- cfg.BoolOpt('allow_subtree_delete', default=False),
- cfg.StrOpt('query_scope', default='one'),
- cfg.IntOpt('page_size', default=0),
- cfg.StrOpt('alias_dereferencing', default='default'),
+ cfg.BoolOpt('allow_subtree_delete', default=False,
+ help='allow deleting subtrees'),
+ cfg.StrOpt('query_scope', default='one',
+ help=('The LDAP scope for queries; this can be either '
+ '"one" (onelevel/singleLevel) or "sub" '
+ '(subtree/wholeSubtree)')),
+ cfg.IntOpt('page_size', default=0,
+ help=('Maximum results per page; a value of zero ("0") '
+ 'disables paging')),
+ cfg.StrOpt('alias_dereferencing', default='default',
+ help=('The LDAP dereferencing option for queries. This '
+ 'can be either "never", "searching", "always", '
+ '"finding" or "default". The "default" option falls '
+ 'back to using default dereferencing configured by '
+ 'your ldap.conf.')),
cfg.StrOpt('user_tree_dn', default=None),
cfg.StrOpt('user_filter', default=None),
@@ -242,34 +450,63 @@ FILE_OPTIONS = {
cfg.StrOpt('tls_cacertfile', default=None),
cfg.StrOpt('tls_cacertdir', default=None),
cfg.BoolOpt('use_tls', default=False),
- cfg.StrOpt('tls_req_cert', default='demand')],
+ cfg.StrOpt('tls_req_cert', default='demand',
+ help=('valid options for tls_req_cert are demand, never, '
+ 'and allow'))],
'pam': [
cfg.StrOpt('userid', default=None),
cfg.StrOpt('password', default=None)],
'auth': [
- cfg.ListOpt('methods', default=_DEFAULT_AUTH_METHODS),
+ cfg.ListOpt('methods', default=_DEFAULT_AUTH_METHODS,
+ help='Default auth methods.'),
cfg.StrOpt('password',
- default='keystone.auth.plugins.password.Password'),
+ default='keystone.auth.plugins.password.Password',
+ help='The password auth plugin module'),
cfg.StrOpt('token',
- default='keystone.auth.plugins.token.Token'),
+ default='keystone.auth.plugins.token.Token',
+ help='The token auth plugin module'),
# deals with REMOTE_USER authentication
cfg.StrOpt('external',
- default='keystone.auth.plugins.external.DefaultDomain')],
+ default='keystone.auth.plugins.external.DefaultDomain',
+ help='The external (REMOTE_USER) auth plugin module.')],
'paste_deploy': [
- cfg.StrOpt('config_file', default=None)],
+ cfg.StrOpt('config_file', default='keystone-paste.ini',
+ help=('Name of the paste configuration file that defines '
+ 'the available pipelines'))],
'memcache': [
- cfg.ListOpt('servers', default=['localhost:11211']),
- cfg.IntOpt('max_compare_and_set_retry', default=16)],
+ cfg.ListOpt('servers', default=['localhost:11211'],
+ help='Memcache servers in the format of "host:port"'),
+ cfg.IntOpt('max_compare_and_set_retry', default=16,
+ help=('Number of compare-and-set attempts to make when '
+ 'using compare-and-set in the token memcache back '
+ 'end'))],
'catalog': [
cfg.StrOpt('template_file',
- default='default_catalog.templates'),
+ default='default_catalog.templates',
+ help='Catalog template file name for use with the '
+ 'template catalog backend.'),
cfg.StrOpt('driver',
- default='keystone.catalog.backends.sql.Catalog')],
+ default='keystone.catalog.backends.sql.Catalog',
+ help='Keystone catalog backend driver'),
+ cfg.IntOpt('list_limit', default=None,
+ help=('Maximum number of entities that will be returned '
+ 'in a catalog collection'))],
'kvs': [
- cfg.ListOpt('backends', default=[]),
- cfg.StrOpt('config_prefix', default='keystone.kvs'),
- cfg.BoolOpt('enable_key_mangler', default=True),
- cfg.IntOpt('default_lock_timeout', default=5)]}
+ cfg.ListOpt('backends', default=[],
+ help='Extra dogpile.cache backend modules to register '
+ 'with the dogpile.cache library'),
+ cfg.StrOpt('config_prefix', default='keystone.kvs',
+ help=('Prefix for building the configuration dictionary '
+ 'for the KVS region. This should not need to be '
+ 'changed unless there is another dogpile.cache '
+ 'region with the same configuration name')),
+ cfg.BoolOpt('enable_key_mangler', default=True,
+ help=('Toggle to disable using a key-mangling function '
+ 'to ensure fixed length keys. This is toggle-able '
+ 'for debugging purposes; it is highly recommended '
+ 'to always leave this set to True.')),
+ cfg.IntOpt('default_lock_timeout', default=5,
+ help='Default lock timeout for distributed locking.')]}
CONF = cfg.CONF
@@ -307,3 +544,27 @@ def configure(conf=None):
# register any non-default auth methods here (used by extensions, etc)
setup_authentication(conf)
+
+
+def list_opts():
+ """Return a list of oslo.config options available in Keystone.
+
+ The returned list includes all oslo.config options which are registered as
+ the "FILE_OPTIONS" in keystone.common.config. This list will not include
+ the options from the oslo-incubator library or any options registered
+ dynamically at run time.
+
+ Each object in the list is a two element tuple. The first element of
+ each tuple is the name of the group under which the list of options in the
+ second element will be registered. A group name of None corresponds to the
+ [DEFAULT] group in config files.
+
+ This function is also discoverable via the 'oslo.config.opts' entry point
+ under the 'keystone.config.opts' namespace.
+
+ The purpose of this is to allow tools like the Oslo sample config file
+ generator to discover the options exposed to users by this library.
+
+ :returns: a list of (group_name, opts) tuples
+ """
+ return FILE_OPTIONS.items()
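
Tools consume `list_opts()` by iterating the (group, opts) tuples. A minimal consumer — e.g. a crude sample-config generator — assuming keystone.common.config is importable:

    from keystone.common import config

    for group, opts in config.list_opts():
        print('[%s]' % (group or 'DEFAULT'))
        for opt in opts:
            if opt.help:
                print('# %s' % opt.help)
            print('#%s = %s' % (opt.name, opt.default))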
diff --git a/keystone/common/controller.py b/keystone/common/controller.py
index 6347e39cf..5658a8bd7 100644
--- a/keystone/common/controller.py
+++ b/keystone/common/controller.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -221,6 +219,47 @@ class V2Controller(wsgi.Application):
ref['name'] = ref.pop('username')
return ref
+ @staticmethod
+ def v3_to_v2_user(ref):
+ """Convert a user_ref from v3 to v2 compatible.
+
+ * v2.0 users are not domain aware, and should have domain_id removed
+ * v2.0 users expect the use of tenantId instead of default_project_id
+ * v2.0 users have a username attribute
+
+ This method should only be applied to user_refs being returned from the
+ v2.0 controller(s).
+
+ If ref is a list type, we will iterate through each element and do the
+ conversion.
+ """
+
+ def _format_default_project_id(ref):
+ """Convert default_project_id to tenantId for v2 calls."""
+ default_project_id = ref.pop('default_project_id', None)
+ if default_project_id is not None:
+ ref['tenantId'] = default_project_id
+ elif 'tenantId' in ref:
+ # NOTE(morganfainberg): To avoid v2.0 confusion if somehow a
+ # tenantId property sneaks its way into the extra blob on the
+ # user, we remove it here. If default_project_id is set, we
+ # would override it in either case.
+ del ref['tenantId']
+
+ def _normalize_and_filter_user_properties(ref):
+ """Run through the various filter/normalization methods."""
+ _format_default_project_id(ref)
+ V2Controller.filter_domain_id(ref)
+ V2Controller.normalize_username_in_response(ref)
+ return ref
+
+ if isinstance(ref, dict):
+ return _normalize_and_filter_user_properties(ref)
+ elif isinstance(ref, list):
+ return [_normalize_and_filter_user_properties(x) for x in ref]
+ else:
+ raise ValueError(_('Expected dict or list: %s') % type(ref))
+
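An illustrative input/output pair for the conversion; the user values below are made up, and the import assumes keystone is on the path:

    from keystone.common.controller import V2Controller

    v3_user = {'id': 'u1', 'name': 'alice', 'domain_id': 'default',
               'default_project_id': 'p1'}
    v2_user = V2Controller.v3_to_v2_user(v3_user)
    # v2_user now carries 'tenantId': 'p1', a 'username' mirroring 'name',
    # and no 'domain_id' key.
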
@dependency.requires('policy_api', 'token_api')
class V3Controller(wsgi.Application):
@@ -268,16 +307,16 @@ class V3Controller(wsgi.Application):
Returns the wrapped collection, which includes:
- Executing any filtering not already carried out
- - Paginating if necessary
+ - Truncating to a set limit if necessary
- Adds 'self' links in every member
- Adds 'next', 'self' and 'prev' links for the whole collection.
:param context: the current context, containing the original url path
and query string
:param refs: the list of members of the collection
- :param hints: list hints, containing any relevant
- filters. Any filters already satisfied by drivers
- will have been removed
+ :param hints: list hints, containing any relevant filters and limit.
+ Any filters already satisfied by managers will have been
+ removed
"""
# Check if there are any filters in hints that were not
# handled by the drivers. The driver will not have paginated or
@@ -287,7 +326,7 @@ class V3Controller(wsgi.Application):
if hints is not None:
refs = cls.filter_by_attributes(refs, hints)
- refs = cls.paginate(context, refs)
+ list_limited, refs = cls.limit(refs, hints)
for ref in refs:
cls.wrap_member(context, ref)
@@ -297,17 +336,45 @@ class V3Controller(wsgi.Application):
'next': None,
'self': cls.base_url(path=context['path']),
'previous': None}
+
+ if list_limited:
+ container['truncated'] = True
+
return container
@classmethod
- def paginate(cls, context, refs):
- """Paginates a list of references by page & per_page query strings."""
- # FIXME(dolph): client needs to support pagination first
- return refs
+ def limit(cls, refs, hints):
+ """Limits a list of entities.
+
+ The underlying driver layer may have already truncated the collection
+ for us, but in case it was unable to handle truncation we check here.
+
+ :param refs: the list of members of the collection
+ :param hints: hints, containing, among other things, the limit
+ requested
+
+ :returns: boolean indicating whether the list was truncated, as well
+ as the list of (truncated if necessary) entities.
+
+ """
+ NOT_LIMITED = False
+ LIMITED = True
+
+ if hints is None or hints.get_limit() is None:
+ # No truncation was requested
+ return NOT_LIMITED, refs
+
+ list_limit = hints.get_limit()
+ if list_limit.get('truncated', False):
+ # The driver did truncate the list
+ return LIMITED, refs
+
+ if len(refs) > list_limit['limit']:
+ # The driver layer wasn't able to truncate it for us, so we must
+ # do it here
+ return LIMITED, refs[:list_limit['limit']]
- page = context['query_string'].get('page', 1)
- per_page = context['query_string'].get('per_page', 30)
- return refs[per_page * (page - 1):per_page * page]
+ return NOT_LIMITED, refs
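
A quick illustration of the fallback path, assuming Hints from keystone.common.driver_hints and the V3Controller class above: the hints ask for two entries, the driver returned three, so the controller truncates and reports it.

    from keystone.common.controller import V3Controller
    from keystone.common.driver_hints import Hints

    hints = Hints()
    hints.set_limit(2)
    refs = [{'id': 'a'}, {'id': 'b'}, {'id': 'c'}]
    limited, refs = V3Controller.limit(refs, hints)
    assert limited is True and len(refs) == 2
    # wrap_collection() will now add 'truncated': True to the container.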
@classmethod
def filter_by_attributes(cls, refs, hints):
diff --git a/keystone/common/dependency.py b/keystone/common/dependency.py
index 9a93ee1a1..e03205415 100644
--- a/keystone/common/dependency.py
+++ b/keystone/common/dependency.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/common/driver_hints.py b/keystone/common/driver_hints.py
index 971764930..2784b25db 100644
--- a/keystone/common/driver_hints.py
+++ b/keystone/common/driver_hints.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
# Copyright 2013 IBM Corp.
#
@@ -21,14 +19,15 @@ class Hints(list):
Hints are modifiers that affect the return of entities from a
list_<entities> operation. They are typically passed to a driver to give
- direction as to what filtering and pagination actions are being requested.
+ direction as to what filtering, pagination or list limiting actions are
+ being requested.
It is optional for a driver to action some or all of the list hints,
but any filters that it does satisfy must be marked as such by removing
the filter from the list.
- A Hint object is a list of dicts, initially all of type 'filter', although
- other types may be added in the future. The list can be enumerated
+ A Hint object is a list of dicts, initially of type 'filter' or 'limit',
+ although other types may be added in the future. The list can be enumerated
directly, or by using the filters() method which will guarantee to only
return filters.
@@ -60,3 +59,22 @@ class Hints(list):
if (entry['type'] == 'filter' and entry['name'] == name and
entry['comparator'] == 'equals'):
return entry
+
+ def set_limit(self, limit, truncated=False):
+ """Set a limit to indicate the list should be truncated."""
+ # We only allow one limit entry in the list, so if it already exists
+ # we overwrite the old one
+ for x in self:
+ if x['type'] == 'limit':
+ x['limit'] = limit
+ x['truncated'] = truncated
+ break
+ else:
+ self.append({'limit': limit, 'type': 'limit',
+ 'truncated': truncated})
+
+ def get_limit(self):
+ """Get the limit to which the list should be truncated."""
+ for x in self:
+ if x['type'] == 'limit':
+ return x
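
Since a Hints object is just a list of dicts, filters and the single 'limit' entry coexist in it. A short usage sketch — add_filter() belongs to the same class, though it is not shown in this hunk:

    from keystone.common.driver_hints import Hints

    hints = Hints()
    hints.add_filter('type', 'identity')
    hints.set_limit(50)
    assert hints.get_limit()['limit'] == 50
    hints.set_limit(25, truncated=True)  # overwrites the single limit entry
    assert hints.get_limit() == {'type': 'limit', 'limit': 25,
                                 'truncated': True}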
diff --git a/keystone/common/environment/__init__.py b/keystone/common/environment/__init__.py
index fcd343b4e..4554ddb2e 100644
--- a/keystone/common/environment/__init__.py
+++ b/keystone/common/environment/__init__.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/common/environment/eventlet_server.py b/keystone/common/environment/eventlet_server.py
index 661c0521a..37a3d0944 100644
--- a/keystone/common/environment/eventlet_server.py
+++ b/keystone/common/environment/eventlet_server.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
@@ -35,7 +33,8 @@ LOG = log.getLogger(__name__)
class Server(object):
"""Server class to manage multiple WSGI sockets and applications."""
- def __init__(self, application, host=None, port=None, threads=1000):
+ def __init__(self, application, host=None, port=None, threads=1000,
+ keepalive=False, keepidle=None):
self.application = application
self.host = host or '0.0.0.0'
self.port = port or 0
@@ -44,6 +43,8 @@ class Server(object):
self.greenthread = None
self.do_ssl = False
self.cert_required = False
+ self.keepalive = keepalive
+ self.keepidle = keepidle
def start(self, key=None, backlog=128):
"""Run a WSGI server with the given application."""
@@ -77,6 +78,15 @@ class Server(object):
ca_certs=self.ca_certs)
_socket = sslsocket
+ # Optionally enable keepalive on the wsgi socket.
+ if self.keepalive:
+ _socket.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
+
+ # This option isn't available in the OS X version of eventlet
+ if hasattr(socket, 'TCP_KEEPIDLE') and self.keepidle is not None:
+ _socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE,
+ self.keepidle)
+
self.greenthread = self.pool.spawn(self._run,
self.application,
_socket)
@@ -107,7 +117,7 @@ class Server(object):
logger = log.getLogger('eventlet.wsgi.server')
try:
eventlet.wsgi.server(socket, application, custom_pool=self.pool,
- log=log.WritableLogger(logger))
+ log=log.WritableLogger(logger), debug=False)
except Exception:
LOG.exception(_('Server error'))
raise
diff --git a/keystone/common/extension.py b/keystone/common/extension.py
index 37e7d6155..b2ea80bc3 100644
--- a/keystone/common/extension.py
+++ b/keystone/common/extension.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/common/kvs/backends/memcached.py b/keystone/common/kvs/backends/memcached.py
index 101f7fe4e..a1c40cd25 100644
--- a/keystone/common/kvs/backends/memcached.py
+++ b/keystone/common/kvs/backends/memcached.py
@@ -35,9 +35,9 @@ LOG = log.getLogger(__name__)
NO_VALUE = api.NO_VALUE
-VALID_DOGPILE_BACKENDS = filter(
- lambda x: x not in ('GenericMemcachedBackend', 'MemcachedLock'),
- memcached.__all__)
+VALID_DOGPILE_BACKENDS = dict(pylibmc=memcached.PylibmcBackend,
+ bmemcached=memcached.BMemcachedBackend,
+ memcached=memcached.MemcachedBackend)
class MemcachedLock(object):
@@ -76,20 +76,23 @@ class MemcachedLock(object):
class MemcachedBackend(manager.Manager):
- """Pivot point to leverage the various dogpile.cache memcache backends.
+ """Pivot point to leverage the various dogpile.cache memcached backends.
- To specify a specific dogpile.cache memcached backend, pass the region
- backend argument `dogpile_memcache_backend` set to one of the known dogpile
- memcache backends (at this time `MemcachedBackend`, `BMemcachedBackend`,
- `PylibmcBackend` are valid).
+ To specify a specific dogpile.cache memcached driver, pass the argument
+ `memcached_backend` set to one of the provided memcached drivers (at this
+ time `memcached`, `bmemcached`, `pylibmc` are valid).
"""
def __init__(self, arguments):
+ self._key_mangler = None
+ self.raw_no_expiry_keys = set(arguments.pop('no_expiry_keys', set()))
+ self.no_expiry_hashed_keys = set()
+
self.lock_timeout = arguments.pop('lock_timeout', None)
self.max_lock_attempts = arguments.pop('max_lock_attempts', 15)
# NOTE(morganfainberg): Remove distributed locking from the arguments
# passed to the "real" backend if it exists.
arguments.pop('distributed_lock', None)
- backend = arguments.pop('dogpile_memcache_backend', None)
+ backend = arguments.pop('memcached_backend', None)
if 'url' not in arguments:
# FIXME(morganfainberg): Log deprecation warning for old-style
# configuration once full dict_config style configuration for
@@ -100,13 +103,55 @@ class MemcachedBackend(manager.Manager):
if backend is None:
# NOTE(morganfainberg): Use the basic memcached backend if nothing
# else is supplied.
- self.driver = memcached.MemcachedBackend(arguments)
+ self.driver = VALID_DOGPILE_BACKENDS['memcached'](arguments)
else:
if backend not in VALID_DOGPILE_BACKENDS:
- raise ValueError(_('Backend `%s` is not a valid dogpile '
- 'memcached backend.'), backend)
+ raise ValueError(
+ _('Backend `%(driver)s` is not a valid memcached '
+ 'backend. Valid drivers: %(driver_list)s') %
+ {'driver': backend,
+ 'driver_list': ','.join(VALID_DOGPILE_BACKENDS.keys())})
else:
- self.driver = getattr(memcached, backend)(arguments)
+ self.driver = VALID_DOGPILE_BACKENDS[backend](arguments)
+
+ def _get_set_arguments_driver_attr(self, exclude_expiry=False):
+
+ # NOTE(morganfainberg): Shallow copy the .set_arguments dict to
+ # ensure no changes cause the values to change in the instance
+ # variable.
+ set_arguments = getattr(self.driver, 'set_arguments', {}).copy()
+
+ if exclude_expiry:
+ # NOTE(morganfainberg): Explicitly strip out the 'time' key/value
+ # from the set_arguments in the case that this key isn't meant
+ # to expire
+ set_arguments.pop('time', None)
+ return set_arguments
+
+ def set(self, key, value):
+ mapping = {key: value}
+ self.set_multi(mapping)
+
+ def set_multi(self, mapping):
+ mapping_keys = set(mapping.keys())
+ no_expiry_keys = mapping_keys.intersection(self.no_expiry_hashed_keys)
+ has_expiry_keys = mapping_keys.difference(self.no_expiry_hashed_keys)
+
+ if no_expiry_keys:
+ # NOTE(morganfainberg): For keys that have expiry excluded,
+ # bypass the backend and directly call the client. Bypass directly
+ # to the client is required as the 'set_arguments' are applied to
+ # all ``set`` and ``set_multi`` calls by the driver, by calling
+ # the client directly it is possible to exclude the ``time``
+ # argument to the memcached server.
+ new_mapping = dict((k, mapping[k]) for k in no_expiry_keys)
+ set_arguments = self._get_set_arguments_driver_attr(
+ exclude_expiry=True)
+ self.driver.client.set_multi(new_mapping, **set_arguments)
+
+ if has_expiry_keys:
+ new_mapping = dict((k, mapping[k]) for k in has_expiry_keys)
+ self.driver.set_multi(new_mapping)
@classmethod
def from_config_dict(cls, config_dict, prefix):
@@ -118,7 +163,28 @@ class MemcachedBackend(manager.Manager):
@property
def key_mangler(self):
- return self.driver.key_mangler
+ if self._key_mangler is None:
+ self._key_mangler = self.driver.key_mangler
+ return self._key_mangler
+
+ @key_mangler.setter
+ def key_mangler(self, key_mangler):
+ if callable(key_mangler):
+ self._key_mangler = key_mangler
+ self._rehash_keys()
+ elif key_mangler is None:
+ # NOTE(morganfainberg): Set the hashed key map to the unhashed
+ # list since we no longer have a key_mangler.
+ self._key_mangler = None
+ self.no_expiry_hashed_keys = self.raw_no_expiry_keys
+ else:
+ raise TypeError(_('`key_mangler` functions must be callable.'))
+
+ def _rehash_keys(self):
+ no_expire = set()
+ for key in self.raw_no_expiry_keys:
+ no_expire.add(self._key_mangler(key))
+ self.no_expiry_hashed_keys = no_expire
def get_mutex(self, key):
return MemcachedLock(lambda: self.driver.client, key,
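The set_multi split above routes each key by whether it appears in the backend's hashed no-expiry set; a minimal standalone sketch of that routing (key names and values are fabricated, not from the patch):

    # Keys in no_expiry_hashed_keys go straight to the raw memcached client
    # with the 'time' argument stripped; everything else goes through the
    # dogpile driver and keeps its configured expiration.
    mapping = {'k1': 'keep-forever', 'k2': 'expires-normally'}
    no_expiry_hashed_keys = {'k1'}        # produced by _rehash_keys()

    keys = set(mapping)
    no_expiry_keys = keys & no_expiry_hashed_keys    # {'k1'}
    has_expiry_keys = keys - no_expiry_hashed_keys   # {'k2'}
    print(sorted(no_expiry_keys), sorted(has_expiry_keys))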
diff --git a/keystone/common/kvs/core.py b/keystone/common/kvs/core.py
index fa538ef9a..52e206283 100644
--- a/keystone/common/kvs/core.py
+++ b/keystone/common/kvs/core.py
@@ -141,6 +141,26 @@ class KeyValueStore(object):
'configured: %s'),
self._region.name)
+ def _set_keymangler_on_backend(self, key_mangler):
+ try:
+ self._region.backend.key_mangler = key_mangler
+ except Exception as e:
+ # NOTE(morganfainberg): Setting the key_mangler on the backend
+ # allows the backend to calculate hashed key values as needed.
+ # Not all backends require the ability to calculate hashed keys.
+ # If the backend does not support/require this feature, log a
+ # debug line and move on; otherwise re-raise the exception.
+ # Support for the feature is implied by the existence of the
+ # 'raw_no_expiry_keys' attribute.
+ if not hasattr(self._region.backend, 'raw_no_expiry_keys'):
+ LOG.debug(_('Non-expiring keys not supported/required by '
+ '%(region)s backend; unable to set '
+ 'key_mangler for backend: %(err)s'),
+ {'region': self._region.name, 'err': e})
+ else:
+ raise
+
def _set_key_mangler(self, key_mangler):
# Set the key_mangler that is appropriate for the given region being
# configured here. The key_mangler function is called prior to storing
@@ -180,9 +200,11 @@ class KeyValueStore(object):
# unintended cases of exceeding cache-key in backends such
# as memcache.
self._region.key_mangler = dogpile_util.sha1_mangle_key
+ self._set_keymangler_on_backend(self._region.key_mangler)
else:
LOG.info(_('KVS region %s key_mangler disabled.'),
self._region.name)
+ self._set_keymangler_on_backend(None)
def _configure_region(self, backend, **config_args):
prefix = CONF.kvs.config_prefix
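_set_keymangler_on_backend relies on a simple capability probe: attempt the assignment, and on failure check for the marker attribute. A standalone sketch under assumed names (ReadOnlyManglerBackend is fabricated):

    class ReadOnlyManglerBackend(object):
        """A backend whose key_mangler cannot be assigned."""
        @property
        def key_mangler(self):
            return None

    backend = ReadOnlyManglerBackend()
    try:
        backend.key_mangler = lambda key: key
    except AttributeError:
        if not hasattr(backend, 'raw_no_expiry_keys'):
            pass  # feature not supported/required; the patch logs and moves on
        else:
            raise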
diff --git a/keystone/common/kvs/legacy.py b/keystone/common/kvs/legacy.py
index 3bc839291..439ff5e14 100644
--- a/keystone/common/kvs/legacy.py
+++ b/keystone/common/kvs/legacy.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/common/ldap/__init__.py b/keystone/common/ldap/__init__.py
index 24069356c..5cfd58385 100644
--- a/keystone/common/ldap/__init__.py
+++ b/keystone/common/ldap/__init__.py
@@ -1,4 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
# flake8: noqa
# Copyright 2012 OpenStack Foundation
diff --git a/keystone/common/ldap/core.py b/keystone/common/ldap/core.py
index 1e7d141b3..a2f86132d 100644
--- a/keystone/common/ldap/core.py
+++ b/keystone/common/ldap/core.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -297,6 +295,21 @@ class BaseLdap(object):
return obj
+ def check_allow_create(self):
+ if not self.allow_create:
+ action = _('LDAP %s create') % self.options_name
+ raise exception.ForbiddenAction(action=action)
+
+ def check_allow_update(self):
+ if not self.allow_update:
+ action = _('LDAP %s update') % self.options_name
+ raise exception.ForbiddenAction(action=action)
+
+ def check_allow_delete(self):
+ if not self.allow_delete:
+ action = _('LDAP %s delete') % self.options_name
+ raise exception.ForbiddenAction(action=action)
+
def affirm_unique(self, values):
if values.get('name') is not None:
try:
@@ -320,10 +333,6 @@ class BaseLdap(object):
def create(self, values):
self.affirm_unique(values)
- if not self.allow_create:
- action = _('LDAP %s create') % self.options_name
- raise exception.ForbiddenAction(action=action)
-
conn = self.get_connection()
object_classes = self.structural_classes + [self.object_class]
attrs = [('objectClass', object_classes)]
@@ -406,10 +415,6 @@ class BaseLdap(object):
for x in self._ldap_get_all(ldap_filter)]
def update(self, object_id, values, old_obj=None):
- if not self.allow_update:
- action = _('LDAP %s update') % self.options_name
- raise exception.ForbiddenAction(action=action)
-
if old_obj is None:
old_obj = self.get(object_id)
@@ -454,10 +459,6 @@ class BaseLdap(object):
return self.get(object_id)
def delete(self, object_id):
- if not self.allow_delete:
- action = _('LDAP %s delete') % self.options_name
- raise exception.ForbiddenAction(action=action)
-
conn = self.get_connection()
try:
conn.delete_s(self._id_to_dn(object_id))
@@ -675,10 +676,10 @@ class EnabledEmuMixIn(BaseLdap):
* $name_enabled_emulation - boolean, on/off
* $name_enabled_emulation_dn - DN of that groupOfNames, default is
- cn=enabled_$name,$tree_dn
+ cn=enabled_${name}s,${tree_dn}
- Where $name is self.options_name ('user' or 'tenant'), $tree_dn is
- self.tree_dn.
+ Where ${name}s is the plural of self.options_name ('users' or 'tenants')
+ and ${tree_dn} is self.tree_dn.
"""
def __init__(self, conf):
@@ -718,7 +719,7 @@ class EnabledEmuMixIn(BaseLdap):
except ldap.NO_SUCH_OBJECT:
attr_list = [('objectClass', ['groupOfNames']),
('member',
- [self._id_to_dn(object_id)])]
+ [self._id_to_dn(object_id)])]
if self.use_dumb_member:
attr_list[1][1].append(self.dumb_member)
conn.add_s(self.enabled_emulation_dn, attr_list)
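With the permission checks extracted into check_allow_* methods, callers can veto an operation up front without duplicating the logic; a hedged sketch (UserApi here is illustrative, not part of the patch):

    class UserApi(BaseLdap):
        options_name = 'user'

        def create(self, values):
            # Raises exception.ForbiddenAction when creates are disallowed.
            self.check_allow_create()
            return super(UserApi, self).create(values)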
diff --git a/keystone/common/manager.py b/keystone/common/manager.py
index 693a7bb34..3325255f6 100644
--- a/keystone/common/manager.py
+++ b/keystone/common/manager.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -19,6 +17,42 @@ import functools
from keystone.openstack.common import importutils
+def response_truncated(f):
+ """Truncate the list returned by the wrapped function.
+
+ This is designed to wrap Manager list_{entity} methods to ensure that
+ any list limits that are defined are passed to the driver layer. If a
+ hints list is provided, the wrapper will insert the relevant limit into
+ the hints so that the underlying driver call can try to honor it. If the
+ driver does truncate the response, it will update the 'truncated' attribute
+ in the 'limit' entry of the hints list, which enables the caller of this
+ function to know that truncation has taken place. If, however, the driver
+ layer is unable to perform truncation, the 'limit' entry is simply left in
+ the hints list for the caller to handle.
+
+ A _get_list_limit() method must be present in the object's class
+ hierarchy; it returns the limit to which this backend will truncate.
+
+ If a hints list is not provided in the arguments of the wrapped call,
+ then any limits set in the config file are ignored. This allows internal
+ use of such wrapped methods where the entire data set is needed as input
+ for the calculation of some other API (e.g. getting the role assignments
+ for a given project).
+
+ """
+ @functools.wraps(f)
+ def wrapper(self, *args, **kwargs):
+ if kwargs.get('hints') is None:
+ return f(self, *args, **kwargs)
+
+ list_limit = self.driver._get_list_limit()
+ if list_limit:
+ kwargs['hints'].set_limit(list_limit)
+ return f(self, *args, **kwargs)
+ return wrapper
+
+
class Manager(object):
"""Base class for intermediary request layer.
diff --git a/keystone/common/models.py b/keystone/common/models.py
index b76bcac1c..e0ec4683f 100644
--- a/keystone/common/models.py
+++ b/keystone/common/models.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-#
# Copyright (C) 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/keystone/common/openssl.py b/keystone/common/openssl.py
index 7d1264fef..dcd6b7d74 100644
--- a/keystone/common/openssl.py
+++ b/keystone/common/openssl.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -73,9 +71,11 @@ class BaseCertificateConfigure(object):
self.ssl_dictionary.update(kwargs)
def exec_command(self, command):
- to_exec = command % self.ssl_dictionary
- LOG.info(to_exec)
- environment.subprocess.check_call(to_exec.rsplit(' '))
+ to_exec = []
+ for cmd_part in command:
+ to_exec.append(cmd_part % self.ssl_dictionary)
+ LOG.info(' '.join(to_exec))
+ environment.subprocess.check_call(to_exec)
def build_ssl_config_file(self):
utils.make_dirs(os.path.dirname(self.ssl_config_file_name),
@@ -118,8 +118,9 @@ class BaseCertificateConfigure(object):
user=self.use_keystone_user,
group=self.use_keystone_group, log=LOG)
if not file_exists(ca_key_file):
- self.exec_command('openssl genrsa -out %(ca_private_key)s '
- '%(key_size)d')
+ self.exec_command(['openssl', 'genrsa',
+ '-out', '%(ca_private_key)s',
+ '%(key_size)d'])
utils.set_permissions(ca_key_file,
mode=PRIVATE_FILE_PERMS,
user=self.use_keystone_user,
@@ -131,11 +132,13 @@ class BaseCertificateConfigure(object):
user=self.use_keystone_user,
group=self.use_keystone_group, log=LOG)
if not file_exists(ca_cert):
- self.exec_command('openssl req -new -x509 -extensions v3_ca '
- '-key %(ca_private_key)s -out %(ca_cert)s '
- '-days %(valid_days)d '
- '-config %(ssl_config)s '
- '-subj %(cert_subject)s')
+ self.exec_command(['openssl', 'req', '-new', '-x509',
+ '-extensions', 'v3_ca',
+ '-key', '%(ca_private_key)s',
+ '-out', '%(ca_cert)s',
+ '-days', '%(valid_days)d',
+ '-config', '%(ssl_config)s',
+ '-subj', '%(cert_subject)s'])
utils.set_permissions(ca_cert,
mode=PUBLIC_FILE_PERMS,
user=self.use_keystone_user,
@@ -148,8 +151,8 @@ class BaseCertificateConfigure(object):
user=self.use_keystone_user,
group=self.use_keystone_group, log=LOG)
if not file_exists(signing_keyfile):
- self.exec_command('openssl genrsa -out %(signing_key)s '
- '%(key_size)d ')
+ self.exec_command(['openssl', 'genrsa', '-out', '%(signing_key)s',
+ '%(key_size)d'])
utils.set_permissions(signing_keyfile,
mode=PRIVATE_FILE_PERMS,
user=self.use_keystone_user,
@@ -163,14 +166,18 @@ class BaseCertificateConfigure(object):
user=self.use_keystone_user,
group=self.use_keystone_group, log=LOG)
if not file_exists(signing_cert):
- self.exec_command('openssl req -key %(signing_key)s -new '
- '-out %(request_file)s -config %(ssl_config)s '
- '-subj %(cert_subject)s')
-
- self.exec_command('openssl ca -batch -out %(signing_cert)s '
- '-config %(ssl_config)s -days %(valid_days)dd '
- '-cert %(ca_cert)s -keyfile %(ca_private_key)s '
- '-infiles %(request_file)s')
+ self.exec_command(['openssl', 'req', '-key', '%(signing_key)s',
+ '-new', '-out', '%(request_file)s',
+ '-config', '%(ssl_config)s',
+ '-subj', '%(cert_subject)s'])
+
+ self.exec_command(['openssl', 'ca', '-batch',
+ '-out', '%(signing_cert)s',
+ '-config', '%(ssl_config)s',
+ '-days', '%(valid_days)d',
+ '-cert', '%(ca_cert)s',
+ '-keyfile', '%(ca_private_key)s',
+ '-infiles', '%(request_file)s'])
def run(self):
self.build_ssl_config_file()
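Each command is now a list whose parts are %-formatted individually, so a value containing spaces (such as a cert subject) stays one argv element instead of being split apart by the old rsplit(' '). A standalone sketch with fabricated values:

    ssl_dictionary = {'ca_private_key': '/etc/keystone/ssl/private/cakey.pem',
                      'key_size': 2048}
    command = ['openssl', 'genrsa', '-out', '%(ca_private_key)s',
               '%(key_size)d']
    to_exec = [part % ssl_dictionary for part in command]
    # to_exec == ['openssl', 'genrsa', '-out',
    #             '/etc/keystone/ssl/private/cakey.pem', '2048']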
diff --git a/keystone/common/pemutils.py b/keystone/common/pemutils.py
index 1b25962ab..59962e840 100755
--- a/keystone/common/pemutils.py
+++ b/keystone/common/pemutils.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -392,7 +390,7 @@ def parse_pem(text, pem_type=None, max_items=None):
'%(position)d: %(err_msg)s') %
{'pem_type': block.pem_type,
'position': block.pem_start,
- 'err_msg': str(e)})
+ 'err_msg': six.text_type(e)})
else:
block.binary_data = binary_data
diff --git a/keystone/common/router.py b/keystone/common/router.py
index d35d3ca32..48a377c12 100644
--- a/keystone/common/router.py
+++ b/keystone/common/router.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/common/serializer.py b/keystone/common/serializer.py
index 52179681b..93b9eb4ec 100644
--- a/keystone/common/serializer.py
+++ b/keystone/common/serializer.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -111,7 +109,7 @@ class XmlDeserializer(object):
values = {}
for k, v in six.iteritems(element.attrib):
# boolean-looking attributes become booleans in JSON
- if k in ['enabled']:
+ if k in ['enabled', 'truncated']:
if v in ['true']:
v = True
elif v in ['false']:
@@ -143,6 +141,7 @@ class XmlDeserializer(object):
return {'links': self._deserialize_links(element)}
links = None
+ truncated = False
for child in [self.walk_element(x) for x in element
if not isinstance(x, ENTITY_TYPE)]:
if list_item_tag:
@@ -152,7 +151,10 @@ class XmlDeserializer(object):
if list_item_tag in child:
values.append(child[list_item_tag])
else:
- links = child['links']
+ if 'links' in child:
+ links = child['links']
+ else:
+ truncated = child['truncated']
else:
values = dict(values.items() + child.items())
@@ -167,6 +169,9 @@ class XmlDeserializer(object):
d['links'].setdefault('next')
d['links'].setdefault('previous')
+ if truncated:
+ d['truncated'] = truncated['truncated']
+
return d
@@ -178,17 +183,23 @@ class XmlSerializer(object):
"""
links = None
+ truncated = False
# FIXME(dolph): skipping links for now
for key in d.keys():
if '_links' in key:
d.pop(key)
- # FIXME(gyee): special-case links in collections
+ # NOTE(gyee, henry-nash): special-case links and truncation
+ # attribute in collections
if 'links' == key:
if links:
# we have multiple links
raise Exception('Multiple links found')
links = d.pop(key)
-
+ if 'truncated' == key:
+ if truncated:
+ # we have multiple attributes
+ raise Exception(_('Multiple truncation attributes found'))
+ truncated = d.pop(key)
assert len(d.keys()) == 1, ('Cannot encode more than one root '
'element: %s' % d.keys())
@@ -206,9 +217,11 @@ class XmlSerializer(object):
self.populate_element(root, d[name])
- # FIXME(gyee): special-case links for now
+ # NOTE(gyee, henry-nash): special-case links and truncation attribute
if links:
self._populate_links(root, links)
+ if truncated:
+ self._populate_truncated(root, truncated)
# TODO(dolph): you can get a doctype from lxml, using ElementTrees
return '%s\n%s' % (DOCTYPE, etree.tostring(root, pretty_print=True))
@@ -223,6 +236,11 @@ class XmlSerializer(object):
links.append(link)
element.append(links)
+ def _populate_truncated(self, element, truncated_value):
+ truncated = etree.Element('truncated')
+ self._populate_bool(truncated, 'truncated', truncated_value)
+ element.append(truncated)
+
def _populate_list(self, element, k, v):
"""Populates an element with a key & list value."""
# spec has a lot of inconsistency here!
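Assuming _populate_bool writes the boolean as a string attribute, _populate_truncated appends an element like the one built in this standalone lxml sketch (the root tag is fabricated):

    from lxml import etree

    root = etree.Element('users')
    truncated = etree.Element('truncated')
    truncated.set('truncated', 'true')  # assumed _populate_bool output
    root.append(truncated)
    print(etree.tostring(root, pretty_print=True))
    # <users>
    #   <truncated truncated="true"/>
    # </users>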
diff --git a/keystone/common/sql/__init__.py b/keystone/common/sql/__init__.py
index ee65b12d8..863d826fe 100644
--- a/keystone/common/sql/__init__.py
+++ b/keystone/common/sql/__init__.py
@@ -1,4 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
# flake8: noqa
# Copyright 2012 OpenStack Foundation
diff --git a/keystone/common/sql/core.py b/keystone/common/sql/core.py
index 4053ee221..dbb1172b4 100644
--- a/keystone/common/sql/core.py
+++ b/keystone/common/sql/core.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -44,14 +42,18 @@ ModelBase = declarative.declarative_base()
Column = sql.Column
Index = sql.Index
String = sql.String
+Integer = sql.Integer
+Enum = sql.Enum
ForeignKey = sql.ForeignKey
DateTime = sql.DateTime
IntegrityError = sql.exc.IntegrityError
+DBDuplicateEntry = db_exception.DBDuplicateEntry
OperationalError = sql.exc.OperationalError
NotFound = sql.orm.exc.NoResultFound
Boolean = sql.Boolean
Text = sql.Text
UniqueConstraint = sql.UniqueConstraint
+PrimaryKeyConstraint = sql.PrimaryKeyConstraint
relationship = sql.orm.relationship
joinedload = sql.orm.joinedload
# Suppress flake8's unused import warning for flag_modified:
@@ -146,6 +148,19 @@ class DictBase(models.ModelBase):
return getattr(self, key)
+class ModelDictMixin(object):
+
+ @classmethod
+ def from_dict(cls, d):
+ """Returns a model instance from a dictionary."""
+ return cls(**d)
+
+ def to_dict(self):
+ """Returns the model's attributes as a dictionary."""
+ names = (column.name for column in self.__table__.columns)
+ return dict((name, getattr(self, name)) for name in names)
+
+
@contextlib.contextmanager
def transaction(expire_on_commit=False):
"""Return a SQLAlchemy session in a scoped transaction."""
@@ -154,112 +169,186 @@ def transaction(expire_on_commit=False):
yield session
-# Backends
-class Base(object):
- def _filter(self, model, query, hints):
- """Applies filtering to a query.
+def truncated(f):
+ """Ensure list truncation is detected in Driver list entity methods.
+
+ This is designed to wrap SQL driver list_{entity} methods in order to
+ determine whether the resultant list has been truncated. Provided a limit
+ dict is found in the hints list, we increment the limit by one so as to
+ ask the wrapped function for one more entity than the limit; once the
+ list has been generated, we check whether the original limit has been
+ exceeded, in which case we truncate back to that limit and set the
+ 'truncated' boolean to 'true' in the hints limit dict.
+
+ """
+ @functools.wraps(f)
+ def wrapper(self, hints, *args, **kwargs):
+ if not hasattr(hints, 'get_limit'):
+ raise exception.UnexpectedError(
+ _('Cannot truncate a driver call without a hints list as '
+ 'the first parameter after self'))
+
+ limit_dict = hints.get_limit()
+ if limit_dict is None:
+ return f(self, hints, *args, **kwargs)
+
+ # A limit is set, so ask for one more entry than we need
+ list_limit = limit_dict['limit']
+ hints.set_limit(list_limit + 1)
+ ref_list = f(self, hints, *args, **kwargs)
+
+ # If we got more than the original limit then trim back the list and
+ # mark it truncated. In both cases, make sure we set the limit back
+ # to its original value.
+ if len(ref_list) > list_limit:
+ hints.set_limit(list_limit, truncated=True)
+ return ref_list[:list_limit]
+ else:
+ hints.set_limit(list_limit)
+ return ref_list
+ return wrapper
+
+
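A minimal sketch of the truncated decorator against a fake driver, assuming the Hints class from keystone.common.driver_hints provides the get_limit/set_limit methods used above:

    from keystone.common import driver_hints

    class FakeDriver(object):
        @truncated
        def list_widgets(self, hints):
            # The wrapper has already raised the limit by one.
            return [{'id': i} for i in range(hints.get_limit()['limit'])]

    hints = driver_hints.Hints()
    hints.set_limit(2)
    widgets = FakeDriver().list_widgets(hints)
    assert len(widgets) == 2
    assert hints.get_limit()['truncated']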
+def _filter(model, query, hints):
+ """Applies filtering to a query.
+
+ :param model: the table model in question
+ :param query: query to apply filters to
+ :param hints: contains the list of filters yet to be satisfied.
+ Any filters satisfied here will be removed so that
+ the caller will know if any filters remain.
+
+ :returns query: query, updated with any filters satisfied
+
+ """
+ def inexact_filter(model, query, filter_, hints):
+ """Applies an inexact filter to a query.
:param model: the table model in question
:param query: query to apply filters to
+ :param filter_: the dict that describes this filter
:param hints: contains the list of filters yet to be satisfied.
Any filters satisfied here will be removed so that
the caller will know if any filters remain.
- :returns query: query, updated with any filters satisfied
+ :returns query: query updated to add any inexact filters we could
+ satisfy
"""
- def inexact_filter(model, query, filter_, hints):
- """Applies an inexact filter to a query.
-
- :param model: the table model in question
- :param query: query to apply filters to
- :param filter_: the dict that describes this filter
- :param hints: contains the list of filters yet to be satisfied.
- Any filters satisfied here will be removed so that
- the caller will know if any filters remain.
-
- :returns query: query updated to add any inexact filters we could
- satisfy
-
- """
- column_attr = getattr(model, filter_['name'])
-
- # TODO(henry-nash): Sqlalchemy 0.7 defaults to case insensitivity
- # so once we find a way of changing that (maybe on a call-by-call
- # basis), we can add support for the case sensitive versions of
- # the filters below. For now, these case sensitive versions will
- # be handled at the controller level.
-
- if filter_['case_sensitive']:
- return query
-
- if filter_['comparator'] == 'contains':
- query_term = column_attr.ilike('%%%s%%' % filter_['value'])
- elif filter_['comparator'] == 'startswith':
- query_term = column_attr.ilike('%s%%' % filter_['value'])
- elif filter_['comparator'] == 'endswith':
- query_term = column_attr.ilike('%%%s' % filter_['value'])
- else:
- # It's a filter we don't understand, so let the caller
- # work out if they need to do something with it.
- return query
-
- hints.remove(filter_)
- return query.filter(query_term)
-
- def exact_filter(model, filter_, cumlative_filter_dict, hints):
- """Applies an exact filter to a query.
-
- :param model: the table model in question
- :param filter_: the dict that describes this filter
- :param cumlative_filter_dict: a dict that describes the set of
- exact filters built up so far
- :param hints: contains the list of filters yet to be satisfied.
- Any filters satisfied here will be removed so that
- the caller will know if any filters remain.
-
- :returns cumlative_filter_dict: updated cumulative dict
-
- """
- key = filter_['name']
- if isinstance(getattr(model, key).property.columns[0].type,
- sql.types.Boolean):
- filter_dict[key] = utils.attr_as_boolean(filter_['value'])
- else:
- filter_dict[key] = filter_['value']
- hints.remove(filter_)
- return filter_dict
-
- filter_dict = {}
-
- for filter_ in hints.filters():
- # TODO(henry-nash): Check if name is valid column, if not skip
- if filter_['comparator'] == 'equals':
- filter_dict = exact_filter(model, filter_, filter_dict, hints)
- else:
- query = inexact_filter(model, query, filter_, hints)
-
- # Apply any exact filters we built up
- if filter_dict:
- query = query.filter_by(**filter_dict)
-
- return query
-
- def filter_query(self, model, query, hints):
- """Applies filtering to a query.
+ column_attr = getattr(model, filter_['name'])
+
+ # TODO(henry-nash): Sqlalchemy 0.7 defaults to case insensitivity
+ # so once we find a way of changing that (maybe on a call-by-call
+ # basis), we can add support for the case sensitive versions of
+ # the filters below. For now, these case sensitive versions will
+ # be handled at the controller level.
+
+ if filter_['case_sensitive']:
+ return query
+
+ if filter_['comparator'] == 'contains':
+ query_term = column_attr.ilike('%%%s%%' % filter_['value'])
+ elif filter_['comparator'] == 'startswith':
+ query_term = column_attr.ilike('%s%%' % filter_['value'])
+ elif filter_['comparator'] == 'endswith':
+ query_term = column_attr.ilike('%%%s' % filter_['value'])
+ else:
+ # It's a filter we don't understand, so let the caller
+ # work out if they need to do something with it.
+ return query
+
+ hints.remove(filter_)
+ return query.filter(query_term)
+
+ def exact_filter(model, filter_, cumulative_filter_dict, hints):
+ """Applies an exact filter to a query.
- :param model: table model
- :param query: query to apply filters to
+ :param model: the table model in question
+ :param filter_: the dict that describes this filter
+ :param cumulative_filter_dict: a dict that describes the set of
+ exact filters built up so far
:param hints: contains the list of filters yet to be satisfied.
Any filters satisfied here will be removed so that
the caller will know if any filters remain.
- :returns updated query
+ :returns: updated cumulative dict
"""
- if hints is not None:
- query = self._filter(model, query, hints)
+ key = filter_['name']
+ if isinstance(getattr(model, key).property.columns[0].type,
+ sql.types.Boolean):
+ cumulative_filter_dict[key] = (
+ utils.attr_as_boolean(filter_['value']))
+ else:
+ cumulative_filter_dict[key] = filter_['value']
+ hints.remove(filter_)
+ return cumulative_filter_dict
+
+ filter_dict = {}
+
+ for filter_ in hints.filters():
+ # TODO(henry-nash): Check if name is valid column, if not skip
+ if filter_['comparator'] == 'equals':
+ filter_dict = exact_filter(model, filter_, filter_dict, hints)
+ else:
+ query = inexact_filter(model, query, filter_, hints)
+
+ # Apply any exact filters we built up
+ if filter_dict:
+ query = query.filter_by(**filter_dict)
+
+ return query
+
+
+def _limit(query, hints):
+ """Applies a limit to a query.
+
+ :param query: query to apply filters to
+ :param hints: contains the list of filters and limit details.
+
+ :returns: updated query
+
+ """
+ # NOTE(henry-nash): If we were to implement pagination, then we
+ # would expand this method to support pagination and limiting.
+
+ # If we satisfied all the filters, set an upper limit if supplied
+ list_limit = hints.get_limit()
+ if list_limit:
+ query = query.limit(list_limit['limit'])
+ return query
+
+
+def filter_limit_query(model, query, hints):
+ """Applies filtering and limit to a query.
+
+ :param model: table model
+ :param query: query to apply filters to
+ :param hints: contains the list of filters and limit details. This may
+ be None, indicating that there are no filters or limits
+ to be applied. If it's not None, then any filters
+ satisfied here will be removed so that the caller will
+ know if any filters remain.
+
+ :returns: updated query
+
+ """
+ if hints is None:
+ return query
+
+ # First try and satisfy any filters
+ query = _filter(model, query, hints)
+
+ # NOTE(henry-nash): Any unsatisfied filters will have been left in
+ # the hints list for the controller to handle. We can only try and
+ # limit here if all the filters are already satisfied since, if not,
+ # doing so might mess up the final results. If there are still
+ # unsatisfied filters, we have to leave any limiting to the controller
+ # as well.
+ if not hints.filters():
+ return _limit(query, hints)
+ else:
return query
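Driving the helpers from a driver method might look like the following sketch (Widget and the filter values are fabricated; the Hints API is assumed as above). Any filter the SQL layer could not satisfy remains in hints for the controller:

    from keystone.common import driver_hints

    hints = driver_hints.Hints()
    hints.add_filter('name', 'span', comparator='startswith')
    hints.set_limit(100)

    with transaction() as session:
        query = session.query(Widget)
        query = filter_limit_query(Widget, query, hints)
        refs = [widget.to_dict() for widget in query.all()]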
@@ -271,13 +360,14 @@ def handle_conflicts(conflict_type='object'):
try:
return method(*args, **kwargs)
except db_exception.DBDuplicateEntry as e:
- raise exception.Conflict(type=conflict_type, details=str(e))
+ raise exception.Conflict(type=conflict_type,
+ details=six.text_type(e))
except db_exception.DBError as e:
# TODO(blk-u): inspecting inner_exception breaks encapsulation;
# oslo.db should provide exception we need.
if isinstance(e.inner_exception, IntegrityError):
raise exception.Conflict(type=conflict_type,
- details=str(e))
+ details=six.text_type(e))
raise
return wrapper
diff --git a/keystone/common/sql/migrate_repo/versions/001_add_initial_tables.py b/keystone/common/sql/migrate_repo/versions/001_add_initial_tables.py
index 64fff312c..db8779ff5 100644
--- a/keystone/common/sql/migrate_repo/versions/001_add_initial_tables.py
+++ b/keystone/common/sql/migrate_repo/versions/001_add_initial_tables.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/common/sql/migrate_repo/versions/002_token_id_hash.py b/keystone/common/sql/migrate_repo/versions/002_token_id_hash.py
index 4d38b525d..d2b6d4730 100644
--- a/keystone/common/sql/migrate_repo/versions/002_token_id_hash.py
+++ b/keystone/common/sql/migrate_repo/versions/002_token_id_hash.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright (c) 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/common/sql/migrate_repo/versions/003_token_valid.py b/keystone/common/sql/migrate_repo/versions/003_token_valid.py
index c8df800cc..5fefc0515 100644
--- a/keystone/common/sql/migrate_repo/versions/003_token_valid.py
+++ b/keystone/common/sql/migrate_repo/versions/003_token_valid.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/common/sql/migrate_repo/versions/004_undo_token_id_hash.py b/keystone/common/sql/migrate_repo/versions/004_undo_token_id_hash.py
index 8cfad79f4..cfe1fce81 100644
--- a/keystone/common/sql/migrate_repo/versions/004_undo_token_id_hash.py
+++ b/keystone/common/sql/migrate_repo/versions/004_undo_token_id_hash.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright (c) 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/common/sql/migrate_repo/versions/005_set_utf8_character_set.py b/keystone/common/sql/migrate_repo/versions/005_set_utf8_character_set.py
index 1f1ff2af2..72cac71f3 100644
--- a/keystone/common/sql/migrate_repo/versions/005_set_utf8_character_set.py
+++ b/keystone/common/sql/migrate_repo/versions/005_set_utf8_character_set.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/common/sql/migrate_repo/versions/006_add_policy_table.py b/keystone/common/sql/migrate_repo/versions/006_add_policy_table.py
index 0be005b0b..62f87d427 100644
--- a/keystone/common/sql/migrate_repo/versions/006_add_policy_table.py
+++ b/keystone/common/sql/migrate_repo/versions/006_add_policy_table.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/common/sql/migrate_repo/versions/007_add_domain_tables.py b/keystone/common/sql/migrate_repo/versions/007_add_domain_tables.py
index 6fda6ec8c..adf350b4c 100644
--- a/keystone/common/sql/migrate_repo/versions/007_add_domain_tables.py
+++ b/keystone/common/sql/migrate_repo/versions/007_add_domain_tables.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/common/sql/migrate_repo/versions/008_create_default_domain.py b/keystone/common/sql/migrate_repo/versions/008_create_default_domain.py
index f28d2ee38..09e16cbf5 100644
--- a/keystone/common/sql/migrate_repo/versions/008_create_default_domain.py
+++ b/keystone/common/sql/migrate_repo/versions/008_create_default_domain.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/common/sql/migrate_repo/versions/009_normalize_identity.py b/keystone/common/sql/migrate_repo/versions/009_normalize_identity.py
index dec7bbb47..b41b5172a 100644
--- a/keystone/common/sql/migrate_repo/versions/009_normalize_identity.py
+++ b/keystone/common/sql/migrate_repo/versions/009_normalize_identity.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/common/sql/migrate_repo/versions/010_normalize_identity_migration.py b/keystone/common/sql/migrate_repo/versions/010_normalize_identity_migration.py
index 9c7485b0c..41b609ad7 100644
--- a/keystone/common/sql/migrate_repo/versions/010_normalize_identity_migration.py
+++ b/keystone/common/sql/migrate_repo/versions/010_normalize_identity_migration.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/common/sql/migrate_repo/versions/011_endpoints_v3.py b/keystone/common/sql/migrate_repo/versions/011_endpoints_v3.py
index 08eb5821e..d9dc00acb 100644
--- a/keystone/common/sql/migrate_repo/versions/011_endpoints_v3.py
+++ b/keystone/common/sql/migrate_repo/versions/011_endpoints_v3.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/common/sql/migrate_repo/versions/012_populate_endpoint_type.py b/keystone/common/sql/migrate_repo/versions/012_populate_endpoint_type.py
index e1e380cde..1759e16c1 100644
--- a/keystone/common/sql/migrate_repo/versions/012_populate_endpoint_type.py
+++ b/keystone/common/sql/migrate_repo/versions/012_populate_endpoint_type.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/common/sql/migrate_repo/versions/013_drop_legacy_endpoints.py b/keystone/common/sql/migrate_repo/versions/013_drop_legacy_endpoints.py
index 93c731a3a..ca83faa29 100644
--- a/keystone/common/sql/migrate_repo/versions/013_drop_legacy_endpoints.py
+++ b/keystone/common/sql/migrate_repo/versions/013_drop_legacy_endpoints.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/common/sql/migrate_repo/versions/014_add_group_tables.py b/keystone/common/sql/migrate_repo/versions/014_add_group_tables.py
index 0e15874a5..5fdf3d444 100644
--- a/keystone/common/sql/migrate_repo/versions/014_add_group_tables.py
+++ b/keystone/common/sql/migrate_repo/versions/014_add_group_tables.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/common/sql/migrate_repo/versions/015_tenant_to_project.py b/keystone/common/sql/migrate_repo/versions/015_tenant_to_project.py
index 66108311c..7338aaee7 100644
--- a/keystone/common/sql/migrate_repo/versions/015_tenant_to_project.py
+++ b/keystone/common/sql/migrate_repo/versions/015_tenant_to_project.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/common/sql/migrate_repo/versions/016_normalize_domain_ids.py b/keystone/common/sql/migrate_repo/versions/016_normalize_domain_ids.py
index 5c0586e62..22bdc7078 100644
--- a/keystone/common/sql/migrate_repo/versions/016_normalize_domain_ids.py
+++ b/keystone/common/sql/migrate_repo/versions/016_normalize_domain_ids.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
# Copyright 2013 IBM Corp.
#
diff --git a/keystone/common/sql/migrate_repo/versions/017_membership_role.py b/keystone/common/sql/migrate_repo/versions/017_membership_role.py
index 2c1771f9b..64c9c4740 100644
--- a/keystone/common/sql/migrate_repo/versions/017_membership_role.py
+++ b/keystone/common/sql/migrate_repo/versions/017_membership_role.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/common/sql/migrate_repo/versions/018_add_trust_tables.py b/keystone/common/sql/migrate_repo/versions/018_add_trust_tables.py
index 99f165eae..cec39a74c 100644
--- a/keystone/common/sql/migrate_repo/versions/018_add_trust_tables.py
+++ b/keystone/common/sql/migrate_repo/versions/018_add_trust_tables.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/common/sql/migrate_repo/versions/019_fixup_role.py b/keystone/common/sql/migrate_repo/versions/019_fixup_role.py
index a9454e4ee..bbd0cae5d 100644
--- a/keystone/common/sql/migrate_repo/versions/019_fixup_role.py
+++ b/keystone/common/sql/migrate_repo/versions/019_fixup_role.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/common/sql/migrate_repo/versions/020_migrate_metadata_table_roles.py b/keystone/common/sql/migrate_repo/versions/020_migrate_metadata_table_roles.py
index cfde23738..05237c0d9 100644
--- a/keystone/common/sql/migrate_repo/versions/020_migrate_metadata_table_roles.py
+++ b/keystone/common/sql/migrate_repo/versions/020_migrate_metadata_table_roles.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/common/sql/migrate_repo/versions/021_add_trust_to_token.py b/keystone/common/sql/migrate_repo/versions/021_add_trust_to_token.py
index 1de51e1b8..b5dd3eacc 100644
--- a/keystone/common/sql/migrate_repo/versions/021_add_trust_to_token.py
+++ b/keystone/common/sql/migrate_repo/versions/021_add_trust_to_token.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/common/sql/migrate_repo/versions/022_move_legacy_endpoint_id.py b/keystone/common/sql/migrate_repo/versions/022_move_legacy_endpoint_id.py
index 70570002b..58737f579 100644
--- a/keystone/common/sql/migrate_repo/versions/022_move_legacy_endpoint_id.py
+++ b/keystone/common/sql/migrate_repo/versions/022_move_legacy_endpoint_id.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/common/sql/migrate_repo/versions/023_drop_credential_constraints.py b/keystone/common/sql/migrate_repo/versions/023_drop_credential_constraints.py
index 99967e6a9..689384269 100644
--- a/keystone/common/sql/migrate_repo/versions/023_drop_credential_constraints.py
+++ b/keystone/common/sql/migrate_repo/versions/023_drop_credential_constraints.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/common/sql/migrate_repo/versions/024_add_index_to_expires.py b/keystone/common/sql/migrate_repo/versions/024_add_index_to_expires.py
index ffb827e6e..24f98043d 100644
--- a/keystone/common/sql/migrate_repo/versions/024_add_index_to_expires.py
+++ b/keystone/common/sql/migrate_repo/versions/024_add_index_to_expires.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/common/sql/migrate_repo/versions/025_add_index_to_valid.py b/keystone/common/sql/migrate_repo/versions/025_add_index_to_valid.py
index 3bb547c9b..62bee18aa 100644
--- a/keystone/common/sql/migrate_repo/versions/025_add_index_to_valid.py
+++ b/keystone/common/sql/migrate_repo/versions/025_add_index_to_valid.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/common/sql/migrate_repo/versions/026_drop_user_group_constraints.py b/keystone/common/sql/migrate_repo/versions/026_drop_user_group_constraints.py
index 266085c4a..f6f236216 100644
--- a/keystone/common/sql/migrate_repo/versions/026_drop_user_group_constraints.py
+++ b/keystone/common/sql/migrate_repo/versions/026_drop_user_group_constraints.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/common/sql/migrate_repo/versions/027_set_engine_mysql_innodb.py b/keystone/common/sql/migrate_repo/versions/027_set_engine_mysql_innodb.py
index c5e954f80..48d77f526 100644
--- a/keystone/common/sql/migrate_repo/versions/027_set_engine_mysql_innodb.py
+++ b/keystone/common/sql/migrate_repo/versions/027_set_engine_mysql_innodb.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/common/sql/migrate_repo/versions/028_fixup_group_metadata.py b/keystone/common/sql/migrate_repo/versions/028_fixup_group_metadata.py
index d1ca484ca..61fce39eb 100644
--- a/keystone/common/sql/migrate_repo/versions/028_fixup_group_metadata.py
+++ b/keystone/common/sql/migrate_repo/versions/028_fixup_group_metadata.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/common/sql/migrate_repo/versions/029_update_assignment_metadata.py b/keystone/common/sql/migrate_repo/versions/029_update_assignment_metadata.py
index 82e992b73..a02f9d8a1 100644
--- a/keystone/common/sql/migrate_repo/versions/029_update_assignment_metadata.py
+++ b/keystone/common/sql/migrate_repo/versions/029_update_assignment_metadata.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/common/sql/migrate_repo/versions/030_drop_credential_constraint_sqlite.py b/keystone/common/sql/migrate_repo/versions/030_drop_credential_constraint_sqlite.py
index 5d6e3d26d..aff1713fa 100644
--- a/keystone/common/sql/migrate_repo/versions/030_drop_credential_constraint_sqlite.py
+++ b/keystone/common/sql/migrate_repo/versions/030_drop_credential_constraint_sqlite.py
@@ -1,6 +1,3 @@
-
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/common/sql/migrate_repo/versions/031_drop_credential_indexes.py b/keystone/common/sql/migrate_repo/versions/031_drop_credential_indexes.py
index 89ca04f02..37943aff0 100644
--- a/keystone/common/sql/migrate_repo/versions/031_drop_credential_indexes.py
+++ b/keystone/common/sql/migrate_repo/versions/031_drop_credential_indexes.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/common/sql/migrate_repo/versions/032_username_length.py b/keystone/common/sql/migrate_repo/versions/032_username_length.py
index 052b62ca2..26530eb6b 100644
--- a/keystone/common/sql/migrate_repo/versions/032_username_length.py
+++ b/keystone/common/sql/migrate_repo/versions/032_username_length.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/common/sql/migrate_repo/versions/033_migrate_ec2credentials_table_credentials.py b/keystone/common/sql/migrate_repo/versions/033_migrate_ec2credentials_table_credentials.py
index e389e9bb3..080c84ce3 100644
--- a/keystone/common/sql/migrate_repo/versions/033_migrate_ec2credentials_table_credentials.py
+++ b/keystone/common/sql/migrate_repo/versions/033_migrate_ec2credentials_table_credentials.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/common/sql/migrate_repo/versions/034_add_default_project_id_column_to_user.py b/keystone/common/sql/migrate_repo/versions/034_add_default_project_id_column_to_user.py
index def59d286..17ad21713 100644
--- a/keystone/common/sql/migrate_repo/versions/034_add_default_project_id_column_to_user.py
+++ b/keystone/common/sql/migrate_repo/versions/034_add_default_project_id_column_to_user.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/common/sql/migrate_repo/versions/035_add_compound_revoked_token_index.py b/keystone/common/sql/migrate_repo/versions/035_add_compound_revoked_token_index.py
index bcce9d4b3..171681919 100644
--- a/keystone/common/sql/migrate_repo/versions/035_add_compound_revoked_token_index.py
+++ b/keystone/common/sql/migrate_repo/versions/035_add_compound_revoked_token_index.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/common/sql/migrate_repo/versions/036_token_drop_valid_index.py b/keystone/common/sql/migrate_repo/versions/036_token_drop_valid_index.py
index 6bf3c5e93..89e03b750 100644
--- a/keystone/common/sql/migrate_repo/versions/036_token_drop_valid_index.py
+++ b/keystone/common/sql/migrate_repo/versions/036_token_drop_valid_index.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/common/sql/migrate_repo/versions/037_add_region_table.py b/keystone/common/sql/migrate_repo/versions/037_add_region_table.py
index e1f2e659f..08413e2d9 100644
--- a/keystone/common/sql/migrate_repo/versions/037_add_region_table.py
+++ b/keystone/common/sql/migrate_repo/versions/037_add_region_table.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/common/sql/migrate_repo/versions/038_add_assignment_table.py b/keystone/common/sql/migrate_repo/versions/038_add_assignment_table.py
new file mode 100644
index 000000000..92e9faab2
--- /dev/null
+++ b/keystone/common/sql/migrate_repo/versions/038_add_assignment_table.py
@@ -0,0 +1,51 @@
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sqlalchemy as sql
+
+from keystone.assignment.backends import sql as assignment_sql
+
+ASSIGNMENT_TABLE = 'assignment'
+
+
+def upgrade(migrate_engine):
+ meta = sql.MetaData()
+ meta.bind = migrate_engine
+
+ sql.Table('role', meta, autoload=True)
+ assignment_table = sql.Table(
+ ASSIGNMENT_TABLE,
+ meta,
+ sql.Column('type', sql.Enum(
+ assignment_sql.AssignmentType.USER_PROJECT,
+ assignment_sql.AssignmentType.GROUP_PROJECT,
+ assignment_sql.AssignmentType.USER_DOMAIN,
+ assignment_sql.AssignmentType.GROUP_DOMAIN,
+ name='type'),
+ nullable=False),
+ sql.Column('actor_id', sql.String(64), nullable=False),
+ sql.Column('target_id', sql.String(64), nullable=False),
+ sql.Column('role_id', sql.String(64), sql.ForeignKey('role.id'),
+ nullable=False),
+ sql.Column('inherited', sql.Boolean, default=False, nullable=False),
+ sql.PrimaryKeyConstraint('type', 'actor_id', 'target_id', 'role_id'))
+ assignment_table.create(migrate_engine, checkfirst=True)
+
+
+def downgrade(migrate_engine):
+ meta = sql.MetaData()
+ meta.bind = migrate_engine
+
+ assignment = sql.Table(ASSIGNMENT_TABLE, meta, autoload=True)
+ assignment.drop(migrate_engine, checkfirst=True)
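The new table collapses the four grant tables into a single row shape; a hedged sketch of one row (values fabricated; 'type' takes one of the four AssignmentType enum values):

    row = {'type': 'UserProject',  # assignment_sql.AssignmentType.USER_PROJECT
           'actor_id': 'u1',       # user or group id
           'target_id': 'p1',      # project or domain id
           'role_id': 'r1',
           'inherited': False}
    # The composite primary key (type, actor_id, target_id, role_id) allows a
    # given role to be assigned at most once per actor/target pair of each
    # assignment type.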
diff --git a/keystone/common/sql/migrate_repo/versions/039_grant_to_assignment.py b/keystone/common/sql/migrate_repo/versions/039_grant_to_assignment.py
new file mode 100644
index 000000000..df4985839
--- /dev/null
+++ b/keystone/common/sql/migrate_repo/versions/039_grant_to_assignment.py
@@ -0,0 +1,231 @@
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+
+import sqlalchemy as sql
+
+from keystone.assignment.backends import sql as assignment_sql
+
+USER_PROJECT_TABLE = 'user_project_metadata'
+GROUP_PROJECT_TABLE = 'group_project_metadata'
+USER_DOMAIN_TABLE = 'user_domain_metadata'
+GROUP_DOMAIN_TABLE = 'group_domain_metadata'
+
+ASSIGNMENT_TABLE = 'assignment'
+
+GRANT_TABLES = [USER_PROJECT_TABLE, USER_DOMAIN_TABLE,
+ GROUP_PROJECT_TABLE, GROUP_DOMAIN_TABLE]
+
+
+def migrate_grant_table(meta, migrate_engine, session, table_name):
+
+ def extract_actor_and_target(table_name, composite_grant):
+ if table_name == USER_PROJECT_TABLE:
+ return {'type': assignment_sql.AssignmentType.USER_PROJECT,
+ 'actor_id': composite_grant.user_id,
+ 'target_id': composite_grant.project_id}
+ elif table_name == GROUP_PROJECT_TABLE:
+ return {'type': assignment_sql.AssignmentType.GROUP_PROJECT,
+ 'actor_id': composite_grant.group_id,
+ 'target_id': composite_grant.project_id}
+ elif table_name == USER_DOMAIN_TABLE:
+ return {'type': assignment_sql.AssignmentType.USER_DOMAIN,
+ 'actor_id': composite_grant.user_id,
+ 'target_id': composite_grant.domain_id}
+ else:
+ return {'type': assignment_sql.AssignmentType.GROUP_DOMAIN,
+ 'actor_id': composite_grant.group_id,
+ 'target_id': composite_grant.domain_id}
+
+ def grant_to_grant_dict_list(table_name, composite_grant):
+ """Make each role in the list of this entry a separate assignment."""
+ json_metadata = json.loads(composite_grant.data)
+ role_dict_list = []
+ if 'roles' in json_metadata:
+ for x in json_metadata['roles']:
+ if x.get('id') is None:
+ # Looks like an invalid role, drop it
+ continue
+ grant = extract_actor_and_target(table_name, composite_grant)
+ grant['role_id'] = x.get('id')
+ grant['inherited'] = False
+ if x.get('inherited_to') == 'projects':
+ grant['inherited'] = True
+ role_dict_list.append(grant)
+ return role_dict_list
+
+ upgrade_table = sql.Table(table_name, meta, autoload=True)
+ assignment_table = sql.Table(ASSIGNMENT_TABLE, meta, autoload=True)
+
+ # For each grant in this table, expand it out to be an assignment entry for
+ # each role in the metadata
+ for grant in session.query(upgrade_table).all():
+ for grant_role in grant_to_grant_dict_list(table_name, grant):
+ new_entry = assignment_table.insert().values(
+ type=grant_role['type'],
+ actor_id=grant_role['actor_id'],
+ target_id=grant_role['target_id'],
+ role_id=grant_role['role_id'],
+ inherited=grant_role['inherited'])
+ migrate_engine.execute(new_entry)
+
+ # Delete all the rows
+ migrate_engine.execute(upgrade_table.delete())
+
+
+def downgrade_assignment_table(meta, migrate_engine):
+
+ def add_to_dict_list(metadata, assignment_row):
+ """Update a metadata dict list with the role.
+
+ For the assignment row supplied, we need to append the role_id into
+ the metadata list of dicts. If the row is inherited, then we mark
+ it so in the dict we append.
+
+ """
+ new_entry = {'id': assignment_row.role_id}
+ if assignment_row.inherited and (
+ assignment_row.type ==
+ assignment_sql.AssignmentType.USER_DOMAIN or
+ assignment_row.type ==
+ assignment_sql.AssignmentType.GROUP_DOMAIN):
+ new_entry['inherited_to'] = 'projects'
+
+ if metadata is not None:
+ json_metadata = json.loads(metadata)
+ else:
+ json_metadata = {}
+
+ if json_metadata.get('roles') is None:
+ json_metadata['roles'] = []
+
+ json_metadata['roles'].append(new_entry)
+ return json.dumps(json_metadata)
+
+ def build_user_project_entry(meta, session, row):
+ update_table = sql.Table(USER_PROJECT_TABLE, meta, autoload=True)
+ q = session.query(update_table)
+ q = q.filter_by(user_id=row.actor_id)
+ q = q.filter_by(project_id=row.target_id)
+ ref = q.first()
+ if ref is not None:
+ values = {'data': add_to_dict_list(ref.data, row)}
+ update = update_table.update().where(
+ update_table.c.user_id == ref.user_id).where(
+ update_table.c.project_id == ref.project_id).values(values)
+ else:
+ values = {'user_id': row.actor_id,
+ 'project_id': row.target_id,
+ 'data': add_to_dict_list(None, row)}
+ update = update_table.insert().values(values)
+ return update
+
+ def build_group_project_entry(meta, session, row):
+ update_table = sql.Table(GROUP_PROJECT_TABLE, meta, autoload=True)
+ q = session.query(update_table)
+ q = q.filter_by(group_id=row.actor_id)
+ q = q.filter_by(project_id=row.target_id)
+ ref = q.first()
+ if ref is not None:
+ values = {'data': add_to_dict_list(ref.data, row)}
+ update = update_table.update().where(
+ update_table.c.group_id == ref.group_id).where(
+ update_table.c.project_id == ref.project_id).values(values)
+ else:
+ values = {'group_id': row.actor_id,
+ 'project_id': row.target_id,
+ 'data': add_to_dict_list(None, row)}
+ update = update_table.insert().values(values)
+ return update
+
+ def build_user_domain_entry(meta, session, row):
+ update_table = sql.Table(USER_DOMAIN_TABLE, meta, autoload=True)
+ q = session.query(update_table)
+ q = q.filter_by(user_id=row.actor_id)
+ q = q.filter_by(domain_id=row.target_id)
+ ref = q.first()
+ if ref is not None:
+ values = {'data': add_to_dict_list(ref.data, row)}
+ update = update_table.update().where(
+ update_table.c.user_id == ref.user_id).where(
+ update_table.c.domain_id == ref.domain_id).values(values)
+ else:
+ values = {'user_id': row.actor_id,
+ 'domain_id': row.target_id,
+ 'data': add_to_dict_list(None, row)}
+ update = update_table.insert().values(values)
+ return update
+
+ def build_group_domain_entry(meta, session, row):
+ update_table = sql.Table(GROUP_DOMAIN_TABLE, meta, autoload=True)
+ q = session.query(update_table)
+ q = q.filter_by(group_id=row.actor_id)
+ q = q.filter_by(domain_id=row.target_id)
+ ref = q.first()
+ if ref is not None:
+ values = {'data': add_to_dict_list(ref.data, row)}
+ update = update_table.update().where(
+ update_table.c.group_id == ref.group_id).where(
+ update_table.c.domain_id == ref.domain_id).values(values)
+ else:
+ values = {'group_id': row.actor_id,
+ 'domain_id': row.target_id,
+ 'data': add_to_dict_list(None, row)}
+ update = update_table.insert().values(values)
+ return update
+
+ def build_update(meta, session, row):
+ """Build an update or an insert to the correct metadata table."""
+ if row.type == assignment_sql.AssignmentType.USER_PROJECT:
+ return build_user_project_entry(meta, session, row)
+ elif row.type == assignment_sql.AssignmentType.GROUP_PROJECT:
+ return build_group_project_entry(meta, session, row)
+ elif row.type == assignment_sql.AssignmentType.USER_DOMAIN:
+ return build_user_domain_entry(meta, session, row)
+ elif row.type == assignment_sql.AssignmentType.GROUP_DOMAIN:
+ return build_group_domain_entry(meta, session, row)
+ # If the row type doesn't match any that we understand, we drop
+ # the data.
+
+ session = sql.orm.sessionmaker(bind=migrate_engine)()
+ downgrade_table = sql.Table(ASSIGNMENT_TABLE, meta, autoload=True)
+ for assignment in session.query(downgrade_table).all():
+ update = build_update(meta, session, assignment)
+ if update is not None:
+ migrate_engine.execute(update)
+
+ # Delete all the rows
+ migrate_engine.execute(downgrade_table.delete())
+
+ session.commit()
+ session.close()
+
+
+def upgrade(migrate_engine):
+ meta = sql.MetaData()
+ meta.bind = migrate_engine
+
+ session = sql.orm.sessionmaker(bind=migrate_engine)()
+ for table_name in GRANT_TABLES:
+ migrate_grant_table(meta, migrate_engine, session, table_name)
+ session.commit()
+ session.close()
+
+
+def downgrade(migrate_engine):
+ meta = sql.MetaData()
+ meta.bind = migrate_engine
+
+ downgrade_assignment_table(meta, migrate_engine)
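The upgrade path in migrate_grant_table expands one metadata row into one assignment row per role; reduced to a standalone sketch (IDs fabricated; the real rows use the AssignmentType constants for 'type'):

    import json

    data = json.dumps({'roles': [{'id': 'r1'},
                                 {'id': 'r2', 'inherited_to': 'projects'}]})

    rows = []
    for role in json.loads(data)['roles']:
        rows.append({'type': 'UserDomain',  # AssignmentType.USER_DOMAIN
                     'actor_id': 'u1',
                     'target_id': 'd1',
                     'role_id': role['id'],
                     'inherited': role.get('inherited_to') == 'projects'})
    # One user_domain_metadata row with two roles becomes two assignment rows;
    # only the role carrying inherited_to='projects' is marked inherited.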
diff --git a/keystone/common/sql/migrate_repo/versions/040_drop_grant_tables.py b/keystone/common/sql/migrate_repo/versions/040_drop_grant_tables.py
new file mode 100644
index 000000000..58c0f7dc5
--- /dev/null
+++ b/keystone/common/sql/migrate_repo/versions/040_drop_grant_tables.py
@@ -0,0 +1,106 @@
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sqlalchemy as sql
+
+USER_PROJECT_TABLE = 'user_project_metadata'
+GROUP_PROJECT_TABLE = 'group_project_metadata'
+USER_DOMAIN_TABLE = 'user_domain_metadata'
+GROUP_DOMAIN_TABLE = 'group_domain_metadata'
+
+GRANT_TABLES = [USER_PROJECT_TABLE, USER_DOMAIN_TABLE,
+ GROUP_PROJECT_TABLE, GROUP_DOMAIN_TABLE]
+
+
+def recreate_grant_tables(meta, migrate_engine):
+ sql.Table('user', meta, autoload=True)
+ sql.Table('group', meta, autoload=True)
+ sql.Table('project', meta, autoload=True)
+ sql.Table('domain', meta, autoload=True)
+
+ user_project_metadata_table = sql.Table(
+ USER_PROJECT_TABLE,
+ meta,
+ sql.Column(
+ 'user_id',
+ sql.String(64),
+ primary_key=True),
+ sql.Column(
+ 'project_id',
+ sql.String(64),
+ sql.ForeignKey('project.id'),
+ primary_key=True),
+ sql.Column('data', sql.Text()))
+ user_project_metadata_table.create(migrate_engine, checkfirst=True)
+
+ group_project_metadata_table = sql.Table(
+ GROUP_PROJECT_TABLE,
+ meta,
+ sql.Column(
+ 'group_id',
+ sql.String(64),
+ primary_key=True),
+ sql.Column(
+ 'project_id',
+ sql.String(64),
+ sql.ForeignKey('project.id'),
+ primary_key=True),
+ sql.Column('data', sql.Text()))
+ group_project_metadata_table.create(migrate_engine, checkfirst=True)
+
+ user_domain_metadata_table = sql.Table(
+ USER_DOMAIN_TABLE,
+ meta,
+ sql.Column(
+ 'user_id',
+ sql.String(64),
+ primary_key=True),
+ sql.Column(
+ 'domain_id',
+ sql.String(64),
+ sql.ForeignKey('domain.id'),
+ primary_key=True),
+ sql.Column('data', sql.Text()))
+ user_domain_metadata_table.create(migrate_engine, checkfirst=True)
+
+ group_domain_metadata_table = sql.Table(
+ GROUP_DOMAIN_TABLE,
+ meta,
+ sql.Column(
+ 'group_id',
+ sql.String(64),
+ primary_key=True),
+ sql.Column(
+ 'domain_id',
+ sql.String(64),
+ sql.ForeignKey('domain.id'),
+ primary_key=True),
+ sql.Column('data', sql.Text()))
+ group_domain_metadata_table.create(migrate_engine, checkfirst=True)
+
+
+def upgrade(migrate_engine):
+ meta = sql.MetaData()
+ meta.bind = migrate_engine
+
+ for table_name in GRANT_TABLES:
+ grant_table = sql.Table(table_name, meta, autoload=True)
+ grant_table.drop(migrate_engine, checkfirst=True)
+
+
+def downgrade(migrate_engine):
+ meta = sql.MetaData()
+ meta.bind = migrate_engine
+
+ recreate_grant_tables(meta, migrate_engine)
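Both directions of this migration lean on SQLAlchemy's checkfirst=True to stay
idempotent: an existence check is issued before each CREATE or DROP, so
re-running against a partially migrated schema does not error out. A standalone
sketch of that behavior (throwaway in-memory SQLite, illustrative only)::

    import sqlalchemy as sql

    engine = sql.create_engine('sqlite://')
    meta = sql.MetaData()
    demo = sql.Table('demo', meta,
                     sql.Column('id', sql.Integer, primary_key=True))

    demo.create(engine, checkfirst=True)   # creates the table
    demo.create(engine, checkfirst=True)   # no-op instead of an error
    demo.drop(engine, checkfirst=True)     # drops the table
    demo.drop(engine, checkfirst=True)     # no-op again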
diff --git a/keystone/common/sql/migration.py b/keystone/common/sql/migration.py
index c96fdf062..f4cd0c77e 100644
--- a/keystone/common/sql/migration.py
+++ b/keystone/common/sql/migration.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
@@ -17,11 +15,11 @@
# License for the specific language governing permissions and limitations
# under the License.
-import os
import sys
from migrate.versioning import api as versioning_api
+from keystone.common.sql import migration_helpers
from keystone import config
@@ -49,42 +47,27 @@ def migrate_repository(version, current_version, repo_path):
return result
-def db_sync(version=None, repo_path=None):
+def db_sync(version=None, package=None):
if version is not None:
try:
version = int(version)
except ValueError:
raise Exception(_('version should be an integer'))
- if repo_path is None:
- repo_path = find_migrate_repo()
- current_version = db_version(repo_path=repo_path)
+ repo_path = migration_helpers.find_migrate_repo(package=package)
+ current_version = db_version(package=package)
return migrate_repository(version, current_version, repo_path)
-def db_version(repo_path=None):
- if repo_path is None:
- repo_path = find_migrate_repo()
+def db_version(package=None):
+ repo_path = migration_helpers.find_migrate_repo(package=package)
try:
return versioning_api.db_version(CONF.database.connection, repo_path)
except versioning_exceptions.DatabaseNotControlledError:
- return db_version_control(0)
+ return db_version_control(version=0, package=package)
-def db_version_control(version=None, repo_path=None):
- if repo_path is None:
- repo_path = find_migrate_repo()
+def db_version_control(version=None, package=None):
+ repo_path = migration_helpers.find_migrate_repo(package=package)
versioning_api.version_control(CONF.database.connection, repo_path,
version)
return version
-
-
-def find_migrate_repo(package=None):
- """Get the path for the migrate repository."""
- if package is None:
- filename = __file__
- else:
- filename = package.__file__
- path = os.path.join(os.path.abspath(os.path.dirname(filename)),
- 'migrate_repo')
- assert os.path.exists(path)
- return path
diff --git a/keystone/common/sql/migration_helpers.py b/keystone/common/sql/migration_helpers.py
index 48fa188ce..df98bdb39 100644
--- a/keystone/common/sql/migration_helpers.py
+++ b/keystone/common/sql/migration_helpers.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
# Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
@@ -15,9 +13,15 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+
+import os
+
import migrate
import sqlalchemy
+from keystone.common import sql
+from keystone import exception
+
# Different RDBMSs use different schemes for naming the Foreign Key
# Constraints. SQLAlchemy does not yet attempt to determine the name
@@ -93,3 +97,12 @@ def rename_tables_with_constraints(renames, constraints, engine):
if engine != 'sqlite':
add_constraints(constraints)
+
+
+def find_migrate_repo(package=None, repo_name='migrate_repo'):
+ package = package or sql
+ path = os.path.abspath(os.path.join(
+ os.path.dirname(package.__file__), repo_name))
+ if os.path.isdir(path):
+ return path
+ raise exception.MigrationNotProvided(package.__name__, path)
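With find_migrate_repo keyed on a package, the db_sync/db_version entry points
in migration.py above can locate an extension's own migrate_repo. A minimal
sketch of the intended call pattern, assuming the federation extension's
repository exists on disk::

    from keystone.common.sql import migration, migration_helpers
    from keystone.contrib import federation

    # Default: resolves to keystone/common/sql/migrate_repo.
    migration_helpers.find_migrate_repo()

    # Extension: resolves to keystone/contrib/federation/migrate_repo,
    # or raises exception.MigrationNotProvided if the directory is missing.
    migration_helpers.find_migrate_repo(package=federation)

    # db_sync/db_version/db_version_control accept the same argument.
    migration.db_sync(package=federation)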
diff --git a/keystone/common/systemd.py b/keystone/common/systemd.py
index 52d7aff90..807b24184 100644
--- a/keystone/common/systemd.py
+++ b/keystone/common/systemd.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/common/utils.py b/keystone/common/utils.py
index 826e47992..a36e45303 100644
--- a/keystone/common/utils.py
+++ b/keystone/common/utils.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
diff --git a/keystone/common/wsgi.py b/keystone/common/wsgi.py
index 8ebf7aca1..1369bb521 100644
--- a/keystone/common/wsgi.py
+++ b/keystone/common/wsgi.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
@@ -102,16 +100,15 @@ def validate_token_bind(context, token_ref):
raise exception.Unauthorized()
-class Request(webob.Request):
- def best_match_language(self):
- """Determines the best available locale from the Accept-Language
- HTTP header passed in the request.
- """
+def best_match_language(req):
+ """Determines the best available locale from the Accept-Language
+ HTTP header passed in the request.
+ """
- if not self.accept_language:
- return None
- return self.accept_language.best_match(
- gettextutils.get_available_languages('keystone'))
+ if not req.accept_language:
+ return None
+ return req.accept_language.best_match(
+ gettextutils.get_available_languages('keystone'))
class BaseApplication(object):
@@ -145,7 +142,7 @@ class BaseApplication(object):
def __call__(self, environ, start_response):
r"""Subclasses will probably want to implement __call__ like this:
- @webob.dec.wsgify(RequestClass=Request)
+ @webob.dec.wsgify()
def __call__(self, req):
# Any of the following objects work as responses:
@@ -181,7 +178,7 @@ class BaseApplication(object):
@dependency.requires('assignment_api', 'policy_api', 'token_api')
class Application(BaseApplication):
- @webob.dec.wsgify(RequestClass=Request)
+ @webob.dec.wsgify()
def __call__(self, req):
arg_dict = req.environ['wsgiorg.routing_args'][1]
action = arg_dict.pop('action')
@@ -216,18 +213,18 @@ class Application(BaseApplication):
LOG.warning(
_('Authorization failed. %(exception)s from %(remote_addr)s'),
{'exception': e, 'remote_addr': req.environ['REMOTE_ADDR']})
- return render_exception(e, user_locale=req.best_match_language())
+ return render_exception(e, user_locale=best_match_language(req))
except exception.Error as e:
LOG.warning(e)
- return render_exception(e, user_locale=req.best_match_language())
+ return render_exception(e, user_locale=best_match_language(req))
except TypeError as e:
LOG.exception(e)
return render_exception(exception.ValidationError(e),
- user_locale=req.best_match_language())
+ user_locale=best_match_language(req))
except Exception as e:
LOG.exception(e)
return render_exception(exception.UnexpectedError(exception=e),
- user_locale=req.best_match_language())
+ user_locale=best_match_language(req))
if result is None:
return render_response(status=(204, 'No Content'))
@@ -362,7 +359,7 @@ class Middleware(Application):
"""Do whatever you'd like to the response, based on the request."""
return response
- @webob.dec.wsgify(RequestClass=Request)
+ @webob.dec.wsgify()
def __call__(self, request):
try:
response = self.process_request(request)
@@ -373,15 +370,15 @@ class Middleware(Application):
except exception.Error as e:
LOG.warning(e)
return render_exception(e,
- user_locale=request.best_match_language())
+ user_locale=best_match_language(request))
except TypeError as e:
LOG.exception(e)
return render_exception(exception.ValidationError(e),
- user_locale=request.best_match_language())
+ user_locale=best_match_language(request))
except Exception as e:
LOG.exception(e)
return render_exception(exception.UnexpectedError(exception=e),
- user_locale=request.best_match_language())
+ user_locale=best_match_language(request))
class Debug(Middleware):
@@ -392,7 +389,7 @@ class Debug(Middleware):
"""
- @webob.dec.wsgify(RequestClass=Request)
+ @webob.dec.wsgify()
def __call__(self, req):
if not hasattr(LOG, 'isEnabledFor') or LOG.isEnabledFor(LOG.debug):
LOG.debug('%s %s %s', ('*' * 20), 'REQUEST ENVIRON', ('*' * 20))
@@ -456,7 +453,7 @@ class Router(object):
self._router = routes.middleware.RoutesMiddleware(self._dispatch,
self.map)
- @webob.dec.wsgify(RequestClass=Request)
+ @webob.dec.wsgify()
def __call__(self, req):
"""Route the incoming request to a controller based on self.map.
@@ -466,7 +463,7 @@ class Router(object):
return self._router
@staticmethod
- @webob.dec.wsgify(RequestClass=Request)
+ @webob.dec.wsgify()
def _dispatch(req):
"""Dispatch the request to the appropriate controller.
@@ -479,7 +476,7 @@ class Router(object):
if not match:
return render_exception(
exception.NotFound(_('The resource could not be found.')),
- user_locale=req.best_match_language())
+ user_locale=best_match_language(req))
app = match['controller']
return app
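Since best_match_language is now a module-level function rather than a method
on a Request subclass, it works against any plain webob request. A minimal
sketch, assuming 'en' is among the translations reported by
gettextutils.get_available_languages('keystone')::

    import webob

    from keystone.common import wsgi

    req = webob.Request.blank(
        '/', headers={'Accept-Language': 'pt-BR, en;q=0.5'})
    locale = wsgi.best_match_language(req)   # e.g. 'en'

    req = webob.Request.blank('/')           # no Accept-Language header
    wsgi.best_match_language(req)            # None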
diff --git a/keystone/config.py b/keystone/config.py
index 367c8eddd..8236afd45 100644
--- a/keystone/config.py
+++ b/keystone/config.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -28,6 +26,31 @@ setup_authentication = config.setup_authentication
configure = config.configure
+def set_default_for_default_log_levels():
+ """Set the default for the default_log_levels option for keystone.
+
+    Keystone uses some packages that other OpenStack services don't use
+    but that do logging. This sets the default_log_levels default for
+    those packages.
+
+ This function needs to be called before CONF().
+
+ """
+
+ extra_log_level_defaults = [
+ 'dogpile=INFO',
+ 'routes=INFO',
+ ]
+
+ def find_default_log_levels_opt():
+ for opt in log.log_opts:
+ if opt.dest == 'default_log_levels':
+ return opt
+
+ opt = find_default_log_levels_opt()
+ opt.default.extend(extra_log_level_defaults)
+
+
def setup_logging():
"""Sets up logging for the keystone package."""
log.setup('keystone')
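The ordering constraint in the docstring matters: the extended default has to
be in place before the config object parses anything. A minimal sketch of a
plausible startup sequence (exact invocation varies by entry point; the
project kwarg is illustrative)::

    from keystone import config

    config.configure()                           # register keystone options
    config.set_default_for_default_log_levels()  # must run before CONF()
    config.CONF(project='keystone')              # parse config files / CLI
    config.setup_logging()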
diff --git a/keystone/contrib/access/__init__.py b/keystone/contrib/access/__init__.py
index 28e6c6510..afc7d422d 100644
--- a/keystone/contrib/access/__init__.py
+++ b/keystone/contrib/access/__init__.py
@@ -1,4 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
# flake8: noqa
# Copyright 2013 OpenStack Foundation
diff --git a/keystone/contrib/access/core.py b/keystone/contrib/access/core.py
index b0c6f7c0a..ff06bd504 100644
--- a/keystone/contrib/access/core.py
+++ b/keystone/contrib/access/core.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/contrib/admin_crud/__init__.py b/keystone/contrib/admin_crud/__init__.py
index 9511edc99..8b6af0e10 100644
--- a/keystone/contrib/admin_crud/__init__.py
+++ b/keystone/contrib/admin_crud/__init__.py
@@ -1,4 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
# flake8: noqa
# Copyright 2012 OpenStack Foundation
diff --git a/keystone/contrib/admin_crud/core.py b/keystone/contrib/admin_crud/core.py
index d5bc76528..342da5487 100644
--- a/keystone/contrib/admin_crud/core.py
+++ b/keystone/contrib/admin_crud/core.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/contrib/ec2/__init__.py b/keystone/contrib/ec2/__init__.py
index 18492c673..e7026fbb5 100644
--- a/keystone/contrib/ec2/__init__.py
+++ b/keystone/contrib/ec2/__init__.py
@@ -1,4 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
# flake8: noqa
# Copyright 2012 OpenStack Foundation
diff --git a/keystone/contrib/ec2/controllers.py b/keystone/contrib/ec2/controllers.py
index 44b1549ca..388ff860e 100644
--- a/keystone/contrib/ec2/controllers.py
+++ b/keystone/contrib/ec2/controllers.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -128,7 +126,7 @@ class Ec2Controller(controller.V2Controller):
# NOTE(morganfainberg): Make sure the data is in correct form since it
# might be consumed external to Keystone and this is a v2.0 controller.
# The token provider doesn't actually expect either v2 or v3 user data.
- user_ref = self.identity_api.v3_to_v2_user(user_ref)
+ user_ref = self.v3_to_v2_user(user_ref)
auth_token_data = dict(user=user_ref,
tenant=tenant_ref,
metadata=metadata_ref,
@@ -180,7 +178,7 @@ class Ec2Controller(controller.V2Controller):
user_id=user_id)
return {'credentials':
[self._convert_v3_to_ec2_credential(credential)
- for credential in credential_refs]}
+ for credential in credential_refs]}
def get_credential(self, context, user_id, credential_id):
"""Retrieve a user's access/secret pair by the access key.
diff --git a/keystone/contrib/ec2/core.py b/keystone/contrib/ec2/core.py
index dffc8e2b3..9f1f85f02 100644
--- a/keystone/contrib/ec2/core.py
+++ b/keystone/contrib/ec2/core.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/contrib/ec2/routers.py b/keystone/contrib/ec2/routers.py
index 562bda589..e4f8e9552 100644
--- a/keystone/contrib/ec2/routers.py
+++ b/keystone/contrib/ec2/routers.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/contrib/endpoint_filter/__init__.py b/keystone/contrib/endpoint_filter/__init__.py
index ce74bfdf3..43f4fb60b 100644
--- a/keystone/contrib/endpoint_filter/__init__.py
+++ b/keystone/contrib/endpoint_filter/__init__.py
@@ -1,4 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
# flake8: noqa
# Copyright 2013 OpenStack Foundation
diff --git a/keystone/contrib/endpoint_filter/backends/catalog_sql.py b/keystone/contrib/endpoint_filter/backends/catalog_sql.py
index 698f03efc..1f789a8c5 100644
--- a/keystone/contrib/endpoint_filter/backends/catalog_sql.py
+++ b/keystone/contrib/endpoint_filter/backends/catalog_sql.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/contrib/endpoint_filter/backends/sql.py b/keystone/contrib/endpoint_filter/backends/sql.py
index d0d02f1c9..14c5c528d 100644
--- a/keystone/contrib/endpoint_filter/backends/sql.py
+++ b/keystone/contrib/endpoint_filter/backends/sql.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -15,8 +13,10 @@
# under the License.
from keystone.common import sql
-from keystone.common.sql import migration
+from keystone.common.sql import migration_helpers
+from keystone.contrib import endpoint_filter
from keystone import exception
+from keystone.openstack.common.db.sqlalchemy import migration
from keystone.openstack.common.db.sqlalchemy import session as db_session
@@ -32,11 +32,12 @@ class ProjectEndpoint(sql.ModelBase, sql.DictBase):
nullable=False)
-class EndpointFilter(sql.Base):
+class EndpointFilter(object):
# Internal interface to manage the database
def db_sync(self, version=None):
- migration.db_sync(version=version)
+ abs_path = migration_helpers.find_migrate_repo(endpoint_filter)
+ migration.db_sync(abs_path, version=version)
@sql.handle_conflicts(conflict_type='project_endpoint')
def add_endpoint_to_project(self, endpoint_id, project_id):
diff --git a/keystone/contrib/endpoint_filter/controllers.py b/keystone/contrib/endpoint_filter/controllers.py
index 474b1f9e4..3168b9925 100644
--- a/keystone/contrib/endpoint_filter/controllers.py
+++ b/keystone/contrib/endpoint_filter/controllers.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/contrib/endpoint_filter/core.py b/keystone/contrib/endpoint_filter/core.py
index b94225ffd..8b95d0367 100644
--- a/keystone/contrib/endpoint_filter/core.py
+++ b/keystone/contrib/endpoint_filter/core.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -30,12 +28,12 @@ CONF = config.CONF
LOG = log.getLogger(__name__)
extension_data = {
- 'name': 'Openstack Keystone Endpoint Filter API',
+ 'name': 'OpenStack Keystone Endpoint Filter API',
'namespace': 'http://docs.openstack.org/identity/api/ext/'
'OS-EP-FILTER/v1.0',
'alias': 'OS-EP-FILTER',
'updated': '2013-07-23T12:00:0-00:00',
- 'description': 'Openstack Keystone Endpoint Filter API.',
+ 'description': 'OpenStack Keystone Endpoint Filter API.',
'links': [
{
'rel': 'describedby',
diff --git a/keystone/contrib/endpoint_filter/migrate_repo/versions/001_add_endpoint_filtering_table.py b/keystone/contrib/endpoint_filter/migrate_repo/versions/001_add_endpoint_filtering_table.py
index d3bf07516..090e7f471 100644
--- a/keystone/contrib/endpoint_filter/migrate_repo/versions/001_add_endpoint_filtering_table.py
+++ b/keystone/contrib/endpoint_filter/migrate_repo/versions/001_add_endpoint_filtering_table.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/contrib/endpoint_filter/routers.py b/keystone/contrib/endpoint_filter/routers.py
index 8f7d48227..91f09bd05 100644
--- a/keystone/contrib/endpoint_filter/routers.py
+++ b/keystone/contrib/endpoint_filter/routers.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/contrib/example/controllers.py b/keystone/contrib/example/controllers.py
index 204f88429..95b3e82f4 100644
--- a/keystone/contrib/example/controllers.py
+++ b/keystone/contrib/example/controllers.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/contrib/example/core.py b/keystone/contrib/example/core.py
index f66af2ba2..86d1e2644 100644
--- a/keystone/contrib/example/core.py
+++ b/keystone/contrib/example/core.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/contrib/example/migrate_repo/versions/001_example_table.py b/keystone/contrib/example/migrate_repo/versions/001_example_table.py
index 7f7de147a..10b7ccc7e 100644
--- a/keystone/contrib/example/migrate_repo/versions/001_example_table.py
+++ b/keystone/contrib/example/migrate_repo/versions/001_example_table.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/contrib/example/routers.py b/keystone/contrib/example/routers.py
index d7f502bc2..23ba4c2ca 100644
--- a/keystone/contrib/example/routers.py
+++ b/keystone/contrib/example/routers.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/contrib/federation/__init__.py b/keystone/contrib/federation/__init__.py
index c12c04467..35bf9f312 100644
--- a/keystone/contrib/federation/__init__.py
+++ b/keystone/contrib/federation/__init__.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/contrib/federation/backends/sql.py b/keystone/contrib/federation/backends/sql.py
index f77da2d9a..b49181e74 100644
--- a/keystone/contrib/federation/backends/sql.py
+++ b/keystone/contrib/federation/backends/sql.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -15,9 +13,11 @@
# under the License.
from keystone.common import sql
-from keystone.common.sql import migration
+from keystone.common.sql import migration_helpers
+from keystone.contrib import federation
from keystone.contrib.federation import core
from keystone import exception
+from keystone.openstack.common.db.sqlalchemy import migration
from keystone.openstack.common.db.sqlalchemy import session as db_session
from keystone.openstack.common import jsonutils
@@ -88,10 +88,11 @@ class MappingModel(sql.ModelBase, sql.DictBase):
return d
-class Federation(sql.Base, core.Driver):
+class Federation(core.Driver):
def db_sync(self):
- migration.db_sync()
+ abs_path = migration_helpers.find_migrate_repo(federation)
+ migration.db_sync(abs_path)
# Identity Provider CRUD
@sql.handle_conflicts(conflict_type='identity_provider')
@@ -152,12 +153,6 @@ class Federation(sql.Base, core.Driver):
'idp_id': idp_id}
raise exception.FederatedProtocolNotFound(**kwargs)
- def _store_protocol(self, session, protocol_ref):
- try:
- session.add(protocol_ref)
- except sql.IntegrityError:
- raise exception.ValidationError()
-
@sql.handle_conflicts(conflict_type='federation_protocol')
def create_protocol(self, idp_id, protocol_id, protocol):
session = db_session.get_session()
diff --git a/keystone/contrib/federation/controllers.py b/keystone/contrib/federation/controllers.py
index 83c675827..554618ed4 100644
--- a/keystone/contrib/federation/controllers.py
+++ b/keystone/contrib/federation/controllers.py
@@ -1,6 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
diff --git a/keystone/contrib/federation/core.py b/keystone/contrib/federation/core.py
index 69e0b764b..4c8df4e61 100644
--- a/keystone/contrib/federation/core.py
+++ b/keystone/contrib/federation/core.py
@@ -1,6 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@@ -36,13 +33,11 @@ EXTENSION_DATA = {
'alias': 'OS-FEDERATION',
'updated': '2013-12-17T12:00:0-00:00',
'description': 'OpenStack Identity Providers Mechanism.',
- 'links': [
- {
- 'rel': 'describedby',
- 'type': 'text/html',
- 'href': 'https://github.com/openstack/identity-api'
- }
- ]}
+ 'links': [{
+ 'rel': 'describedby',
+ 'type': 'text/html',
+ 'href': 'https://github.com/openstack/identity-api'
+ }]}
extension.register_admin_extension(EXTENSION_DATA['alias'], EXTENSION_DATA)
extension.register_public_extension(EXTENSION_DATA['alias'], EXTENSION_DATA)
diff --git a/keystone/contrib/federation/migrate_repo/versions/001_add_identity_provider_table.py b/keystone/contrib/federation/migrate_repo/versions/001_add_identity_provider_table.py
index 4eac8bb88..1a522c206 100644
--- a/keystone/contrib/federation/migrate_repo/versions/001_add_identity_provider_table.py
+++ b/keystone/contrib/federation/migrate_repo/versions/001_add_identity_provider_table.py
@@ -1,6 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
diff --git a/keystone/contrib/federation/migrate_repo/versions/002_add_mapping_tables.py b/keystone/contrib/federation/migrate_repo/versions/002_add_mapping_tables.py
index 4e990a620..17bb8ded5 100644
--- a/keystone/contrib/federation/migrate_repo/versions/002_add_mapping_tables.py
+++ b/keystone/contrib/federation/migrate_repo/versions/002_add_mapping_tables.py
@@ -1,6 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
diff --git a/keystone/contrib/federation/routers.py b/keystone/contrib/federation/routers.py
index 39331627a..c38735707 100644
--- a/keystone/contrib/federation/routers.py
+++ b/keystone/contrib/federation/routers.py
@@ -1,6 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
diff --git a/keystone/contrib/federation/utils.py b/keystone/contrib/federation/utils.py
index f134da285..8c0753c87 100644
--- a/keystone/contrib/federation/utils.py
+++ b/keystone/contrib/federation/utils.py
@@ -1,6 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@@ -15,9 +12,16 @@
"""Utilities for Federation Extension."""
+import re
+
import jsonschema
+import six
from keystone import exception
+from keystone.openstack.common import log
+
+
+LOG = log.getLogger(__name__)
MAPPING_SCHEMA = {
@@ -107,3 +111,272 @@ def validate_mapping_structure(ref):
if messages:
raise exception.ValidationError(messages)
+
+
+class RuleProcessor(object):
+ """A class to process assertions and mapping rules."""
+
+ class _EvalType(object):
+ """Mapping rule evaluation types."""
+ ANY_ONE_OF = 'any_one_of'
+ NOT_ANY_OF = 'not_any_of'
+
+ def __init__(self, rules):
+ """Initialize RuleProcessor.
+
+ Example rules can be found at:
+ :class:`keystone.tests.mapping_fixtures`
+
+ :param rules: rules from a mapping
+ :type rules: dict
+
+ """
+
+ self.rules = rules
+
+ def process(self, assertion_data):
+ """Transform assertion to a dictionary of user name and group ids
+ based on mapping rules.
+
+ This function will iterate through the mapping rules to find
+ assertions that are valid.
+
+ :param assertion_data: an assertion containing values from an IdP
+ :type assertion_data: dict
+
+ Example assertion_data::
+
+ {
+ 'Email': 'testacct@example.com',
+ 'UserName': 'testacct',
+ 'FirstName': 'Test',
+ 'LastName': 'Account',
+ 'orgPersonType': 'Tester'
+ }
+
+ :returns: dictionary with user and group_ids
+
+ The expected return structure is::
+
+ {
+ 'name': 'foobar',
+ 'group_ids': ['abc123', 'def456']
+ }
+
+ """
+
+ # Assertions will come in as string key-value pairs, and will use a
+ # semi-colon to indicate multiple values, i.e. groups.
+ # This will create a new dictionary where the values are arrays, and
+ # any multiple values are stored in the arrays.
+ assertion = dict((n, v.split(';')) for n, v in assertion_data.items())
+ identity_values = []
+
+ for rule in self.rules:
+ direct_maps = self._verify_all_requirements(rule['remote'],
+ assertion)
+
+ # If the compare comes back as None, then the rule did not apply
+ # to the assertion data, go on to the next rule
+ if direct_maps is None:
+ continue
+
+ # If there are no direct mappings, then add the local mapping
+ # directly to the array of saved values. However, if there is
+ # a direct mapping, then perform variable replacement.
+ if not direct_maps:
+ identity_values += rule['local']
+ else:
+ for local in rule['local']:
+ new_local = self._update_local_mapping(local, direct_maps)
+ identity_values.append(new_local)
+
+ return self._transform(identity_values)
+
+ def _transform(self, identity_values):
+ """Transform local mappings, to an easier to understand format.
+
+ Transform the incoming array to generate the return value for
+ the process function. Generating content for Keystone tokens will
+ be easier if some pre-processing is done at this level.
+
+ :param identity_values: local mapping from valid evaluations
+ :type identity_values: array of dict
+
+ Example identity_values::
+
+ [{'group': {'id': '0cd5e9'}, 'user': {'email': 'bob@example.com'}}]
+
+ :returns: dictionary with user name and group_ids.
+
+ """
+
+ # initialize the group_ids as a set to eliminate duplicates
+ user_name = None
+ group_ids = set()
+
+ for identity_value in identity_values:
+ if 'user' in identity_value:
+ # if a mapping outputs more than one user name, log it
+ if user_name is not None:
+ LOG.warning(_('Ignoring user name %s'),
+ identity_value['user']['name'])
+ else:
+ user_name = identity_value['user']['name']
+ if 'group' in identity_value:
+ group_ids.add(identity_value['group']['id'])
+
+ return {'name': user_name, 'group_ids': list(group_ids)}
+
+ def _update_local_mapping(self, local, direct_maps):
+ """Replace any {0}, {1} ... values with data from the assertion.
+
+ :param local: local mapping reference that needs to be updated
+ :type local: dict
+ :param direct_maps: list of identity values, used to update local
+ :type direct_maps: list
+
+ Example local::
+
+ {'user': {'name': '{0} {1}', 'email': '{2}'}}
+
+ Example direct_maps::
+
+ ['Bob', 'Thompson', 'bob@example.com']
+
+ :returns: new local mapping reference with replaced values.
+
+ The expected return structure is::
+
+            {'user': {'name': 'Bob Thompson', 'email': 'bob@example.com'}}
+
+ """
+
+ new = {}
+ for k, v in six.iteritems(local):
+ if isinstance(v, dict):
+ new_value = self._update_local_mapping(v, direct_maps)
+ else:
+ new_value = v.format(*direct_maps)
+ new[k] = new_value
+ return new
+
+ def _verify_all_requirements(self, requirements, assertion):
+ """Go through the remote requirements of a rule, and compare against
+ the assertion.
+
+ If a value of ``None`` is returned, the rule with this assertion
+ doesn't apply.
+ If an array of zero length is returned, then there are no direct
+ mappings to be performed, but the rule is valid.
+        Otherwise, it returns the values, in order, to be directly mapped;
+        in that case the rule is also valid.
+
+ :param requirements: list of remote requirements from rules
+ :type requirements: list
+
+ Example requirements::
+
+ [
+ {
+ "type": "UserName"
+ },
+ {
+ "type": "orgPersonType",
+ "any_one_of": [
+ "Customer"
+ ]
+ }
+ ]
+
+ :param assertion: dict of attributes from an IdP
+ :type assertion: dict
+
+ Example assertion::
+
+ {
+ 'UserName': ['testacct'],
+ 'LastName': ['Account'],
+ 'orgPersonType': ['Tester'],
+ 'Email': ['testacct@example.com'],
+ 'FirstName': ['Test']
+ }
+
+ :returns: list of direct mappings or None.
+
+ """
+
+ direct_maps = []
+
+ for requirement in requirements:
+ requirement_type = requirement['type']
+ regex = requirement.get('regex', False)
+
+ any_one_values = requirement.get(self._EvalType.ANY_ONE_OF)
+ if any_one_values is not None:
+ if self._evaluate_requirement(any_one_values,
+ requirement_type,
+ self._EvalType.ANY_ONE_OF,
+ regex,
+ assertion):
+ continue
+ else:
+ return None
+
+ not_any_values = requirement.get(self._EvalType.NOT_ANY_OF)
+ if not_any_values is not None:
+ if self._evaluate_requirement(not_any_values,
+ requirement_type,
+ self._EvalType.NOT_ANY_OF,
+ regex,
+ assertion):
+ continue
+ else:
+ return None
+
+            # If neither 'any_one_of' nor 'not_any_of' is present, the values
+            # are within 'type'. Attempt to find that 'type' in the assertion.
+ direct_map_values = assertion.get(requirement_type)
+ if direct_map_values:
+ direct_maps += direct_map_values
+
+ return direct_maps
+
+ def _evaluate_requirement(self, values, requirement_type,
+ eval_type, regex, assertion):
+ """Evaluate the incoming requirement and assertion.
+
+ If the requirement type does not exist in the assertion data, then
+ return False. If regex is specified, then compare the values and
+ assertion values. Otherwise, grab the intersection of the values
+ and use that to compare against the evaluation type.
+
+ :param values: list of allowed values, defined in the requirement
+ :type values: list
+ :param requirement_type: key to look for in the assertion
+ :type requirement_type: string
+ :param eval_type: determine how to evaluate requirements
+ :type eval_type: string
+ :param regex: perform evaluation with regex
+ :type regex: boolean
+ :param assertion: dict of attributes from the IdP
+ :type assertion: dict
+
+ :returns: boolean, whether requirement is valid or not.
+
+ """
+
+ assertion_values = assertion.get(requirement_type)
+ if not assertion_values:
+ return False
+
+ if regex:
+ return re.search(values[0], assertion_values[0])
+
+ any_match = bool(set(values).intersection(set(assertion_values)))
+ if any_match and eval_type == self._EvalType.ANY_ONE_OF:
+ return True
+ if not any_match and eval_type == self._EvalType.NOT_ANY_OF:
+ return True
+
+ return False
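To make the rule flow above concrete, here is a worked example assembled from
the docstrings in this file: one rule whose remote part direct-maps UserName
and gates on orgPersonType, and whose local part consumes the single direct
mapping::

    rules = [{
        'remote': [
            {'type': 'UserName'},
            {'type': 'orgPersonType', 'any_one_of': ['Tester']},
        ],
        'local': [
            {'user': {'name': '{0}'}},
            {'group': {'id': '0cd5e9'}},
        ],
    }]

    assertion_data = {'UserName': 'testacct', 'orgPersonType': 'Tester'}

    RuleProcessor(rules).process(assertion_data)
    # => {'name': 'testacct', 'group_ids': ['0cd5e9']}

The UserName requirement carries no evaluation clause, so its assertion value
becomes direct mapping {0}; the any_one_of clause matches 'Tester' and is
consumed without producing a mapping of its own.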
diff --git a/keystone/contrib/kds/__init__.py b/keystone/contrib/kds/__init__.py
index 068074764..e69de29bb 100644
--- a/keystone/contrib/kds/__init__.py
+++ b/keystone/contrib/kds/__init__.py
@@ -1,11 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
diff --git a/keystone/contrib/kds/api/__init__.py b/keystone/contrib/kds/api/__init__.py
index 8c6d5f866..e69de29bb 100644
--- a/keystone/contrib/kds/api/__init__.py
+++ b/keystone/contrib/kds/api/__init__.py
@@ -1,13 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
diff --git a/keystone/contrib/kds/api/app.py b/keystone/contrib/kds/api/app.py
index 8373e2ab6..582d88637 100644
--- a/keystone/contrib/kds/api/app.py
+++ b/keystone/contrib/kds/api/app.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
diff --git a/keystone/contrib/kds/api/config.py b/keystone/contrib/kds/api/config.py
index d376b58e7..6d7a2e66b 100644
--- a/keystone/contrib/kds/api/config.py
+++ b/keystone/contrib/kds/api/config.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
diff --git a/keystone/contrib/kds/api/hooks.py b/keystone/contrib/kds/api/hooks.py
index 3c1578c6c..105f39567 100644
--- a/keystone/contrib/kds/api/hooks.py
+++ b/keystone/contrib/kds/api/hooks.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
diff --git a/keystone/contrib/kds/api/root.py b/keystone/contrib/kds/api/root.py
index 5a6e79f61..408e3d0f8 100644
--- a/keystone/contrib/kds/api/root.py
+++ b/keystone/contrib/kds/api/root.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@@ -14,9 +12,18 @@
import pecan
+from keystone.contrib.kds.api.v1 import controllers
+
class RootController(object):
+ v1 = controllers.Controller()
+
@pecan.expose('json')
def index(self):
- return {'hello': 'world'}
+ pecan.response.status = 300
+ return {
+ 'versions': [
+ self.v1.version_info(),
+ ]
+ }
diff --git a/keystone/contrib/kds/api/v1/__init__.py b/keystone/contrib/kds/api/v1/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/keystone/contrib/kds/api/v1/__init__.py
diff --git a/keystone/contrib/kds/api/v1/controllers/__init__.py b/keystone/contrib/kds/api/v1/controllers/__init__.py
new file mode 100644
index 000000000..48cdc0f94
--- /dev/null
+++ b/keystone/contrib/kds/api/v1/controllers/__init__.py
@@ -0,0 +1,19 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from keystone.contrib.kds.api.v1.controllers import controller
+
+
+Controller = controller.Controller
+
+__all__ = ['Controller']
diff --git a/keystone/contrib/kds/api/v1/controllers/controller.py b/keystone/contrib/kds/api/v1/controllers/controller.py
new file mode 100644
index 000000000..cee4c47dd
--- /dev/null
+++ b/keystone/contrib/kds/api/v1/controllers/controller.py
@@ -0,0 +1,29 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import pecan
+
+
+class Controller(object):
+ """Version 1 API controller root."""
+
+ @staticmethod
+ def version_info():
+ return {'status': 'stable',
+ 'id': 'v1.0',
+ 'links': [{
+ 'href': '%s/v1/' % pecan.request.host_url,
+ 'rel': 'self'}]}
+
+ @pecan.expose('json')
+ def index(self):
+ return {'version': self.version_info()}
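Together with the RootController change above, version discovery now follows
the common OpenStack pattern: GET / answers 300 Multiple Choices listing known
versions, and GET /v1/ describes itself. An illustrative exchange, assuming a
server reachable at the default KDS port (the href depends on
pecan.request.host_url at runtime)::

    GET /    ->  300 Multiple Choices
    {"versions": [{"status": "stable", "id": "v1.0",
                   "links": [{"href": "http://localhost:9109/v1/",
                              "rel": "self"}]}]}

    GET /v1/ ->  200 OK
    {"version": {"status": "stable", "id": "v1.0",
                 "links": [{"href": "http://localhost:9109/v1/",
                            "rel": "self"}]}}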
diff --git a/keystone/contrib/kds/cli/__init__.py b/keystone/contrib/kds/cli/__init__.py
index 8c6d5f866..e69de29bb 100644
--- a/keystone/contrib/kds/cli/__init__.py
+++ b/keystone/contrib/kds/cli/__init__.py
@@ -1,13 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
diff --git a/keystone/contrib/kds/cli/api.py b/keystone/contrib/kds/cli/api.py
index 8f111b654..414ee8808 100644
--- a/keystone/contrib/kds/cli/api.py
+++ b/keystone/contrib/kds/cli/api.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
diff --git a/keystone/contrib/kds/cli/manage.py b/keystone/contrib/kds/cli/manage.py
index c13de289b..64a36a4ce 100644
--- a/keystone/contrib/kds/cli/manage.py
+++ b/keystone/contrib/kds/cli/manage.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
diff --git a/keystone/contrib/kds/common/__init__.py b/keystone/contrib/kds/common/__init__.py
index 8c6d5f866..e69de29bb 100644
--- a/keystone/contrib/kds/common/__init__.py
+++ b/keystone/contrib/kds/common/__init__.py
@@ -1,13 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
diff --git a/keystone/contrib/kds/common/exception.py b/keystone/contrib/kds/common/exception.py
index 0126e3222..e255a7a2c 100644
--- a/keystone/contrib/kds/common/exception.py
+++ b/keystone/contrib/kds/common/exception.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
diff --git a/keystone/contrib/kds/common/service.py b/keystone/contrib/kds/common/service.py
index f9fa6ea2d..f4b264293 100644
--- a/keystone/contrib/kds/common/service.py
+++ b/keystone/contrib/kds/common/service.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@@ -18,16 +16,22 @@ from keystone.openstack.common import log
CONF = cfg.CONF
-API_SERVICE_OPTS = [
- cfg.StrOpt('bind_ip',
- default='0.0.0.0',
- help='IP for the server to bind to'),
- cfg.IntOpt('port',
- default=9109,
- help='The port for the server'),
-]
+FILE_OPTIONS = {
+ None: [
+ cfg.StrOpt('bind_ip',
+ default='0.0.0.0',
+ help='IP for the server to bind to'),
+ cfg.IntOpt('port',
+ default=9109,
+ help='The port for the server')]}
+
+
+def configure(conf=None):
+ if conf is None:
+ conf = CONF
-CONF.register_opts(API_SERVICE_OPTS)
+ for group in FILE_OPTIONS:
+ conf.register_opts(FILE_OPTIONS[group], group=group)
def parse_args(args, default_config_files=None):
@@ -43,3 +47,6 @@ def prepare_service(argv=[]):
])
parse_args(argv)
log.setup('kds')
+
+
+configure()
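Because of the module-level configure() call above, the options in
FILE_OPTIONS are registered as soon as the module is imported; the None group
key places them in [DEFAULT]. A minimal sketch of the resulting defaults,
assuming no config file overrides them::

    from keystone.contrib.kds.common import service

    service.CONF.bind_ip   # '0.0.0.0'
    service.CONF.port      # 9109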
diff --git a/keystone/contrib/kds/common/utils.py b/keystone/contrib/kds/common/utils.py
index 77e364970..897b439d8 100644
--- a/keystone/contrib/kds/common/utils.py
+++ b/keystone/contrib/kds/common/utils.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
diff --git a/keystone/contrib/kds/db/api.py b/keystone/contrib/kds/db/api.py
index a6861b483..e85db0359 100644
--- a/keystone/contrib/kds/db/api.py
+++ b/keystone/contrib/kds/db/api.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
diff --git a/keystone/contrib/kds/db/connection.py b/keystone/contrib/kds/db/connection.py
index 1f959aab4..fbf2a8eb7 100644
--- a/keystone/contrib/kds/db/connection.py
+++ b/keystone/contrib/kds/db/connection.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@@ -34,10 +32,11 @@ class Connection(object):
:param DateTime expiration: When the key should expire
(None is never expire).
- :raises IntegrityError: If a key exists then new keys assigned to the
- name must have the same 'group' setting. If the
- value of group is changed an IntegrityError is
- raised.
+        :raises GroupStatusChanged: If a key exists, then new keys assigned to
+            the name must have the same 'group' setting. If the value of group
+            is changed, a
+ :class:`keystone.contrib.kds.common.exception.GroupStatusChanged`
+ is raised.
:returns int: The generation number of this key.
"""
@@ -55,6 +54,7 @@ class Connection(object):
:returns dict: A dictionary of the key information or None if not
found. Keys will contain:
+
- name: Unique name of the key.
- group: If this key is a group key or not.
- key: The key data.
@@ -62,4 +62,5 @@ class Connection(object):
- generation: The generation of this key.
- expiration: When the key expires (or None).
Expired keys can be returned.
+
"""
diff --git a/keystone/contrib/kds/db/kvs/api.py b/keystone/contrib/kds/db/kvs/api.py
index 2e267cdd1..4ee37074a 100644
--- a/keystone/contrib/kds/db/kvs/api.py
+++ b/keystone/contrib/kds/db/kvs/api.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
diff --git a/keystone/contrib/kds/db/migration.py b/keystone/contrib/kds/db/migration.py
index ea8ab225c..c114a6af4 100644
--- a/keystone/contrib/kds/db/migration.py
+++ b/keystone/contrib/kds/db/migration.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
diff --git a/keystone/contrib/kds/db/sqlalchemy/api.py b/keystone/contrib/kds/db/sqlalchemy/api.py
index 74049e631..cae129d72 100644
--- a/keystone/contrib/kds/db/sqlalchemy/api.py
+++ b/keystone/contrib/kds/db/sqlalchemy/api.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
diff --git a/keystone/contrib/kds/db/sqlalchemy/migrate_repo/manage.py b/keystone/contrib/kds/db/sqlalchemy/migrate_repo/manage.py
index a7e6e509c..5e411b6e7 100644
--- a/keystone/contrib/kds/db/sqlalchemy/migrate_repo/manage.py
+++ b/keystone/contrib/kds/db/sqlalchemy/migrate_repo/manage.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
diff --git a/keystone/contrib/kds/db/sqlalchemy/migrate_repo/versions/001_kds_table.py b/keystone/contrib/kds/db/sqlalchemy/migrate_repo/versions/001_kds_table.py
index 672522152..79565e352 100644
--- a/keystone/contrib/kds/db/sqlalchemy/migrate_repo/versions/001_kds_table.py
+++ b/keystone/contrib/kds/db/sqlalchemy/migrate_repo/versions/001_kds_table.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
diff --git a/keystone/contrib/kds/db/sqlalchemy/migration.py b/keystone/contrib/kds/db/sqlalchemy/migration.py
index 61c631033..99f8a5be8 100644
--- a/keystone/contrib/kds/db/sqlalchemy/migration.py
+++ b/keystone/contrib/kds/db/sqlalchemy/migration.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
diff --git a/keystone/contrib/kds/db/sqlalchemy/models.py b/keystone/contrib/kds/db/sqlalchemy/models.py
index 32dc476dc..39232dea1 100644
--- a/keystone/contrib/kds/db/sqlalchemy/models.py
+++ b/keystone/contrib/kds/db/sqlalchemy/models.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
diff --git a/keystone/contrib/oauth1/__init__.py b/keystone/contrib/oauth1/__init__.py
index fdb8dc4b7..1dd804b65 100644
--- a/keystone/contrib/oauth1/__init__.py
+++ b/keystone/contrib/oauth1/__init__.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/contrib/oauth1/backends/__init__.py b/keystone/contrib/oauth1/backends/__init__.py
index 3f393b269..e69de29bb 100644
--- a/keystone/contrib/oauth1/backends/__init__.py
+++ b/keystone/contrib/oauth1/backends/__init__.py
@@ -1,15 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2013 OpenStack Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
diff --git a/keystone/contrib/oauth1/backends/sql.py b/keystone/contrib/oauth1/backends/sql.py
index 429a94830..05194da3e 100644
--- a/keystone/contrib/oauth1/backends/sql.py
+++ b/keystone/contrib/oauth1/backends/sql.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -18,10 +16,14 @@ import datetime
import random
import uuid
+import six
+
from keystone.common import sql
-from keystone.common.sql import migration
+from keystone.common.sql import migration_helpers
+from keystone.contrib import oauth1
from keystone.contrib.oauth1 import core
from keystone import exception
+from keystone.openstack.common.db.sqlalchemy import migration
from keystone.openstack.common.db.sqlalchemy import session as db_session
from keystone.openstack.common import jsonutils
from keystone.openstack.common import timeutils
@@ -56,7 +58,7 @@ class RequestToken(sql.ModelBase, sql.DictBase):
return cls(**user_dict)
def to_dict(self):
- return dict(self.iteritems())
+ return dict(six.iteritems(self))
class AccessToken(sql.ModelBase, sql.DictBase):
@@ -79,12 +81,12 @@ class AccessToken(sql.ModelBase, sql.DictBase):
return cls(**user_dict)
def to_dict(self):
- return dict(self.iteritems())
+ return dict(six.iteritems(self))
-class OAuth1(sql.Base):
+class OAuth1(object):
def db_sync(self):
- migration.db_sync()
+ migration.db_sync(migration_helpers.find_migrate_repo(oauth1))
def _get_consumer(self, session, consumer_id):
consumer_ref = session.query(Consumer).get(consumer_id)
diff --git a/keystone/contrib/oauth1/controllers.py b/keystone/contrib/oauth1/controllers.py
index 85cfec8b3..f8dc96f91 100644
--- a/keystone/contrib/oauth1/controllers.py
+++ b/keystone/contrib/oauth1/controllers.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -45,8 +43,8 @@ class ConsumerCrudV3(controller.V3Controller):
def update_consumer(self, context, consumer_id, consumer):
self._require_matching_id(consumer_id, consumer)
ref = self._normalize_dict(consumer)
- self._validate_consumer_ref(consumer)
- ref = self.oauth_api.update_consumer(consumer_id, consumer)
+ self._validate_consumer_ref(ref)
+ ref = self.oauth_api.update_consumer(consumer_id, ref)
return ConsumerCrudV3.wrap_member(context, ref)
@controller.protected()
diff --git a/keystone/contrib/oauth1/core.py b/keystone/contrib/oauth1/core.py
index d184f28c3..f0386e03b 100644
--- a/keystone/contrib/oauth1/core.py
+++ b/keystone/contrib/oauth1/core.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -30,6 +28,7 @@ from keystone.common import extension
from keystone.common import manager
from keystone import config
from keystone import exception
+from keystone import notifications
RequestValidator = oauth1.RequestValidator
@@ -150,10 +149,40 @@ class Manager(manager.Manager):
dynamically calls the backend.
"""
+ _ACCESS_TOKEN = "OS-OAUTH1:access_token"
+ _REQUEST_TOKEN = "OS-OAUTH1:request_token"
+ _CONSUMER = "OS-OAUTH1:consumer"
def __init__(self):
super(Manager, self).__init__(CONF.oauth1.driver)
+ @notifications.created(_CONSUMER)
+ def create_consumer(self, consumer_ref):
+ return self.driver.create_consumer(consumer_ref)
+
+ @notifications.updated(_CONSUMER)
+ def update_consumer(self, consumer_id, consumer_ref):
+ return self.driver.update_consumer(consumer_id, consumer_ref)
+
+ @notifications.deleted(_CONSUMER)
+ def delete_consumer(self, consumer_id):
+ return self.driver.delete_consumer(consumer_id)
+
+ @notifications.created(_ACCESS_TOKEN)
+ def create_access_token(self, request_id, access_token_duration):
+ return self.driver.create_access_token(request_id,
+ access_token_duration)
+
+ @notifications.deleted(_ACCESS_TOKEN, resource_id_arg_index=2)
+ def delete_access_token(self, user_id, access_token_id):
+ return self.driver.delete_access_token(user_id, access_token_id)
+
+ @notifications.created(_REQUEST_TOKEN, resource_id_arg_index=2)
+ def create_request_token(self, consumer_id, requested_project,
+ request_token_duration):
+ return self.driver.create_request_token(
+ consumer_id, requested_project, request_token_duration)
+
@six.add_metaclass(abc.ABCMeta)
class Driver(object):
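The new Manager methods lean on the notification decorators reworked later in this diff (see keystone/notifications.py below): an event is emitted only after the wrapped call returns, and the resource ID is read from the positional argument selected by resource_id_arg_index. A condensed sketch of that mechanism, not the actual implementation:

    import functools

    def deleted(resource_type, resource_id_arg_index=1):
        # sketch of the ManagerNotificationWrapper semantics: notify only on
        # success, taking the resource ID from the chosen positional argument
        def decorator(f):
            @functools.wraps(f)
            def wrapper(*args, **kwargs):
                result = f(*args, **kwargs)
                # e.g. delete_access_token(self, user_id, access_token_id)
                # with resource_id_arg_index=2 notifies on the token ID
                resource_id = args[resource_id_arg_index]
                print('identity.%s.deleted' % resource_type, resource_id)
                return result
            return wrapper
        return decorator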
diff --git a/keystone/contrib/oauth1/migrate_repo/__init__.py b/keystone/contrib/oauth1/migrate_repo/__init__.py
index 3f393b269..e69de29bb 100644
--- a/keystone/contrib/oauth1/migrate_repo/__init__.py
+++ b/keystone/contrib/oauth1/migrate_repo/__init__.py
@@ -1,15 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2013 OpenStack Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
diff --git a/keystone/contrib/oauth1/migrate_repo/versions/001_add_oauth_tables.py b/keystone/contrib/oauth1/migrate_repo/versions/001_add_oauth_tables.py
index d3ed9033d..a4fbf155b 100644
--- a/keystone/contrib/oauth1/migrate_repo/versions/001_add_oauth_tables.py
+++ b/keystone/contrib/oauth1/migrate_repo/versions/001_add_oauth_tables.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/contrib/oauth1/migrate_repo/versions/002_fix_oauth_tables_fk.py b/keystone/contrib/oauth1/migrate_repo/versions/002_fix_oauth_tables_fk.py
index b621b493c..0504613ae 100644
--- a/keystone/contrib/oauth1/migrate_repo/versions/002_fix_oauth_tables_fk.py
+++ b/keystone/contrib/oauth1/migrate_repo/versions/002_fix_oauth_tables_fk.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/contrib/oauth1/migrate_repo/versions/003_consumer_description_nullalbe.py b/keystone/contrib/oauth1/migrate_repo/versions/003_consumer_description_nullalbe.py
index 3cfc54765..e1cf8843e 100644
--- a/keystone/contrib/oauth1/migrate_repo/versions/003_consumer_description_nullalbe.py
+++ b/keystone/contrib/oauth1/migrate_repo/versions/003_consumer_description_nullalbe.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/contrib/oauth1/migrate_repo/versions/004_request_token_roles_nullable.py b/keystone/contrib/oauth1/migrate_repo/versions/004_request_token_roles_nullable.py
index aec13b8d1..5e33c5ba9 100644
--- a/keystone/contrib/oauth1/migrate_repo/versions/004_request_token_roles_nullable.py
+++ b/keystone/contrib/oauth1/migrate_repo/versions/004_request_token_roles_nullable.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/contrib/oauth1/migrate_repo/versions/__init__.py b/keystone/contrib/oauth1/migrate_repo/versions/__init__.py
index 3f393b269..e69de29bb 100644
--- a/keystone/contrib/oauth1/migrate_repo/versions/__init__.py
+++ b/keystone/contrib/oauth1/migrate_repo/versions/__init__.py
@@ -1,15 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2013 OpenStack Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
diff --git a/keystone/contrib/oauth1/routers.py b/keystone/contrib/oauth1/routers.py
index 435088e4b..238a135b9 100644
--- a/keystone/contrib/oauth1/routers.py
+++ b/keystone/contrib/oauth1/routers.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/contrib/oauth1/validator.py b/keystone/contrib/oauth1/validator.py
index 8a98a5dde..b05d9a9f4 100644
--- a/keystone/contrib/oauth1/validator.py
+++ b/keystone/contrib/oauth1/validator.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/contrib/s3/__init__.py b/keystone/contrib/s3/__init__.py
index 518ca4a27..076ff82e9 100644
--- a/keystone/contrib/s3/__init__.py
+++ b/keystone/contrib/s3/__init__.py
@@ -1,4 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
# flake8: noqa
# Copyright 2012 OpenStack Foundation
diff --git a/keystone/contrib/s3/core.py b/keystone/contrib/s3/core.py
index dbbc5af97..57af196d7 100644
--- a/keystone/contrib/s3/core.py
+++ b/keystone/contrib/s3/core.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/contrib/simple_cert/__init__.py b/keystone/contrib/simple_cert/__init__.py
index 06eae5551..89650a803 100644
--- a/keystone/contrib/simple_cert/__init__.py
+++ b/keystone/contrib/simple_cert/__init__.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
diff --git a/keystone/contrib/simple_cert/controllers.py b/keystone/contrib/simple_cert/controllers.py
index 653bca1eb..97598da97 100644
--- a/keystone/contrib/simple_cert/controllers.py
+++ b/keystone/contrib/simple_cert/controllers.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
diff --git a/keystone/contrib/simple_cert/core.py b/keystone/contrib/simple_cert/core.py
index 185def462..531c6aae1 100644
--- a/keystone/contrib/simple_cert/core.py
+++ b/keystone/contrib/simple_cert/core.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
diff --git a/keystone/contrib/simple_cert/routers.py b/keystone/contrib/simple_cert/routers.py
index 9ecbb337b..7b9fc7248 100644
--- a/keystone/contrib/simple_cert/routers.py
+++ b/keystone/contrib/simple_cert/routers.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
diff --git a/keystone/contrib/stats/__init__.py b/keystone/contrib/stats/__init__.py
index db880bc42..3b72711e2 100644
--- a/keystone/contrib/stats/__init__.py
+++ b/keystone/contrib/stats/__init__.py
@@ -1,4 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
# flake8: noqa
# Copyright 2012 OpenStack Foundation
diff --git a/keystone/contrib/stats/backends/kvs.py b/keystone/contrib/stats/backends/kvs.py
index 1f347ffc8..0cc9fb554 100644
--- a/keystone/contrib/stats/backends/kvs.py
+++ b/keystone/contrib/stats/backends/kvs.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/contrib/stats/core.py b/keystone/contrib/stats/core.py
index c8cac9e87..36d6ab4b0 100644
--- a/keystone/contrib/stats/core.py
+++ b/keystone/contrib/stats/core.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -19,23 +17,20 @@ from keystone.common import manager
from keystone.common import wsgi
from keystone import config
from keystone import exception
-from keystone import identity
from keystone.openstack.common import log
from keystone.openstack.common import versionutils
-from keystone import policy
-from keystone import token
CONF = config.CONF
LOG = log.getLogger(__name__)
extension_data = {
- 'name': 'Openstack Keystone Stats API',
+ 'name': 'OpenStack Keystone Stats API',
'namespace': 'http://docs.openstack.org/identity/api/ext/'
'OS-STATS/v1.0',
'alias': 'OS-STATS',
'updated': '2013-07-07T12:00:0-00:00',
- 'description': 'Openstack Keystone Stats API.',
+ 'description': 'OpenStack Keystone Stats API.',
'links': [
{
'rel': 'describedby',
@@ -95,10 +90,7 @@ class StatsExtension(wsgi.ExtensionRouter):
class StatsController(wsgi.Application):
def __init__(self):
- self.identity_api = identity.Manager()
- self.policy_api = policy.Manager()
self.stats_api = Manager()
- self.token_api = token.Manager()
super(StatsController, self).__init__()
def get_stats(self, context):
diff --git a/keystone/contrib/user_crud/__init__.py b/keystone/contrib/user_crud/__init__.py
index a07019f1e..795c98c5c 100644
--- a/keystone/contrib/user_crud/__init__.py
+++ b/keystone/contrib/user_crud/__init__.py
@@ -1,4 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
# flake8: noqa
# Copyright 2012 Red Hat, Inc
diff --git a/keystone/contrib/user_crud/core.py b/keystone/contrib/user_crud/core.py
index 15be597fe..c550cf3e6 100644
--- a/keystone/contrib/user_crud/core.py
+++ b/keystone/contrib/user_crud/core.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -62,6 +60,7 @@ class UserController(identity.controllers.User):
try:
user_ref = self.identity_api.authenticate(
+ context,
user_id=user_id_from_token,
password=original_password)
if not user_ref.get('enabled', True):
diff --git a/keystone/controllers.py b/keystone/controllers.py
index a1bab30ff..cbdfbdac1 100644
--- a/keystone/controllers.py
+++ b/keystone/controllers.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/credential/__init__.py b/keystone/credential/__init__.py
index 621f7a13b..9e01cee1a 100644
--- a/keystone/credential/__init__.py
+++ b/keystone/credential/__init__.py
@@ -1,4 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
# flake8: noqa
# Copyright 2013 OpenStack Foundation
diff --git a/keystone/credential/backends/sql.py b/keystone/credential/backends/sql.py
index 7eb32ee3e..f4c396162 100644
--- a/keystone/credential/backends/sql.py
+++ b/keystone/credential/backends/sql.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -15,9 +13,10 @@
# under the License.
from keystone.common import sql
-from keystone.common.sql import migration
+from keystone.common.sql import migration_helpers
from keystone import credential
from keystone import exception
+from keystone.openstack.common.db.sqlalchemy import migration
from keystone.openstack.common.db.sqlalchemy import session as db_session
@@ -33,10 +32,11 @@ class CredentialModel(sql.ModelBase, sql.DictBase):
extra = sql.Column(sql.JsonBlob())
-class Credential(sql.Base, credential.Driver):
+class Credential(credential.Driver):
# Internal interface to manage the database
def db_sync(self, version=None):
- migration.db_sync(version=version)
+ migration.db_sync(
+ migration_helpers.find_migrate_repo(), version=version)
# credential crud
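With sql.Base dropped, each backend now hands an explicit migrate repository to the shared oslo helper instead of relying on a module-level default. The resulting call pattern, as used throughout this diff:

    from keystone.common.sql import migration_helpers
    from keystone.openstack.common.db.sqlalchemy import migration

    # core tables: the default repo under keystone/common/sql/migrate_repo
    migration.db_sync(migration_helpers.find_migrate_repo(), version=None)

    # extension tables: the repo shipped inside the extension package
    from keystone.contrib import oauth1
    migration.db_sync(migration_helpers.find_migrate_repo(oauth1))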
diff --git a/keystone/credential/controllers.py b/keystone/credential/controllers.py
index de9752dc8..ee6081a1b 100644
--- a/keystone/credential/controllers.py
+++ b/keystone/credential/controllers.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -19,6 +17,7 @@ import json
from keystone.common import controller
from keystone.common import dependency
+from keystone.common import driver_hints
from keystone import exception
@@ -80,9 +79,13 @@ class CredentialV3(controller.V3Controller):
@controller.protected()
def list_credentials(self, context):
+ # NOTE(henry-nash): Since there are no filters for credentials, we
+ # shouldn't limit the output, hence we don't pass a hints list into
+ # the driver.
refs = self.credential_api.list_credentials()
ret_refs = [self._blob_to_json(r) for r in refs]
- return CredentialV3.wrap_collection(context, ret_refs)
+ return CredentialV3.wrap_collection(context, ret_refs,
+ driver_hints.Hints())
@controller.protected()
def get_credential(self, context, credential_id):
diff --git a/keystone/credential/core.py b/keystone/credential/core.py
index 11f3b7ff1..80b98700e 100644
--- a/keystone/credential/core.py
+++ b/keystone/credential/core.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/credential/routers.py b/keystone/credential/routers.py
index 06378b966..75cd33431 100644
--- a/keystone/credential/routers.py
+++ b/keystone/credential/routers.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/exception.py b/keystone/exception.py
index 09930b8c0..d932ce68d 100644
--- a/keystone/exception.py
+++ b/keystone/exception.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -295,3 +293,11 @@ class NotImplemented(Error):
class ConfigFileNotFound(UnexpectedError):
message_format = _("The Keystone configuration file %(config_file)s could "
"not be found.")
+
+
+class MigrationNotProvided(Exception):
+ def __init__(self, mod_name, path):
+ super(MigrationNotProvided, self).__init__(_(
+ "%(mod_name)s doesn't provide database migrations. The migration"
+ " repository path at %(path)s doesn't exist or isn't a directory."
+ ) % {'mod_name': mod_name, 'path': path})
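MigrationNotProvided gives find_migrate_repo() a precise failure mode for extensions that claim SQL support but ship no migrate_repo directory. A plausible guard, assuming the helper resolves the repo path from the package location (a sketch, not the actual helper):

    import os

    from keystone.common import sql
    from keystone import exception

    def find_migrate_repo(package=None):
        # assumption: default to the core repo under keystone.common.sql
        pkg = package or sql
        path = os.path.join(os.path.dirname(pkg.__file__), 'migrate_repo')
        if not os.path.isdir(path):
            raise exception.MigrationNotProvided(pkg.__name__, path)
        return path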
diff --git a/keystone/identity/__init__.py b/keystone/identity/__init__.py
index 0bb3094ae..5e179a36e 100644
--- a/keystone/identity/__init__.py
+++ b/keystone/identity/__init__.py
@@ -1,4 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
# flake8: noqa
# Copyright 2012 OpenStack Foundation
diff --git a/keystone/identity/backends/kvs.py b/keystone/identity/backends/kvs.py
index 9cb9bd763..22dc3e725 100644
--- a/keystone/identity/backends/kvs.py
+++ b/keystone/identity/backends/kvs.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/identity/backends/ldap.py b/keystone/identity/backends/ldap.py
index efb2fbc3b..13aa3f7ee 100644
--- a/keystone/identity/backends/ldap.py
+++ b/keystone/identity/backends/ldap.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -87,10 +85,12 @@ class Identity(identity.Driver):
# CRUD
def create_user(self, user_id, user):
+ self.user.check_allow_create()
user_ref = self.user.create(user)
return identity.filter_user(user_ref)
def update_user(self, user_id, user):
+ self.user.check_allow_update()
if 'id' in user and user['id'] != user_id:
raise exception.ValidationError(_('Cannot change user ID'))
old_obj = self.user.get(user_id)
@@ -104,6 +104,7 @@ class Identity(identity.Driver):
return self.user.get_filtered(user_id)
def delete_user(self, user_id):
+ self.user.check_allow_delete()
self.assignment_api.delete_user(user_id)
user_dn = self.user._id_to_dn(user_id)
groups = self.group.list_user_groups(user_dn)
@@ -117,6 +118,7 @@ class Identity(identity.Driver):
self.user.delete(user_id)
def create_group(self, group_id, group):
+ self.group.check_allow_create()
group['name'] = clean.group_name(group['name'])
return self.group.create(group)
@@ -124,11 +126,13 @@ class Identity(identity.Driver):
return self.group.get(group_id)
def update_group(self, group_id, group):
+ self.group.check_allow_update()
if 'name' in group:
group['name'] = clean.group_name(group['name'])
return self.group.update(group_id, group)
def delete_group(self, group_id):
+ self.group.check_allow_delete()
return self.group.delete(group_id)
def add_user_to_group(self, user_id, group_id):
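The new check_allow_create/check_allow_update/check_allow_delete guards make the read-only LDAP switches effective for users and groups: when writes are disabled in keystone.conf, the driver now rejects the operation up front rather than handing it to the LDAP server. A read-only deployment would set, for example:

    [ldap]
    user_allow_create = False
    user_allow_update = False
    user_allow_delete = False
    group_allow_create = False
    group_allow_update = False
    group_allow_delete = False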
diff --git a/keystone/identity/backends/pam.py b/keystone/identity/backends/pam.py
index 67827e746..11dceaa34 100644
--- a/keystone/identity/backends/pam.py
+++ b/keystone/identity/backends/pam.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/identity/backends/sql.py b/keystone/identity/backends/sql.py
index 3891d84c6..80e4012ca 100644
--- a/keystone/identity/backends/sql.py
+++ b/keystone/identity/backends/sql.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -16,12 +14,17 @@
from keystone.common import dependency
from keystone.common import sql
-from keystone.common.sql import migration
+from keystone.common.sql import migration_helpers
from keystone.common import utils
from keystone import exception
from keystone import identity
+from keystone.openstack.common.db.sqlalchemy import migration
from keystone.openstack.common.db.sqlalchemy import session as db_session
+# Import assignment sql to ensure that the models defined in there are
+# available for the reference from User and Group to Domain.id.
+from keystone.assignment.backends import sql as assignment_sql # flake8: noqa
+
class User(sql.ModelBase, sql.DictBase):
__tablename__ = 'user'
@@ -72,13 +75,14 @@ class UserGroupMembership(sql.ModelBase, sql.DictBase):
@dependency.requires('assignment_api')
-class Identity(sql.Base, identity.Driver):
+class Identity(identity.Driver):
def default_assignment_driver(self):
return "keystone.assignment.backends.sql.Assignment"
# Internal interface to manage the database
def db_sync(self, version=None):
- migration.db_sync(version=version)
+ migration.db_sync(
+ migration_helpers.find_migrate_repo(), version=version)
def _check_password(self, password, user_ref):
"""Check the specified password against the data store.
@@ -120,10 +124,11 @@ class Identity(sql.Base, identity.Driver):
session.add(user_ref)
return identity.filter_user(user_ref.to_dict())
+ @sql.truncated
def list_users(self, hints):
session = db_session.get_session()
query = session.query(User)
- user_refs = self.filter_query(User, query, hints)
+ user_refs = sql.filter_limit_query(User, query, hints)
return [identity.filter_user(x.to_dict()) for x in user_refs]
def _get_user(self, session, user_id):
@@ -252,10 +257,11 @@ class Identity(sql.Base, identity.Driver):
session.add(ref)
return ref.to_dict()
+ @sql.truncated
def list_groups(self, hints):
session = db_session.get_session()
query = session.query(Group)
- refs = self.filter_query(Group, query, hints)
+ refs = sql.filter_limit_query(Group, query, hints)
return [ref.to_dict() for ref in refs]
def _get_group(self, session, group_id):
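@sql.truncated caps what a list_* call returns at the backend's list limit, and sql.filter_limit_query() applies both the filter hints and that limit to the SQLAlchemy query. A condensed sketch of the truncation idea (illustrative only; hints.truncated is an assumed marker, not the real attribute):

    def truncated(f):
        # if more rows than the limit come back, cut the list and mark the
        # hints so callers can flag that the response was truncated
        def wrapper(self, hints):
            limit = self._get_list_limit()
            refs = f(self, hints)
            if limit is not None and len(refs) > limit:
                hints.truncated = True
                refs = refs[:limit]
            return refs
        return wrapper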
diff --git a/keystone/identity/controllers.py b/keystone/identity/controllers.py
index 1007bc973..b61c34fba 100644
--- a/keystone/identity/controllers.py
+++ b/keystone/identity/controllers.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -98,7 +96,7 @@ class User(controller.V2Controller):
def get_user(self, context, user_id):
self.assert_admin(context)
ref = self.identity_api.get_user(user_id)
- return {'user': self.identity_api.v3_to_v2_user(ref)}
+ return {'user': self.v3_to_v2_user(ref)}
@controller.v2_deprecated
def get_users(self, context):
@@ -110,14 +108,14 @@ class User(controller.V2Controller):
self.assert_admin(context)
user_list = self.identity_api.list_users()
- return {'users': self.identity_api.v3_to_v2_user(user_list)}
+ return {'users': self.v3_to_v2_user(user_list)}
@controller.v2_deprecated
def get_user_by_name(self, context, user_name):
self.assert_admin(context)
ref = self.identity_api.get_user_by_name(
user_name, CONF.identity.default_domain_id)
- return {'user': self.identity_api.v3_to_v2_user(ref)}
+ return {'user': self.v3_to_v2_user(ref)}
# CRUD extension
@controller.v2_deprecated
@@ -143,7 +141,7 @@ class User(controller.V2Controller):
user_id = uuid.uuid4().hex
user_ref = self._normalize_domain_id(context, user.copy())
user_ref['id'] = user_id
- new_user_ref = self.identity_api.v3_to_v2_user(
+ new_user_ref = self.v3_to_v2_user(
self.identity_api.create_user(user_id, user_ref))
if default_project_id is not None:
@@ -165,7 +163,7 @@ class User(controller.V2Controller):
if default_project_id is not None:
user['default_project_id'] = default_project_id
- old_user_ref = self.identity_api.v3_to_v2_user(
+ old_user_ref = self.v3_to_v2_user(
self.identity_api.get_user(user_id))
# Check whether a tenant is being added or changed for the user.
@@ -181,7 +179,7 @@ class User(controller.V2Controller):
# user update.
self.assignment_api.get_project(default_project_id)
- user_ref = self.identity_api.v3_to_v2_user(
+ user_ref = self.v3_to_v2_user(
self.identity_api.update_user(user_id, user))
# If 'tenantId' is in either ref, we might need to add or remove the
@@ -352,15 +350,11 @@ class UserV3(controller.V3Controller):
domain_scope = self._get_domain_id_for_request(context)
try:
- self.identity_api.authenticate(user_id=user_id,
- password=original_password,
- domain_scope=domain_scope)
+ self.identity_api.change_password(
+ context, user_id, original_password, password, domain_scope)
except AssertionError:
raise exception.Unauthorized()
- update_dict = {'password': password}
- self._update_user(context, user_id, update_dict, domain_scope)
-
@dependency.requires('identity_api')
class GroupV3(controller.V3Controller):
diff --git a/keystone/identity/core.py b/keystone/identity/core.py
index 106d55682..44807c754 100644
--- a/keystone/identity/core.py
+++ b/keystone/identity/core.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -24,7 +22,6 @@ from oslo.config import cfg
import six
from keystone import clean
-from keystone.common import controller
from keystone.common import dependency
from keystone.common import driver_hints
from keystone.common import manager
@@ -75,7 +72,7 @@ def filter_user(user_ref):
class DomainConfigs(dict):
"""Discover, store and provide access to domain specific configs.
- The setup_domain_drives() call will be made via the wrapper from
+ The setup_domain_drivers() call will be made via the wrapper from
the first call to any driver function handled by this manager. This
setup call will scan the domain config directory for files of the form
@@ -106,20 +103,20 @@ class DomainConfigs(dict):
LOG.warning(
_('Invalid domain name (%s) found in config file name'),
domain_name)
+ return
- if domain_ref:
- # Create a new entry in the domain config dict, which contains
- # a new instance of both the conf environment and driver using
- # options defined in this set of config files. Later, when we
- # service calls via this Manager, we'll index via this domain
- # config dict to make sure we call the right driver
- domain = domain_ref['id']
- self[domain] = {}
- self[domain]['cfg'] = cfg.ConfigOpts()
- config.configure(conf=self[domain]['cfg'])
- self[domain]['cfg'](args=[], project='keystone',
- default_config_files=file_list)
- self._load_driver(assignment_api, domain)
+ # Create a new entry in the domain config dict, which contains
+ # a new instance of both the conf environment and driver using
+ # options defined in this set of config files. Later, when we
+ # service calls via this Manager, we'll index via this domain
+ # config dict to make sure we call the right driver
+ domain = domain_ref['id']
+ self[domain] = {}
+ self[domain]['cfg'] = cfg.ConfigOpts()
+ config.configure(conf=self[domain]['cfg'])
+ self[domain]['cfg'](args=[], project='keystone',
+ default_config_files=file_list)
+ self._load_driver(assignment_api, domain)
def setup_domain_drivers(self, standard_driver, assignment_api):
# This is called by the api call wrapper
@@ -218,47 +215,6 @@ class Manager(manager.Manager):
super(Manager, self).__init__(CONF.identity.driver)
self.domain_configs = DomainConfigs()
- @staticmethod
- def v3_to_v2_user(ref):
- """Convert a user_ref from v3 to v2 compatible.
-
- * v2.0 users are not domain aware, and should have domain_id removed
- * v2.0 users expect the use of tenantId instead of default_project_id
- * v2.0 users have a username attribute
-
- This method should only be applied to user_refs being returned from the
- v2.0 controller(s).
-
- If ref is a list type, we will iterate through each element and do the
- conversion.
- """
-
- def _format_default_project_id(ref):
- """Convert default_project_id to tenantId for v2 calls."""
- default_project_id = ref.pop('default_project_id', None)
- if default_project_id is not None:
- ref['tenantId'] = default_project_id
- elif 'tenantId' in ref:
- # NOTE(morganfainberg): To avoid v2.0 confusion if somehow a
- # tenantId property sneaks its way into the extra blob on the
- # user, we remove it here. If default_project_id is set, we
- # would override it in either case.
- del ref['tenantId']
-
- def _normalize_and_filter_user_properties(ref):
- """Run through the various filter/normalization methods."""
- _format_default_project_id(ref)
- controller.V2Controller.filter_domain_id(ref)
- controller.V2Controller.normalize_username_in_response(ref)
- return ref
-
- if isinstance(ref, dict):
- return _normalize_and_filter_user_properties(ref)
- elif isinstance(ref, list):
- return [_normalize_and_filter_user_properties(x) for x in ref]
- else:
- raise ValueError(_('Expected dict or list: %s') % type(ref))
-
# Domain ID normalization methods
def _set_domain_id(self, ref, domain_id):
@@ -296,13 +252,6 @@ class Manager(manager.Manager):
self.assignment_api.get_domain(domain_id)
return self.driver
- def _get_domain_conf(self, domain_id):
- conf = self.domain_configs.get_domain_conf(domain_id)
- if conf:
- return conf
- else:
- return CONF
-
def _get_domain_id_and_driver(self, domain_scope):
domain_id = self._normalize_scope(domain_scope)
driver = self._select_identity_driver(domain_id)
@@ -321,8 +270,9 @@ class Manager(manager.Manager):
# - select the right driver for this domain
# - clear/set domain_ids for drivers that do not support domains
+ @notifications.emit_event('authenticate')
@domains_configured
- def authenticate(self, user_id, password, domain_scope=None):
+ def authenticate(self, context, user_id, password, domain_scope=None):
domain_id, driver = self._get_domain_id_and_driver(domain_scope)
ref = driver.authenticate(user_id, password)
if not driver.is_domain_aware():
@@ -363,6 +313,7 @@ class Manager(manager.Manager):
ref = self._set_domain_id(ref, domain_id)
return ref
+ @manager.response_truncated
@domains_configured
def list_users(self, domain_scope=None, hints=None):
domain_id, driver = self._get_domain_id_and_driver(domain_scope)
@@ -463,6 +414,7 @@ class Manager(manager.Manager):
driver.remove_user_from_group(user_id, group_id)
self.token_api.delete_tokens_for_user(user_id)
+ @manager.response_truncated
@domains_configured
def list_groups_for_user(self, user_id, domain_scope=None,
hints=None):
@@ -477,6 +429,7 @@ class Manager(manager.Manager):
ref_list = self._set_domain_id(ref_list, domain_id)
return ref_list
+ @manager.response_truncated
@domains_configured
def list_groups(self, domain_scope=None, hints=None):
domain_id, driver = self._get_domain_id_and_driver(domain_scope)
@@ -489,6 +442,7 @@ class Manager(manager.Manager):
ref_list = self._set_domain_id(ref_list, domain_id)
return ref_list
+ @manager.response_truncated
@domains_configured
def list_users_in_group(self, group_id, domain_scope=None,
hints=None):
@@ -508,6 +462,17 @@ class Manager(manager.Manager):
domain_id, driver = self._get_domain_id_and_driver(domain_scope)
return driver.check_user_in_group(user_id, group_id)
+ @domains_configured
+ def change_password(self, context, user_id, original_password,
+ new_password, domain_scope):
+
+ # authenticate() will raise an AssertionError if authentication fails
+ self.authenticate(context, user_id, original_password,
+ domain_scope=domain_scope)
+
+ update_dict = {'password': new_password}
+ self.update_user(user_id, update_dict, domain_scope=domain_scope)
+
# TODO(morganfainberg): Remove the following deprecated methods once
# Icehouse is released. Maintain identity -> assignment proxy for 1
# release.
@@ -638,6 +603,9 @@ class Manager(manager.Manager):
class Driver(object):
"""Interface description for an Identity driver."""
+ def _get_list_limit(self):
+ return CONF.identity.list_limit or CONF.list_limit
+
@abc.abstractmethod
def authenticate(self, user_id, password):
"""Authenticate a given user and password.
diff --git a/keystone/identity/routers.py b/keystone/identity/routers.py
index c7a94ea3c..6306dd718 100644
--- a/keystone/identity/routers.py
+++ b/keystone/identity/routers.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/middleware/__init__.py b/keystone/middleware/__init__.py
index 9bfd68fff..d805f82d8 100644
--- a/keystone/middleware/__init__.py
+++ b/keystone/middleware/__init__.py
@@ -1,4 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
# flake8: noqa
# Copyright 2012 OpenStack Foundation
diff --git a/keystone/middleware/core.py b/keystone/middleware/core.py
index 11bd21d72..6569ca1fa 100644
--- a/keystone/middleware/core.py
+++ b/keystone/middleware/core.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -193,7 +191,7 @@ class RequestBodySizeLimiter(wsgi.Middleware):
def __init__(self, *args, **kwargs):
super(RequestBodySizeLimiter, self).__init__(*args, **kwargs)
- @webob.dec.wsgify(RequestClass=wsgi.Request)
+ @webob.dec.wsgify()
def __call__(self, req):
if req.content_length > CONF.max_request_body_size:
diff --git a/keystone/middleware/ec2_token.py b/keystone/middleware/ec2_token.py
index 1f4e8c8a5..db5010c60 100644
--- a/keystone/middleware/ec2_token.py
+++ b/keystone/middleware/ec2_token.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
@@ -45,7 +43,7 @@ CONF.register_opts(keystone_ec2_opts)
class EC2Token(wsgi.Middleware):
"""Authenticate an EC2 request with keystone and convert to token."""
- @webob.dec.wsgify(RequestClass=wsgi.Request)
+ @webob.dec.wsgify()
def __call__(self, req):
# Read request signature and access id.
try:
diff --git a/keystone/middleware/s3_token.py b/keystone/middleware/s3_token.py
index 6e55001d5..97231e8f9 100644
--- a/keystone/middleware/s3_token.py
+++ b/keystone/middleware/s3_token.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
@@ -24,6 +22,9 @@
"""
S3 TOKEN MIDDLEWARE
+The S3 Token middleware is deprecated as of Icehouse. It has been moved into
+python-keystoneclient, `keystoneclient.middleware.s3_token`.
+
This WSGI component:
* Get a request from the swift3 middleware with an S3 Authorization
@@ -33,233 +34,23 @@ This WSGI component:
"""
-import httplib
-
-from six.moves import urllib
-import webob
-
-from keystone.openstack.common import jsonutils
-from keystone.openstack.common import log
-
-
-PROTOCOL_NAME = 'S3 Token Authentication'
-LOG = log.getLogger(__name__)
-
+from keystoneclient.middleware import s3_token
-# TODO(kun): remove it after oslo merge this.
-def split_path(path, minsegs=1, maxsegs=None, rest_with_last=False):
- """Validate and split the given HTTP request path.
+from keystone.openstack.common import versionutils
- **Examples**::
- ['a'] = split_path('/a')
- ['a', None] = split_path('/a', 1, 2)
- ['a', 'c'] = split_path('/a/c', 1, 2)
- ['a', 'c', 'o/r'] = split_path('/a/c/o/r', 1, 3, True)
+PROTOCOL_NAME = s3_token.PROTOCOL_NAME
+split_path = s3_token.split_path
+ServiceError = s3_token.ServiceError
+filter_factory = s3_token.filter_factory
- :param path: HTTP Request path to be split
- :param minsegs: Minimum number of segments to be extracted
- :param maxsegs: Maximum number of segments to be extracted
- :param rest_with_last: If True, trailing data will be returned as part
- of last segment. If False, and there is
- trailing data, raises ValueError.
- :returns: list of segments with a length of maxsegs (non-existant
- segments will return as None)
- :raises: ValueError if given an invalid path
- """
- if not maxsegs:
- maxsegs = minsegs
- if minsegs > maxsegs:
- raise ValueError('minsegs > maxsegs: %d > %d' % (minsegs, maxsegs))
- if rest_with_last:
- segs = path.split('/', maxsegs)
- minsegs += 1
- maxsegs += 1
- count = len(segs)
- if (segs[0] or count < minsegs or count > maxsegs or
- '' in segs[1:minsegs]):
- raise ValueError('Invalid path: %s' % urllib.parse.quote(path))
- else:
- minsegs += 1
- maxsegs += 1
- segs = path.split('/', maxsegs)
- count = len(segs)
- if (segs[0] or count < minsegs or count > maxsegs + 1 or
- '' in segs[1:minsegs] or
- (count == maxsegs + 1 and segs[maxsegs])):
- raise ValueError('Invalid path: %s' % urllib.parse.quote(path))
- segs = segs[1:maxsegs]
- segs.extend([None] * (maxsegs - 1 - len(segs)))
- return segs
+class S3Token(s3_token.S3Token):
-class ServiceError(Exception):
- pass
-
-
-class S3Token(object):
- """Auth Middleware that handles S3 authenticating client calls."""
-
+ @versionutils.deprecated(
+ versionutils.deprecated.ICEHOUSE,
+ in_favor_of='keystoneclient.middleware.s3_token',
+ remove_in=+1,
+ what='keystone.middleware.s3_token')
def __init__(self, app, conf):
- """Common initialization code."""
- self.app = app
- self.logger = LOG
- self.logger.debug('Starting the %s component' % PROTOCOL_NAME)
- self.reseller_prefix = conf.get('reseller_prefix', 'AUTH_')
- # where to find the auth service (we use this to validate tokens)
- self.auth_host = conf.get('auth_host')
- self.auth_port = int(conf.get('auth_port', 35357))
- self.auth_protocol = conf.get('auth_protocol', 'https')
- if self.auth_protocol == 'http':
- self.http_client_class = httplib.HTTPConnection
- else:
- self.http_client_class = httplib.HTTPSConnection
- # SSL
- self.cert_file = conf.get('certfile')
- self.key_file = conf.get('keyfile')
-
- def deny_request(self, code):
- error_table = {
- 'AccessDenied': (401, 'Access denied'),
- 'InvalidURI': (400, 'Could not parse the specified URI'),
- }
- resp = webob.Response(content_type='text/xml')
- resp.status = error_table[code][0]
- resp.body = error_table[code][1]
- resp.body = ('<?xml version="1.0" encoding="UTF-8"?>\r\n'
- '<Error>\r\n <Code>%s</Code>\r\n '
- '<Message>%s</Message>\r\n</Error>\r\n' %
- (code, error_table[code][1]))
- return resp
-
- def _json_request(self, creds_json):
- headers = {'Content-Type': 'application/json'}
- if self.auth_protocol == 'http':
- conn = self.http_client_class(self.auth_host, self.auth_port)
- else:
- conn = self.http_client_class(self.auth_host,
- self.auth_port,
- self.key_file,
- self.cert_file)
- try:
- conn.request('POST', '/v2.0/s3tokens',
- body=creds_json,
- headers=headers)
- response = conn.getresponse()
- output = response.read()
- except Exception as e:
- self.logger.info('HTTP connection exception: %s' % e)
- resp = self.deny_request('InvalidURI')
- raise ServiceError(resp)
- finally:
- conn.close()
-
- if response.status < 200 or response.status >= 300:
- self.logger.debug('Keystone reply error: status=%s reason=%s' %
- (response.status, response.reason))
- resp = self.deny_request('AccessDenied')
- raise ServiceError(resp)
-
- return (response, output)
-
- def __call__(self, environ, start_response):
- """Handle incoming request. authenticate and send downstream."""
- req = webob.Request(environ)
- self.logger.debug('Calling S3Token middleware.')
-
- try:
- parts = split_path(req.path, 1, 4, True)
- version, account, container, obj = parts
- except ValueError:
- msg = 'Not a path query, skipping.'
- self.logger.debug(msg)
- return self.app(environ, start_response)
-
- # Read request signature and access id.
- if 'Authorization' not in req.headers:
- msg = 'No Authorization header. skipping.'
- self.logger.debug(msg)
- return self.app(environ, start_response)
-
- token = req.headers.get('X-Auth-Token',
- req.headers.get('X-Storage-Token'))
- if not token:
- msg = 'You did not specify a auth or a storage token. skipping.'
- self.logger.debug(msg)
- return self.app(environ, start_response)
-
- auth_header = req.headers['Authorization']
- try:
- access, signature = auth_header.split(' ')[-1].rsplit(':', 1)
- except ValueError:
- msg = 'You have an invalid Authorization header: %s'
- self.logger.debug(msg % (auth_header))
- return self.deny_request('InvalidURI')(environ, start_response)
-
- # NOTE(chmou): This is to handle the special case with nova
- # when we have the option s3_affix_tenant. We will force it to
- # connect to another account than the one
- # authenticated. Before people start getting worried about
- # security, I should point that we are connecting with
- # username/token specified by the user but instead of
- # connecting to its own account we will force it to go to an
- # another account. In a normal scenario if that user don't
- # have the reseller right it will just fail but since the
- # reseller account can connect to every account it is allowed
- # by the swift_auth middleware.
- force_tenant = None
- if ':' in access:
- access, force_tenant = access.split(':')
-
- # Authenticate request.
- creds = {'credentials': {'access': access,
- 'token': token,
- 'signature': signature}}
- creds_json = jsonutils.dumps(creds)
- self.logger.debug('Connecting to Keystone sending this JSON: %s' %
- creds_json)
- # NOTE(vish): We could save a call to keystone by having
- # keystone return token, tenant, user, and roles
- # from this call.
- #
- # NOTE(chmou): We still have the same problem we would need to
- # change token_auth to detect if we already
- # identified and not doing a second query and just
- # pass it through to swiftauth in this case.
- try:
- resp, output = self._json_request(creds_json)
- except ServiceError as e:
- resp = e.args[0]
- msg = 'Received error, exiting middleware with error: %s'
- self.logger.debug(msg % (resp.status))
- return resp(environ, start_response)
-
- self.logger.debug('Keystone Reply: Status: %d, Output: %s' % (
- resp.status, output))
-
- try:
- identity_info = jsonutils.loads(output)
- token_id = str(identity_info['access']['token']['id'])
- tenant = identity_info['access']['token']['tenant']
- except (ValueError, KeyError):
- error = 'Error on keystone reply: %d %s'
- self.logger.debug(error % (resp.status, str(output)))
- return self.deny_request('InvalidURI')(environ, start_response)
-
- req.headers['X-Auth-Token'] = token_id
- tenant_to_connect = force_tenant or tenant['id']
- self.logger.debug('Connecting with tenant: %s' % (tenant_to_connect))
- new_tenant_name = '%s%s' % (self.reseller_prefix, tenant_to_connect)
- environ['PATH_INFO'] = environ['PATH_INFO'].replace(account,
- new_tenant_name)
- return self.app(environ, start_response)
-
-
-def filter_factory(global_conf, **local_conf):
- """Returns a WSGI filter app for use with paste.deploy."""
- conf = global_conf.copy()
- conf.update(local_conf)
-
- def auth_filter(app):
- return S3Token(app, conf)
- return auth_filter
+ super(S3Token, self).__init__(app, conf)
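Since filter_factory is re-exported from keystoneclient, existing paste pipelines keep working through this shim while logging the deprecation; deployments should eventually reference keystoneclient directly. An illustrative Swift proxy paste.ini stanza (option names taken from the removed code above):

    [filter:s3token]
    # still resolves via the shim, but emits a deprecation warning
    paste.filter_factory = keystone.middleware.s3_token:filter_factory
    # preferred target after the Icehouse move:
    # paste.filter_factory = keystoneclient.middleware.s3_token:filter_factory
    auth_host = 127.0.0.1
    auth_port = 35357
    auth_protocol = https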
diff --git a/keystone/notifications.py b/keystone/notifications.py
index 725bc8f86..84d37d0be 100644
--- a/keystone/notifications.py
+++ b/keystone/notifications.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -17,17 +15,35 @@
"""Notifications module for OpenStack Identity Service resources"""
import logging
+import socket
+
+from oslo.config import cfg
+from oslo import messaging
+import pycadf
+from pycadf import cadftaxonomy as taxonomy
+from pycadf import cadftype
+from pycadf import eventfactory
+from pycadf import resource
from keystone.openstack.common import log
-from keystone.openstack.common.notifier import api as notifier_api
+notifier_opts = [
+ cfg.StrOpt('default_publisher_id',
+ default=None,
+ help='Default publisher_id for outgoing notifications'),
+]
LOG = log.getLogger(__name__)
# NOTE(gyee): actions that can be notified. One must update this list whenever
# a new action is supported.
-ACTIONS = frozenset(['created', 'deleted', 'updated'])
+ACTIONS = frozenset(['created', 'deleted', 'disabled', 'updated'])
# resource types that can be notified
SUBSCRIBERS = {}
+_notifier = None
+
+
+CONF = cfg.CONF
+CONF.register_opts(notifier_opts)
class ManagerNotificationWrapper(object):
@@ -36,13 +52,19 @@ class ManagerNotificationWrapper(object):
Sends a notification if the wrapped Manager method does not raise an
``Exception`` (such as ``keystone.exception.NotFound``).
+ :param operation: one of the values from ACTIONS
:param resource_type: type of resource being affected
- :param host: host of the resource (optional)
+ :param public: If True (default), the event will be sent to the notifier
+ API. If False, the event will only be sent via
+ notify_event_callbacks to in-process listeners.
+
"""
- def __init__(self, operation, resource_type, host=None):
+ def __init__(self, operation, resource_type, public=True,
+ resource_id_arg_index=1):
self.operation = operation
self.resource_type = resource_type
- self.host = host
+ self.public = public
+ self.resource_id_arg_index = resource_id_arg_index
def __call__(self, f):
def wrapper(*args, **kwargs):
@@ -52,11 +74,12 @@ class ManagerNotificationWrapper(object):
except Exception:
raise
else:
+ resource_id = args[self.resource_id_arg_index]
_send_notification(
self.operation,
self.resource_type,
- args[1], # f(self, resource_id, ...)
- self.host)
+ resource_id,
+ public=self.public)
return result
return wrapper
@@ -72,6 +95,11 @@ def updated(*args, **kwargs):
return ManagerNotificationWrapper('updated', *args, **kwargs)
+def disabled(*args, **kwargs):
+ """Decorator to send notifications when an object is disabled."""
+ return ManagerNotificationWrapper('disabled', *args, **kwargs)
+
+
def deleted(*args, **kwargs):
"""Decorator to send notifications for ``Manager.delete_*`` methods."""
return ManagerNotificationWrapper('deleted', *args, **kwargs)
@@ -131,7 +159,34 @@ def notify_event_callbacks(service, resource_type, operation, payload):
cb(service, resource_type, operation, payload)
-def _send_notification(operation, resource_type, resource_id, host=None):
+def _get_notifier():
+ """Return a notifier object.
+
+ If _notifier is None, a notifier object has not yet been set.
+ If _notifier is False, a notifier has previously failed to construct.
+ Otherwise it is a constructed Notifier object.
+ """
+ global _notifier
+
+ if _notifier is None:
+ host = CONF.default_publisher_id or socket.gethostname()
+ try:
+ transport = messaging.get_transport(CONF)
+ _notifier = messaging.Notifier(transport, "identity.%s" % host)
+ except Exception:
+ LOG.exception("Failed to construct notifier")
+ _notifier = False
+
+ return _notifier
+
+
+def _reset_notifier():
+ global _notifier
+ _notifier = None
+
+
+def _send_notification(operation, resource_type, resource_id, public=True):
"""Send notification to inform observers about the affected resource.
This method doesn't raise an exception when sending the notification fails.
@@ -139,12 +194,14 @@ def _send_notification(operation, resource_type, resource_id, host=None):
:param operation: operation being performed (created, updated, or deleted)
:param resource_type: type of resource being operated on
:param resource_id: ID of resource being operated on
- :param host: resource host
+ :param public: If True (default), the event will be sent
+ to the notifier API.
+ If False, the event will only be sent via
+ notify_event_callbacks to in-process listeners.
"""
context = {}
payload = {'resource_info': resource_id}
service = 'identity'
- publisher_id = notifier_api.publisher_id(service, host=host)
event_type = '%(service)s.%(resource_type)s.%(operation)s' % {
'service': service,
'resource_type': resource_type,
@@ -152,10 +209,99 @@ def _send_notification(operation, resource_type, resource_id, host=None):
notify_event_callbacks(service, resource_type, operation, payload)
- try:
- notifier_api.notify(
- context, publisher_id, event_type, notifier_api.INFO, payload)
- except Exception:
- LOG.exception(
- _('Failed to send %(res_id)s %(event_type)s notification'),
- {'res_id': resource_id, 'event_type': event_type})
+ if public:
+ notifier = _get_notifier()
+ if notifier:
+ try:
+ notifier.info(context, event_type, payload)
+ except Exception:
+ LOG.exception(_(
+ 'Failed to send %(res_id)s %(event_type)s notification'),
+ {'res_id': resource_id, 'event_type': event_type})
+
+
+class CadfNotificationWrapper(object):
+ """Send CADF event notifications for various methods.
+
+ Sends CADF notifications for events, such as whether an authentication
+ attempt succeeded or failed.
+
+ """
+
+ def __init__(self, action):
+ self.action = action
+
+ def __call__(self, f):
+ def wrapper(wrapped_self, context, user_id, *args, **kwargs):
+ """Always send a notification."""
+
+ remote_addr = None
+ http_user_agent = None
+ environment = context.get('environment')
+
+ if environment:
+ remote_addr = environment.get('REMOTE_ADDR')
+ http_user_agent = environment.get('HTTP_USER_AGENT')
+
+ host = pycadf.host.Host(address=remote_addr, agent=http_user_agent)
+ initiator = resource.Resource(typeURI=taxonomy.ACCOUNT_USER,
+ name=user_id, host=host)
+
+ _send_audit_notification(self.action, initiator,
+ taxonomy.OUTCOME_PENDING)
+ try:
+ result = f(wrapped_self, context, user_id, *args, **kwargs)
+ except Exception:
+ # For an authentication failure, send a CADF event as well
+ _send_audit_notification(self.action, initiator,
+ taxonomy.OUTCOME_FAILURE)
+ raise
+ else:
+ _send_audit_notification(self.action, initiator,
+ taxonomy.OUTCOME_SUCCESS)
+ return result
+
+ return wrapper
+
+
+def _send_audit_notification(action, initiator, outcome):
+ """Send CADF notification to inform observers about the affected resource.
+
+ This method logs an exception when sending the notification fails.
+
+ :param action: CADF action being audited (e.g., 'authenticate')
+ :param initiator: CADF resource representing the initiator
+ :param outcome: The CADF outcome (taxonomy.OUTCOME_PENDING,
+ taxonomy.OUTCOME_SUCCESS, taxonomy.OUTCOME_FAILURE)
+
+ """
+
+ event = eventfactory.EventFactory().new_event(
+ eventType=cadftype.EVENTTYPE_ACTIVITY,
+ outcome=outcome,
+ action=action,
+ initiator=initiator,
+ target=resource.Resource(typeURI=taxonomy.ACCOUNT_USER),
+ observer=resource.Resource(typeURI='service/security'))
+
+ context = {}
+ payload = event.as_dict()
+ LOG.debug(_('CADF Event: %s'), payload)
+ service = 'identity'
+ event_type = '%(service)s.%(action)s' % {'service': service,
+ 'action': action}
+
+ notifier = _get_notifier()
+
+ if notifier:
+ try:
+ notifier.info(context, event_type, payload)
+ except Exception:
+ # diaper defense: any exception that occurs while emitting the
+ # notification should not interfere with the API request
+ LOG.exception(_(
+ 'Failed to send %(action)s %(event_type)s notification'),
+ {'action': action, 'event_type': event_type})
+
+
+emit_event = CadfNotificationWrapper
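The rework replaces the old notifier API with a single lazily-constructed oslo.messaging Notifier per process, plus CADF audit events via emit_event. A minimal usage sketch against the decorators defined above (WidgetManager and its methods are hypothetical):

    from keystone import notifications

    class WidgetManager(object):
        @notifications.created('widget')   # emits identity.widget.created
        def create_widget(self, widget_id, widget_ref):
            return widget_ref

        @notifications.emit_event('authenticate')   # CADF pending/success/failure
        def authenticate(self, context, user_id, password):
            return {'user_id': user_id}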
diff --git a/keystone/openstack/common/config/__init__.py b/keystone/openstack/common/config/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/keystone/openstack/common/config/__init__.py
diff --git a/keystone/openstack/common/config/generator.py b/keystone/openstack/common/config/generator.py
new file mode 100644
index 000000000..81bcec25c
--- /dev/null
+++ b/keystone/openstack/common/config/generator.py
@@ -0,0 +1,293 @@
+# Copyright 2012 SINA Corporation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+"""Extracts OpenStack config option info from module(s)."""
+
+from __future__ import print_function
+
+import argparse
+import imp
+import os
+import re
+import socket
+import sys
+import textwrap
+
+from oslo.config import cfg
+import six
+import stevedore.named
+
+from keystone.openstack.common import gettextutils
+from keystone.openstack.common import importutils
+
+gettextutils.install('keystone')
+
+STROPT = "StrOpt"
+BOOLOPT = "BoolOpt"
+INTOPT = "IntOpt"
+FLOATOPT = "FloatOpt"
+LISTOPT = "ListOpt"
+MULTISTROPT = "MultiStrOpt"
+
+OPT_TYPES = {
+ STROPT: 'string value',
+ BOOLOPT: 'boolean value',
+ INTOPT: 'integer value',
+ FLOATOPT: 'floating point value',
+ LISTOPT: 'list value',
+ MULTISTROPT: 'multi valued',
+}
+
+OPTION_REGEX = re.compile(r"(%s)" % "|".join([STROPT, BOOLOPT, INTOPT,
+ FLOATOPT, LISTOPT,
+ MULTISTROPT]))
+
+PY_EXT = ".py"
+BASEDIR = os.path.abspath(os.path.join(os.path.dirname(__file__),
+ "../../../../"))
+WORDWRAP_WIDTH = 60
+
+
+def generate(argv):
+ parser = argparse.ArgumentParser(
+ description='generate sample configuration file',
+ )
+ parser.add_argument('-m', dest='modules', action='append')
+ parser.add_argument('-l', dest='libraries', action='append')
+ parser.add_argument('srcfiles', nargs='*')
+ parsed_args = parser.parse_args(argv)
+
+ mods_by_pkg = dict()
+ for filepath in parsed_args.srcfiles:
+ pkg_name = filepath.split(os.sep)[1]
+ mod_str = '.'.join(['.'.join(filepath.split(os.sep)[:-1]),
+ os.path.basename(filepath).split('.')[0]])
+ mods_by_pkg.setdefault(pkg_name, list()).append(mod_str)
+ # NOTE(lzyeval): place top level modules before packages
+ pkg_names = sorted(pkg for pkg in mods_by_pkg if pkg.endswith(PY_EXT))
+ ext_names = sorted(pkg for pkg in mods_by_pkg if pkg not in pkg_names)
+ pkg_names.extend(ext_names)
+
+ # opts_by_group is a mapping of group name to an options list
+ # The options list is a list of (module, options) tuples
+ opts_by_group = {'DEFAULT': []}
+
+ if parsed_args.modules:
+ for module_name in parsed_args.modules:
+ module = _import_module(module_name)
+ if module:
+ for group, opts in _list_opts(module):
+ opts_by_group.setdefault(group, []).append((module_name,
+ opts))
+
+ # Look for entry points defined in libraries (or applications) for
+ # option discovery, and include their return values in the output.
+ #
+ # Each entry point should be a function returning an iterable
+ # of pairs with the group name (or None for the default group)
+ # and the list of Opt instances for that group.
+ if parsed_args.libraries:
+ loader = stevedore.named.NamedExtensionManager(
+ 'oslo.config.opts',
+ names=list(set(parsed_args.libraries)),
+ invoke_on_load=False,
+ )
+ for ext in loader:
+ for group, opts in ext.plugin():
+ opt_list = opts_by_group.setdefault(group or 'DEFAULT', [])
+ opt_list.append((ext.name, opts))
+
+ for pkg_name in pkg_names:
+ mods = mods_by_pkg.get(pkg_name)
+ mods.sort()
+ for mod_str in mods:
+ if mod_str.endswith('.__init__'):
+ mod_str = mod_str[:mod_str.rfind(".")]
+
+ mod_obj = _import_module(mod_str)
+ if not mod_obj:
+ raise RuntimeError("Unable to import module %s" % mod_str)
+
+ for group, opts in _list_opts(mod_obj):
+ opts_by_group.setdefault(group, []).append((mod_str, opts))
+
+ print_group_opts('DEFAULT', opts_by_group.pop('DEFAULT', []))
+ for group in sorted(opts_by_group.keys()):
+ print_group_opts(group, opts_by_group[group])
+
+
+def _import_module(mod_str):
+ try:
+ if mod_str.startswith('bin.'):
+ imp.load_source(mod_str[4:], os.path.join('bin', mod_str[4:]))
+ return sys.modules[mod_str[4:]]
+ else:
+ return importutils.import_module(mod_str)
+ except Exception as e:
+ sys.stderr.write("Error importing module %s: %s\n" % (mod_str, str(e)))
+ return None
+
+
+def _is_in_group(opt, group):
+ "Check if opt is in group."
+ for value in group._opts.values():
+ # NOTE(llu): Temporary workaround for bug #1262148, wait until
+ # newly released oslo.config support '==' operator.
+ if not(value['opt'] != opt):
+ return True
+ return False
+
+
+def _guess_groups(opt, mod_obj):
+ # is it in the DEFAULT group?
+ if _is_in_group(opt, cfg.CONF):
+ return 'DEFAULT'
+
+ # what other groups is it in?
+ for value in cfg.CONF.values():
+ if isinstance(value, cfg.CONF.GroupAttr):
+ if _is_in_group(opt, value._group):
+ return value._group.name
+
+ raise RuntimeError(
+ "Unable to find group for option %s, "
+ "maybe it's defined twice in the same group?"
+ % opt.name
+ )
+
+
+def _list_opts(obj):
+ def is_opt(o):
+ return (isinstance(o, cfg.Opt) and
+ not isinstance(o, cfg.SubCommandOpt))
+
+ opts = list()
+ for attr_str in dir(obj):
+ attr_obj = getattr(obj, attr_str)
+ if is_opt(attr_obj):
+ opts.append(attr_obj)
+ elif (isinstance(attr_obj, list) and
+ all(map(lambda x: is_opt(x), attr_obj))):
+ opts.extend(attr_obj)
+
+ ret = {}
+ for opt in opts:
+ ret.setdefault(_guess_groups(opt, obj), []).append(opt)
+ return ret.items()
+
+
+def print_group_opts(group, opts_by_module):
+ print("[%s]" % group)
+ print('')
+ for mod, opts in opts_by_module:
+ print('#')
+ print('# Options defined in %s' % mod)
+ print('#')
+ print('')
+ for opt in opts:
+ _print_opt(opt)
+ print('')
+
+
+def _get_my_ip():
+ try:
+ csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+ csock.connect(('8.8.8.8', 80))
+ (addr, port) = csock.getsockname()
+ csock.close()
+ return addr
+ except socket.error:
+ return None
+
+
+def _sanitize_default(name, value):
+ """Set up a reasonably sensible default for pybasedir, my_ip and host."""
+ if value.startswith(sys.prefix):
+ # NOTE(jd) Don't use os.path.join, because it is likely to think the
+ # second part is an absolute pathname and therefore drop the first
+ # part.
+ value = os.path.normpath("/usr/" + value[len(sys.prefix):])
+ elif value.startswith(BASEDIR):
+ return value.replace(BASEDIR, '/usr/lib/python/site-packages')
+ elif BASEDIR in value:
+ return value.replace(BASEDIR, '')
+ elif value == _get_my_ip():
+ return '10.0.0.1'
+ elif value == socket.gethostname() and 'host' in name:
+ return 'keystone'
+ elif value.strip() != value:
+ return '"%s"' % value
+ return value
+
+
+def _print_opt(opt):
+ opt_name, opt_default, opt_help = opt.dest, opt.default, opt.help
+ if not opt_help:
+ sys.stderr.write('WARNING: "%s" is missing help string.\n' % opt_name)
+ opt_help = ""
+ opt_type = None
+ try:
+ opt_type = OPTION_REGEX.search(str(type(opt))).group(0)
+ except (ValueError, AttributeError) as err:
+ sys.stderr.write("%s\n" % str(err))
+ sys.exit(1)
+ opt_help += ' (' + OPT_TYPES[opt_type] + ')'
+ print('#', "\n# ".join(textwrap.wrap(opt_help, WORDWRAP_WIDTH)))
+ if opt.deprecated_opts:
+ for deprecated_opt in opt.deprecated_opts:
+ if deprecated_opt.name:
+ deprecated_group = (deprecated_opt.group if
+ deprecated_opt.group else "DEFAULT")
+ print('# Deprecated group/name - [%s]/%s' %
+ (deprecated_group,
+ deprecated_opt.name))
+ try:
+ if opt_default is None:
+ print('#%s=<None>' % opt_name)
+ elif opt_type == STROPT:
+ assert(isinstance(opt_default, six.string_types))
+ print('#%s=%s' % (opt_name, _sanitize_default(opt_name,
+ opt_default)))
+ elif opt_type == BOOLOPT:
+ assert(isinstance(opt_default, bool))
+ print('#%s=%s' % (opt_name, str(opt_default).lower()))
+ elif opt_type == INTOPT:
+ assert(isinstance(opt_default, int) and
+ not isinstance(opt_default, bool))
+ print('#%s=%s' % (opt_name, opt_default))
+ elif opt_type == FLOATOPT:
+ assert(isinstance(opt_default, float))
+ print('#%s=%s' % (opt_name, opt_default))
+ elif opt_type == LISTOPT:
+ assert(isinstance(opt_default, list))
+ print('#%s=%s' % (opt_name, ','.join(opt_default)))
+ elif opt_type == MULTISTROPT:
+ assert(isinstance(opt_default, list))
+ if not opt_default:
+ opt_default = ['']
+ for default in opt_default:
+ print('#%s=%s' % (opt_name, default))
+ print('')
+ except Exception:
+ sys.stderr.write('Error in option "%s"\n' % opt_name)
+ sys.exit(1)
+
+
+def main():
+ generate(sys.argv[1:])
+
+if __name__ == '__main__':
+ main()
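For orientation, a minimal sketch of how these helpers compose (the module
and option names below are illustrative, not part of the patch):

    from oslo.config import cfg

    example_opts = [
        cfg.StrOpt('endpoint', default='http://localhost:5000/',
                   help='An illustrative option'),
    ]
    cfg.CONF.register_opts(example_opts)

    # _list_opts() discovers `example_opts` by scanning the module's
    # attributes, _guess_groups() resolves each option to 'DEFAULT' (it was
    # registered without a group), and print_group_opts() then emits the
    # sample section, abridged:
    #
    #   [DEFAULT]
    #   # An illustrative option (string value)
    #   #endpoint=http://localhost:5000/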
diff --git a/keystone/openstack/common/db/exception.py b/keystone/openstack/common/db/exception.py
index b57657fb8..0aa7df053 100644
--- a/keystone/openstack/common/db/exception.py
+++ b/keystone/openstack/common/db/exception.py
@@ -16,6 +16,8 @@
"""DB related custom exceptions."""
+import six
+
from keystone.openstack.common.gettextutils import _
@@ -23,7 +25,7 @@ class DBError(Exception):
"""Wraps an implementation specific exception."""
def __init__(self, inner_exception=None):
self.inner_exception = inner_exception
- super(DBError, self).__init__(str(inner_exception))
+ super(DBError, self).__init__(six.text_type(inner_exception))
class DBDuplicateEntry(DBError):
@@ -46,7 +48,7 @@ class DBInvalidUnicodeParameter(Exception):
class DbMigrationError(DBError):
"""Wraps migration specific exception."""
def __init__(self, message=None):
- super(DbMigrationError, self).__init__(str(message))
+ super(DbMigrationError, self).__init__(message)
class DBConnectionError(DBError):
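The six.text_type change matters on Python 2, where calling str() on an
exception whose message contains non-ASCII characters raises
UnicodeEncodeError; a quick illustration:

    import six

    err = Exception(u'\u2603')
    six.text_type(err)   # -> u'\u2603'
    str(err)             # raises UnicodeEncodeError on Python 2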
diff --git a/keystone/openstack/common/db/sqlalchemy/session.py b/keystone/openstack/common/db/sqlalchemy/session.py
index 563f0ecf0..10701fa38 100644
--- a/keystone/openstack/common/db/sqlalchemy/session.py
+++ b/keystone/openstack/common/db/sqlalchemy/session.py
@@ -18,10 +18,12 @@
Initializing:
-* Call set_defaults with the minimal of the following kwargs:
- sql_connection, sqlite_db
+* Call `set_defaults()` with at least the following kwargs:
+ ``sql_connection``, ``sqlite_db``
- Example::
+ Example:
+
+ .. code:: python
session.set_defaults(
sql_connection="sqlite:///var/lib/keystone/sqlite.db",
@@ -29,20 +31,22 @@ Initializing:
Recommended ways to use sessions within this framework:
-* Don't use them explicitly; this is like running with AUTOCOMMIT=1.
- model_query() will implicitly use a session when called without one
+* Don't use them explicitly; this is like running with ``AUTOCOMMIT=1``.
+ `model_query()` will implicitly use a session when called without one
supplied. This is the ideal situation because it will allow queries
to be automatically retried if the database connection is interrupted.
- Note: Automatic retry will be enabled in a future patch.
+ .. note:: Automatic retry will be enabled in a future patch.
It is generally fine to issue several queries in a row like this. Even though
they may be run in separate transactions and/or separate sessions, each one
will see the data from the prior calls. If needed, undo- or rollback-like
functionality should be handled at a logical level. For an example, look at
- the code around quotas and reservation_rollback().
+ the code around quotas and `reservation_rollback()`.
+
+ Examples:
- Examples::
+ .. code:: python
def get_foo(context, foo):
return (model_query(context, models.Foo).
@@ -61,27 +65,26 @@ Recommended ways to use sessions within this framework:
return foo_ref
-* Within the scope of a single method, keeping all the reads and writes within
- the context managed by a single session. In this way, the session's __exit__
- handler will take care of calling flush() and commit() for you.
- If using this approach, you should not explicitly call flush() or commit().
- Any error within the context of the session will cause the session to emit
- a ROLLBACK. Database Errors like IntegrityError will be raised in
- session's __exit__ handler, and any try/except within the context managed
- by session will not be triggered. And catching other non-database errors in
- the session will not trigger the ROLLBACK, so exception handlers should
- always be outside the session, unless the developer wants to do a partial
- commit on purpose. If the connection is dropped before this is possible,
- the database will implicitly roll back the transaction.
+* Within the scope of a single method, keep all the reads and writes within
+ the context managed by a single session. In this way, the session's
+ `__exit__` handler will take care of calling `flush()` and `commit()` for
+ you. If using this approach, you should not explicitly call `flush()` or
+ `commit()`. Any error within the context of the session will cause the
+ session to emit a `ROLLBACK`. Database errors like `IntegrityError` will be
+ raised in `session`'s `__exit__` handler, and any try/except within the
+ context managed by `session` will not be triggered. And catching other
+ non-database errors in the session will not trigger the `ROLLBACK`, so
+ exception handlers should always be outside the session, unless the
+ developer wants to do a partial commit on purpose. If the connection is
+ dropped before this is possible, the database will implicitly roll back the
+ transaction.
- Note: statements in the session scope will not be automatically retried.
+ .. note:: Statements in the session scope will not be automatically retried.
If you create models within the session, they need to be added, but you
- do not need to call model.save()
+ do not need to call `model.save()`:
- ::
-
- ::
+ .. code:: python
def create_many_foo(context, foos):
session = get_session()
@@ -101,11 +104,16 @@ Recommended ways to use sessions within this framework:
filter_by(id=foo_ref['bar_id']).
update({'bar': newbar}))
- Note: update_bar is a trivially simple example of using "with session.begin".
- Whereas create_many_foo is a good example of when a transaction is needed,
- it is always best to use as few queries as possible. The two queries in
- update_bar can be better expressed using a single query which avoids
- the need for an explicit transaction. It can be expressed like so::
+ .. note:: `update_bar` is a trivially simple example of using
+ ``with session.begin``. Whereas `create_many_foo` is a good example of
+ when a transaction is needed, it is always best to use as few queries as
+ possible.
+
+ The two queries in `update_bar` can be better expressed using a single query
+ which avoids the need for an explicit transaction. It can be expressed like
+ so:
+
+ .. code:: python
def update_bar(context, foo_id, newbar):
subq = (model_query(context, models.Foo.id).
@@ -121,10 +129,12 @@ Recommended ways to use sessions within this framework:
UPDATE bar SET bar = ${newbar}
WHERE id=(SELECT bar_id FROM foo WHERE id = ${foo_id} LIMIT 1);
- Note: create_duplicate_foo is a trivially simple example of catching an
- exception while using "with session.begin". Here create two duplicate
- instances with same primary key, must catch the exception out of context
- managed by a single session:
+ .. note:: `create_duplicate_foo` is a trivially simple example of catching an
+ exception while using ``with session.begin``. Here we create two duplicate
+ instances with the same primary key; the exception must be caught outside
+ the context managed by a single session:
+
+ .. code:: python
def create_duplicate_foo(context):
foo1 = models.Foo()
@@ -140,7 +150,7 @@ Recommended ways to use sessions within this framework:
* Passing an active session between methods. Sessions should only be passed
to private methods. The private method must use a subtransaction; otherwise
- SQLAlchemy will throw an error when you call session.begin() on an existing
+ SQLAlchemy will throw an error when you call `session.begin()` on an existing
transaction. Public methods should not accept a session parameter and should
not be involved in sessions within the caller's scope.
@@ -153,7 +163,7 @@ Recommended ways to use sessions within this framework:
becomes less clear in this situation. When this is needed for code clarity,
it should be clearly documented.
- ::
+ .. code:: python
def myfunc(foo):
session = get_session()
@@ -174,13 +184,13 @@ There are some things which it is best to avoid:
* Don't keep a transaction open any longer than necessary.
- This means that your "with session.begin()" block should be as short
+ This means that your ``with session.begin()`` block should be as short
as possible, while still containing all the related calls for that
transaction.
-* Avoid "with_lockmode('UPDATE')" when possible.
+* Avoid ``with_lockmode('UPDATE')`` when possible.
- In MySQL/InnoDB, when a "SELECT ... FOR UPDATE" query does not match
+ In MySQL/InnoDB, when a ``SELECT ... FOR UPDATE`` query does not match
any rows, it will take a gap-lock. This is a form of write-lock on the
"gap" where no rows exist, and prevents any other writes to that space.
This can effectively prevent any INSERT into a table by locking the gap
@@ -191,15 +201,18 @@ There are some things which it is best to avoid:
number of rows matching a query, and if only one row is returned,
then issue the SELECT FOR UPDATE.
- The better long-term solution is to use INSERT .. ON DUPLICATE KEY UPDATE.
+ The better long-term solution is to use
+ ``INSERT .. ON DUPLICATE KEY UPDATE``.
However, this can not be done until the "deleted" columns are removed and
proper UNIQUE constraints are added to the tables.
Enabling soft deletes:
-* To use/enable soft-deletes, the SoftDeleteMixin must be added
- to your model class. For example::
+* To use/enable soft-deletes, the `SoftDeleteMixin` must be added
+ to your model class. For example:
+
+ .. code:: python
class NovaBase(models.SoftDeleteMixin, models.ModelBase):
pass
@@ -207,15 +220,16 @@ Enabling soft deletes:
Efficient use of soft deletes:
-* There are two possible ways to mark a record as deleted::
+* There are two possible ways to mark a record as deleted:
+ `model.soft_delete()` and `query.soft_delete()`.
- model.soft_delete() and query.soft_delete().
+ The `model.soft_delete()` method works with a single already-fetched entry.
+ `query.soft_delete()` makes only one db request for all entries that
+ correspond to the query.
- model.soft_delete() method works with single already fetched entry.
- query.soft_delete() makes only one db request for all entries that correspond
- to query.
+* In almost all cases you should use `query.soft_delete()`. Some examples:
-* In almost all cases you should use query.soft_delete(). Some examples::
+ .. code:: python
def soft_delete_bar():
count = model_query(BarModel).find(some_condition).soft_delete()
@@ -234,11 +248,11 @@ Efficient use of soft deletes:
if count == 0:
raise Exception("0 entries were soft deleted")
-* There is only one situation where model.soft_delete() is appropriate: when
+* There is only one situation where `model.soft_delete()` is appropriate: when
you fetch a single record, work with it, and mark it as deleted in the same
transaction.
- ::
+ .. code:: python
def soft_delete_bar_model():
session = get_session()
@@ -248,7 +262,9 @@ Efficient use of soft deletes:
bar_ref.soft_delete(session=session)
However, if you need to work with all entries that correspond to query and
- then soft delete them you should use query.soft_delete() method::
+ then soft delete them you should use the `query.soft_delete()` method:
+
+ .. code:: python
def soft_delete_multi_models():
session = get_session()
@@ -262,17 +278,19 @@ Efficient use of soft deletes:
# session and these entries are not used after this.
When working with many rows, it is very important to use query.soft_delete,
- which issues a single query. Using model.soft_delete(), as in the following
+ which issues a single query. Using `model.soft_delete()`, as in the following
example, is very inefficient.
- ::
+ .. code:: python
for bar_ref in bar_refs:
bar_ref.soft_delete(session=session)
# This will produce count(bar_refs) db requests.
+
"""
import functools
+import logging
import os.path
import re
import time
@@ -287,16 +305,15 @@ from sqlalchemy.sql.expression import literal_column
from keystone.openstack.common.db import exception
from keystone.openstack.common.gettextutils import _
-from keystone.openstack.common import log as logging
from keystone.openstack.common import timeutils
sqlite_db_opts = [
cfg.StrOpt('sqlite_db',
default='keystone.sqlite',
- help='the filename to use with sqlite'),
+ help='The file name to use with SQLite'),
cfg.BoolOpt('sqlite_synchronous',
default=True,
- help='If true, use synchronous mode for sqlite'),
+ help='If True, SQLite uses synchronous mode'),
]
database_opts = [
@@ -326,7 +343,7 @@ database_opts = [
group='DATABASE'),
cfg.DeprecatedOpt('idle_timeout',
group='sql')],
- help='timeout before idle sql connections are reaped'),
+ help='Timeout before idle SQL connections are reaped'),
cfg.IntOpt('min_pool_size',
default=1,
deprecated_opts=[cfg.DeprecatedOpt('sql_min_pool_size',
@@ -349,7 +366,7 @@ database_opts = [
group='DEFAULT'),
cfg.DeprecatedOpt('sql_max_retries',
group='DATABASE')],
- help='maximum db connection retries during startup. '
+ help='Maximum DB connection retries during startup. '
'(setting -1 implies an infinite retry count)'),
cfg.IntOpt('retry_interval',
default=10,
@@ -357,7 +374,7 @@ database_opts = [
group='DEFAULT'),
cfg.DeprecatedOpt('reconnect_interval',
group='DATABASE')],
- help='interval between retries of opening a sql connection'),
+ help='Interval between retries of opening a SQL connection'),
cfg.IntOpt('max_overflow',
default=None,
deprecated_opts=[cfg.DeprecatedOpt('sql_max_overflow',
@@ -687,7 +704,7 @@ def _is_db_connection_error(args):
# NOTE(adam_g): This is currently MySQL specific and needs to be extended
# to support Postgres and others.
# For the db2, the error code is -30081 since the db2 is still not ready
- conn_err_codes = ('2002', '2003', '2006', '-30081')
+ conn_err_codes = ('2002', '2003', '2006', '2013', '-30081')
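+ # 2013 is "Lost connection to MySQL server during query"; like the
+ # codes above it signals a dropped connection rather than a bad query.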
for err_code in conn_err_codes:
if args.find(err_code) != -1:
return True
@@ -739,13 +756,16 @@ def create_engine(sql_connection, sqlite_fk=False,
if engine.name in ['mysql', 'ibm_db_sa']:
callback = functools.partial(_ping_listener, engine)
sqlalchemy.event.listen(engine, 'checkout', callback)
- if mysql_traditional_mode:
- sqlalchemy.event.listen(engine, 'checkout', _set_mode_traditional)
- else:
- LOG.warning(_("This application has not enabled MySQL traditional"
- " mode, which means silent data corruption may"
- " occur. Please encourage the application"
- " developers to enable this mode."))
+ if engine.name == 'mysql':
+ if mysql_traditional_mode:
+ sqlalchemy.event.listen(engine, 'checkout',
+ _set_mode_traditional)
+ else:
+ LOG.warning(_("This application has not enabled MySQL "
+ "traditional mode, which means silent "
+ "data corruption may occur. "
+ "Please encourage the application "
+ "developers to enable this mode."))
elif 'sqlite' in connection_dict.drivername:
if not CONF.sqlite_synchronous:
sqlalchemy.event.listen(engine, 'connect',
diff --git a/keystone/openstack/common/fixture/config.py b/keystone/openstack/common/fixture/config.py
index 0bf90ff7a..9489b85a5 100644
--- a/keystone/openstack/common/fixture/config.py
+++ b/keystone/openstack/common/fixture/config.py
@@ -14,22 +14,17 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+
import fixtures
from oslo.config import cfg
import six
class Config(fixtures.Fixture):
- """Override some configuration values.
-
- The keyword arguments are the names of configuration options to
- override and their values.
+ """Allows overriding configuration settings for the test.
- If a group argument is supplied, the overrides are applied to
- the specified configuration option group.
+ `conf` will be reset on cleanup.
- All overrides are automatically cleared at the end of the current
- test by the reset() method, which is registered by addCleanup().
"""
def __init__(self, conf=cfg.CONF):
@@ -37,9 +32,54 @@ class Config(fixtures.Fixture):
def setUp(self):
super(Config, self).setUp()
+ # NOTE(morganfainberg): unregister must be added to cleanup before
+ # reset because cleanup runs in reverse order of registered items,
+ # and the reset must happen before options can be unregistered.
+ self.addCleanup(self._unregister_config_opts)
self.addCleanup(self.conf.reset)
+ self._registered_config_opts = {}
def config(self, **kw):
+ """Override configuration values.
+
+ The keyword arguments are the names of configuration options to
+ override and their values.
+
+ If a `group` argument is supplied, the overrides are applied to
+ the specified configuration option group, otherwise the overrides
+ are applied to the ``default`` group.
+
+ """
+
group = kw.pop('group', None)
for k, v in six.iteritems(kw):
self.conf.set_override(k, v, group)
+
+ def _unregister_config_opts(self):
+ for group in self._registered_config_opts:
+ self.conf.unregister_opts(self._registered_config_opts[group],
+ group=group)
+
+ def register_opt(self, opt, group=None):
+ """Register a single option for the test run.
+
+ Options registered in this manner will automatically be unregistered
+ during cleanup.
+
+ If a `group` argument is supplied, it will register the new option
+ to that group, otherwise the option is registered to the ``default``
+ group.
+ """
+ self.conf.register_opt(opt, group=group)
+ self._registered_config_opts.setdefault(group, set()).add(opt)
+
+ def register_opts(self, opts, group=None):
+ """Register multiple options for the test run.
+
+ This works in the same manner as register_opt() but takes a list of
+ options as the first argument. All options will be registered to the
+ same group if the ``group`` argument is supplied, otherwise all options
+ will be registered to the ``default`` group.
+ """
+ for opt in opts:
+ self.register_opt(opt, group=group)
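A minimal usage sketch of the extended fixture (the test class and option
names are illustrative only):

    from oslo.config import cfg
    import testtools

    from keystone.openstack.common.fixture import config

    class ExampleTest(testtools.TestCase):
        def test_override_and_register(self):
            fix = self.useFixture(config.Config())
            # Registered options are unregistered again on cleanup, after
            # the reset, per the ordering NOTE above.
            fix.register_opt(cfg.StrOpt('widget', default='a'))
            fix.config(widget='b')
            self.assertEqual('b', fix.conf.widget)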
diff --git a/keystone/openstack/common/fixture/lockutils.py b/keystone/openstack/common/fixture/lockutils.py
index 3e18bbf3f..68185ea37 100644
--- a/keystone/openstack/common/fixture/lockutils.py
+++ b/keystone/openstack/common/fixture/lockutils.py
@@ -15,7 +15,7 @@
import fixtures
-from keystone.openstack.common.lockutils import lock
+from keystone.openstack.common import lockutils
class LockFixture(fixtures.Fixture):
@@ -43,7 +43,7 @@ class LockFixture(fixtures.Fixture):
test method exits. (either by completing or raising an exception)
"""
def __init__(self, name, lock_file_prefix=None):
- self.mgr = lock(name, lock_file_prefix, True)
+ self.mgr = lockutils.lock(name, lock_file_prefix, True)
def setUp(self):
super(LockFixture, self).setUp()
diff --git a/keystone/openstack/common/fixture/mockpatch.py b/keystone/openstack/common/fixture/mockpatch.py
index 858e77cd0..a8ffeb370 100644
--- a/keystone/openstack/common/fixture/mockpatch.py
+++ b/keystone/openstack/common/fixture/mockpatch.py
@@ -22,14 +22,15 @@ import mock
class PatchObject(fixtures.Fixture):
"""Deal with code around mock."""
- def __init__(self, obj, attr, **kwargs):
+ def __init__(self, obj, attr, new=mock.DEFAULT, **kwargs):
self.obj = obj
self.attr = attr
self.kwargs = kwargs
+ self.new = new
def setUp(self):
super(PatchObject, self).setUp()
- _p = mock.patch.object(self.obj, self.attr, **self.kwargs)
+ _p = mock.patch.object(self.obj, self.attr, self.new, **self.kwargs)
self.mock = _p.start()
self.addCleanup(_p.stop)
@@ -38,12 +39,13 @@ class Patch(fixtures.Fixture):
"""Deal with code around mock.patch."""
- def __init__(self, obj, **kwargs):
+ def __init__(self, obj, new=mock.DEFAULT, **kwargs):
self.obj = obj
self.kwargs = kwargs
+ self.new = new
def setUp(self):
super(Patch, self).setUp()
- _p = mock.patch(self.obj, **self.kwargs)
+ _p = mock.patch(self.obj, self.new, **self.kwargs)
self.mock = _p.start()
self.addCleanup(_p.stop)
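Illustrative usage of the new `new` parameter from inside a test case (the
stub below is assumed, not part of the patch):

    import os.path

    # Replace os.path.exists with a fixed stub for the test's duration;
    # fixture cleanup restores the original automatically.
    self.useFixture(mockpatch.PatchObject(os.path, 'exists',
                                          new=lambda path: True))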
diff --git a/keystone/openstack/common/log.py b/keystone/openstack/common/log.py
index 1fbc4a538..5d68b12c3 100644
--- a/keystone/openstack/common/log.py
+++ b/keystone/openstack/common/log.py
@@ -41,7 +41,7 @@ from oslo.config import cfg
import six
from six import moves
-from keystone.openstack.common.gettextutils import _ # noqa
+from keystone.openstack.common.gettextutils import _
from keystone.openstack.common import importutils
from keystone.openstack.common import jsonutils
from keystone.openstack.common import local
@@ -391,9 +391,11 @@ class JSONFormatter(logging.Formatter):
def _create_logging_excepthook(product_name):
def logging_excepthook(exc_type, value, tb):
extra = {}
- if CONF.verbose:
+ if CONF.verbose or CONF.debug:
extra['exc_info'] = (exc_type, value, tb)
- getLogger(product_name).critical(str(value), **extra)
+ getLogger(product_name).critical(
+ "".join(traceback.format_exception_only(exc_type, value)),
+ **extra)
return logging_excepthook
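A note on the excepthook change: traceback.format_exception_only(exc_type,
value) returns the list of strings making up the final exception line, so
"".join(traceback.format_exception_only(ValueError, ValueError('boom')))
evaluates to 'ValueError: boom\n'. This sidesteps calling str() on the
exception value directly, which can fail for non-ASCII messages.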
@@ -543,7 +545,7 @@ class WritableLogger(object):
self.level = level
def write(self, msg):
- self.logger.log(self.level, msg)
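+ # Messages handed to this adapter (e.g. by eventlet's WSGI server)
+ # arrive with a trailing newline; strip it so records do not render
+ # as blank log lines.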
+ self.logger.log(self.level, msg.rstrip())
class ContextFormatter(logging.Formatter):
@@ -561,7 +563,7 @@ class ContextFormatter(logging.Formatter):
def format(self, record):
"""Uses contextstring if request_id is set, otherwise default."""
- # NOTE(sdague): default the fancier formating params
+ # NOTE(sdague): default the fancier formatting params
# to an empty string so we don't throw an exception if
# they get used
for key in ('instance', 'color'):
@@ -577,7 +579,7 @@ class ContextFormatter(logging.Formatter):
CONF.logging_debug_format_suffix):
self._fmt += " " + CONF.logging_debug_format_suffix
- # Cache this on the record, Logger will respect our formated copy
+ # Cache this on the record, Logger will respect our formatted copy
if record.exc_info:
record.exc_text = self.formatException(record.exc_info, record)
return logging.Formatter.format(self, record)
diff --git a/keystone/openstack/common/log_handler.py b/keystone/openstack/common/log_handler.py
deleted file mode 100644
index ea25d44a8..000000000
--- a/keystone/openstack/common/log_handler.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-import logging
-
-from keystone.openstack.common import notifier
-
-from oslo.config import cfg
-
-
-class PublishErrorsHandler(logging.Handler):
- def emit(self, record):
- if ('keystone.openstack.common.notifier.log_notifier' in
- cfg.CONF.notification_driver):
- return
- notifier.api.notify(None, 'error.publisher',
- 'error_notification',
- notifier.api.ERROR,
- dict(error=record.msg))
diff --git a/keystone/openstack/common/notifier/__init__.py b/keystone/openstack/common/notifier/__init__.py
deleted file mode 100644
index 45c3b46ae..000000000
--- a/keystone/openstack/common/notifier/__init__.py
+++ /dev/null
@@ -1,14 +0,0 @@
-# Copyright 2011 OpenStack Foundation.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
diff --git a/keystone/openstack/common/notifier/api.py b/keystone/openstack/common/notifier/api.py
deleted file mode 100644
index 51eb7eaed..000000000
--- a/keystone/openstack/common/notifier/api.py
+++ /dev/null
@@ -1,173 +0,0 @@
-# Copyright 2011 OpenStack Foundation.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import socket
-import uuid
-
-from oslo.config import cfg
-
-from keystone.openstack.common import context
-from keystone.openstack.common.gettextutils import _ # noqa
-from keystone.openstack.common import importutils
-from keystone.openstack.common import jsonutils
-from keystone.openstack.common import log as logging
-from keystone.openstack.common import timeutils
-
-
-LOG = logging.getLogger(__name__)
-
-notifier_opts = [
- cfg.MultiStrOpt('notification_driver',
- default=[],
- help='Driver or drivers to handle sending notifications'),
- cfg.StrOpt('default_notification_level',
- default='INFO',
- help='Default notification level for outgoing notifications'),
- cfg.StrOpt('default_publisher_id',
- default=None,
- help='Default publisher_id for outgoing notifications'),
-]
-
-CONF = cfg.CONF
-CONF.register_opts(notifier_opts)
-
-WARN = 'WARN'
-INFO = 'INFO'
-ERROR = 'ERROR'
-CRITICAL = 'CRITICAL'
-DEBUG = 'DEBUG'
-
-log_levels = (DEBUG, WARN, INFO, ERROR, CRITICAL)
-
-
-class BadPriorityException(Exception):
- pass
-
-
-def notify_decorator(name, fn):
- """Decorator for notify which is used from utils.monkey_patch().
-
- :param name: name of the function
- :param function: - object of the function
- :returns: function -- decorated function
-
- """
- def wrapped_func(*args, **kwarg):
- body = {}
- body['args'] = []
- body['kwarg'] = {}
- for arg in args:
- body['args'].append(arg)
- for key in kwarg:
- body['kwarg'][key] = kwarg[key]
-
- ctxt = context.get_context_from_function_and_args(fn, args, kwarg)
- notify(ctxt,
- CONF.default_publisher_id or socket.gethostname(),
- name,
- CONF.default_notification_level,
- body)
- return fn(*args, **kwarg)
- return wrapped_func
-
-
-def publisher_id(service, host=None):
- if not host:
- try:
- host = CONF.host
- except AttributeError:
- host = CONF.default_publisher_id or socket.gethostname()
- return "%s.%s" % (service, host)
-
-
-def notify(context, publisher_id, event_type, priority, payload):
- """Sends a notification using the specified driver
-
- :param publisher_id: the source worker_type.host of the message
- :param event_type: the literal type of event (ex. Instance Creation)
- :param priority: patterned after the enumeration of Python logging
- levels in the set (DEBUG, WARN, INFO, ERROR, CRITICAL)
- :param payload: A python dictionary of attributes
-
- Outgoing message format includes the above parameters, and appends the
- following:
-
- message_id
- a UUID representing the id for this notification
-
- timestamp
- the GMT timestamp the notification was sent at
-
- The composite message will be constructed as a dictionary of the above
- attributes, which will then be sent via the transport mechanism defined
- by the driver.
-
- Message example::
-
- {'message_id': str(uuid.uuid4()),
- 'publisher_id': 'compute.host1',
- 'timestamp': timeutils.utcnow(),
- 'priority': 'WARN',
- 'event_type': 'compute.create_instance',
- 'payload': {'instance_id': 12, ... }}
-
- """
- if priority not in log_levels:
- raise BadPriorityException(
- _('%s not in valid priorities') % priority)
-
- # Ensure everything is JSON serializable.
- payload = jsonutils.to_primitive(payload, convert_instances=True)
-
- msg = dict(message_id=str(uuid.uuid4()),
- publisher_id=publisher_id,
- event_type=event_type,
- priority=priority,
- payload=payload,
- timestamp=str(timeutils.utcnow()))
-
- for driver in _get_drivers():
- try:
- driver.notify(context, msg)
- except Exception as e:
- LOG.exception(_("Problem '%(e)s' attempting to "
- "send to notification system. "
- "Payload=%(payload)s")
- % dict(e=e, payload=payload))
-
-
-_drivers = None
-
-
-def _get_drivers():
- """Instantiate, cache, and return drivers based on the CONF."""
- global _drivers
- if _drivers is None:
- _drivers = {}
- for notification_driver in CONF.notification_driver:
- try:
- driver = importutils.import_module(notification_driver)
- _drivers[notification_driver] = driver
- except ImportError:
- LOG.exception(_("Failed to load notifier %s. "
- "These notifications will not be sent.") %
- notification_driver)
- return _drivers.values()
-
-
-def _reset_drivers():
- """Used by unit tests to reset the drivers."""
- global _drivers
- _drivers = None
diff --git a/keystone/openstack/common/notifier/log_notifier.py b/keystone/openstack/common/notifier/log_notifier.py
deleted file mode 100644
index fcf1f98e0..000000000
--- a/keystone/openstack/common/notifier/log_notifier.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# Copyright 2011 OpenStack Foundation.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo.config import cfg
-
-from keystone.openstack.common import jsonutils
-from keystone.openstack.common import log as logging
-
-
-CONF = cfg.CONF
-
-
-def notify(_context, message):
- """Notifies the recipient of the desired event given the model.
-
- Log notifications using OpenStack's default logging system.
- """
-
- priority = message.get('priority',
- CONF.default_notification_level)
- priority = priority.lower()
- logger = logging.getLogger(
- 'keystone.openstack.common.notification.%s' %
- message['event_type'])
- getattr(logger, priority)(jsonutils.dumps(message))
diff --git a/keystone/openstack/common/notifier/no_op_notifier.py b/keystone/openstack/common/notifier/no_op_notifier.py
deleted file mode 100644
index 13d946e36..000000000
--- a/keystone/openstack/common/notifier/no_op_notifier.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# Copyright 2011 OpenStack Foundation.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-def notify(_context, message):
- """Notifies the recipient of the desired event given the model."""
- pass
diff --git a/keystone/openstack/common/notifier/rpc_notifier.py b/keystone/openstack/common/notifier/rpc_notifier.py
deleted file mode 100644
index 30b89f21c..000000000
--- a/keystone/openstack/common/notifier/rpc_notifier.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# Copyright 2011 OpenStack Foundation.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo.config import cfg
-
-from keystone.openstack.common import context as req_context
-from keystone.openstack.common.gettextutils import _ # noqa
-from keystone.openstack.common import log as logging
-from keystone.openstack.common import rpc
-
-LOG = logging.getLogger(__name__)
-
-notification_topic_opt = cfg.ListOpt(
- 'notification_topics', default=['notifications', ],
- help='AMQP topic used for OpenStack notifications')
-
-CONF = cfg.CONF
-CONF.register_opt(notification_topic_opt)
-
-
-def notify(context, message):
- """Sends a notification via RPC."""
- if not context:
- context = req_context.get_admin_context()
- priority = message.get('priority',
- CONF.default_notification_level)
- priority = priority.lower()
- for topic in CONF.notification_topics:
- topic = '%s.%s' % (topic, priority)
- try:
- rpc.notify(context, topic, message)
- except Exception:
- LOG.exception(_("Could not send notification to %(topic)s. "
- "Payload=%(message)s"),
- {"topic": topic, "message": message})
diff --git a/keystone/openstack/common/notifier/rpc_notifier2.py b/keystone/openstack/common/notifier/rpc_notifier2.py
deleted file mode 100644
index 3d11644d7..000000000
--- a/keystone/openstack/common/notifier/rpc_notifier2.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# Copyright 2011 OpenStack Foundation.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-'''messaging based notification driver, with message envelopes'''
-
-from oslo.config import cfg
-
-from keystone.openstack.common import context as req_context
-from keystone.openstack.common.gettextutils import _ # noqa
-from keystone.openstack.common import log as logging
-from keystone.openstack.common import rpc
-
-LOG = logging.getLogger(__name__)
-
-notification_topic_opt = cfg.ListOpt(
- 'topics', default=['notifications', ],
- help='AMQP topic(s) used for OpenStack notifications')
-
-opt_group = cfg.OptGroup(name='rpc_notifier2',
- title='Options for rpc_notifier2')
-
-CONF = cfg.CONF
-CONF.register_group(opt_group)
-CONF.register_opt(notification_topic_opt, opt_group)
-
-
-def notify(context, message):
- """Sends a notification via RPC."""
- if not context:
- context = req_context.get_admin_context()
- priority = message.get('priority',
- CONF.default_notification_level)
- priority = priority.lower()
- for topic in CONF.rpc_notifier2.topics:
- topic = '%s.%s' % (topic, priority)
- try:
- rpc.notify(context, topic, message, envelope=True)
- except Exception:
- LOG.exception(_("Could not send notification to %(topic)s. "
- "Payload=%(message)s"),
- {"topic": topic, "message": message})
diff --git a/keystone/openstack/common/notifier/test_notifier.py b/keystone/openstack/common/notifier/test_notifier.py
deleted file mode 100644
index 96c1746bf..000000000
--- a/keystone/openstack/common/notifier/test_notifier.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Copyright 2011 OpenStack Foundation.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-NOTIFICATIONS = []
-
-
-def notify(_context, message):
- """Test notifier, stores notifications in memory for unittests."""
- NOTIFICATIONS.append(message)
diff --git a/keystone/openstack/common/policy.py b/keystone/openstack/common/policy.py
index 7bbf93f21..b1b05b411 100644
--- a/keystone/openstack/common/policy.py
+++ b/keystone/openstack/common/policy.py
@@ -55,6 +55,7 @@ as it allows particular rules to be explicitly disabled.
"""
import abc
+import ast
import re
from oslo.config import cfg
@@ -119,11 +120,16 @@ class Rules(dict):
# If the default rule isn't actually defined, do something
# reasonably intelligent
- if not self.default_rule or self.default_rule not in self:
+ if not self.default_rule:
raise KeyError(key)
if isinstance(self.default_rule, BaseCheck):
return self.default_rule
+
+ # We need to check this or we can get infinite recursion
+ if self.default_rule not in self:
+ raise KeyError(key)
+
elif isinstance(self.default_rule, six.string_types):
return self[self.default_rule]
@@ -839,6 +845,8 @@ class GenericCheck(Check):
tenant:%(tenant_id)s
role:compute:admin
+ True:%(user.enabled)s
+ 'Member':%(role.name)s
"""
# TODO(termie): do dict inspection via dot syntax
@@ -849,6 +857,12 @@ class GenericCheck(Check):
# present in Target return false
return False
- if self.kind in creds:
- return match == six.text_type(creds[self.kind])
- return False
+ try:
+ # Try to interpret self.kind as a literal
+ leftval = ast.literal_eval(self.kind)
+ except ValueError:
+ try:
+ leftval = creds[self.kind]
+ except KeyError:
+ return False
+ return match == six.text_type(leftval)
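A short sketch of the resulting matching behavior (illustrative values):

    import ast

    ast.literal_eval('True')    # -> True; compared against the rendered match
    ast.literal_eval('tenant')  # raises ValueError: a bare name is not a
                                # literal, so the check falls back to the
                                # creds[self.kind] lookup as before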
diff --git a/keystone/openstack/common/rpc/__init__.py b/keystone/openstack/common/rpc/__init__.py
deleted file mode 100644
index 0f3687082..000000000
--- a/keystone/openstack/common/rpc/__init__.py
+++ /dev/null
@@ -1,304 +0,0 @@
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-# Copyright 2011 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-A remote procedure call (rpc) abstraction.
-
-For some wrappers that add message versioning to rpc, see:
- rpc.dispatcher
- rpc.proxy
-"""
-
-import inspect
-
-from oslo.config import cfg
-
-from keystone.openstack.common.gettextutils import _ # noqa
-from keystone.openstack.common import importutils
-from keystone.openstack.common import local
-from keystone.openstack.common import log as logging
-
-
-LOG = logging.getLogger(__name__)
-
-
-rpc_opts = [
- cfg.StrOpt('rpc_backend',
- default='%s.impl_kombu' % __package__,
- help="The messaging module to use, defaults to kombu."),
- cfg.IntOpt('rpc_thread_pool_size',
- default=64,
- help='Size of RPC thread pool'),
- cfg.IntOpt('rpc_conn_pool_size',
- default=30,
- help='Size of RPC connection pool'),
- cfg.IntOpt('rpc_response_timeout',
- default=60,
- help='Seconds to wait for a response from call or multicall'),
- cfg.IntOpt('rpc_cast_timeout',
- default=30,
- help='Seconds to wait before a cast expires (TTL). '
- 'Only supported by impl_zmq.'),
- cfg.ListOpt('allowed_rpc_exception_modules',
- default=['nova.exception',
- 'cinder.exception',
- 'exceptions',
- ],
- help='Modules of exceptions that are permitted to be recreated'
- ' upon receiving exception data from an rpc call.'),
- cfg.BoolOpt('fake_rabbit',
- default=False,
- help='If passed, use a fake RabbitMQ provider'),
- cfg.StrOpt('control_exchange',
- default='openstack',
- help='AMQP exchange to connect to if using RabbitMQ or Qpid'),
-]
-
-CONF = cfg.CONF
-CONF.register_opts(rpc_opts)
-
-
-def set_defaults(control_exchange):
- cfg.set_defaults(rpc_opts,
- control_exchange=control_exchange)
-
-
-def create_connection(new=True):
- """Create a connection to the message bus used for rpc.
-
- For some example usage of creating a connection and some consumers on that
- connection, see nova.service.
-
- :param new: Whether or not to create a new connection. A new connection
- will be created by default. If new is False, the
- implementation is free to return an existing connection from a
- pool.
-
- :returns: An instance of openstack.common.rpc.common.Connection
- """
- return _get_impl().create_connection(CONF, new=new)
-
-
-def _check_for_lock():
- if not CONF.debug:
- return None
-
- if ((hasattr(local.strong_store, 'locks_held')
- and local.strong_store.locks_held)):
- stack = ' :: '.join([frame[3] for frame in inspect.stack()])
- LOG.warn(_('A RPC is being made while holding a lock. The locks '
- 'currently held are %(locks)s. This is probably a bug. '
- 'Please report it. Include the following: [%(stack)s].'),
- {'locks': local.strong_store.locks_held,
- 'stack': stack})
- return True
-
- return False
-
-
-def call(context, topic, msg, timeout=None, check_for_lock=False):
- """Invoke a remote method that returns something.
-
- :param context: Information that identifies the user that has made this
- request.
- :param topic: The topic to send the rpc message to. This correlates to the
- topic argument of
- openstack.common.rpc.common.Connection.create_consumer()
- and only applies when the consumer was created with
- fanout=False.
- :param msg: This is a dict in the form { "method" : "method_to_invoke",
- "args" : dict_of_kwargs }
- :param timeout: int, number of seconds to use for a response timeout.
- If set, this overrides the rpc_response_timeout option.
- :param check_for_lock: if True, a warning is emitted if a RPC call is made
- with a lock held.
-
- :returns: A dict from the remote method.
-
- :raises: openstack.common.rpc.common.Timeout if a complete response
- is not received before the timeout is reached.
- """
- if check_for_lock:
- _check_for_lock()
- return _get_impl().call(CONF, context, topic, msg, timeout)
-
-
-def cast(context, topic, msg):
- """Invoke a remote method that does not return anything.
-
- :param context: Information that identifies the user that has made this
- request.
- :param topic: The topic to send the rpc message to. This correlates to the
- topic argument of
- openstack.common.rpc.common.Connection.create_consumer()
- and only applies when the consumer was created with
- fanout=False.
- :param msg: This is a dict in the form { "method" : "method_to_invoke",
- "args" : dict_of_kwargs }
-
- :returns: None
- """
- return _get_impl().cast(CONF, context, topic, msg)
-
-
-def fanout_cast(context, topic, msg):
- """Broadcast a remote method invocation with no return.
-
- This method will get invoked on all consumers that were set up with this
- topic name and fanout=True.
-
- :param context: Information that identifies the user that has made this
- request.
- :param topic: The topic to send the rpc message to. This correlates to the
- topic argument of
- openstack.common.rpc.common.Connection.create_consumer()
- and only applies when the consumer was created with
- fanout=True.
- :param msg: This is a dict in the form { "method" : "method_to_invoke",
- "args" : dict_of_kwargs }
-
- :returns: None
- """
- return _get_impl().fanout_cast(CONF, context, topic, msg)
-
-
-def multicall(context, topic, msg, timeout=None, check_for_lock=False):
- """Invoke a remote method and get back an iterator.
-
- In this case, the remote method will be returning multiple values in
- separate messages, so the return values can be processed as the come in via
- an iterator.
-
- :param context: Information that identifies the user that has made this
- request.
- :param topic: The topic to send the rpc message to. This correlates to the
- topic argument of
- openstack.common.rpc.common.Connection.create_consumer()
- and only applies when the consumer was created with
- fanout=False.
- :param msg: This is a dict in the form { "method" : "method_to_invoke",
- "args" : dict_of_kwargs }
- :param timeout: int, number of seconds to use for a response timeout.
- If set, this overrides the rpc_response_timeout option.
- :param check_for_lock: if True, a warning is emitted if a RPC call is made
- with a lock held.
-
- :returns: An iterator. The iterator will yield a tuple (N, X) where N is
- an index that starts at 0 and increases by one for each value
- returned and X is the Nth value that was returned by the remote
- method.
-
- :raises: openstack.common.rpc.common.Timeout if a complete response
- is not received before the timeout is reached.
- """
- if check_for_lock:
- _check_for_lock()
- return _get_impl().multicall(CONF, context, topic, msg, timeout)
-
-
-def notify(context, topic, msg, envelope=False):
- """Send notification event.
-
- :param context: Information that identifies the user that has made this
- request.
- :param topic: The topic to send the notification to.
- :param msg: This is a dict of content of event.
- :param envelope: Set to True to enable message envelope for notifications.
-
- :returns: None
- """
- return _get_impl().notify(cfg.CONF, context, topic, msg, envelope)
-
-
-def cleanup():
- """Clean up resources in use by implementation.
-
- Clean up any resources that have been allocated by the RPC implementation.
- This is typically open connections to a messaging service. This function
- would get called before an application using this API exits to allow
- connections to get torn down cleanly.
-
- :returns: None
- """
- return _get_impl().cleanup()
-
-
-def cast_to_server(context, server_params, topic, msg):
- """Invoke a remote method that does not return anything.
-
- :param context: Information that identifies the user that has made this
- request.
- :param server_params: Connection information
- :param topic: The topic to send the notification to.
- :param msg: This is a dict in the form { "method" : "method_to_invoke",
- "args" : dict_of_kwargs }
-
- :returns: None
- """
- return _get_impl().cast_to_server(CONF, context, server_params, topic,
- msg)
-
-
-def fanout_cast_to_server(context, server_params, topic, msg):
- """Broadcast to a remote method invocation with no return.
-
- :param context: Information that identifies the user that has made this
- request.
- :param server_params: Connection information
- :param topic: The topic to send the notification to.
- :param msg: This is a dict in the form { "method" : "method_to_invoke",
- "args" : dict_of_kwargs }
-
- :returns: None
- """
- return _get_impl().fanout_cast_to_server(CONF, context, server_params,
- topic, msg)
-
-
-def queue_get_for(context, topic, host):
- """Get a queue name for a given topic + host.
-
- This function only works if this naming convention is followed on the
- consumer side, as well. For example, in nova, every instance of the
- nova-foo service calls create_consumer() for two topics:
-
- foo
- foo.<host>
-
- Messages sent to the 'foo' topic are distributed to exactly one instance of
- the nova-foo service. The services are chosen in a round-robin fashion.
- Messages sent to the 'foo.<host>' topic are sent to the nova-foo service on
- <host>.
- """
- return '%s.%s' % (topic, host) if host else topic
-
-
-_RPCIMPL = None
-
-
-def _get_impl():
- """Delay import of rpc_backend until configuration is loaded."""
- global _RPCIMPL
- if _RPCIMPL is None:
- try:
- _RPCIMPL = importutils.import_module(CONF.rpc_backend)
- except ImportError:
- # For backwards compatibility with older nova config.
- impl = CONF.rpc_backend.replace('nova.rpc',
- 'nova.openstack.common.rpc')
- _RPCIMPL = importutils.import_module(impl)
- return _RPCIMPL
diff --git a/keystone/openstack/common/rpc/amqp.py b/keystone/openstack/common/rpc/amqp.py
deleted file mode 100644
index a16e12a85..000000000
--- a/keystone/openstack/common/rpc/amqp.py
+++ /dev/null
@@ -1,637 +0,0 @@
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-# Copyright 2011 - 2012, Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Shared code between AMQP based openstack.common.rpc implementations.
-
-The code in this module is shared between the rpc implementations based on
-AMQP. Specifically, this includes impl_kombu and impl_qpid. impl_carrot also
-uses AMQP, but is deprecated and predates this code.
-"""
-
-import collections
-import inspect
-import sys
-import uuid
-
-from eventlet import greenpool
-from eventlet import pools
-from eventlet import queue
-from eventlet import semaphore
-from oslo.config import cfg
-import six
-
-
-from keystone.openstack.common import excutils
-from keystone.openstack.common.gettextutils import _ # noqa
-from keystone.openstack.common import local
-from keystone.openstack.common import log as logging
-from keystone.openstack.common.rpc import common as rpc_common
-
-
-amqp_opts = [
- cfg.BoolOpt('amqp_durable_queues',
- default=False,
- deprecated_name='rabbit_durable_queues',
- deprecated_group='DEFAULT',
- help='Use durable queues in amqp.'),
- cfg.BoolOpt('amqp_auto_delete',
- default=False,
- help='Auto-delete queues in amqp.'),
-]
-
-cfg.CONF.register_opts(amqp_opts)
-
-UNIQUE_ID = '_unique_id'
-LOG = logging.getLogger(__name__)
-
-
-class Pool(pools.Pool):
- """Class that implements a Pool of Connections."""
- def __init__(self, conf, connection_cls, *args, **kwargs):
- self.connection_cls = connection_cls
- self.conf = conf
- kwargs.setdefault("max_size", self.conf.rpc_conn_pool_size)
- kwargs.setdefault("order_as_stack", True)
- super(Pool, self).__init__(*args, **kwargs)
- self.reply_proxy = None
-
- # TODO(comstud): Timeout connections not used in a while
- def create(self):
- LOG.debug(_('Pool creating new connection'))
- return self.connection_cls(self.conf)
-
- def empty(self):
- while self.free_items:
- self.get().close()
- # Force a new connection pool to be created.
- # Note that this was added due to failing unit test cases. The issue
- # is the above "while loop" gets all the cached connections from the
- # pool and closes them, but never returns them to the pool, a pool
- # leak. The unit tests hang waiting for an item to be returned to the
- # pool. The unit tests get here via the tearDown() method. In the run
- # time code, it gets here via cleanup() and only appears in service.py
- # just before doing a sys.exit(), so cleanup() only happens once and
- # the leakage is not a problem.
- self.connection_cls.pool = None
-
-
-_pool_create_sem = semaphore.Semaphore()
-
-
-def get_connection_pool(conf, connection_cls):
- with _pool_create_sem:
- # Make sure only one thread tries to create the connection pool.
- if not connection_cls.pool:
- connection_cls.pool = Pool(conf, connection_cls)
- return connection_cls.pool
-
-
-class ConnectionContext(rpc_common.Connection):
- """The class that is actually returned to the create_connection() caller.
-
- This is essentially a wrapper around Connection that supports 'with'.
- It can also return a new Connection, or one from a pool.
-
- The function will also catch when an instance of this class is to be
- deleted. With that we can return Connections to the pool on exceptions
- and so forth without making the caller be responsible for catching them.
- If possible the function makes sure to return a connection to the pool.
- """
-
- def __init__(self, conf, connection_pool, pooled=True, server_params=None):
- """Create a new connection, or get one from the pool."""
- self.connection = None
- self.conf = conf
- self.connection_pool = connection_pool
- if pooled:
- self.connection = connection_pool.get()
- else:
- self.connection = connection_pool.connection_cls(
- conf,
- server_params=server_params)
- self.pooled = pooled
-
- def __enter__(self):
- """When with ConnectionContext() is used, return self."""
- return self
-
- def _done(self):
- """If the connection came from a pool, clean it up and put it back.
- If it did not come from a pool, close it.
- """
- if self.connection:
- if self.pooled:
- # Reset the connection so it's ready for the next caller
- # to grab from the pool
- self.connection.reset()
- self.connection_pool.put(self.connection)
- else:
- try:
- self.connection.close()
- except Exception:
- pass
- self.connection = None
-
- def __exit__(self, exc_type, exc_value, tb):
- """End of 'with' statement. We're done here."""
- self._done()
-
- def __del__(self):
- """Caller is done with this connection. Make sure we cleaned up."""
- self._done()
-
- def close(self):
- """Caller is done with this connection."""
- self._done()
-
- def create_consumer(self, topic, proxy, fanout=False):
- self.connection.create_consumer(topic, proxy, fanout)
-
- def create_worker(self, topic, proxy, pool_name):
- self.connection.create_worker(topic, proxy, pool_name)
-
- def join_consumer_pool(self, callback, pool_name, topic, exchange_name,
- ack_on_error=True):
- self.connection.join_consumer_pool(callback,
- pool_name,
- topic,
- exchange_name,
- ack_on_error)
-
- def consume_in_thread(self):
- self.connection.consume_in_thread()
-
- def __getattr__(self, key):
- """Proxy all other calls to the Connection instance."""
- if self.connection:
- return getattr(self.connection, key)
- else:
- raise rpc_common.InvalidRPCConnectionReuse()
-
-
-class ReplyProxy(ConnectionContext):
- """Connection class for RPC replies / callbacks."""
- def __init__(self, conf, connection_pool):
- self._call_waiters = {}
- self._num_call_waiters = 0
- self._num_call_waiters_wrn_threshold = 10
- self._reply_q = 'reply_' + uuid.uuid4().hex
- super(ReplyProxy, self).__init__(conf, connection_pool, pooled=False)
- self.declare_direct_consumer(self._reply_q, self._process_data)
- self.consume_in_thread()
-
- def _process_data(self, message_data):
- msg_id = message_data.pop('_msg_id', None)
- waiter = self._call_waiters.get(msg_id)
- if not waiter:
- LOG.warn(_('No calling threads waiting for msg_id : %(msg_id)s'
- ', message : %(data)s'), {'msg_id': msg_id,
- 'data': message_data})
- LOG.warn(_('_call_waiters: %s') % str(self._call_waiters))
- else:
- waiter.put(message_data)
-
- def add_call_waiter(self, waiter, msg_id):
- self._num_call_waiters += 1
- if self._num_call_waiters > self._num_call_waiters_wrn_threshold:
- LOG.warn(_('Number of call waiters is greater than warning '
- 'threshold: %d. There could be a MulticallProxyWaiter '
- 'leak.') % self._num_call_waiters_wrn_threshold)
- self._num_call_waiters_wrn_threshold *= 2
- self._call_waiters[msg_id] = waiter
-
- def del_call_waiter(self, msg_id):
- self._num_call_waiters -= 1
- del self._call_waiters[msg_id]
-
- def get_reply_q(self):
- return self._reply_q
-
-
-def msg_reply(conf, msg_id, reply_q, connection_pool, reply=None,
- failure=None, ending=False, log_failure=True):
- """Sends a reply or an error on the channel signified by msg_id.
-
- Failure should be a sys.exc_info() tuple.
-
- """
- with ConnectionContext(conf, connection_pool) as conn:
- if failure:
- failure = rpc_common.serialize_remote_exception(failure,
- log_failure)
-
- msg = {'result': reply, 'failure': failure}
- if ending:
- msg['ending'] = True
- _add_unique_id(msg)
- # If a reply_q exists, add the msg_id to the reply and pass the
- # reply_q to direct_send() to use it as the response queue.
- # Otherwise use the msg_id for backward compatibility.
- if reply_q:
- msg['_msg_id'] = msg_id
- conn.direct_send(reply_q, rpc_common.serialize_msg(msg))
- else:
- conn.direct_send(msg_id, rpc_common.serialize_msg(msg))
-
-
-class RpcContext(rpc_common.CommonRpcContext):
- """Context that supports replying to a rpc.call."""
- def __init__(self, **kwargs):
- self.msg_id = kwargs.pop('msg_id', None)
- self.reply_q = kwargs.pop('reply_q', None)
- self.conf = kwargs.pop('conf')
- super(RpcContext, self).__init__(**kwargs)
-
- def deepcopy(self):
- values = self.to_dict()
- values['conf'] = self.conf
- values['msg_id'] = self.msg_id
- values['reply_q'] = self.reply_q
- return self.__class__(**values)
-
- def reply(self, reply=None, failure=None, ending=False,
- connection_pool=None, log_failure=True):
- if self.msg_id:
- msg_reply(self.conf, self.msg_id, self.reply_q, connection_pool,
- reply, failure, ending, log_failure)
- if ending:
- self.msg_id = None
-
-
-def unpack_context(conf, msg):
- """Unpack context from msg."""
- context_dict = {}
- for key in list(msg.keys()):
- # NOTE(vish): Some versions of python don't like unicode keys
- # in kwargs.
- key = str(key)
- if key.startswith('_context_'):
- value = msg.pop(key)
- context_dict[key[9:]] = value
- context_dict['msg_id'] = msg.pop('_msg_id', None)
- context_dict['reply_q'] = msg.pop('_reply_q', None)
- context_dict['conf'] = conf
- ctx = RpcContext.from_dict(context_dict)
- rpc_common._safe_log(LOG.debug, _('unpacked context: %s'), ctx.to_dict())
- return ctx
-
-
-def pack_context(msg, context):
- """Pack context into msg.
-
- Values for message keys need to be less than 255 chars, so we pull
- context out into a bunch of separate keys. If we want to support
- more arguments in rabbit messages, we may want to do the same
- for args at some point.
-
- """
- if isinstance(context, dict):
- context_d = dict([('_context_%s' % key, value)
- for (key, value) in six.iteritems(context)])
- else:
- context_d = dict([('_context_%s' % key, value)
- for (key, value) in
- six.iteritems(context.to_dict())])
-
- msg.update(context_d)
-
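As an illustrative sketch (payload and context values are hypothetical),
pack_context() flattens a context dict into prefixed message keys, and
unpack_context() above reverses the transformation:

    msg = {'method': 'echo', 'args': {'value': 42}}
    pack_context(msg, {'user': 'admin', 'is_admin': True})
    # msg is now:
    # {'method': 'echo', 'args': {'value': 42},
    #  '_context_user': 'admin', '_context_is_admin': True}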
-
-class _MsgIdCache(object):
- """This class checks any duplicate messages."""
-
-    # NOTE: This value could be made a configuration option, but it
-    #       rarely needs to be changed, so it is left as a static
-    #       value for now.
- DUP_MSG_CHECK_SIZE = 16
-
- def __init__(self, **kwargs):
- self.prev_msgids = collections.deque([],
- maxlen=self.DUP_MSG_CHECK_SIZE)
-
- def check_duplicate_message(self, message_data):
- """AMQP consumers may read same message twice when exceptions occur
- before ack is returned. This method prevents doing it.
- """
- if UNIQUE_ID in message_data:
- msg_id = message_data[UNIQUE_ID]
- if msg_id not in self.prev_msgids:
- self.prev_msgids.append(msg_id)
- else:
- raise rpc_common.DuplicateMessageError(msg_id=msg_id)
-
-
-def _add_unique_id(msg):
- """Add unique_id for checking duplicate messages."""
- unique_id = uuid.uuid4().hex
- msg.update({UNIQUE_ID: unique_id})
- LOG.debug(_('UNIQUE_ID is %s.') % (unique_id))
-
-
-class _ThreadPoolWithWait(object):
- """Base class for a delayed invocation manager.
-
- Used by the Connection class to start up green threads
- to handle incoming messages.
- """
-
- def __init__(self, conf, connection_pool):
- self.pool = greenpool.GreenPool(conf.rpc_thread_pool_size)
- self.connection_pool = connection_pool
- self.conf = conf
-
- def wait(self):
- """Wait for all callback threads to exit."""
- self.pool.waitall()
-
-
-class CallbackWrapper(_ThreadPoolWithWait):
- """Wraps a straight callback.
-
- Allows it to be invoked in a green thread.
- """
-
- def __init__(self, conf, callback, connection_pool,
- wait_for_consumers=False):
- """Initiates CallbackWrapper object.
-
- :param conf: cfg.CONF instance
- :param callback: a callable (probably a function)
- :param connection_pool: connection pool as returned by
- get_connection_pool()
- :param wait_for_consumers: wait for all green threads to
- complete and raise the last
- caught exception, if any.
-
- """
- super(CallbackWrapper, self).__init__(
- conf=conf,
- connection_pool=connection_pool,
- )
- self.callback = callback
- self.wait_for_consumers = wait_for_consumers
- self.exc_info = None
-
- def _wrap(self, message_data, **kwargs):
- """Wrap the callback invocation to catch exceptions.
- """
- try:
- self.callback(message_data, **kwargs)
- except Exception:
- self.exc_info = sys.exc_info()
-
- def __call__(self, message_data):
- self.exc_info = None
- self.pool.spawn_n(self._wrap, message_data)
-
- if self.wait_for_consumers:
- self.pool.waitall()
- if self.exc_info:
- raise self.exc_info[1], None, self.exc_info[2]
-
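The three-expression raise above is Python 2-only syntax. A portable
sketch of the same re-raise, using the six library this codebase already
depends on, would be:

    # Re-raise the saved exception, preserving the original traceback.
    six.reraise(self.exc_info[0], self.exc_info[1], self.exc_info[2])
    # or, equivalently:
    six.reraise(*self.exc_info)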
-
-class ProxyCallback(_ThreadPoolWithWait):
- """Calls methods on a proxy object based on method and args."""
-
- def __init__(self, conf, proxy, connection_pool):
- super(ProxyCallback, self).__init__(
- conf=conf,
- connection_pool=connection_pool,
- )
- self.proxy = proxy
- self.msg_id_cache = _MsgIdCache()
-
- def __call__(self, message_data):
- """Consumer callback to call a method on a proxy object.
-
- Parses the message for validity and fires off a thread to call the
- proxy object method.
-
- Message data should be a dictionary with two keys:
- method: string representing the method to call
- args: dictionary of arg: value
-
- Example: {'method': 'echo', 'args': {'value': 42}}
-
- """
- # It is important to clear the context here, because at this point
- # the previous context is stored in local.store.context
- if hasattr(local.store, 'context'):
- del local.store.context
- rpc_common._safe_log(LOG.debug, _('received %s'), message_data)
- self.msg_id_cache.check_duplicate_message(message_data)
- ctxt = unpack_context(self.conf, message_data)
- method = message_data.get('method')
- args = message_data.get('args', {})
- version = message_data.get('version')
- namespace = message_data.get('namespace')
- if not method:
- LOG.warn(_('no method for message: %s') % message_data)
- ctxt.reply(_('No method for message: %s') % message_data,
- connection_pool=self.connection_pool)
- return
- self.pool.spawn_n(self._process_data, ctxt, version, method,
- namespace, args)
-
- def _process_data(self, ctxt, version, method, namespace, args):
- """Process a message in a new thread.
-
- If the proxy object we have has a dispatch method
- (see rpc.dispatcher.RpcDispatcher), pass it the version,
- method, and args and let it dispatch as appropriate. If not, use
- the old behavior of magically calling the specified method on the
- proxy we have here.
- """
- ctxt.update_store()
- try:
- rval = self.proxy.dispatch(ctxt, version, method, namespace,
- **args)
- # Check if the result was a generator
- if inspect.isgenerator(rval):
- for x in rval:
- ctxt.reply(x, None, connection_pool=self.connection_pool)
- else:
- ctxt.reply(rval, None, connection_pool=self.connection_pool)
- # This final None tells multicall that it is done.
- ctxt.reply(ending=True, connection_pool=self.connection_pool)
- except rpc_common.ClientException as e:
- LOG.debug(_('Expected exception during message handling (%s)') %
- e._exc_info[1])
- ctxt.reply(None, e._exc_info,
- connection_pool=self.connection_pool,
- log_failure=False)
- except Exception:
- # sys.exc_info() is deleted by LOG.exception().
- exc_info = sys.exc_info()
- LOG.error(_('Exception during message handling'),
- exc_info=exc_info)
- ctxt.reply(None, exc_info, connection_pool=self.connection_pool)
-
-
-class MulticallProxyWaiter(object):
- def __init__(self, conf, msg_id, timeout, connection_pool):
- self._msg_id = msg_id
- self._timeout = timeout or conf.rpc_response_timeout
- self._reply_proxy = connection_pool.reply_proxy
- self._done = False
- self._got_ending = False
- self._conf = conf
- self._dataqueue = queue.LightQueue()
- # Add this caller to the reply proxy's call_waiters
- self._reply_proxy.add_call_waiter(self, self._msg_id)
- self.msg_id_cache = _MsgIdCache()
-
- def put(self, data):
- self._dataqueue.put(data)
-
- def done(self):
- if self._done:
- return
- self._done = True
- # Remove this caller from reply proxy's call_waiters
- self._reply_proxy.del_call_waiter(self._msg_id)
-
- def _process_data(self, data):
- result = None
- self.msg_id_cache.check_duplicate_message(data)
- if data['failure']:
- failure = data['failure']
- result = rpc_common.deserialize_remote_exception(self._conf,
- failure)
- elif data.get('ending', False):
- self._got_ending = True
- else:
- result = data['result']
- return result
-
- def __iter__(self):
- """Return a result until we get a reply with an 'ending' flag."""
- if self._done:
- raise StopIteration
- while True:
- try:
- data = self._dataqueue.get(timeout=self._timeout)
- result = self._process_data(data)
- except queue.Empty:
- self.done()
- raise rpc_common.Timeout()
- except Exception:
- with excutils.save_and_reraise_exception():
- self.done()
- if self._got_ending:
- self.done()
- raise StopIteration
- if isinstance(result, Exception):
- self.done()
- raise result
- yield result
-
-
-def create_connection(conf, new, connection_pool):
- """Create a connection."""
- return ConnectionContext(conf, connection_pool, pooled=not new)
-
-
-_reply_proxy_create_sem = semaphore.Semaphore()
-
-
-def multicall(conf, context, topic, msg, timeout, connection_pool):
- """Make a call that returns multiple times."""
- LOG.debug(_('Making synchronous call on %s ...'), topic)
- msg_id = uuid.uuid4().hex
- msg.update({'_msg_id': msg_id})
- LOG.debug(_('MSG_ID is %s') % (msg_id))
- _add_unique_id(msg)
- pack_context(msg, context)
-
- with _reply_proxy_create_sem:
- if not connection_pool.reply_proxy:
- connection_pool.reply_proxy = ReplyProxy(conf, connection_pool)
- msg.update({'_reply_q': connection_pool.reply_proxy.get_reply_q()})
- wait_msg = MulticallProxyWaiter(conf, msg_id, timeout, connection_pool)
- with ConnectionContext(conf, connection_pool) as conn:
- conn.topic_send(topic, rpc_common.serialize_msg(msg), timeout)
- return wait_msg
-
-
-def call(conf, context, topic, msg, timeout, connection_pool):
- """Sends a message on a topic and wait for a response."""
- rv = multicall(conf, context, topic, msg, timeout, connection_pool)
- # NOTE(vish): return the last result from the multicall
- rv = list(rv)
- if not rv:
- return
- return rv[-1]
-
-
-def cast(conf, context, topic, msg, connection_pool):
- """Sends a message on a topic without waiting for a response."""
- LOG.debug(_('Making asynchronous cast on %s...'), topic)
- _add_unique_id(msg)
- pack_context(msg, context)
- with ConnectionContext(conf, connection_pool) as conn:
- conn.topic_send(topic, rpc_common.serialize_msg(msg))
-
-
-def fanout_cast(conf, context, topic, msg, connection_pool):
- """Sends a message on a fanout exchange without waiting for a response."""
- LOG.debug(_('Making asynchronous fanout cast...'))
- _add_unique_id(msg)
- pack_context(msg, context)
- with ConnectionContext(conf, connection_pool) as conn:
- conn.fanout_send(topic, rpc_common.serialize_msg(msg))
-
-
-def cast_to_server(conf, context, server_params, topic, msg, connection_pool):
- """Sends a message on a topic to a specific server."""
- _add_unique_id(msg)
- pack_context(msg, context)
- with ConnectionContext(conf, connection_pool, pooled=False,
- server_params=server_params) as conn:
- conn.topic_send(topic, rpc_common.serialize_msg(msg))
-
-
-def fanout_cast_to_server(conf, context, server_params, topic, msg,
- connection_pool):
- """Sends a message on a fanout exchange to a specific server."""
- _add_unique_id(msg)
- pack_context(msg, context)
- with ConnectionContext(conf, connection_pool, pooled=False,
- server_params=server_params) as conn:
- conn.fanout_send(topic, rpc_common.serialize_msg(msg))
-
-
-def notify(conf, context, topic, msg, connection_pool, envelope):
- """Sends a notification event on a topic."""
- LOG.debug(_('Sending %(event_type)s on %(topic)s'),
- dict(event_type=msg.get('event_type'),
- topic=topic))
- _add_unique_id(msg)
- pack_context(msg, context)
- with ConnectionContext(conf, connection_pool) as conn:
- if envelope:
- msg = rpc_common.serialize_msg(msg)
- conn.notify_send(topic, msg)
-
-
-def cleanup(connection_pool):
- if connection_pool:
- connection_pool.empty()
-
-
-def get_control_exchange(conf):
- return conf.control_exchange
diff --git a/keystone/openstack/common/rpc/common.py b/keystone/openstack/common/rpc/common.py
deleted file mode 100644
index 9290b7bd9..000000000
--- a/keystone/openstack/common/rpc/common.py
+++ /dev/null
@@ -1,504 +0,0 @@
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-# Copyright 2011 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import copy
-import sys
-import traceback
-
-from oslo.config import cfg
-import six
-
-from keystone.openstack.common.gettextutils import _ # noqa
-from keystone.openstack.common import importutils
-from keystone.openstack.common import jsonutils
-from keystone.openstack.common import local
-from keystone.openstack.common import log as logging
-from keystone.openstack.common import versionutils
-
-
-CONF = cfg.CONF
-LOG = logging.getLogger(__name__)
-
-
-_RPC_ENVELOPE_VERSION = '2.0'
-'''RPC Envelope Version.
-
-This version number applies to the top level structure of messages sent out.
-It does *not* apply to the message payload, which must be versioned
-independently. For example, when using rpc APIs, a version number is applied
-for changes to the API being exposed over rpc. This version number is handled
-in the rpc proxy and dispatcher modules.
-
-This version number applies to the message envelope that is used in the
-serialization done inside the rpc layer. See serialize_msg() and
-deserialize_msg().
-
-The current message format (version 2.0) is very simple. It is::
-
- {
- 'oslo.version': <RPC Envelope Version as a String>,
- 'oslo.message': <Application Message Payload, JSON encoded>
- }
-
-Message format version '1.0' is just considered to be the messages we sent
-without a message envelope.
-
-So, the current message envelope just includes the envelope version. It may
-eventually contain additional information, such as a signature for the message
-payload.
-
-We will JSON encode the application message payload. The message envelope,
-which includes the JSON encoded application message body, will be passed down
-to the messaging libraries as a dict.
-'''
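Concretely, enveloping a payload with serialize_msg() (defined later in
this module) produces something like the following (illustrative values;
JSON key order may differ):

    raw_msg = {'method': 'echo', 'args': {'value': 42}}
    serialize_msg(raw_msg)
    # {'oslo.version': '2.0',
    #  'oslo.message': '{"method": "echo", "args": {"value": 42}}'}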
-
-_VERSION_KEY = 'oslo.version'
-_MESSAGE_KEY = 'oslo.message'
-
-_REMOTE_POSTFIX = '_Remote'
-
-
-class RPCException(Exception):
- msg_fmt = _("An unknown RPC related exception occurred.")
-
- def __init__(self, message=None, **kwargs):
- self.kwargs = kwargs
-
- if not message:
- try:
- message = self.msg_fmt % kwargs
-
- except Exception:
- # kwargs doesn't match a variable in the message
- # log the issue and the kwargs
- LOG.exception(_('Exception in string format operation'))
- for name, value in six.iteritems(kwargs):
- LOG.error("%s: %s" % (name, value))
- # at least get the core message out if something happened
- message = self.msg_fmt
-
- super(RPCException, self).__init__(message)
-
-
-class RemoteError(RPCException):
- """Signifies that a remote class has raised an exception.
-
- Contains a string representation of the type of the original exception,
- the value of the original exception, and the traceback. These are
- sent to the parent as a joined string so printing the exception
- contains all of the relevant info.
-
- """
- msg_fmt = _("Remote error: %(exc_type)s %(value)s\n%(traceback)s.")
-
- def __init__(self, exc_type=None, value=None, traceback=None):
- self.exc_type = exc_type
- self.value = value
- self.traceback = traceback
- super(RemoteError, self).__init__(exc_type=exc_type,
- value=value,
- traceback=traceback)
-
-
-class Timeout(RPCException):
- """Signifies that a timeout has occurred.
-
- This exception is raised if the rpc_response_timeout is reached while
- waiting for a response from the remote side.
- """
- msg_fmt = _('Timeout while waiting on RPC response - '
- 'topic: "%(topic)s", RPC method: "%(method)s" '
- 'info: "%(info)s"')
-
- def __init__(self, info=None, topic=None, method=None):
- """Initiates Timeout object.
-
- :param info: Extra info to convey to the user
- :param topic: The topic that the rpc call was sent to
- :param rpc_method_name: The name of the rpc method being
- called
- """
- self.info = info
- self.topic = topic
- self.method = method
- super(Timeout, self).__init__(
- None,
- info=info or _('<unknown>'),
- topic=topic or _('<unknown>'),
- method=method or _('<unknown>'))
-
-
-class DuplicateMessageError(RPCException):
- msg_fmt = _("Found duplicate message(%(msg_id)s). Skipping it.")
-
-
-class InvalidRPCConnectionReuse(RPCException):
- msg_fmt = _("Invalid reuse of an RPC connection.")
-
-
-class UnsupportedRpcVersion(RPCException):
- msg_fmt = _("Specified RPC version, %(version)s, not supported by "
- "this endpoint.")
-
-
-class UnsupportedRpcEnvelopeVersion(RPCException):
- msg_fmt = _("Specified RPC envelope version, %(version)s, "
- "not supported by this endpoint.")
-
-
-class RpcVersionCapError(RPCException):
- msg_fmt = _("Specified RPC version cap, %(version_cap)s, is too low")
-
-
-class Connection(object):
- """A connection, returned by rpc.create_connection().
-
- This class represents a connection to the message bus used for rpc.
- An instance of this class should never be created by users of the rpc API.
- Use rpc.create_connection() instead.
- """
- def close(self):
- """Close the connection.
-
- This method must be called when the connection will no longer be used.
-        It ensures that any resources associated with the connection, such
-        as the underlying network connection, are cleaned up.
- """
- raise NotImplementedError()
-
- def create_consumer(self, topic, proxy, fanout=False):
- """Create a consumer on this connection.
-
- A consumer is associated with a message queue on the backend message
- bus. The consumer will read messages from the queue, unpack them, and
- dispatch them to the proxy object. The contents of the message pulled
- off of the queue will determine which method gets called on the proxy
- object.
-
- :param topic: This is a name associated with what to consume from.
- Multiple instances of a service may consume from the same
- topic. For example, all instances of nova-compute consume
- from a queue called "compute". In that case, the
- messages will get distributed amongst the consumers in a
- round-robin fashion if fanout=False. If fanout=True,
- every consumer associated with this topic will get a
- copy of every message.
- :param proxy: The object that will handle all incoming messages.
- :param fanout: Whether or not this is a fanout topic. See the
- documentation for the topic parameter for some
- additional comments on this.
- """
- raise NotImplementedError()
-
- def create_worker(self, topic, proxy, pool_name):
- """Create a worker on this connection.
-
- A worker is like a regular consumer of messages directed to a
- topic, except that it is part of a set of such consumers (the
- "pool") which may run in parallel. Every pool of workers will
- receive a given message, but only one worker in the pool will
- be asked to process it. Load is distributed across the members
- of the pool in round-robin fashion.
-
- :param topic: This is a name associated with what to consume from.
- Multiple instances of a service may consume from the same
- topic.
- :param proxy: The object that will handle all incoming messages.
- :param pool_name: String containing the name of the pool of workers
- """
- raise NotImplementedError()
-
- def join_consumer_pool(self, callback, pool_name, topic, exchange_name):
- """Register as a member of a group of consumers.
-
- Uses given topic from the specified exchange.
- Exactly one member of a given pool will receive each message.
-
-        If more than one pool is created, a given message will be
-        delivered to each of them.
-
- :param callback: Callable to be invoked for each message.
- :type callback: callable accepting one argument
- :param pool_name: The name of the consumer pool.
- :type pool_name: str
- :param topic: The routing topic for desired messages.
- :type topic: str
- :param exchange_name: The name of the message exchange where
- the client should attach. Defaults to
- the configured exchange.
- :type exchange_name: str
- """
- raise NotImplementedError()
-
- def consume_in_thread(self):
- """Spawn a thread to handle incoming messages.
-
- Spawn a thread that will be responsible for handling all incoming
- messages for consumers that were set up on this connection.
-
- Message dispatching inside of this is expected to be implemented in a
- non-blocking manner. An example implementation would be having this
- thread pull messages in for all of the consumers, but utilize a thread
- pool for dispatching the messages to the proxy objects.
- """
- raise NotImplementedError()
-
-
-def _safe_log(log_func, msg, msg_data):
- """Sanitizes the msg_data field before logging."""
- SANITIZE = ['_context_auth_token', 'auth_token', 'new_pass']
-
- def _fix_passwords(d):
- """Sanitizes the password fields in the dictionary."""
- for k in six.iterkeys(d):
- if k.lower().find('password') != -1:
- d[k] = '<SANITIZED>'
- elif k.lower() in SANITIZE:
- d[k] = '<SANITIZED>'
- elif isinstance(d[k], dict):
- _fix_passwords(d[k])
- return d
-
- return log_func(msg, _fix_passwords(copy.deepcopy(msg_data)))
-
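For example, a hypothetical message payload would be sanitized like this
before being logged:

    msg_data = {'_context_auth_token': 'abc123',
                'args': {'new_pass': 'secret',
                         'admin_password': 'hunter2'}}
    _safe_log(LOG.debug, _('received %s'), msg_data)
    # The logged copy becomes:
    # {'_context_auth_token': '<SANITIZED>',
    #  'args': {'new_pass': '<SANITIZED>',
    #           'admin_password': '<SANITIZED>'}}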
-
-def serialize_remote_exception(failure_info, log_failure=True):
- """Prepares exception data to be sent over rpc.
-
- Failure_info should be a sys.exc_info() tuple.
-
- """
- tb = traceback.format_exception(*failure_info)
- failure = failure_info[1]
- if log_failure:
- LOG.error(_("Returning exception %s to caller"),
- six.text_type(failure))
- LOG.error(tb)
-
- kwargs = {}
- if hasattr(failure, 'kwargs'):
- kwargs = failure.kwargs
-
-    # NOTE(matiu): With cells, it's possible to re-raise exceptions that
-    # are already remote. Let's turn them back into the original
-    # exception type.
- cls_name = str(failure.__class__.__name__)
- mod_name = str(failure.__class__.__module__)
- if (cls_name.endswith(_REMOTE_POSTFIX) and
- mod_name.endswith(_REMOTE_POSTFIX)):
- cls_name = cls_name[:-len(_REMOTE_POSTFIX)]
- mod_name = mod_name[:-len(_REMOTE_POSTFIX)]
-
- data = {
- 'class': cls_name,
- 'module': mod_name,
- 'message': six.text_type(failure),
- 'tb': tb,
- 'args': failure.args,
- 'kwargs': kwargs
- }
-
- json_data = jsonutils.dumps(data)
-
- return json_data
-
-
-def deserialize_remote_exception(conf, data):
- failure = jsonutils.loads(str(data))
-
- trace = failure.get('tb', [])
- message = failure.get('message', "") + "\n" + "\n".join(trace)
- name = failure.get('class')
- module = failure.get('module')
-
- # NOTE(ameade): We DO NOT want to allow just any module to be imported, in
- # order to prevent arbitrary code execution.
- if module not in conf.allowed_rpc_exception_modules:
- return RemoteError(name, failure.get('message'), trace)
-
- try:
- mod = importutils.import_module(module)
- klass = getattr(mod, name)
- if not issubclass(klass, Exception):
- raise TypeError("Can only deserialize Exceptions")
-
- failure = klass(*failure.get('args', []), **failure.get('kwargs', {}))
- except (AttributeError, TypeError, ImportError):
- return RemoteError(name, failure.get('message'), trace)
-
- ex_type = type(failure)
- str_override = lambda self: message
- new_ex_type = type(ex_type.__name__ + _REMOTE_POSTFIX, (ex_type,),
- {'__str__': str_override, '__unicode__': str_override})
- new_ex_type.__module__ = '%s%s' % (module, _REMOTE_POSTFIX)
- try:
- # NOTE(ameade): Dynamically create a new exception type and swap it in
- # as the new type for the exception. This only works on user defined
- # Exceptions and not core python exceptions. This is important because
- # we cannot necessarily change an exception message so we must override
- # the __str__ method.
- failure.__class__ = new_ex_type
- except TypeError:
- # NOTE(ameade): If a core exception then just add the traceback to the
- # first exception argument.
- failure.args = (message,) + failure.args[1:]
- return failure
-
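As a sketch of the dynamic-type trick above (module and exception names
are hypothetical): if 'myservice.exc' is listed in
allowed_rpc_exception_modules and the remote side raised
myservice.exc.QuotaError, then:

    ex = deserialize_remote_exception(conf, json_data)
    type(ex).__name__    # 'QuotaError_Remote'
    type(ex).__module__  # 'myservice.exc_Remote'
    str(ex)              # original message plus the remote traceback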
-
-class CommonRpcContext(object):
- def __init__(self, **kwargs):
- self.values = kwargs
-
- def __getattr__(self, key):
- try:
- return self.values[key]
- except KeyError:
- raise AttributeError(key)
-
- def to_dict(self):
- return copy.deepcopy(self.values)
-
- @classmethod
- def from_dict(cls, values):
- return cls(**values)
-
- def deepcopy(self):
- return self.from_dict(self.to_dict())
-
- def update_store(self):
- local.store.context = self
-
- def elevated(self, read_deleted=None, overwrite=False):
- """Return a version of this context with admin flag set."""
- # TODO(russellb) This method is a bit of a nova-ism. It makes
- # some assumptions about the data in the request context sent
- # across rpc, while the rest of this class does not. We could get
- # rid of this if we changed the nova code that uses this to
- # convert the RpcContext back to its native RequestContext doing
- # something like nova.context.RequestContext.from_dict(ctxt.to_dict())
-
- context = self.deepcopy()
- context.values['is_admin'] = True
-
- context.values.setdefault('roles', [])
-
- if 'admin' not in context.values['roles']:
- context.values['roles'].append('admin')
-
- if read_deleted is not None:
- context.values['read_deleted'] = read_deleted
-
- return context
-
-
-class ClientException(Exception):
- """Encapsulates actual exception expected to be hit by a RPC proxy object.
-
- Merely instantiating it records the current exception information, which
- will be passed back to the RPC client without exceptional logging.
- """
- def __init__(self):
- self._exc_info = sys.exc_info()
-
-
-def catch_client_exception(exceptions, func, *args, **kwargs):
- try:
- return func(*args, **kwargs)
- except Exception as e:
- if type(e) in exceptions:
- raise ClientException()
- else:
- raise
-
-
-def client_exceptions(*exceptions):
- """Decorator for manager methods that raise expected exceptions.
-
- Marking a Manager method with this decorator allows the declaration
- of expected exceptions that the RPC layer should not consider fatal,
- and not log as if they were generated in a real error scenario. Note
- that this will cause listed exceptions to be wrapped in a
- ClientException, which is used internally by the RPC layer.
- """
- def outer(func):
- def inner(*args, **kwargs):
- return catch_client_exception(exceptions, func, *args, **kwargs)
- return inner
- return outer
-
-
-# TODO(sirp): we should deprecate this in favor of
-# using `versionutils.is_compatible` directly
-def version_is_compatible(imp_version, version):
- """Determine whether versions are compatible.
-
- :param imp_version: The version implemented
- :param version: The version requested by an incoming message.
- """
- return versionutils.is_compatible(version, imp_version)
-
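Compatibility follows the Major.Minor rule documented in dispatcher.py:
the major versions must match exactly, and the implemented minor version
must be at least the requested one. For instance:

    version_is_compatible('1.3', '1.1')  # True: same major, newer minor
    version_is_compatible('1.3', '1.4')  # False: requested minor too new
    version_is_compatible('1.3', '2.0')  # False: major version mismatch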
-
-def serialize_msg(raw_msg):
- # NOTE(russellb) See the docstring for _RPC_ENVELOPE_VERSION for more
- # information about this format.
- msg = {_VERSION_KEY: _RPC_ENVELOPE_VERSION,
- _MESSAGE_KEY: jsonutils.dumps(raw_msg)}
-
- return msg
-
-
-def deserialize_msg(msg):
- # NOTE(russellb): Hang on to your hats, this road is about to
- # get a little bumpy.
- #
- # Robustness Principle:
- # "Be strict in what you send, liberal in what you accept."
- #
- # At this point we have to do a bit of guessing about what it
- # is we just received. Here is the set of possibilities:
- #
- # 1) We received a dict. This could be 2 things:
- #
- # a) Inspect it to see if it looks like a standard message envelope.
- # If so, great!
- #
- # b) If it doesn't look like a standard message envelope, it could either
- # be a notification, or a message from before we added a message
- # envelope (referred to as version 1.0).
- # Just return the message as-is.
- #
- # 2) It's any other non-dict type. Just return it and hope for the best.
- # This case covers return values from rpc.call() from before message
- # envelopes were used. (messages to call a method were always a dict)
-
- if not isinstance(msg, dict):
- # See #2 above.
- return msg
-
- base_envelope_keys = (_VERSION_KEY, _MESSAGE_KEY)
- if not all(map(lambda key: key in msg, base_envelope_keys)):
- # See #1.b above.
- return msg
-
- # At this point we think we have the message envelope
- # format we were expecting. (#1.a above)
-
- if not version_is_compatible(_RPC_ENVELOPE_VERSION, msg[_VERSION_KEY]):
- raise UnsupportedRpcEnvelopeVersion(version=msg[_VERSION_KEY])
-
- raw_msg = jsonutils.loads(msg[_MESSAGE_KEY])
-
- return raw_msg
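Putting the two halves together, a round trip through the envelope is
the identity on dict payloads, and unenveloped input falls through
unchanged (illustrative values):

    msg = serialize_msg({'method': 'echo', 'args': {'value': 42}})
    deserialize_msg(msg)                 # {'method': 'echo', 'args': {'value': 42}}
    deserialize_msg({'no': 'envelope'})  # returned as-is (the 1.0 path)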
diff --git a/keystone/openstack/common/rpc/dispatcher.py b/keystone/openstack/common/rpc/dispatcher.py
deleted file mode 100644
index 2bcfe79af..000000000
--- a/keystone/openstack/common/rpc/dispatcher.py
+++ /dev/null
@@ -1,178 +0,0 @@
-# Copyright 2012 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Code for rpc message dispatching.
-
-Messages that come in have a version number associated with them. RPC API
-version numbers are in the form:
-
- Major.Minor
-
-For a given message with version X.Y, the receiver must be marked as able to
-handle messages of version A.B, where:
-
- A = X
-
- B >= Y
-
-The Major version number would be incremented for an almost completely new API.
-The Minor version number would be incremented for backwards compatible changes
-to an existing API. A backwards compatible change could be something like
-adding a new method, adding an argument to an existing method (but not
-requiring it), or changing the type for an existing argument (but still
-handling the old type as well).
-
-The conversion over to a versioned API must be done on both the client side and
-server side of the API at the same time. However, as the code stands today,
-there can be both versioned and unversioned APIs implemented in the same code
-base.
-
-EXAMPLES
-========
-
-Nova was the first project to use versioned rpc APIs. Consider the compute rpc
-API as an example. The client side is in nova/compute/rpcapi.py and the server
-side is in nova/compute/manager.py.
-
-
-Example 1) Adding a new method.
--------------------------------
-
-Adding a new method is a backwards compatible change. It should be added to
-nova/compute/manager.py, and RPC_API_VERSION should be bumped from X.Y to
-X.Y+1. On the client side, the new method in nova/compute/rpcapi.py should
-have a specific version specified to indicate the minimum API version that must
-be implemented for the method to be supported. For example::
-
- def get_host_uptime(self, ctxt, host):
- topic = _compute_topic(self.topic, ctxt, host, None)
- return self.call(ctxt, self.make_msg('get_host_uptime'), topic,
- version='1.1')
-
-In this case, version '1.1' is the first version that supported the
-get_host_uptime() method.
-
-
-Example 2) Adding a new parameter.
-----------------------------------
-
-Adding a new parameter to an rpc method can be made backwards compatible. The
-RPC_API_VERSION on the server side (nova/compute/manager.py) should be bumped.
-The implementation of the method must not expect the parameter to be present::
-
- def some_remote_method(self, arg1, arg2, newarg=None):
- # The code needs to deal with newarg=None for cases
- # where an older client sends a message without it.
- pass
-
-On the client side, the same changes should be made as in example 1. The
-minimum version that supports the new parameter should be specified.
-"""
-
-import six
-
-from keystone.openstack.common.rpc import common as rpc_common
-from keystone.openstack.common.rpc import serializer as rpc_serializer
-
-
-class RpcDispatcher(object):
- """Dispatch rpc messages according to the requested API version.
-
- This class can be used as the top level 'manager' for a service. It
- contains a list of underlying managers that have an API_VERSION attribute.
- """
-
- def __init__(self, callbacks, serializer=None):
- """Initialize the rpc dispatcher.
-
- :param callbacks: List of proxy objects that are an instance
- of a class with rpc methods exposed. Each proxy
- object should have an RPC_API_VERSION attribute.
- :param serializer: The Serializer object that will be used to
- deserialize arguments before the method call and
- to serialize the result after it returns.
- """
- self.callbacks = callbacks
- if serializer is None:
- serializer = rpc_serializer.NoOpSerializer()
- self.serializer = serializer
- super(RpcDispatcher, self).__init__()
-
- def _deserialize_args(self, context, kwargs):
- """Helper method called to deserialize args before dispatch.
-
- This calls our serializer on each argument, returning a new set of
- args that have been deserialized.
-
- :param context: The request context
- :param kwargs: The arguments to be deserialized
- :returns: A new set of deserialized args
- """
- new_kwargs = dict()
- for argname, arg in six.iteritems(kwargs):
- new_kwargs[argname] = self.serializer.deserialize_entity(context,
- arg)
- return new_kwargs
-
- def dispatch(self, ctxt, version, method, namespace, **kwargs):
- """Dispatch a message based on a requested version.
-
- :param ctxt: The request context
- :param version: The requested API version from the incoming message
- :param method: The method requested to be called by the incoming
- message.
- :param namespace: The namespace for the requested method. If None,
- the dispatcher will look for a method on a callback
- object with no namespace set.
- :param kwargs: A dict of keyword arguments to be passed to the method.
-
- :returns: Whatever is returned by the underlying method that gets
- called.
- """
- if not version:
- version = '1.0'
-
- had_compatible = False
- for proxyobj in self.callbacks:
- # Check for namespace compatibility
- try:
- cb_namespace = proxyobj.RPC_API_NAMESPACE
- except AttributeError:
- cb_namespace = None
-
- if namespace != cb_namespace:
- continue
-
- # Check for version compatibility
- try:
- rpc_api_version = proxyobj.RPC_API_VERSION
- except AttributeError:
- rpc_api_version = '1.0'
-
- is_compatible = rpc_common.version_is_compatible(rpc_api_version,
- version)
- had_compatible = had_compatible or is_compatible
-
- if not hasattr(proxyobj, method):
- continue
- if is_compatible:
- kwargs = self._deserialize_args(ctxt, kwargs)
- result = getattr(proxyobj, method)(ctxt, **kwargs)
- return self.serializer.serialize_entity(ctxt, result)
-
- if had_compatible:
- raise AttributeError("No such RPC function '%s'" % method)
- else:
- raise rpc_common.UnsupportedRpcVersion(version=version)
diff --git a/keystone/openstack/common/rpc/impl_fake.py b/keystone/openstack/common/rpc/impl_fake.py
deleted file mode 100644
index d50468977..000000000
--- a/keystone/openstack/common/rpc/impl_fake.py
+++ /dev/null
@@ -1,193 +0,0 @@
-# Copyright 2011 OpenStack Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Fake RPC implementation which calls proxy methods directly with no
-queues. Casts will block, but this is very useful for tests.
-"""
-
-import inspect
-# NOTE(russellb): We specifically want to use json, not our own jsonutils.
-# jsonutils has some extra logic to automatically convert objects to primitive
-# types so that they can be serialized. We want to catch all cases where
-# non-primitive types make it into this code and treat it as an error.
-import json
-import time
-
-import eventlet
-
-from keystone.openstack.common.rpc import common as rpc_common
-
-CONSUMERS = {}
-
-
-class RpcContext(rpc_common.CommonRpcContext):
- def __init__(self, **kwargs):
- super(RpcContext, self).__init__(**kwargs)
- self._response = []
- self._done = False
-
- def deepcopy(self):
- values = self.to_dict()
- new_inst = self.__class__(**values)
- new_inst._response = self._response
- new_inst._done = self._done
- return new_inst
-
- def reply(self, reply=None, failure=None, ending=False):
- if ending:
- self._done = True
- if not self._done:
- self._response.append((reply, failure))
-
-
-class Consumer(object):
- def __init__(self, topic, proxy):
- self.topic = topic
- self.proxy = proxy
-
- def call(self, context, version, method, namespace, args, timeout):
- done = eventlet.event.Event()
-
- def _inner():
- ctxt = RpcContext.from_dict(context.to_dict())
- try:
- rval = self.proxy.dispatch(context, version, method,
- namespace, **args)
- res = []
- # Caller might have called ctxt.reply() manually
- for (reply, failure) in ctxt._response:
- if failure:
- raise failure[0], failure[1], failure[2]
- res.append(reply)
- # if ending not 'sent'...we might have more data to
- # return from the function itself
- if not ctxt._done:
- if inspect.isgenerator(rval):
- for val in rval:
- res.append(val)
- else:
- res.append(rval)
- done.send(res)
- except rpc_common.ClientException as e:
- done.send_exception(e._exc_info[1])
- except Exception as e:
- done.send_exception(e)
-
- thread = eventlet.greenthread.spawn(_inner)
-
- if timeout:
- start_time = time.time()
- while not done.ready():
- eventlet.greenthread.sleep(1)
- cur_time = time.time()
- if (cur_time - start_time) > timeout:
- thread.kill()
- raise rpc_common.Timeout()
-
- return done.wait()
-
-
-class Connection(object):
- """Connection object."""
-
- def __init__(self):
- self.consumers = []
-
- def create_consumer(self, topic, proxy, fanout=False):
- consumer = Consumer(topic, proxy)
- self.consumers.append(consumer)
- if topic not in CONSUMERS:
- CONSUMERS[topic] = []
- CONSUMERS[topic].append(consumer)
-
- def close(self):
- for consumer in self.consumers:
- CONSUMERS[consumer.topic].remove(consumer)
- self.consumers = []
-
- def consume_in_thread(self):
- pass
-
-
-def create_connection(conf, new=True):
- """Create a connection."""
- return Connection()
-
-
-def check_serialize(msg):
- """Make sure a message intended for rpc can be serialized."""
- json.dumps(msg)
-
-
-def multicall(conf, context, topic, msg, timeout=None):
- """Make a call that returns multiple times."""
-
- check_serialize(msg)
-
- method = msg.get('method')
- if not method:
- return
- args = msg.get('args', {})
- version = msg.get('version', None)
- namespace = msg.get('namespace', None)
-
- try:
- consumer = CONSUMERS[topic][0]
- except (KeyError, IndexError):
- raise rpc_common.Timeout("No consumers available")
- else:
- return consumer.call(context, version, method, namespace, args,
- timeout)
-
-
-def call(conf, context, topic, msg, timeout=None):
- """Sends a message on a topic and wait for a response."""
- rv = multicall(conf, context, topic, msg, timeout)
- # NOTE(vish): return the last result from the multicall
- rv = list(rv)
- if not rv:
- return
- return rv[-1]
-
-
-def cast(conf, context, topic, msg):
- check_serialize(msg)
- try:
- call(conf, context, topic, msg)
- except Exception:
- pass
-
-
-def notify(conf, context, topic, msg, envelope):
- check_serialize(msg)
-
-
-def cleanup():
- pass
-
-
-def fanout_cast(conf, context, topic, msg):
- """Cast to all consumers of a topic."""
- check_serialize(msg)
- method = msg.get('method')
- if not method:
- return
- args = msg.get('args', {})
- version = msg.get('version', None)
- namespace = msg.get('namespace', None)
-
- for consumer in CONSUMERS.get(topic, []):
- try:
- consumer.call(context, version, method, namespace, args, None)
- except Exception:
- pass
diff --git a/keystone/openstack/common/rpc/impl_kombu.py b/keystone/openstack/common/rpc/impl_kombu.py
deleted file mode 100644
index ec1cb2780..000000000
--- a/keystone/openstack/common/rpc/impl_kombu.py
+++ /dev/null
@@ -1,855 +0,0 @@
-# Copyright 2011 OpenStack Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import functools
-import itertools
-import socket
-import ssl
-import time
-import uuid
-
-import eventlet
-import greenlet
-import kombu
-import kombu.connection
-import kombu.entity
-import kombu.messaging
-from oslo.config import cfg
-import six
-
-from keystone.openstack.common import excutils
-from keystone.openstack.common.gettextutils import _ # noqa
-from keystone.openstack.common import network_utils
-from keystone.openstack.common.rpc import amqp as rpc_amqp
-from keystone.openstack.common.rpc import common as rpc_common
-from keystone.openstack.common import sslutils
-
-kombu_opts = [
- cfg.StrOpt('kombu_ssl_version',
- default='',
- help='SSL version to use (valid only if SSL enabled). '
-                    'Valid values are TLSv1, SSLv23 and SSLv3. SSLv2 may '
-                    'be available on some distributions.'
- ),
- cfg.StrOpt('kombu_ssl_keyfile',
- default='',
- help='SSL key file (valid only if SSL enabled)'),
- cfg.StrOpt('kombu_ssl_certfile',
- default='',
- help='SSL cert file (valid only if SSL enabled)'),
- cfg.StrOpt('kombu_ssl_ca_certs',
- default='',
- help=('SSL certification authority file '
- '(valid only if SSL enabled)')),
- cfg.StrOpt('rabbit_host',
- default='localhost',
- help='The RabbitMQ broker address where a single node is used'),
- cfg.IntOpt('rabbit_port',
- default=5672,
- help='The RabbitMQ broker port where a single node is used'),
- cfg.ListOpt('rabbit_hosts',
- default=['$rabbit_host:$rabbit_port'],
- help='RabbitMQ HA cluster host:port pairs'),
- cfg.BoolOpt('rabbit_use_ssl',
- default=False,
- help='connect over SSL for RabbitMQ'),
- cfg.StrOpt('rabbit_userid',
- default='guest',
- help='the RabbitMQ userid'),
- cfg.StrOpt('rabbit_password',
- default='guest',
- help='the RabbitMQ password',
- secret=True),
- cfg.StrOpt('rabbit_virtual_host',
- default='/',
- help='the RabbitMQ virtual host'),
- cfg.IntOpt('rabbit_retry_interval',
- default=1,
- help='how frequently to retry connecting with RabbitMQ'),
- cfg.IntOpt('rabbit_retry_backoff',
- default=2,
- help='how long to backoff for between retries when connecting '
- 'to RabbitMQ'),
- cfg.IntOpt('rabbit_max_retries',
- default=0,
- help='maximum retries with trying to connect to RabbitMQ '
- '(the default of 0 implies an infinite retry count)'),
- cfg.BoolOpt('rabbit_ha_queues',
- default=False,
-                help='Use H/A queues in RabbitMQ (x-ha-policy: all). '
-                     'You need to wipe the RabbitMQ database when '
-                     'changing this option.'),
-
-]
-
-cfg.CONF.register_opts(kombu_opts)
-
-LOG = rpc_common.LOG
-
-
-def _get_queue_arguments(conf):
- """Construct the arguments for declaring a queue.
-
- If the rabbit_ha_queues option is set, we declare a mirrored queue
- as described here:
-
- http://www.rabbitmq.com/ha.html
-
- Setting x-ha-policy to all means that the queue will be mirrored
- to all nodes in the cluster.
- """
- return {'x-ha-policy': 'all'} if conf.rabbit_ha_queues else {}
-
-
-class ConsumerBase(object):
- """Consumer base class."""
-
- def __init__(self, channel, callback, tag, **kwargs):
- """Declare a queue on an amqp channel.
-
- 'channel' is the amqp channel to use
- 'callback' is the callback to call when messages are received
- 'tag' is a unique ID for the consumer on the channel
-
- queue name, exchange name, and other kombu options are
- passed in here as a dictionary.
- """
- self.callback = callback
- self.tag = str(tag)
- self.kwargs = kwargs
- self.queue = None
- self.ack_on_error = kwargs.get('ack_on_error', True)
- self.reconnect(channel)
-
- def reconnect(self, channel):
- """Re-declare the queue after a rabbit reconnect."""
- self.channel = channel
- self.kwargs['channel'] = channel
- self.queue = kombu.entity.Queue(**self.kwargs)
- self.queue.declare()
-
- def _callback_handler(self, message, callback):
- """Call callback with deserialized message.
-
- Messages that are processed without exception are ack'ed.
-
- If the message processing generates an exception, it will be
- ack'ed if ack_on_error=True. Otherwise it will be .requeue()'ed.
- """
-
- try:
- msg = rpc_common.deserialize_msg(message.payload)
- callback(msg)
- except Exception:
- if self.ack_on_error:
- LOG.exception(_("Failed to process message"
- " ... skipping it."))
- message.ack()
- else:
- LOG.exception(_("Failed to process message"
- " ... will requeue."))
- message.requeue()
- else:
- message.ack()
-
- def consume(self, *args, **kwargs):
- """Actually declare the consumer on the amqp channel. This will
- start the flow of messages from the queue. Using the
- Connection.iterconsume() iterator will process the messages,
- calling the appropriate callback.
-
- If a callback is specified in kwargs, use that. Otherwise,
- use the callback passed during __init__()
-
-        If kwargs['nowait'] is False, this call will block until the
-        broker confirms the consumer registration.
-
- """
-
- options = {'consumer_tag': self.tag}
- options['nowait'] = kwargs.get('nowait', False)
- callback = kwargs.get('callback', self.callback)
- if not callback:
- raise ValueError("No callback defined")
-
- def _callback(raw_message):
- message = self.channel.message_to_python(raw_message)
- self._callback_handler(message, callback)
-
- self.queue.consume(*args, callback=_callback, **options)
-
- def cancel(self):
- """Cancel the consuming from the queue, if it has started."""
- try:
- self.queue.cancel(self.tag)
- except KeyError as e:
-            # NOTE(comstud): Kludge to get around an amqplib bug
- if str(e) != "u'%s'" % self.tag:
- raise
- self.queue = None
-
-
-class DirectConsumer(ConsumerBase):
- """Queue/consumer class for 'direct'."""
-
- def __init__(self, conf, channel, msg_id, callback, tag, **kwargs):
- """Init a 'direct' queue.
-
- 'channel' is the amqp channel to use
- 'msg_id' is the msg_id to listen on
- 'callback' is the callback to call when messages are received
- 'tag' is a unique ID for the consumer on the channel
-
- Other kombu options may be passed
- """
- # Default options
- options = {'durable': False,
- 'queue_arguments': _get_queue_arguments(conf),
- 'auto_delete': True,
- 'exclusive': False}
- options.update(kwargs)
- exchange = kombu.entity.Exchange(name=msg_id,
- type='direct',
- durable=options['durable'],
- auto_delete=options['auto_delete'])
- super(DirectConsumer, self).__init__(channel,
- callback,
- tag,
- name=msg_id,
- exchange=exchange,
- routing_key=msg_id,
- **options)
-
-
-class TopicConsumer(ConsumerBase):
- """Consumer class for 'topic'."""
-
- def __init__(self, conf, channel, topic, callback, tag, name=None,
- exchange_name=None, **kwargs):
- """Init a 'topic' queue.
-
- :param channel: the amqp channel to use
- :param topic: the topic to listen on
- :paramtype topic: str
- :param callback: the callback to call when messages are received
- :param tag: a unique ID for the consumer on the channel
- :param name: optional queue name, defaults to topic
- :paramtype name: str
-
- Other kombu options may be passed as keyword arguments
- """
- # Default options
- options = {'durable': conf.amqp_durable_queues,
- 'queue_arguments': _get_queue_arguments(conf),
- 'auto_delete': conf.amqp_auto_delete,
- 'exclusive': False}
- options.update(kwargs)
- exchange_name = exchange_name or rpc_amqp.get_control_exchange(conf)
- exchange = kombu.entity.Exchange(name=exchange_name,
- type='topic',
- durable=options['durable'],
- auto_delete=options['auto_delete'])
- super(TopicConsumer, self).__init__(channel,
- callback,
- tag,
- name=name or topic,
- exchange=exchange,
- routing_key=topic,
- **options)
-
-
-class FanoutConsumer(ConsumerBase):
- """Consumer class for 'fanout'."""
-
- def __init__(self, conf, channel, topic, callback, tag, **kwargs):
- """Init a 'fanout' queue.
-
- 'channel' is the amqp channel to use
- 'topic' is the topic to listen on
- 'callback' is the callback to call when messages are received
- 'tag' is a unique ID for the consumer on the channel
-
- Other kombu options may be passed
- """
- unique = uuid.uuid4().hex
- exchange_name = '%s_fanout' % topic
- queue_name = '%s_fanout_%s' % (topic, unique)
-
- # Default options
- options = {'durable': False,
- 'queue_arguments': _get_queue_arguments(conf),
- 'auto_delete': True,
- 'exclusive': False}
- options.update(kwargs)
- exchange = kombu.entity.Exchange(name=exchange_name, type='fanout',
- durable=options['durable'],
- auto_delete=options['auto_delete'])
- super(FanoutConsumer, self).__init__(channel, callback, tag,
- name=queue_name,
- exchange=exchange,
- routing_key=topic,
- **options)
-
-
-class Publisher(object):
- """Base Publisher class."""
-
- def __init__(self, channel, exchange_name, routing_key, **kwargs):
- """Init the Publisher class with the exchange_name, routing_key,
-        and other options.
- """
- self.exchange_name = exchange_name
- self.routing_key = routing_key
- self.kwargs = kwargs
- self.reconnect(channel)
-
- def reconnect(self, channel):
- """Re-establish the Producer after a rabbit reconnection."""
- self.exchange = kombu.entity.Exchange(name=self.exchange_name,
- **self.kwargs)
- self.producer = kombu.messaging.Producer(exchange=self.exchange,
- channel=channel,
- routing_key=self.routing_key)
-
- def send(self, msg, timeout=None):
- """Send a message."""
- if timeout:
- #
- # AMQP TTL is in milliseconds when set in the header.
- #
- self.producer.publish(msg, headers={'ttl': (timeout * 1000)})
- else:
- self.producer.publish(msg)
-
-
-class DirectPublisher(Publisher):
- """Publisher class for 'direct'."""
- def __init__(self, conf, channel, msg_id, **kwargs):
- """init a 'direct' publisher.
-
- Kombu options may be passed as keyword args to override defaults
- """
-
- options = {'durable': False,
- 'auto_delete': True,
- 'exclusive': False}
- options.update(kwargs)
- super(DirectPublisher, self).__init__(channel, msg_id, msg_id,
- type='direct', **options)
-
-
-class TopicPublisher(Publisher):
- """Publisher class for 'topic'."""
- def __init__(self, conf, channel, topic, **kwargs):
- """init a 'topic' publisher.
-
- Kombu options may be passed as keyword args to override defaults
- """
- options = {'durable': conf.amqp_durable_queues,
- 'auto_delete': conf.amqp_auto_delete,
- 'exclusive': False}
- options.update(kwargs)
- exchange_name = rpc_amqp.get_control_exchange(conf)
- super(TopicPublisher, self).__init__(channel,
- exchange_name,
- topic,
- type='topic',
- **options)
-
-
-class FanoutPublisher(Publisher):
- """Publisher class for 'fanout'."""
- def __init__(self, conf, channel, topic, **kwargs):
- """init a 'fanout' publisher.
-
- Kombu options may be passed as keyword args to override defaults
- """
- options = {'durable': False,
- 'auto_delete': True,
- 'exclusive': False}
- options.update(kwargs)
- super(FanoutPublisher, self).__init__(channel, '%s_fanout' % topic,
- None, type='fanout', **options)
-
-
-class NotifyPublisher(TopicPublisher):
- """Publisher class for 'notify'."""
-
- def __init__(self, conf, channel, topic, **kwargs):
- self.durable = kwargs.pop('durable', conf.amqp_durable_queues)
- self.queue_arguments = _get_queue_arguments(conf)
- super(NotifyPublisher, self).__init__(conf, channel, topic, **kwargs)
-
- def reconnect(self, channel):
- super(NotifyPublisher, self).reconnect(channel)
-
- # NOTE(jerdfelt): Normally the consumer would create the queue, but
- # we do this to ensure that messages don't get dropped if the
- # consumer is started after we do
- queue = kombu.entity.Queue(channel=channel,
- exchange=self.exchange,
- durable=self.durable,
- name=self.routing_key,
- routing_key=self.routing_key,
- queue_arguments=self.queue_arguments)
- queue.declare()
-
-
-class Connection(object):
- """Connection object."""
-
- pool = None
-
- def __init__(self, conf, server_params=None):
- self.consumers = []
- self.consumer_thread = None
- self.proxy_callbacks = []
- self.conf = conf
- self.max_retries = self.conf.rabbit_max_retries
- # Try forever?
- if self.max_retries <= 0:
- self.max_retries = None
- self.interval_start = self.conf.rabbit_retry_interval
- self.interval_stepping = self.conf.rabbit_retry_backoff
- # max retry-interval = 30 seconds
- self.interval_max = 30
- self.memory_transport = False
-
- if server_params is None:
- server_params = {}
- # Keys to translate from server_params to kombu params
- server_params_to_kombu_params = {'username': 'userid'}
-
- ssl_params = self._fetch_ssl_params()
- params_list = []
- for adr in self.conf.rabbit_hosts:
- hostname, port = network_utils.parse_host_port(
- adr, default_port=self.conf.rabbit_port)
-
- params = {
- 'hostname': hostname,
- 'port': port,
- 'userid': self.conf.rabbit_userid,
- 'password': self.conf.rabbit_password,
- 'virtual_host': self.conf.rabbit_virtual_host,
- }
-
- for sp_key, value in six.iteritems(server_params):
- p_key = server_params_to_kombu_params.get(sp_key, sp_key)
- params[p_key] = value
-
- if self.conf.fake_rabbit:
- params['transport'] = 'memory'
- if self.conf.rabbit_use_ssl:
- params['ssl'] = ssl_params
-
- params_list.append(params)
-
- self.params_list = params_list
-
- self.memory_transport = self.conf.fake_rabbit
-
- self.connection = None
- self.reconnect()
-
- def _fetch_ssl_params(self):
- """Handles fetching what ssl params should be used for the connection
- (if any).
- """
- ssl_params = dict()
-
- # http://docs.python.org/library/ssl.html - ssl.wrap_socket
- if self.conf.kombu_ssl_version:
- ssl_params['ssl_version'] = sslutils.validate_ssl_version(
- self.conf.kombu_ssl_version)
- if self.conf.kombu_ssl_keyfile:
- ssl_params['keyfile'] = self.conf.kombu_ssl_keyfile
- if self.conf.kombu_ssl_certfile:
- ssl_params['certfile'] = self.conf.kombu_ssl_certfile
- if self.conf.kombu_ssl_ca_certs:
- ssl_params['ca_certs'] = self.conf.kombu_ssl_ca_certs
- # We might want to allow variations in the
- # future with this?
- ssl_params['cert_reqs'] = ssl.CERT_REQUIRED
-
- # Return the extended behavior or just have the default behavior
- return ssl_params or True
-
- def _connect(self, params):
- """Connect to rabbit. Re-establish any queues that may have
- been declared before if we are reconnecting. Exceptions should
- be handled by the caller.
- """
- if self.connection:
- LOG.info(_("Reconnecting to AMQP server on "
- "%(hostname)s:%(port)d") % params)
- try:
- self.connection.release()
- except self.connection_errors:
- pass
- # Setting this in case the next statement fails, though
- # it shouldn't be doing any network operations, yet.
- self.connection = None
- self.connection = kombu.connection.BrokerConnection(**params)
- self.connection_errors = self.connection.connection_errors
- if self.memory_transport:
- # Kludge to speed up tests.
- self.connection.transport.polling_interval = 0.0
- self.consumer_num = itertools.count(1)
- self.connection.connect()
- self.channel = self.connection.channel()
- # work around 'memory' transport bug in 1.1.3
- if self.memory_transport:
- self.channel._new_queue('ae.undeliver')
- for consumer in self.consumers:
- consumer.reconnect(self.channel)
- LOG.info(_('Connected to AMQP server on %(hostname)s:%(port)d') %
- params)
-
- def reconnect(self):
- """Handles reconnecting and re-establishing queues.
- Will retry up to self.max_retries number of times.
- self.max_retries = 0 means to retry forever.
- Sleep between tries, starting at self.interval_start
- seconds, backing off self.interval_stepping number of seconds
- each attempt.
- """
-
- attempt = 0
- while True:
- params = self.params_list[attempt % len(self.params_list)]
- attempt += 1
- try:
- self._connect(params)
- return
- except (IOError, self.connection_errors) as e:
- pass
- except Exception as e:
- # NOTE(comstud): Unfortunately it's possible for amqplib
- # to return an error not covered by its transport
- # connection_errors in the case of a timeout waiting for
- # a protocol response. (See paste link in LP888621)
- # So, we check all exceptions for 'timeout' in them
- # and try to reconnect in this case.
- if 'timeout' not in str(e):
- raise
-
- log_info = {}
- log_info['err_str'] = str(e)
- log_info['max_retries'] = self.max_retries
- log_info.update(params)
-
- if self.max_retries and attempt == self.max_retries:
- msg = _('Unable to connect to AMQP server on '
- '%(hostname)s:%(port)d after %(max_retries)d '
- 'tries: %(err_str)s') % log_info
- LOG.error(msg)
- raise rpc_common.RPCException(msg)
-
- if attempt == 1:
- sleep_time = self.interval_start or 1
- elif attempt > 1:
- sleep_time += self.interval_stepping
- if self.interval_max:
- sleep_time = min(sleep_time, self.interval_max)
-
- log_info['sleep_time'] = sleep_time
- LOG.error(_('AMQP server on %(hostname)s:%(port)d is '
- 'unreachable: %(err_str)s. Trying again in '
- '%(sleep_time)d seconds.') % log_info)
- time.sleep(sleep_time)
-
- def ensure(self, error_callback, method, *args, **kwargs):
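- # Usage sketch with illustrative names: self.ensure(log_error,
- # do_publish) keeps invoking do_publish, reconnecting on connection
- # errors, until it returns; declare_consumer and publisher_send
- # below are the real callers.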
- while True:
- try:
- return method(*args, **kwargs)
- except (self.connection_errors, socket.timeout, IOError) as e:
- if error_callback:
- error_callback(e)
- except Exception as e:
- # NOTE(comstud): Unfortunately it's possible for amqplib
- # to return an error not covered by its transport
- # connection_errors in the case of a timeout waiting for
- # a protocol response. (See paste link in LP888621)
- # So, we check all exceptions for 'timeout' in them
- # and try to reconnect in this case.
- if 'timeout' not in str(e):
- raise
- if error_callback:
- error_callback(e)
- self.reconnect()
-
- def get_channel(self):
- """Convenience call for bin/clear_rabbit_queues."""
- return self.channel
-
- def close(self):
- """Close/release this connection."""
- self.cancel_consumer_thread()
- self.wait_on_proxy_callbacks()
- self.connection.release()
- self.connection = None
-
- def reset(self):
- """Reset a connection so it can be used again."""
- self.cancel_consumer_thread()
- self.wait_on_proxy_callbacks()
- self.channel.close()
- self.channel = self.connection.channel()
- # work around 'memory' transport bug in 1.1.3
- if self.memory_transport:
- self.channel._new_queue('ae.undeliver')
- self.consumers = []
-
- def declare_consumer(self, consumer_cls, topic, callback):
- """Create a Consumer using the class that was passed in and
- add it to our list of consumers
- """
-
- def _connect_error(exc):
- log_info = {'topic': topic, 'err_str': str(exc)}
- LOG.error(_("Failed to declare consumer for topic '%(topic)s': "
- "%(err_str)s") % log_info)
-
- def _declare_consumer():
- consumer = consumer_cls(self.conf, self.channel, topic, callback,
- six.next(self.consumer_num))
- self.consumers.append(consumer)
- return consumer
-
- return self.ensure(_connect_error, _declare_consumer)
-
- def iterconsume(self, limit=None, timeout=None):
- """Return an iterator that will consume from all queues/consumers."""
-
- info = {'do_consume': True}
-
- def _error_callback(exc):
- if isinstance(exc, socket.timeout):
- LOG.debug(_('Timed out waiting for RPC response: %s') %
- str(exc))
- raise rpc_common.Timeout()
- else:
- LOG.exception(_('Failed to consume message from queue: %s') %
- str(exc))
- info['do_consume'] = True
-
- def _consume():
- if info['do_consume']:
- queues_head = self.consumers[:-1] # not fanout.
- queues_tail = self.consumers[-1] # fanout
- for queue in queues_head:
- queue.consume(nowait=True)
- queues_tail.consume(nowait=False)
- info['do_consume'] = False
- return self.connection.drain_events(timeout=timeout)
-
- for iteration in itertools.count(0):
- if limit and iteration >= limit:
- raise StopIteration
- yield self.ensure(_error_callback, _consume)
-
- def cancel_consumer_thread(self):
- """Cancel a consumer thread."""
- if self.consumer_thread is not None:
- self.consumer_thread.kill()
- try:
- self.consumer_thread.wait()
- except greenlet.GreenletExit:
- pass
- self.consumer_thread = None
-
- def wait_on_proxy_callbacks(self):
- """Wait for all proxy callback threads to exit."""
- for proxy_cb in self.proxy_callbacks:
- proxy_cb.wait()
-
- def publisher_send(self, cls, topic, msg, timeout=None, **kwargs):
- """Send to a publisher based on the publisher class."""
-
- def _error_callback(exc):
- log_info = {'topic': topic, 'err_str': str(exc)}
- LOG.exception(_("Failed to publish message to topic "
- "'%(topic)s': %(err_str)s") % log_info)
-
- def _publish():
- publisher = cls(self.conf, self.channel, topic, **kwargs)
- publisher.send(msg, timeout)
-
- self.ensure(_error_callback, _publish)
-
- def declare_direct_consumer(self, topic, callback):
- """Create a 'direct' queue.
- In nova's use, this is generally a msg_id queue used for
- responses to call/multicall.
- """
- self.declare_consumer(DirectConsumer, topic, callback)
-
- def declare_topic_consumer(self, topic, callback=None, queue_name=None,
- exchange_name=None, ack_on_error=True):
- """Create a 'topic' consumer."""
- self.declare_consumer(functools.partial(TopicConsumer,
- name=queue_name,
- exchange_name=exchange_name,
- ack_on_error=ack_on_error,
- ),
- topic, callback)
-
- def declare_fanout_consumer(self, topic, callback):
- """Create a 'fanout' consumer."""
- self.declare_consumer(FanoutConsumer, topic, callback)
-
- def direct_send(self, msg_id, msg):
- """Send a 'direct' message."""
- self.publisher_send(DirectPublisher, msg_id, msg)
-
- def topic_send(self, topic, msg, timeout=None):
- """Send a 'topic' message."""
- self.publisher_send(TopicPublisher, topic, msg, timeout)
-
- def fanout_send(self, topic, msg):
- """Send a 'fanout' message."""
- self.publisher_send(FanoutPublisher, topic, msg)
-
- def notify_send(self, topic, msg, **kwargs):
- """Send a notify message on a topic."""
- self.publisher_send(NotifyPublisher, topic, msg, None, **kwargs)
-
- def consume(self, limit=None):
- """Consume from all queues/consumers."""
- it = self.iterconsume(limit=limit)
- while True:
- try:
- six.next(it)
- except StopIteration:
- return
-
- def consume_in_thread(self):
- """Consumer from all queues/consumers in a greenthread."""
- @excutils.forever_retry_uncaught_exceptions
- def _consumer_thread():
- try:
- self.consume()
- except greenlet.GreenletExit:
- return
- if self.consumer_thread is None:
- self.consumer_thread = eventlet.spawn(_consumer_thread)
- return self.consumer_thread
-
- def create_consumer(self, topic, proxy, fanout=False):
- """Create a consumer that calls a method in a proxy object."""
- proxy_cb = rpc_amqp.ProxyCallback(
- self.conf, proxy,
- rpc_amqp.get_connection_pool(self.conf, Connection))
- self.proxy_callbacks.append(proxy_cb)
-
- if fanout:
- self.declare_fanout_consumer(topic, proxy_cb)
- else:
- self.declare_topic_consumer(topic, proxy_cb)
-
- def create_worker(self, topic, proxy, pool_name):
- """Create a worker that calls a method in a proxy object."""
- proxy_cb = rpc_amqp.ProxyCallback(
- self.conf, proxy,
- rpc_amqp.get_connection_pool(self.conf, Connection))
- self.proxy_callbacks.append(proxy_cb)
- self.declare_topic_consumer(topic, proxy_cb, pool_name)
-
- def join_consumer_pool(self, callback, pool_name, topic,
- exchange_name=None, ack_on_error=True):
- """Register as a member of a group of consumers for a given topic from
- the specified exchange.
-
- Exactly one member of a given pool will receive each message.
-
- A message will be delivered to multiple pools, if more than
- one is created.
- """
- callback_wrapper = rpc_amqp.CallbackWrapper(
- conf=self.conf,
- callback=callback,
- connection_pool=rpc_amqp.get_connection_pool(self.conf,
- Connection),
- wait_for_consumers=not ack_on_error
- )
- self.proxy_callbacks.append(callback_wrapper)
- self.declare_topic_consumer(
- queue_name=pool_name,
- topic=topic,
- exchange_name=exchange_name,
- callback=callback_wrapper,
- ack_on_error=ack_on_error,
- )
-
-
-def create_connection(conf, new=True):
- """Create a connection."""
- return rpc_amqp.create_connection(
- conf, new,
- rpc_amqp.get_connection_pool(conf, Connection))
-
-
-def multicall(conf, context, topic, msg, timeout=None):
- """Make a call that returns multiple times."""
- return rpc_amqp.multicall(
- conf, context, topic, msg, timeout,
- rpc_amqp.get_connection_pool(conf, Connection))
-
-
-def call(conf, context, topic, msg, timeout=None):
- """Sends a message on a topic and wait for a response."""
- return rpc_amqp.call(
- conf, context, topic, msg, timeout,
- rpc_amqp.get_connection_pool(conf, Connection))
-
-
-def cast(conf, context, topic, msg):
- """Sends a message on a topic without waiting for a response."""
- return rpc_amqp.cast(
- conf, context, topic, msg,
- rpc_amqp.get_connection_pool(conf, Connection))
-
-
-def fanout_cast(conf, context, topic, msg):
- """Sends a message on a fanout exchange without waiting for a response."""
- return rpc_amqp.fanout_cast(
- conf, context, topic, msg,
- rpc_amqp.get_connection_pool(conf, Connection))
-
-
-def cast_to_server(conf, context, server_params, topic, msg):
- """Sends a message on a topic to a specific server."""
- return rpc_amqp.cast_to_server(
- conf, context, server_params, topic, msg,
- rpc_amqp.get_connection_pool(conf, Connection))
-
-
-def fanout_cast_to_server(conf, context, server_params, topic, msg):
- """Sends a message on a fanout exchange to a specific server."""
- return rpc_amqp.fanout_cast_to_server(
- conf, context, server_params, topic, msg,
- rpc_amqp.get_connection_pool(conf, Connection))
-
-
-def notify(conf, context, topic, msg, envelope):
- """Sends a notification event on a topic."""
- return rpc_amqp.notify(
- conf, context, topic, msg,
- rpc_amqp.get_connection_pool(conf, Connection),
- envelope)
-
-
-def cleanup():
- return rpc_amqp.cleanup(Connection.pool)
diff --git a/keystone/openstack/common/rpc/impl_qpid.py b/keystone/openstack/common/rpc/impl_qpid.py
deleted file mode 100644
index da4440ee2..000000000
--- a/keystone/openstack/common/rpc/impl_qpid.py
+++ /dev/null
@@ -1,821 +0,0 @@
-# Copyright 2011 OpenStack Foundation
-# Copyright 2011 - 2012, Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import functools
-import itertools
-import time
-
-import eventlet
-import greenlet
-from oslo.config import cfg
-import six
-
-from keystone.openstack.common import excutils
-from keystone.openstack.common.gettextutils import _ # noqa
-from keystone.openstack.common import importutils
-from keystone.openstack.common import jsonutils
-from keystone.openstack.common import log as logging
-from keystone.openstack.common.rpc import amqp as rpc_amqp
-from keystone.openstack.common.rpc import common as rpc_common
-
-qpid_codec = importutils.try_import("qpid.codec010")
-qpid_messaging = importutils.try_import("qpid.messaging")
-qpid_exceptions = importutils.try_import("qpid.messaging.exceptions")
-
-LOG = logging.getLogger(__name__)
-
-qpid_opts = [
- cfg.StrOpt('qpid_hostname',
- default='localhost',
- help='Qpid broker hostname'),
- cfg.IntOpt('qpid_port',
- default=5672,
- help='Qpid broker port'),
- cfg.ListOpt('qpid_hosts',
- default=['$qpid_hostname:$qpid_port'],
- help='Qpid HA cluster host:port pairs'),
- cfg.StrOpt('qpid_username',
- default='',
- help='Username for qpid connection'),
- cfg.StrOpt('qpid_password',
- default='',
- help='Password for qpid connection',
- secret=True),
- cfg.StrOpt('qpid_sasl_mechanisms',
- default='',
- help='Space separated list of SASL mechanisms to use for auth'),
- cfg.IntOpt('qpid_heartbeat',
- default=60,
- help='Seconds between connection keepalive heartbeats'),
- cfg.StrOpt('qpid_protocol',
- default='tcp',
- help="Transport to use, either 'tcp' or 'ssl'"),
- cfg.BoolOpt('qpid_tcp_nodelay',
- default=True,
- help='Disable Nagle algorithm'),
- # NOTE(russellb) If any additional versions are added (beyond 1 and 2),
- # this file could probably use some additional refactoring so that the
- # differences between each version are split into different classes.
- cfg.IntOpt('qpid_topology_version',
- default=1,
- help="The qpid topology version to use. Version 1 is what "
- "was originally used by impl_qpid. Version 2 includes "
- "some backwards-incompatible changes that allow broker "
- "federation to work. Users should update to version 2 "
- "when they are able to take everything down, as it "
- "requires a clean break."),
-]
-
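-# Illustrative address shapes (derived from the consumer/publisher
-# classes below): for exchange 'openstack' and topic 'compute',
-# topology version 1 yields node names like 'openstack/compute',
-# while version 2 yields 'amq.topic/topic/openstack/compute'.
-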
-cfg.CONF.register_opts(qpid_opts)
-
-JSON_CONTENT_TYPE = 'application/json; charset=utf8'
-
-
-def raise_invalid_topology_version(conf):
- msg = (_("Invalid value for qpid_topology_version: %d") %
- conf.qpid_topology_version)
- LOG.error(msg)
- raise Exception(msg)
-
-
-class ConsumerBase(object):
- """Consumer base class."""
-
- def __init__(self, conf, session, callback, node_name, node_opts,
- link_name, link_opts):
- """Declare a queue on an amqp session.
-
- 'session' is the amqp session to use
- 'callback' is the callback to call when messages are received
- 'node_name' is the first part of the Qpid address string, before ';'
- 'node_opts' will be applied to the "x-declare" section of "node"
- in the address string.
- 'link_name' goes into the "name" field of the "link" in the address
- string
- 'link_opts' will be applied to the "x-declare" section of "link"
- in the address string.
- """
- self.callback = callback
- self.receiver = None
- self.session = None
-
- if conf.qpid_topology_version == 1:
- addr_opts = {
- "create": "always",
- "node": {
- "type": "topic",
- "x-declare": {
- "durable": True,
- "auto-delete": True,
- },
- },
- "link": {
- "durable": True,
- "x-declare": {
- "durable": False,
- "auto-delete": True,
- "exclusive": False,
- },
- },
- }
- addr_opts["node"]["x-declare"].update(node_opts)
- elif conf.qpid_topology_version == 2:
- addr_opts = {
- "link": {
- "x-declare": {
- "auto-delete": True,
- "exclusive": False,
- },
- },
- }
- else:
- raise_invalid_topology_version(conf)
-
- addr_opts["link"]["x-declare"].update(link_opts)
- if link_name:
- addr_opts["link"]["name"] = link_name
-
- self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts))
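-
- # Illustrative: with topology version 1, a topic consumer ends up
- # with an address such as
- #     openstack/compute ; {"create": "always", "node": {...}, ...}
- # i.e. the node name plus the JSON-encoded options above.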
-
- self.connect(session)
-
- def connect(self, session):
- """Declare the receiver on connect."""
- self._declare_receiver(session)
-
- def reconnect(self, session):
- """Re-declare the receiver after a qpid reconnect."""
- self._declare_receiver(session)
-
- def _declare_receiver(self, session):
- self.session = session
- self.receiver = session.receiver(self.address)
- self.receiver.capacity = 1
-
- def _unpack_json_msg(self, msg):
- """Load the JSON data in msg if msg.content_type indicates that it
- is necessary. Put the loaded data back into msg.content and
- update msg.content_type appropriately.
-
- A Qpid Message containing a dict will have a content_type of
- 'amqp/map', whereas one containing a string that needs to be converted
- back from JSON will have a content_type of JSON_CONTENT_TYPE.
-
- :param msg: a Qpid Message object
- :returns: None
- """
- if msg.content_type == JSON_CONTENT_TYPE:
- msg.content = jsonutils.loads(msg.content)
- msg.content_type = 'amqp/map'
-
- def consume(self):
- """Fetch the message and pass it to the callback object."""
- message = self.receiver.fetch()
- try:
- self._unpack_json_msg(message)
- msg = rpc_common.deserialize_msg(message.content)
- self.callback(msg)
- except Exception:
- LOG.exception(_("Failed to process message... skipping it."))
- finally:
- # TODO(sandy): Need support for optional ack_on_error.
- self.session.acknowledge(message)
-
- def get_receiver(self):
- return self.receiver
-
- def get_node_name(self):
- return self.address.split(';')[0]
-
-
-class DirectConsumer(ConsumerBase):
- """Queue/consumer class for 'direct'."""
-
- def __init__(self, conf, session, msg_id, callback):
- """Init a 'direct' queue.
-
- 'session' is the amqp session to use
- 'msg_id' is the msg_id to listen on
- 'callback' is the callback to call when messages are received
- """
-
- link_opts = {
- "auto-delete": conf.amqp_auto_delete,
- "exclusive": True,
- "durable": conf.amqp_durable_queues,
- }
-
- if conf.qpid_topology_version == 1:
- node_name = "%s/%s" % (msg_id, msg_id)
- node_opts = {"type": "direct"}
- link_name = msg_id
- elif conf.qpid_topology_version == 2:
- node_name = "amq.direct/%s" % msg_id
- node_opts = {}
- link_name = None
- else:
- raise_invalid_topology_version(conf)
-
- super(DirectConsumer, self).__init__(conf, session, callback,
- node_name, node_opts, link_name,
- link_opts)
-
-
-class TopicConsumer(ConsumerBase):
- """Consumer class for 'topic'."""
-
- def __init__(self, conf, session, topic, callback, name=None,
- exchange_name=None):
- """Init a 'topic' queue.
-
- :param session: the amqp session to use
- :param topic: is the topic to listen on
- :paramtype topic: str
- :param callback: the callback to call when messages are received
- :param name: optional queue name, defaults to topic
- """
-
- exchange_name = exchange_name or rpc_amqp.get_control_exchange(conf)
- link_opts = {
- "auto-delete": conf.amqp_auto_delete,
- "durable": conf.amqp_durable_queues,
- }
-
- if conf.qpid_topology_version == 1:
- node_name = "%s/%s" % (exchange_name, topic)
- elif conf.qpid_topology_version == 2:
- node_name = "amq.topic/topic/%s/%s" % (exchange_name, topic)
- else:
- raise_invalid_topology_version(conf)
-
- super(TopicConsumer, self).__init__(conf, session, callback, node_name,
- {}, name or topic, link_opts)
-
-
-class FanoutConsumer(ConsumerBase):
- """Consumer class for 'fanout'."""
-
- def __init__(self, conf, session, topic, callback):
- """Init a 'fanout' queue.
-
- 'session' is the amqp session to use
- 'topic' is the topic to listen on
- 'callback' is the callback to call when messages are received
- """
- self.conf = conf
-
- link_opts = {"exclusive": True}
-
- if conf.qpid_topology_version == 1:
- node_name = "%s_fanout" % topic
- node_opts = {"durable": False, "type": "fanout"}
- elif conf.qpid_topology_version == 2:
- node_name = "amq.topic/fanout/%s" % topic
- node_opts = {}
- else:
- raise_invalid_topology_version(conf)
-
- super(FanoutConsumer, self).__init__(conf, session, callback,
- node_name, node_opts, None,
- link_opts)
-
-
-class Publisher(object):
- """Base Publisher class."""
-
- def __init__(self, conf, session, node_name, node_opts=None):
- """Init the Publisher class with the exchange_name, routing_key,
- and other options
- """
- self.sender = None
- self.session = session
-
- if conf.qpid_topology_version == 1:
- addr_opts = {
- "create": "always",
- "node": {
- "type": "topic",
- "x-declare": {
- "durable": False,
- # auto-delete isn't implemented for exchanges in qpid,
- # but put in here anyway
- "auto-delete": True,
- },
- },
- }
- if node_opts:
- addr_opts["node"]["x-declare"].update(node_opts)
-
- self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts))
- elif conf.qpid_topology_version == 2:
- self.address = node_name
- else:
- raise_invalid_topology_version(conf)
-
- self.reconnect(session)
-
- def reconnect(self, session):
- """Re-establish the Sender after a reconnection."""
- self.sender = session.sender(self.address)
-
- def _pack_json_msg(self, msg):
- """Qpid cannot serialize dicts containing strings longer than 65535
- characters. This function dumps the message content to a JSON
- string, which Qpid is able to handle.
-
- :param msg: May be either a Qpid Message object or a bare dict.
- :returns: A Qpid Message with its content field JSON encoded.
- """
- try:
- msg.content = jsonutils.dumps(msg.content)
- except AttributeError:
- # Need to have a Qpid message so we can set the content_type.
- msg = qpid_messaging.Message(jsonutils.dumps(msg))
- msg.content_type = JSON_CONTENT_TYPE
- return msg
-
- def send(self, msg):
- """Send a message."""
- try:
- # Check if Qpid can encode the message
- check_msg = msg
- if not hasattr(check_msg, 'content_type'):
- check_msg = qpid_messaging.Message(msg)
- content_type = check_msg.content_type
- enc, dec = qpid_messaging.message.get_codec(content_type)
- enc(check_msg.content)
- except qpid_codec.CodecException:
- # This means the message couldn't be serialized as a dict.
- msg = self._pack_json_msg(msg)
- self.sender.send(msg)
-
-
-class DirectPublisher(Publisher):
- """Publisher class for 'direct'."""
- def __init__(self, conf, session, msg_id):
- """Init a 'direct' publisher."""
-
- if conf.qpid_topology_version == 1:
- node_name = msg_id
- node_opts = {"type": "direct"}
- elif conf.qpid_topology_version == 2:
- node_name = "amq.direct/%s" % msg_id
- node_opts = {}
- else:
- raise_invalid_topology_version(conf)
-
- super(DirectPublisher, self).__init__(conf, session, node_name,
- node_opts)
-
-
-class TopicPublisher(Publisher):
- """Publisher class for 'topic'."""
- def __init__(self, conf, session, topic):
- """Init a 'topic' publisher.
- """
- exchange_name = rpc_amqp.get_control_exchange(conf)
-
- if conf.qpid_topology_version == 1:
- node_name = "%s/%s" % (exchange_name, topic)
- elif conf.qpid_topology_version == 2:
- node_name = "amq.topic/topic/%s/%s" % (exchange_name, topic)
- else:
- raise_invalid_topology_version(conf)
-
- super(TopicPublisher, self).__init__(conf, session, node_name)
-
-
-class FanoutPublisher(Publisher):
- """Publisher class for 'fanout'."""
- def __init__(self, conf, session, topic):
- """Init a 'fanout' publisher.
- """
-
- if conf.qpid_topology_version == 1:
- node_name = "%s_fanout" % topic
- node_opts = {"type": "fanout"}
- elif conf.qpid_topology_version == 2:
- node_name = "amq.topic/fanout/%s" % topic
- node_opts = {}
- else:
- raise_invalid_topology_version(conf)
-
- super(FanoutPublisher, self).__init__(conf, session, node_name,
- node_opts)
-
-
-class NotifyPublisher(Publisher):
- """Publisher class for notifications."""
- def __init__(self, conf, session, topic):
- """Init a 'topic' publisher.
- """
- exchange_name = rpc_amqp.get_control_exchange(conf)
- node_opts = {"durable": True}
-
- if conf.qpid_topology_version == 1:
- node_name = "%s/%s" % (exchange_name, topic)
- elif conf.qpid_topology_version == 2:
- node_name = "amq.topic/topic/%s/%s" % (exchange_name, topic)
- else:
- raise_invalid_topology_version(conf)
-
- super(NotifyPublisher, self).__init__(conf, session, node_name,
- node_opts)
-
-
-class Connection(object):
- """Connection object."""
-
- pool = None
-
- def __init__(self, conf, server_params=None):
- if not qpid_messaging:
- raise ImportError("Failed to import qpid.messaging")
-
- self.session = None
- self.consumers = {}
- self.consumer_thread = None
- self.proxy_callbacks = []
- self.conf = conf
-
- if server_params and 'hostname' in server_params:
- # NOTE(russellb) This enables support for cast_to_server.
- server_params['qpid_hosts'] = [
- '%s:%d' % (server_params['hostname'],
- server_params.get('port', 5672))
- ]
-
- params = {
- 'qpid_hosts': self.conf.qpid_hosts,
- 'username': self.conf.qpid_username,
- 'password': self.conf.qpid_password,
- }
- params.update(server_params or {})
-
- self.brokers = params['qpid_hosts']
- self.username = params['username']
- self.password = params['password']
- self.connection_create(self.brokers[0])
- self.reconnect()
-
- def connection_create(self, broker):
- # Create the connection - this does not open the connection
- self.connection = qpid_messaging.Connection(broker)
-
- # Check if flags are set and if so set them for the connection
- # before we call open
- self.connection.username = self.username
- self.connection.password = self.password
-
- self.connection.sasl_mechanisms = self.conf.qpid_sasl_mechanisms
- # Reconnection is done by self.reconnect()
- self.connection.reconnect = False
- self.connection.heartbeat = self.conf.qpid_heartbeat
- self.connection.transport = self.conf.qpid_protocol
- self.connection.tcp_nodelay = self.conf.qpid_tcp_nodelay
-
- def _register_consumer(self, consumer):
- self.consumers[str(consumer.get_receiver())] = consumer
-
- def _lookup_consumer(self, receiver):
- return self.consumers[str(receiver)]
-
- def reconnect(self):
- """Handles reconnecting and re-establishing sessions and queues."""
- attempt = 0
- delay = 1
- while True:
- # Close the session if necessary
- if self.connection.opened():
- try:
- self.connection.close()
- except qpid_exceptions.ConnectionError:
- pass
-
- broker = self.brokers[attempt % len(self.brokers)]
- attempt += 1
-
- try:
- self.connection_create(broker)
- self.connection.open()
- except qpid_exceptions.ConnectionError as e:
- msg_dict = dict(e=e, delay=delay)
- msg = _("Unable to connect to AMQP server: %(e)s. "
- "Sleeping %(delay)s seconds") % msg_dict
- LOG.error(msg)
- time.sleep(delay)
- delay = min(2 * delay, 60)
- else:
- LOG.info(_('Connected to AMQP server on %s'), broker)
- break
-
- self.session = self.connection.session()
-
- if self.consumers:
- consumers = self.consumers
- self.consumers = {}
-
- for consumer in six.itervalues(consumers):
- consumer.reconnect(self.session)
- self._register_consumer(consumer)
-
- LOG.debug(_("Re-established AMQP queues"))
-
- def ensure(self, error_callback, method, *args, **kwargs):
- while True:
- try:
- return method(*args, **kwargs)
- except (qpid_exceptions.Empty,
- qpid_exceptions.ConnectionError) as e:
- if error_callback:
- error_callback(e)
- self.reconnect()
-
- def close(self):
- """Close/release this connection."""
- self.cancel_consumer_thread()
- self.wait_on_proxy_callbacks()
- try:
- self.connection.close()
- except Exception:
- # NOTE(dripton) Logging exceptions that happen during cleanup just
- # causes confusion; there's really nothing useful we can do with
- # them.
- pass
- self.connection = None
-
- def reset(self):
- """Reset a connection so it can be used again."""
- self.cancel_consumer_thread()
- self.wait_on_proxy_callbacks()
- self.session.close()
- self.session = self.connection.session()
- self.consumers = {}
-
- def declare_consumer(self, consumer_cls, topic, callback):
- """Create a Consumer using the class that was passed in and
- add it to our list of consumers
- """
- def _connect_error(exc):
- log_info = {'topic': topic, 'err_str': str(exc)}
- LOG.error(_("Failed to declare consumer for topic '%(topic)s': "
- "%(err_str)s") % log_info)
-
- def _declare_consumer():
- consumer = consumer_cls(self.conf, self.session, topic, callback)
- self._register_consumer(consumer)
- return consumer
-
- return self.ensure(_connect_error, _declare_consumer)
-
- def iterconsume(self, limit=None, timeout=None):
- """Return an iterator that will consume from all queues/consumers."""
-
- def _error_callback(exc):
- if isinstance(exc, qpid_exceptions.Empty):
- LOG.debug(_('Timed out waiting for RPC response: %s') %
- str(exc))
- raise rpc_common.Timeout()
- else:
- LOG.exception(_('Failed to consume message from queue: %s') %
- str(exc))
-
- def _consume():
- nxt_receiver = self.session.next_receiver(timeout=timeout)
- try:
- self._lookup_consumer(nxt_receiver).consume()
- except Exception:
- LOG.exception(_("Error processing message. Skipping it."))
-
- for iteration in itertools.count(0):
- if limit and iteration >= limit:
- raise StopIteration
- yield self.ensure(_error_callback, _consume)
-
- def cancel_consumer_thread(self):
- """Cancel a consumer thread."""
- if self.consumer_thread is not None:
- self.consumer_thread.kill()
- try:
- self.consumer_thread.wait()
- except greenlet.GreenletExit:
- pass
- self.consumer_thread = None
-
- def wait_on_proxy_callbacks(self):
- """Wait for all proxy callback threads to exit."""
- for proxy_cb in self.proxy_callbacks:
- proxy_cb.wait()
-
- def publisher_send(self, cls, topic, msg):
- """Send to a publisher based on the publisher class."""
-
- def _connect_error(exc):
- log_info = {'topic': topic, 'err_str': str(exc)}
- LOG.exception(_("Failed to publish message to topic "
- "'%(topic)s': %(err_str)s") % log_info)
-
- def _publisher_send():
- publisher = cls(self.conf, self.session, topic)
- publisher.send(msg)
-
- return self.ensure(_connect_error, _publisher_send)
-
- def declare_direct_consumer(self, topic, callback):
- """Create a 'direct' queue.
- In nova's use, this is generally a msg_id queue used for
- responses to call/multicall.
- """
- self.declare_consumer(DirectConsumer, topic, callback)
-
- def declare_topic_consumer(self, topic, callback=None, queue_name=None,
- exchange_name=None):
- """Create a 'topic' consumer."""
- self.declare_consumer(functools.partial(TopicConsumer,
- name=queue_name,
- exchange_name=exchange_name,
- ),
- topic, callback)
-
- def declare_fanout_consumer(self, topic, callback):
- """Create a 'fanout' consumer."""
- self.declare_consumer(FanoutConsumer, topic, callback)
-
- def direct_send(self, msg_id, msg):
- """Send a 'direct' message."""
- self.publisher_send(DirectPublisher, msg_id, msg)
-
- def topic_send(self, topic, msg, timeout=None):
- """Send a 'topic' message."""
- #
- # We want to create a message with attributes, e.g. a TTL. We
- # don't really need to keep 'msg' in its JSON format any longer
- # so let's create an actual qpid message here and get some
- # value-add on the go.
- #
- # WARNING: Request timeout happens to be in the same units as
- # qpid's TTL (seconds). If this changes in the future, then this
- # will need to be altered accordingly.
- #
- qpid_message = qpid_messaging.Message(content=msg, ttl=timeout)
- self.publisher_send(TopicPublisher, topic, qpid_message)
-
- def fanout_send(self, topic, msg):
- """Send a 'fanout' message."""
- self.publisher_send(FanoutPublisher, topic, msg)
-
- def notify_send(self, topic, msg, **kwargs):
- """Send a notify message on a topic."""
- self.publisher_send(NotifyPublisher, topic, msg)
-
- def consume(self, limit=None):
- """Consume from all queues/consumers."""
- it = self.iterconsume(limit=limit)
- while True:
- try:
- six.next(it)
- except StopIteration:
- return
-
- def consume_in_thread(self):
- """Consumer from all queues/consumers in a greenthread."""
- @excutils.forever_retry_uncaught_exceptions
- def _consumer_thread():
- try:
- self.consume()
- except greenlet.GreenletExit:
- return
- if self.consumer_thread is None:
- self.consumer_thread = eventlet.spawn(_consumer_thread)
- return self.consumer_thread
-
- def create_consumer(self, topic, proxy, fanout=False):
- """Create a consumer that calls a method in a proxy object."""
- proxy_cb = rpc_amqp.ProxyCallback(
- self.conf, proxy,
- rpc_amqp.get_connection_pool(self.conf, Connection))
- self.proxy_callbacks.append(proxy_cb)
-
- if fanout:
- consumer = FanoutConsumer(self.conf, self.session, topic, proxy_cb)
- else:
- consumer = TopicConsumer(self.conf, self.session, topic, proxy_cb)
-
- self._register_consumer(consumer)
-
- return consumer
-
- def create_worker(self, topic, proxy, pool_name):
- """Create a worker that calls a method in a proxy object."""
- proxy_cb = rpc_amqp.ProxyCallback(
- self.conf, proxy,
- rpc_amqp.get_connection_pool(self.conf, Connection))
- self.proxy_callbacks.append(proxy_cb)
-
- consumer = TopicConsumer(self.conf, self.session, topic, proxy_cb,
- name=pool_name)
-
- self._register_consumer(consumer)
-
- return consumer
-
- def join_consumer_pool(self, callback, pool_name, topic,
- exchange_name=None, ack_on_error=True):
- """Register as a member of a group of consumers for a given topic from
- the specified exchange.
-
- Exactly one member of a given pool will receive each message.
-
- A message will be delivered to multiple pools, if more than
- one is created.
- """
- callback_wrapper = rpc_amqp.CallbackWrapper(
- conf=self.conf,
- callback=callback,
- connection_pool=rpc_amqp.get_connection_pool(self.conf,
- Connection),
- wait_for_consumers=not ack_on_error
- )
- self.proxy_callbacks.append(callback_wrapper)
-
- consumer = TopicConsumer(conf=self.conf,
- session=self.session,
- topic=topic,
- callback=callback_wrapper,
- name=pool_name,
- exchange_name=exchange_name)
-
- self._register_consumer(consumer)
- return consumer
-
-
-def create_connection(conf, new=True):
- """Create a connection."""
- return rpc_amqp.create_connection(
- conf, new,
- rpc_amqp.get_connection_pool(conf, Connection))
-
-
-def multicall(conf, context, topic, msg, timeout=None):
- """Make a call that returns multiple times."""
- return rpc_amqp.multicall(
- conf, context, topic, msg, timeout,
- rpc_amqp.get_connection_pool(conf, Connection))
-
-
-def call(conf, context, topic, msg, timeout=None):
- """Sends a message on a topic and wait for a response."""
- return rpc_amqp.call(
- conf, context, topic, msg, timeout,
- rpc_amqp.get_connection_pool(conf, Connection))
-
-
-def cast(conf, context, topic, msg):
- """Sends a message on a topic without waiting for a response."""
- return rpc_amqp.cast(
- conf, context, topic, msg,
- rpc_amqp.get_connection_pool(conf, Connection))
-
-
-def fanout_cast(conf, context, topic, msg):
- """Sends a message on a fanout exchange without waiting for a response."""
- return rpc_amqp.fanout_cast(
- conf, context, topic, msg,
- rpc_amqp.get_connection_pool(conf, Connection))
-
-
-def cast_to_server(conf, context, server_params, topic, msg):
- """Sends a message on a topic to a specific server."""
- return rpc_amqp.cast_to_server(
- conf, context, server_params, topic, msg,
- rpc_amqp.get_connection_pool(conf, Connection))
-
-
-def fanout_cast_to_server(conf, context, server_params, topic, msg):
- """Sends a message on a fanout exchange to a specific server."""
- return rpc_amqp.fanout_cast_to_server(
- conf, context, server_params, topic, msg,
- rpc_amqp.get_connection_pool(conf, Connection))
-
-
-def notify(conf, context, topic, msg, envelope):
- """Sends a notification event on a topic."""
- return rpc_amqp.notify(conf, context, topic, msg,
- rpc_amqp.get_connection_pool(conf, Connection),
- envelope)
-
-
-def cleanup():
- return rpc_amqp.cleanup(Connection.pool)
diff --git a/keystone/openstack/common/rpc/impl_zmq.py b/keystone/openstack/common/rpc/impl_zmq.py
deleted file mode 100644
index 394def1c0..000000000
--- a/keystone/openstack/common/rpc/impl_zmq.py
+++ /dev/null
@@ -1,818 +0,0 @@
-# Copyright 2011 Cloudscaling Group, Inc
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import os
-import pprint
-import re
-import socket
-import sys
-import types
-import uuid
-
-import eventlet
-import greenlet
-from oslo.config import cfg
-import six
-from six import moves
-
-from keystone.openstack.common import excutils
-from keystone.openstack.common.gettextutils import _ # noqa
-from keystone.openstack.common import importutils
-from keystone.openstack.common import jsonutils
-from keystone.openstack.common.rpc import common as rpc_common
-
-zmq = importutils.try_import('eventlet.green.zmq')
-
-# Aliases, for convenience; these are not modified.
-pformat = pprint.pformat
-Timeout = eventlet.timeout.Timeout
-LOG = rpc_common.LOG
-RemoteError = rpc_common.RemoteError
-RPCException = rpc_common.RPCException
-
-zmq_opts = [
- cfg.StrOpt('rpc_zmq_bind_address', default='*',
- help='ZeroMQ bind address. Should be a wildcard (*), '
- 'an ethernet interface, or IP. '
- 'The "host" option should point or resolve to this '
- 'address.'),
-
- # The module.Class to use for matchmaking.
- cfg.StrOpt(
- 'rpc_zmq_matchmaker',
- default=('keystone.openstack.common.rpc.'
- 'matchmaker.MatchMakerLocalhost'),
- help='MatchMaker driver',
- ),
-
- # The following port is unassigned by IANA as of 2012-05-21
- cfg.IntOpt('rpc_zmq_port', default=9501,
- help='ZeroMQ receiver listening port'),
-
- cfg.IntOpt('rpc_zmq_contexts', default=1,
- help='Number of ZeroMQ contexts, defaults to 1'),
-
- cfg.IntOpt('rpc_zmq_topic_backlog', default=None,
- help='Maximum number of ingress messages to locally buffer '
- 'per topic. Default is unlimited.'),
-
- cfg.StrOpt('rpc_zmq_ipc_dir', default='/var/run/openstack',
- help='Directory for holding IPC sockets'),
-
- cfg.StrOpt('rpc_zmq_host', default=socket.gethostname(),
- help='Name of this node. Must be a valid hostname, FQDN, or '
- 'IP address. Must match "host" option, if running Nova.')
-]
-
-
-CONF = cfg.CONF
-CONF.register_opts(zmq_opts)
-
-ZMQ_CTX = None # ZeroMQ Context, must be global.
-matchmaker = None # memoized matchmaker object
-
-
-def _serialize(data):
- """Serialization wrapper.
-
- We prefer using JSON, but it cannot encode all types.
- Error if a developer passes us bad data.
- """
- try:
- return jsonutils.dumps(data, ensure_ascii=True)
- except TypeError:
- with excutils.save_and_reraise_exception():
- LOG.error(_("JSON serialization failed."))
-
-
-def _deserialize(data):
- """Deserialization wrapper."""
- LOG.debug(_("Deserializing: %s"), data)
- return jsonutils.loads(data)
-
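-# Round-trip sketch (illustrative):
-#     _deserialize(_serialize({'method': 'ping', 'args': {}}))
-# returns the original dict; a payload JSON cannot encode (e.g. a
-# socket object) is logged and the TypeError re-raised.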
-
-class ZmqSocket(object):
- """A tiny wrapper around ZeroMQ.
-
- Simplifies the send/recv protocol and connection management.
- Can be used as a Context (supports the 'with' statement).
- """
-
- def __init__(self, addr, zmq_type, bind=True, subscribe=None):
- self.sock = _get_ctxt().socket(zmq_type)
- self.addr = addr
- self.type = zmq_type
- self.subscriptions = []
-
- # Support failures on sending/receiving on wrong socket type.
- self.can_recv = zmq_type in (zmq.PULL, zmq.SUB)
- self.can_send = zmq_type in (zmq.PUSH, zmq.PUB)
- self.can_sub = zmq_type in (zmq.SUB, )
-
- # Support list, str, & None for subscribe arg (cast to list)
- do_sub = {
- list: subscribe,
- str: [subscribe],
- type(None): []
- }[type(subscribe)]
-
- for f in do_sub:
- self.subscribe(f)
-
- str_data = {'addr': addr, 'type': self.socket_s(),
- 'subscribe': subscribe, 'bind': bind}
-
- LOG.debug(_("Connecting to %(addr)s with %(type)s"), str_data)
- LOG.debug(_("-> Subscribed to %(subscribe)s"), str_data)
- LOG.debug(_("-> bind: %(bind)s"), str_data)
-
- try:
- if bind:
- self.sock.bind(addr)
- else:
- self.sock.connect(addr)
- except Exception:
- raise RPCException(_("Could not open socket."))
-
- def socket_s(self):
- """Get socket type as string."""
- t_enum = ('PUSH', 'PULL', 'PUB', 'SUB', 'REP', 'REQ', 'ROUTER',
- 'DEALER')
- return dict(map(lambda t: (getattr(zmq, t), t), t_enum))[self.type]
-
- def subscribe(self, msg_filter):
- """Subscribe."""
- if not self.can_sub:
- raise RPCException("Cannot subscribe on this socket.")
- LOG.debug(_("Subscribing to %s"), msg_filter)
-
- try:
- self.sock.setsockopt(zmq.SUBSCRIBE, msg_filter)
- except Exception:
- return
-
- self.subscriptions.append(msg_filter)
-
- def unsubscribe(self, msg_filter):
- """Unsubscribe."""
- if msg_filter not in self.subscriptions:
- return
- self.sock.setsockopt(zmq.UNSUBSCRIBE, msg_filter)
- self.subscriptions.remove(msg_filter)
-
- def close(self):
- if self.sock is None or self.sock.closed:
- return
-
- # We must unsubscribe, or we'll leak descriptors.
- if self.subscriptions:
- for f in self.subscriptions:
- try:
- self.sock.setsockopt(zmq.UNSUBSCRIBE, f)
- except Exception:
- pass
- self.subscriptions = []
-
- try:
- # Default is to linger
- self.sock.close()
- except Exception:
- # While this is a bad thing to happen,
- # it would be much worse if some of the code calling this
- # were to fail. For now, lets log, and later evaluate
- # if we can safely raise here.
- LOG.error(_("ZeroMQ socket could not be closed."))
- self.sock = None
-
- def recv(self, **kwargs):
- if not self.can_recv:
- raise RPCException(_("You cannot recv on this socket."))
- return self.sock.recv_multipart(**kwargs)
-
- def send(self, data, **kwargs):
- if not self.can_send:
- raise RPCException(_("You cannot send on this socket."))
- self.sock.send_multipart(data, **kwargs)
-
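-# Usage sketch (illustrative, assuming a reachable endpoint):
-#     sock = ZmqSocket('tcp://127.0.0.1:9501', zmq.PUSH, bind=False)
-#     sock.send([b'topic', b'payload'])
-#     sock.close()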
-
-class ZmqClient(object):
- """Client for ZMQ sockets."""
-
- def __init__(self, addr):
- self.outq = ZmqSocket(addr, zmq.PUSH, bind=False)
-
- def cast(self, msg_id, topic, data, envelope):
- msg_id = msg_id or 0
-
- if not envelope:
- self.outq.send(map(bytes,
- (msg_id, topic, 'cast', _serialize(data))))
- return
-
- rpc_envelope = rpc_common.serialize_msg(data[1], envelope)
- zmq_msg = moves.reduce(lambda x, y: x + y, rpc_envelope.items())
- self.outq.send(map(bytes,
- (msg_id, topic, 'impl_zmq_v2', data[0]) + zmq_msg))
-
- def close(self):
- self.outq.close()
-
-
-class RpcContext(rpc_common.CommonRpcContext):
- """Context that supports replying to a rpc.call."""
- def __init__(self, **kwargs):
- self.replies = []
- super(RpcContext, self).__init__(**kwargs)
-
- def deepcopy(self):
- values = self.to_dict()
- values['replies'] = self.replies
- return self.__class__(**values)
-
- def reply(self, reply=None, failure=None, ending=False):
- if ending:
- return
- self.replies.append(reply)
-
- @classmethod
- def marshal(self, ctx):
- ctx_data = ctx.to_dict()
- return _serialize(ctx_data)
-
- @classmethod
- def unmarshal(self, data):
- return RpcContext.from_dict(_deserialize(data))
-
-
-class InternalContext(object):
- """Used by ConsumerBase as a private context for - methods."""
-
- def __init__(self, proxy):
- self.proxy = proxy
- self.msg_waiter = None
-
- def _get_response(self, ctx, proxy, topic, data):
- """Process a curried message and cast the result to topic."""
- LOG.debug(_("Running func with context: %s"), ctx.to_dict())
- data.setdefault('version', None)
- data.setdefault('args', {})
-
- try:
- result = proxy.dispatch(
- ctx, data['version'], data['method'],
- data.get('namespace'), **data['args'])
- return ConsumerBase.normalize_reply(result, ctx.replies)
- except greenlet.GreenletExit:
- # ignore these since they are just from shutdowns
- pass
- except rpc_common.ClientException as e:
- LOG.debug(_("Expected exception during message handling (%s)") %
- e._exc_info[1])
- return {'exc':
- rpc_common.serialize_remote_exception(e._exc_info,
- log_failure=False)}
- except Exception:
- LOG.error(_("Exception during message handling"))
- return {'exc':
- rpc_common.serialize_remote_exception(sys.exc_info())}
-
- def reply(self, ctx, proxy,
- msg_id=None, context=None, topic=None, msg=None):
- """Reply to a casted call."""
- # NOTE(ewindisch): context kwarg exists for Grizzly compat.
- # this may be able to be removed earlier than
- # 'I' if ConsumerBase.process were refactored.
- if type(msg) is list:
- payload = msg[-1]
- else:
- payload = msg
-
- response = ConsumerBase.normalize_reply(
- self._get_response(ctx, proxy, topic, payload),
- ctx.replies)
-
- LOG.debug(_("Sending reply"))
- _multi_send(_cast, ctx, topic, {
- 'method': '-process_reply',
- 'args': {
- 'msg_id': msg_id, # Include for Folsom compat.
- 'response': response
- }
- }, _msg_id=msg_id)
-
-
-class ConsumerBase(object):
- """Base Consumer."""
-
- def __init__(self):
- self.private_ctx = InternalContext(None)
-
- @classmethod
- def normalize_reply(self, result, replies):
- #TODO(ewindisch): re-evaluate and document this method.
- if isinstance(result, types.GeneratorType):
- return list(result)
- elif replies:
- return replies
- else:
- return [result]
-
- def process(self, proxy, ctx, data):
- data.setdefault('version', None)
- data.setdefault('args', {})
-
- # Methods starting with '-' are processed internally
- # ('-' is not a valid public method name).
- method = data.get('method')
- if not method:
- LOG.error(_("RPC message did not include method."))
- return
-
- # Internal method
- # uses internal context for safety.
- if method == '-reply':
- self.private_ctx.reply(ctx, proxy, **data['args'])
- return
-
- proxy.dispatch(ctx, data['version'],
- data['method'], data.get('namespace'), **data['args'])
-
-
-class ZmqBaseReactor(ConsumerBase):
- """A consumer class implementing a centralized casting broker (PULL-PUSH).
-
- Used for RoundRobin requests.
- """
-
- def __init__(self, conf):
- super(ZmqBaseReactor, self).__init__()
-
- self.proxies = {}
- self.threads = []
- self.sockets = []
- self.subscribe = {}
-
- self.pool = eventlet.greenpool.GreenPool(conf.rpc_thread_pool_size)
-
- def register(self, proxy, in_addr, zmq_type_in,
- in_bind=True, subscribe=None):
-
- LOG.info(_("Registering reactor"))
-
- if zmq_type_in not in (zmq.PULL, zmq.SUB):
- raise RPCException("Bad input socktype")
-
- # Items push in.
- inq = ZmqSocket(in_addr, zmq_type_in, bind=in_bind,
- subscribe=subscribe)
-
- self.proxies[inq] = proxy
- self.sockets.append(inq)
-
- LOG.info(_("In reactor registered"))
-
- def consume_in_thread(self):
- @excutils.forever_retry_uncaught_exceptions
- def _consume(sock):
- LOG.info(_("Consuming socket"))
- while True:
- self.consume(sock)
-
- for k in self.proxies.keys():
- self.threads.append(
- self.pool.spawn(_consume, k)
- )
-
- def wait(self):
- for t in self.threads:
- t.wait()
-
- def close(self):
- for s in self.sockets:
- s.close()
-
- for t in self.threads:
- t.kill()
-
-
-class ZmqProxy(ZmqBaseReactor):
- """A consumer class implementing a topic-based proxy.
-
- Forwards to IPC sockets.
- """
-
- def __init__(self, conf):
- super(ZmqProxy, self).__init__(conf)
- pathsep = set((os.path.sep or '', os.path.altsep or '', '/', '\\'))
- self.badchars = re.compile(r'[%s]' % re.escape(''.join(pathsep)))
-
- self.topic_proxy = {}
-
- def consume(self, sock):
- ipc_dir = CONF.rpc_zmq_ipc_dir
-
- data = sock.recv(copy=False)
- topic = data[1].bytes
-
- if topic.startswith('fanout~'):
- sock_type = zmq.PUB
- topic = topic.split('.', 1)[0]
- elif topic.startswith('zmq_replies'):
- sock_type = zmq.PUB
- else:
- sock_type = zmq.PUSH
-
- if topic not in self.topic_proxy:
- def publisher(waiter):
- LOG.info(_("Creating proxy for topic: %s"), topic)
-
- try:
- # The topic is received over the network,
- # don't trust this input.
- if self.badchars.search(topic) is not None:
- emsg = _("Topic contained dangerous characters.")
- LOG.warn(emsg)
- raise RPCException(emsg)
-
- out_sock = ZmqSocket("ipc://%s/zmq_topic_%s" %
- (ipc_dir, topic),
- sock_type, bind=True)
- except RPCException:
- waiter.send_exception(*sys.exc_info())
- return
-
- self.topic_proxy[topic] = eventlet.queue.LightQueue(
- CONF.rpc_zmq_topic_backlog)
- self.sockets.append(out_sock)
-
- # It takes some time for a pub socket to open,
- # before we can have any faith in doing a send() to it.
- if sock_type == zmq.PUB:
- eventlet.sleep(.5)
-
- waiter.send(True)
-
- while True:
- data = self.topic_proxy[topic].get()
- out_sock.send(data, copy=False)
-
- wait_sock_creation = eventlet.event.Event()
- eventlet.spawn(publisher, wait_sock_creation)
-
- try:
- wait_sock_creation.wait()
- except RPCException:
- LOG.error(_("Topic socket file creation failed."))
- return
-
- try:
- self.topic_proxy[topic].put_nowait(data)
- except eventlet.queue.Full:
- LOG.error(_("Local per-topic backlog buffer full for topic "
- "%(topic)s. Dropping message.") % {'topic': topic})
-
- def consume_in_thread(self):
- """Runs the ZmqProxy service."""
- ipc_dir = CONF.rpc_zmq_ipc_dir
- consume_in = "tcp://%s:%s" % \
- (CONF.rpc_zmq_bind_address,
- CONF.rpc_zmq_port)
- consumption_proxy = InternalContext(None)
-
- try:
- os.makedirs(ipc_dir)
- except os.error:
- if not os.path.isdir(ipc_dir):
- with excutils.save_and_reraise_exception():
- LOG.error(_("Required IPC directory does not exist at"
- " %s") % (ipc_dir, ))
- try:
- self.register(consumption_proxy,
- consume_in,
- zmq.PULL)
- except zmq.ZMQError:
- if os.access(ipc_dir, os.X_OK):
- with excutils.save_and_reraise_exception():
- LOG.error(_("Permission denied to IPC directory at"
- " %s") % (ipc_dir, ))
- with excutils.save_and_reraise_exception():
- LOG.error(_("Could not create ZeroMQ receiver daemon. "
- "Socket may already be in use."))
-
- super(ZmqProxy, self).consume_in_thread()
-
-
-def unflatten_envelope(packenv):
- """Unflattens the RPC envelope.
-
- Takes a list and returns a dictionary.
- i.e. [1,2,3,4] => {1: 2, 3: 4}
- """
- i = iter(packenv)
- h = {}
- try:
- while True:
- k = six.next(i)
- h[k] = six.next(i)
- except StopIteration:
- return h
-
-
-class ZmqReactor(ZmqBaseReactor):
- """A consumer class implementing a consumer for messages.
-
- Can also be used as a 1:1 proxy
- """
-
- def __init__(self, conf):
- super(ZmqReactor, self).__init__(conf)
-
- def consume(self, sock):
- #TODO(ewindisch): use zero-copy (i.e. references, not copying)
- data = sock.recv()
- LOG.debug(_("CONSUMER RECEIVED DATA: %s"), data)
-
- proxy = self.proxies[sock]
-
- if data[2] == 'cast': # Legacy protocol
- packenv = data[3]
-
- ctx, msg = _deserialize(packenv)
- request = rpc_common.deserialize_msg(msg)
- ctx = RpcContext.unmarshal(ctx)
- elif data[2] == 'impl_zmq_v2':
- packenv = data[4:]
-
- msg = unflatten_envelope(packenv)
- request = rpc_common.deserialize_msg(msg)
-
- # Unmarshal only after verifying the message.
- ctx = RpcContext.unmarshal(data[3])
- else:
- LOG.error(_("ZMQ Envelope version unsupported or unknown."))
- return
-
- self.pool.spawn_n(self.process, proxy, ctx, request)
-
-
-class Connection(rpc_common.Connection):
- """Manages connections and threads."""
-
- def __init__(self, conf):
- self.topics = []
- self.reactor = ZmqReactor(conf)
-
- def create_consumer(self, topic, proxy, fanout=False):
- # Register with matchmaker.
- _get_matchmaker().register(topic, CONF.rpc_zmq_host)
-
- # Subscription scenarios
- if fanout:
- sock_type = zmq.SUB
- subscribe = fanout if isinstance(fanout, str) else ''
- topic = 'fanout~' + topic.split('.', 1)[0]
- else:
- sock_type = zmq.PULL
- subscribe = None
- topic = '.'.join((topic.split('.', 1)[0], CONF.rpc_zmq_host))
-
- if topic in self.topics:
- LOG.info(_("Skipping topic registration. Already registered."))
- return
-
- # Receive messages from (local) proxy
- inaddr = "ipc://%s/zmq_topic_%s" % \
- (CONF.rpc_zmq_ipc_dir, topic)
-
- LOG.debug(_("Consumer is a zmq.%s"),
- ['PULL', 'SUB'][sock_type == zmq.SUB])
-
- self.reactor.register(proxy, inaddr, sock_type,
- subscribe=subscribe, in_bind=False)
- self.topics.append(topic)
-
- def close(self):
- _get_matchmaker().stop_heartbeat()
- for topic in self.topics:
- _get_matchmaker().unregister(topic, CONF.rpc_zmq_host)
-
- self.reactor.close()
- self.topics = []
-
- def wait(self):
- self.reactor.wait()
-
- def consume_in_thread(self):
- _get_matchmaker().start_heartbeat()
- self.reactor.consume_in_thread()
-
-
-def _cast(addr, context, topic, msg, timeout=None, envelope=False,
- _msg_id=None):
- timeout_cast = timeout or CONF.rpc_cast_timeout
- payload = [RpcContext.marshal(context), msg]
-
- with Timeout(timeout_cast, exception=rpc_common.Timeout):
- try:
- conn = ZmqClient(addr)
-
- # assumes cast can't return an exception
- conn.cast(_msg_id, topic, payload, envelope)
- except zmq.ZMQError:
- raise RPCException("Cast failed. ZMQ Socket Exception")
- finally:
- if 'conn' in vars():
- conn.close()
-
-
-def _call(addr, context, topic, msg, timeout=None,
- envelope=False):
- # timeout_response is how long we wait for a response
- timeout = timeout or CONF.rpc_response_timeout
-
- # The msg_id is used to track replies.
- msg_id = uuid.uuid4().hex
-
- # Replies always come into the reply service.
- reply_topic = "zmq_replies.%s" % CONF.rpc_zmq_host
-
- LOG.debug(_("Creating payload"))
- # Curry the original request into a reply method.
- mcontext = RpcContext.marshal(context)
- payload = {
- 'method': '-reply',
- 'args': {
- 'msg_id': msg_id,
- 'topic': reply_topic,
- # TODO(ewindisch): safe to remove mcontext in I.
- 'msg': [mcontext, msg]
- }
- }
-
- LOG.debug(_("Creating queue socket for reply waiter"))
-
- # Messages arriving async.
- # TODO(ewindisch): have reply consumer with dynamic subscription mgmt
- with Timeout(timeout, exception=rpc_common.Timeout):
- try:
- msg_waiter = ZmqSocket(
- "ipc://%s/zmq_topic_zmq_replies.%s" %
- (CONF.rpc_zmq_ipc_dir,
- CONF.rpc_zmq_host),
- zmq.SUB, subscribe=msg_id, bind=False
- )
-
- LOG.debug(_("Sending cast"))
- _cast(addr, context, topic, payload, envelope)
-
- LOG.debug(_("Cast sent; Waiting reply"))
- # Blocks until receives reply
- msg = msg_waiter.recv()
- LOG.debug(_("Received message: %s"), msg)
- LOG.debug(_("Unpacking response"))
-
- if msg[2] == 'cast': # Legacy version
- raw_msg = _deserialize(msg[-1])[-1]
- elif msg[2] == 'impl_zmq_v2':
- rpc_envelope = unflatten_envelope(msg[4:])
- raw_msg = rpc_common.deserialize_msg(rpc_envelope)
- else:
- raise rpc_common.UnsupportedRpcEnvelopeVersion(
- _("Unsupported or unknown ZMQ envelope returned."))
-
- responses = raw_msg['args']['response']
- # ZMQError trumps the Timeout error.
- except zmq.ZMQError:
- raise RPCException("ZMQ Socket Error")
- except (IndexError, KeyError):
- raise RPCException(_("RPC Message Invalid."))
- finally:
- if 'msg_waiter' in vars():
- msg_waiter.close()
-
- # It seems we don't need to do all of the following,
- # but perhaps it would be useful for multicall?
- # One effect of this is that we're checking all
- # responses for Exceptions.
- for resp in responses:
- if isinstance(resp, dict) and 'exc' in resp:
- raise rpc_common.deserialize_remote_exception(CONF, resp['exc'])
-
- return responses[-1]
-
-
-def _multi_send(method, context, topic, msg, timeout=None,
- envelope=False, _msg_id=None):
- """Wraps the sending of messages.
-
- Dispatches to the matchmaker and sends the message to all relevant hosts.
- """
- conf = CONF
- LOG.debug(_("%(msg)s") % {'msg': ' '.join(map(pformat, (topic, msg)))})
-
- queues = _get_matchmaker().queues(topic)
- LOG.debug(_("Sending message(s) to: %s"), queues)
-
- # Don't stack if we have no matchmaker results
- if not queues:
- LOG.warn(_("No matchmaker results. Not casting."))
- # While not strictly a timeout, callers know how to handle
- # this exception and a timeout isn't too big a lie.
- raise rpc_common.Timeout(_("No match from matchmaker."))
-
- # This supports brokerless fanout (addresses > 1)
- for queue in queues:
- (_topic, ip_addr) = queue
- _addr = "tcp://%s:%s" % (ip_addr, conf.rpc_zmq_port)
-
- if method.__name__ == '_cast':
- eventlet.spawn_n(method, _addr, context,
- _topic, msg, timeout, envelope,
- _msg_id)
- return
- return method(_addr, context, _topic, msg, timeout,
- envelope)
-
-
-def create_connection(conf, new=True):
- return Connection(conf)
-
-
-def multicall(conf, *args, **kwargs):
- """Multiple calls."""
- return _multi_send(_call, *args, **kwargs)
-
-
-def call(conf, *args, **kwargs):
- """Send a message, expect a response."""
- data = _multi_send(_call, *args, **kwargs)
- return data[-1]
-
-
-def cast(conf, *args, **kwargs):
- """Send a message expecting no reply."""
- _multi_send(_cast, *args, **kwargs)
-
-
-def fanout_cast(conf, context, topic, msg, **kwargs):
- """Send a message to all listening and expect no reply."""
- # NOTE(ewindisch): fanout~ is used because it avoids splitting on .
- # and acts as a non-subtle hint to the matchmaker and ZmqProxy.
- _multi_send(_cast, context, 'fanout~' + str(topic), msg, **kwargs)
-
-
-def notify(conf, context, topic, msg, envelope):
- """Send notification event.
-
- Notifications are sent to topic-priority.
- This differs from the AMQP drivers which send to topic.priority.
- """
- # NOTE(ewindisch): dot-priority in rpc notifier does not
- # work with our assumptions.
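- # e.g. a notification for 'monitor.info' (illustrative topic) goes
- # out on the 'monitor-info' topic instead.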
- topic = topic.replace('.', '-')
- cast(conf, context, topic, msg, envelope=envelope)
-
-
-def cleanup():
- """Clean up resources in use by implementation."""
- global ZMQ_CTX
- if ZMQ_CTX:
- ZMQ_CTX.term()
- ZMQ_CTX = None
-
- global matchmaker
- matchmaker = None
-
-
-def _get_ctxt():
- if not zmq:
- raise ImportError("Failed to import eventlet.green.zmq")
-
- global ZMQ_CTX
- if not ZMQ_CTX:
- ZMQ_CTX = zmq.Context(CONF.rpc_zmq_contexts)
- return ZMQ_CTX
-
-
-def _get_matchmaker(*args, **kwargs):
- global matchmaker
- if not matchmaker:
- mm = CONF.rpc_zmq_matchmaker
- if mm.endswith('matchmaker.MatchMakerRing'):
- mm = mm.replace('matchmaker', 'matchmaker_ring')
- LOG.warn(_('rpc_zmq_matchmaker = %(orig)s is deprecated; use'
- ' %(new)s instead') % dict(
- orig=CONF.rpc_zmq_matchmaker, new=mm))
- matchmaker = importutils.import_object(mm, *args, **kwargs)
- return matchmaker
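For reference, the dispatch pattern that the removed _multi_send implemented
reduces to a few lines. A minimal sketch, assuming plain stand-ins for the
matchmaker result and the zmq send primitive (names and the port are
illustrative, not the driver's actual API):

def fake_send(addr, context, topic, msg):
    # Stand-in for the removed _cast/_call send primitive.
    print("%s %s %s" % (addr, topic, msg))

def dispatch(method, queues, context, msg, port=9501):
    # queues is the matchmaker result: a list of (topic, ip_addr) pairs.
    if not queues:
        raise RuntimeError("No match from matchmaker.")
    # Brokerless fanout: send to every resolved host.
    for topic, ip_addr in queues:
        addr = "tcp://%s:%s" % (ip_addr, port)
        method(addr, context, topic, msg)

dispatch(fake_send,
         [("compute.a", "10.0.0.1"), ("compute.b", "10.0.0.2")],
         {}, {"method": "ping", "args": {}})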
diff --git a/keystone/openstack/common/rpc/matchmaker.py b/keystone/openstack/common/rpc/matchmaker.py
deleted file mode 100644
index b5ea8deb2..000000000
--- a/keystone/openstack/common/rpc/matchmaker.py
+++ /dev/null
@@ -1,322 +0,0 @@
-# Copyright 2011 Cloudscaling Group, Inc
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""
-The MatchMaker classes should accept a Topic or Fanout exchange key and
-return keys for direct exchanges, per (approximate) AMQP parlance.
-"""
-
-import contextlib
-
-import eventlet
-from oslo.config import cfg
-
-from keystone.openstack.common.gettextutils import _ # noqa
-from keystone.openstack.common import log as logging
-
-
-matchmaker_opts = [
- cfg.IntOpt('matchmaker_heartbeat_freq',
- default=300,
- help='Heartbeat frequency'),
- cfg.IntOpt('matchmaker_heartbeat_ttl',
- default=600,
- help='Heartbeat time-to-live.'),
-]
-
-CONF = cfg.CONF
-CONF.register_opts(matchmaker_opts)
-LOG = logging.getLogger(__name__)
-contextmanager = contextlib.contextmanager
-
-
-class MatchMakerException(Exception):
- """Signified a match could not be found."""
- message = _("Match not found by MatchMaker.")
-
-
-class Exchange(object):
- """Implements lookups.
-
- Subclass this to support hashtables, dns, etc.
- """
- def __init__(self):
- pass
-
- def run(self, key):
- raise NotImplementedError()
-
-
-class Binding(object):
- """A binding on which to perform a lookup."""
- def __init__(self):
- pass
-
- def test(self, key):
- raise NotImplementedError()
-
-
-class MatchMakerBase(object):
- """Match Maker Base Class.
-
- Build off HeartbeatMatchMakerBase if building a heartbeat-capable
- MatchMaker.
- """
- def __init__(self):
- # Array of tuples. Index [2] toggles negation, [3] is last-if-true
- self.bindings = []
-
- self.no_heartbeat_msg = _('Matchmaker does not implement '
- 'registration or heartbeat.')
-
- def register(self, key, host):
- """Register a host on a backend.
-
- Heartbeats, if applicable, may keep the registration alive.
- """
- pass
-
- def ack_alive(self, key, host):
- """Acknowledge that a key.host is alive.
-
- Used internally for updating heartbeats, but may also be used
- publicly to acknowledge a system is alive (i.e. rpc message
- successfully sent to host)
- """
- pass
-
- def is_alive(self, topic, host):
- """Checks if a host is alive."""
- pass
-
- def expire(self, topic, host):
- """Explicitly expire a host's registration."""
- pass
-
- def send_heartbeats(self):
- """Send all heartbeats.
-
- Use start_heartbeat to spawn a heartbeat greenthread,
- which loops this method.
- """
- pass
-
- def unregister(self, key, host):
- """Unregister a topic."""
- pass
-
- def start_heartbeat(self):
- """Spawn heartbeat greenthread."""
- pass
-
- def stop_heartbeat(self):
- """Destroys the heartbeat greenthread."""
- pass
-
- def add_binding(self, binding, rule, last=True):
- self.bindings.append((binding, rule, False, last))
-
- #NOTE(ewindisch): kept the following method in case we implement the
- # underlying support.
- #def add_negate_binding(self, binding, rule, last=True):
- # self.bindings.append((binding, rule, True, last))
-
- def queues(self, key):
- workers = []
-
- # bit is for negate bindings - if we choose to implement it.
- # last stops processing rules if this matches.
- for (binding, exchange, bit, last) in self.bindings:
- if binding.test(key):
- workers.extend(exchange.run(key))
-
- # Support last.
- if last:
- return workers
- return workers
-
-
-class HeartbeatMatchMakerBase(MatchMakerBase):
- """Base for a heart-beat capable MatchMaker.
-
- Provides common methods for registering, unregistering, and maintaining
- heartbeats.
- """
- def __init__(self):
- self.hosts = set()
- self._heart = None
- self.host_topic = {}
-
- super(HeartbeatMatchMakerBase, self).__init__()
-
- def send_heartbeats(self):
- """Send all heartbeats.
-
- Use start_heartbeat to spawn a heartbeat greenthread,
- which loops this method.
- """
- for key, host in self.host_topic:
- self.ack_alive(key, host)
-
- def ack_alive(self, key, host):
- """Acknowledge that a host.topic is alive.
-
- Used internally for updating heartbeats, but may also be used
- publicly to acknowledge a system is alive (i.e. rpc message
- successfully sent to host)
- """
- raise NotImplementedError("Must implement ack_alive")
-
- def backend_register(self, key, host):
- """Implements registration logic.
-
- Called by register(self, key, host)
- """
- raise NotImplementedError("Must implement backend_register")
-
- def backend_unregister(self, key, key_host):
- """Implements de-registration logic.
-
- Called by unregister(self, key, host)
- """
- raise NotImplementedError("Must implement backend_unregister")
-
- def register(self, key, host):
- """Register a host on a backend.
-
- Heartbeats, if applicable, may keep the registration alive.
- """
- self.hosts.add(host)
- self.host_topic[(key, host)] = host
- key_host = '.'.join((key, host))
-
- self.backend_register(key, key_host)
-
- self.ack_alive(key, host)
-
- def unregister(self, key, host):
- """Unregister a topic."""
- if (key, host) in self.host_topic:
- del self.host_topic[(key, host)]
-
- self.hosts.discard(host)
- self.backend_unregister(key, '.'.join((key, host)))
-
- LOG.info(_("Matchmaker unregistered: %(key)s, %(host)s"),
- {'key': key, 'host': host})
-
- def start_heartbeat(self):
- """Implementation of MatchMakerBase.start_heartbeat.
-
- Launches greenthread looping send_heartbeats(),
- yielding for CONF.matchmaker_heartbeat_freq seconds
- between iterations.
- """
- if not self.hosts:
- raise MatchMakerException(
- _("Register before starting heartbeat."))
-
- def do_heartbeat():
- while True:
- self.send_heartbeats()
- eventlet.sleep(CONF.matchmaker_heartbeat_freq)
-
- self._heart = eventlet.spawn(do_heartbeat)
-
- def stop_heartbeat(self):
- """Destroys the heartbeat greenthread."""
- if self._heart:
- self._heart.kill()
-
-
-class DirectBinding(Binding):
- """Specifies a host in the key via a '.' character.
-
- Although dots are used in the key, the behavior here is
- that it maps directly to a host, thus direct.
- """
- def test(self, key):
- return '.' in key
-
-
-class TopicBinding(Binding):
- """Where a 'bare' key without dots.
-
- AMQP generally considers topic exchanges to be those *with* dots,
- but we deviate here in terminology as the behavior here matches
- that of a topic exchange (whereas where there are dots, behavior
- matches that of a direct exchange.
- """
- def test(self, key):
- return '.' not in key
-
-
-class FanoutBinding(Binding):
- """Match on fanout keys, where key starts with 'fanout.' string."""
- def test(self, key):
- return key.startswith('fanout~')
-
-
-class StubExchange(Exchange):
- """Exchange that does nothing."""
- def run(self, key):
- return [(key, None)]
-
-
-class LocalhostExchange(Exchange):
- """Exchange where all direct topics are local."""
- def __init__(self, host='localhost'):
- self.host = host
- super(LocalhostExchange, self).__init__()
-
- def run(self, key):
- return [('.'.join((key.split('.')[0], self.host)), self.host)]
-
-
-class DirectExchange(Exchange):
- """Exchange where all topic keys are split, sending to second half.
-
- i.e. "compute.host" sends a message to "compute.host" running on "host"
- """
- def __init__(self):
- super(DirectExchange, self).__init__()
-
- def run(self, key):
- e = key.split('.', 1)[1]
- return [(key, e)]
-
-
-class MatchMakerLocalhost(MatchMakerBase):
- """Match Maker where all bare topics resolve to localhost.
-
- Useful for testing.
- """
- def __init__(self, host='localhost'):
- super(MatchMakerLocalhost, self).__init__()
- self.add_binding(FanoutBinding(), LocalhostExchange(host))
- self.add_binding(DirectBinding(), DirectExchange())
- self.add_binding(TopicBinding(), LocalhostExchange(host))
-
-
-class MatchMakerStub(MatchMakerBase):
- """Match Maker where topics are untouched.
-
- Useful for testing, or for AMQP/brokered queues.
- Will not work where knowledge of hosts is known (i.e. zeromq)
- """
- def __init__(self):
- super(MatchMakerStub, self).__init__()
-
- self.add_binding(FanoutBinding(), StubExchange())
- self.add_binding(DirectBinding(), StubExchange())
- self.add_binding(TopicBinding(), StubExchange())
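The binding/exchange pairs above compose into the queues() lookup; a minimal
self-contained sketch of the MatchMakerLocalhost wiring (the host name and
lambda stand-ins are illustrative):

def queues(bindings, key):
    workers = []
    for test, run, last in bindings:
        if test(key):
            workers.extend(run(key))
            if last:  # 'last' stops processing further bindings
                return workers
    return workers

host = 'localhost'
bindings = [
    # fanout~topic -> topic.localhost
    (lambda k: k.startswith('fanout~'),
     lambda k: [('.'.join((k.split('.')[0], host)), host)], True),
    # dotted key -> direct to the named host
    (lambda k: '.' in k, lambda k: [(k, k.split('.', 1)[1])], True),
    # bare topic -> localhost
    (lambda k: '.' not in k, lambda k: [('.'.join((k, host)), host)], True),
]
print(queues(bindings, 'compute'))        # [('compute.localhost', 'localhost')]
print(queues(bindings, 'compute.node1'))  # [('compute.node1', 'node1')]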
diff --git a/keystone/openstack/common/rpc/matchmaker_redis.py b/keystone/openstack/common/rpc/matchmaker_redis.py
deleted file mode 100644
index 6ed074d38..000000000
--- a/keystone/openstack/common/rpc/matchmaker_redis.py
+++ /dev/null
@@ -1,143 +0,0 @@
-# Copyright 2013 Cloudscaling Group, Inc
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""
-The MatchMaker classes should accept a Topic or Fanout exchange key and
-return keys for direct exchanges, per (approximate) AMQP parlance.
-"""
-
-from oslo.config import cfg
-
-from keystone.openstack.common import importutils
-from keystone.openstack.common import log as logging
-from keystone.openstack.common.rpc import matchmaker as mm_common
-
-redis = importutils.try_import('redis')
-
-
-matchmaker_redis_opts = [
- cfg.StrOpt('host',
- default='127.0.0.1',
- help='Host to locate redis'),
- cfg.IntOpt('port',
- default=6379,
- help='Use this port to connect to redis host.'),
- cfg.StrOpt('password',
- default=None,
- help='Password for Redis server. (optional)'),
-]
-
-CONF = cfg.CONF
-opt_group = cfg.OptGroup(name='matchmaker_redis',
- title='Options for Redis-based MatchMaker')
-CONF.register_group(opt_group)
-CONF.register_opts(matchmaker_redis_opts, opt_group)
-LOG = logging.getLogger(__name__)
-
-
-class RedisExchange(mm_common.Exchange):
- def __init__(self, matchmaker):
- self.matchmaker = matchmaker
- self.redis = matchmaker.redis
- super(RedisExchange, self).__init__()
-
-
-class RedisTopicExchange(RedisExchange):
- """Exchange where all topic keys are split, sending to second half.
-
- i.e. "compute.host" sends a message to "compute" running on "host"
- """
- def run(self, topic):
- while True:
- member_name = self.redis.srandmember(topic)
-
- if not member_name:
- # If this happens, there are no
- # longer any members.
- break
-
- if not self.matchmaker.is_alive(topic, member_name):
- continue
-
- host = member_name.split('.', 1)[1]
- return [(member_name, host)]
- return []
-
-
-class RedisFanoutExchange(RedisExchange):
- """Return a list of all hosts."""
- def run(self, topic):
- topic = topic.split('~', 1)[1]
- hosts = self.redis.smembers(topic)
- good_hosts = filter(
- lambda host: self.matchmaker.is_alive(topic, host), hosts)
-
- return [(x, x.split('.', 1)[1]) for x in good_hosts]
-
-
-class MatchMakerRedis(mm_common.HeartbeatMatchMakerBase):
- """MatchMaker registering and looking-up hosts with a Redis server."""
- def __init__(self):
- super(MatchMakerRedis, self).__init__()
-
- if not redis:
- raise ImportError("Failed to import module redis.")
-
- self.redis = redis.Redis(
- host=CONF.matchmaker_redis.host,
- port=CONF.matchmaker_redis.port,
- password=CONF.matchmaker_redis.password)
-
- self.add_binding(mm_common.FanoutBinding(), RedisFanoutExchange(self))
- self.add_binding(mm_common.DirectBinding(), mm_common.DirectExchange())
- self.add_binding(mm_common.TopicBinding(), RedisTopicExchange(self))
-
- def ack_alive(self, key, host):
- topic = "%s.%s" % (key, host)
- if not self.redis.expire(topic, CONF.matchmaker_heartbeat_ttl):
- # If we could not update the expiration, the key
- # might have been pruned. Re-register, creating a new
- # key in Redis.
- self.register(key, host)
-
- def is_alive(self, topic, host):
- if self.redis.ttl(host) == -1:
- self.expire(topic, host)
- return False
- return True
-
- def expire(self, topic, host):
- with self.redis.pipeline() as pipe:
- pipe.multi()
- pipe.delete(host)
- pipe.srem(topic, host)
- pipe.execute()
-
- def backend_register(self, key, key_host):
- with self.redis.pipeline() as pipe:
- pipe.multi()
- pipe.sadd(key, key_host)
-
- # No value is needed, we just
- # care if it exists. Sets aren't viable
- # because only keys can expire.
- pipe.set(key_host, '')
-
- pipe.execute()
-
- def backend_unregister(self, key, key_host):
- with self.redis.pipeline() as pipe:
- pipe.multi()
- pipe.srem(key, key_host)
- pipe.delete(key_host)
- pipe.execute()
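The Redis layout this backend maintains is small: each topic is a set of
'topic.host' members, and each member also exists as a plain key whose TTL
carries the heartbeat. A sketch with redis-py, assuming a reachable local
server (host, port, and TTL values are illustrative):

import redis

r = redis.Redis(host='127.0.0.1', port=6379)
r.sadd('compute', 'compute.node1')   # backend_register: add member to topic set
r.set('compute.node1', '')           # presence key; only plain keys can expire
r.expire('compute.node1', 600)       # ack_alive: refresh the heartbeat TTL
print(r.srandmember('compute'))      # topic lookup: pick a random live member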
diff --git a/keystone/openstack/common/rpc/matchmaker_ring.py b/keystone/openstack/common/rpc/matchmaker_ring.py
deleted file mode 100644
index 7165252c4..000000000
--- a/keystone/openstack/common/rpc/matchmaker_ring.py
+++ /dev/null
@@ -1,106 +0,0 @@
-# Copyright 2011-2013 Cloudscaling Group, Inc
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""
-The MatchMaker classes should accept a Topic or Fanout exchange key and
-return keys for direct exchanges, per (approximate) AMQP parlance.
-"""
-
-import itertools
-import json
-
-from oslo.config import cfg
-
-from keystone.openstack.common.gettextutils import _ # noqa
-from keystone.openstack.common import log as logging
-from keystone.openstack.common.rpc import matchmaker as mm
-
-
-matchmaker_opts = [
- # Matchmaker ring file
- cfg.StrOpt('ringfile',
- deprecated_name='matchmaker_ringfile',
- deprecated_group='DEFAULT',
- default='/etc/oslo/matchmaker_ring.json',
- help='Matchmaker ring file (JSON)'),
-]
-
-CONF = cfg.CONF
-CONF.register_opts(matchmaker_opts, 'matchmaker_ring')
-LOG = logging.getLogger(__name__)
-
-
-class RingExchange(mm.Exchange):
- """Match Maker where hosts are loaded from a static JSON formatted file.
-
- __init__ takes optional ring dictionary argument, otherwise
- loads the ringfile from CONF.mathcmaker_ringfile.
- """
- def __init__(self, ring=None):
- super(RingExchange, self).__init__()
-
- if ring:
- self.ring = ring
- else:
- fh = open(CONF.matchmaker_ring.ringfile, 'r')
- self.ring = json.load(fh)
- fh.close()
-
- self.ring0 = {}
- for k in self.ring.keys():
- self.ring0[k] = itertools.cycle(self.ring[k])
-
- def _ring_has(self, key):
- return key in self.ring0
-
-
-class RoundRobinRingExchange(RingExchange):
- """A Topic Exchange based on a hashmap."""
- def __init__(self, ring=None):
- super(RoundRobinRingExchange, self).__init__(ring)
-
- def run(self, key):
- if not self._ring_has(key):
- LOG.warn(
- _("No key defining hosts for topic '%s', "
- "see ringfile") % (key, )
- )
- return []
- host = next(self.ring0[key])
- return [(key + '.' + host, host)]
-
-
-class FanoutRingExchange(RingExchange):
- """Fanout Exchange based on a hashmap."""
- def __init__(self, ring=None):
- super(FanoutRingExchange, self).__init__(ring)
-
- def run(self, key):
- # Assume starts with "fanout~", strip it for lookup.
- nkey = key.split('fanout~')[1:][0]
- if not self._ring_has(nkey):
- LOG.warn(
- _("No key defining hosts for topic '%s', "
- "see ringfile") % (nkey, )
- )
- return []
- return map(lambda x: (key + '.' + x, x), self.ring[nkey])
-
-
-class MatchMakerRing(mm.MatchMakerBase):
- """Match Maker where hosts are loaded from a static hashmap."""
- def __init__(self, ring=None):
- super(MatchMakerRing, self).__init__()
- self.add_binding(mm.FanoutBinding(), FanoutRingExchange(ring))
- self.add_binding(mm.DirectBinding(), mm.DirectExchange())
- self.add_binding(mm.TopicBinding(), RoundRobinRingExchange(ring))
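A short sketch of the ring file format and the round-robin lookup it drives
(the contents are illustrative; the real file lives at the ringfile option's
path):

import itertools
import json

ring = json.loads('{"compute": ["node1", "node2"]}')
ring0 = {key: itertools.cycle(hosts) for key, hosts in ring.items()}

def run(key):
    # Mirror RoundRobinRingExchange: unknown topics yield no workers.
    if key not in ring0:
        return []
    host = next(ring0[key])
    return [(key + '.' + host, host)]

print(run('compute'))  # [('compute.node1', 'node1')]
print(run('compute'))  # [('compute.node2', 'node2')]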
diff --git a/keystone/openstack/common/rpc/proxy.py b/keystone/openstack/common/rpc/proxy.py
deleted file mode 100644
index ee1709284..000000000
--- a/keystone/openstack/common/rpc/proxy.py
+++ /dev/null
@@ -1,225 +0,0 @@
-# Copyright 2012-2013 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-A helper class for proxy objects to remote APIs.
-
-For more information about rpc API version numbers, see:
- rpc/dispatcher.py
-"""
-
-import six
-
-from keystone.openstack.common import rpc
-from keystone.openstack.common.rpc import common as rpc_common
-from keystone.openstack.common.rpc import serializer as rpc_serializer
-
-
-class RpcProxy(object):
- """A helper class for rpc clients.
-
- This class is a wrapper around the RPC client API. It allows you to
- specify the topic and API version in a single place. This is intended to
- be used as a base class for a class that implements the client side of an
- rpc API.
- """
-
- # The default namespace, which can be overridden in a subclass.
- RPC_API_NAMESPACE = None
-
- def __init__(self, topic, default_version, version_cap=None,
- serializer=None):
- """Initialize an RpcProxy.
-
- :param topic: The topic to use for all messages.
- :param default_version: The default API version to request in all
- outgoing messages. This can be overridden on a per-message
- basis.
- :param version_cap: Optionally cap the maximum version used for sent
- messages.
- :param serializer: Optionally (de-)serialize entities with a
- provided helper.
- """
- self.topic = topic
- self.default_version = default_version
- self.version_cap = version_cap
- if serializer is None:
- serializer = rpc_serializer.NoOpSerializer()
- self.serializer = serializer
- super(RpcProxy, self).__init__()
-
- def _set_version(self, msg, vers):
- """Helper method to set the version in a message.
-
- :param msg: The message having a version added to it.
- :param vers: The version number to add to the message.
- """
- v = vers if vers else self.default_version
- if (self.version_cap and not
- rpc_common.version_is_compatible(self.version_cap, v)):
- raise rpc_common.RpcVersionCapError(version_cap=self.version_cap)
- msg['version'] = v
-
- def _get_topic(self, topic):
- """Return the topic to use for a message."""
- return topic if topic else self.topic
-
- def can_send_version(self, version):
- """Check to see if a version is compatible with the version cap."""
- return (not self.version_cap or
- rpc_common.version_is_compatible(self.version_cap, version))
-
- @staticmethod
- def make_namespaced_msg(method, namespace, **kwargs):
- return {'method': method, 'namespace': namespace, 'args': kwargs}
-
- def make_msg(self, method, **kwargs):
- return self.make_namespaced_msg(method, self.RPC_API_NAMESPACE,
- **kwargs)
-
- def _serialize_msg_args(self, context, kwargs):
- """Helper method called to serialize message arguments.
-
- This calls our serializer on each argument, returning a new
- set of args that have been serialized.
-
- :param context: The request context
- :param kwargs: The arguments to serialize
- :returns: A new set of serialized arguments
- """
- new_kwargs = dict()
- for argname, arg in six.iteritems(kwargs):
- new_kwargs[argname] = self.serializer.serialize_entity(context,
- arg)
- return new_kwargs
-
- def call(self, context, msg, topic=None, version=None, timeout=None):
- """rpc.call() a remote method.
-
- :param context: The request context
- :param msg: The message to send, including the method and args.
- :param topic: Override the topic for this message.
- :param version: (Optional) Override the requested API version in this
- message.
- :param timeout: (Optional) A timeout to use when waiting for the
- response. If no timeout is specified, a default timeout will be
- used that is usually sufficient.
-
- :returns: The return value from the remote method.
- """
- self._set_version(msg, version)
- msg['args'] = self._serialize_msg_args(context, msg['args'])
- real_topic = self._get_topic(topic)
- try:
- result = rpc.call(context, real_topic, msg, timeout)
- return self.serializer.deserialize_entity(context, result)
- except rpc.common.Timeout as exc:
- raise rpc.common.Timeout(
- exc.info, real_topic, msg.get('method'))
-
- def multicall(self, context, msg, topic=None, version=None, timeout=None):
- """rpc.multicall() a remote method.
-
- :param context: The request context
- :param msg: The message to send, including the method and args.
- :param topic: Override the topic for this message.
- :param version: (Optional) Override the requested API version in this
- message.
- :param timeout: (Optional) A timeout to use when waiting for the
- response. If no timeout is specified, a default timeout will be
- used that is usually sufficient.
-
- :returns: An iterator that lets you process each of the returned values
- from the remote method as they arrive.
- """
- self._set_version(msg, version)
- msg['args'] = self._serialize_msg_args(context, msg['args'])
- real_topic = self._get_topic(topic)
- try:
- result = rpc.multicall(context, real_topic, msg, timeout)
- return self.serializer.deserialize_entity(context, result)
- except rpc.common.Timeout as exc:
- raise rpc.common.Timeout(
- exc.info, real_topic, msg.get('method'))
-
- def cast(self, context, msg, topic=None, version=None):
- """rpc.cast() a remote method.
-
- :param context: The request context
- :param msg: The message to send, including the method and args.
- :param topic: Override the topic for this message.
- :param version: (Optional) Override the requested API version in this
- message.
-
- :returns: None. rpc.cast() does not wait on any return value from the
- remote method.
- """
- self._set_version(msg, version)
- msg['args'] = self._serialize_msg_args(context, msg['args'])
- rpc.cast(context, self._get_topic(topic), msg)
-
- def fanout_cast(self, context, msg, topic=None, version=None):
- """rpc.fanout_cast() a remote method.
-
- :param context: The request context
- :param msg: The message to send, including the method and args.
- :param topic: Override the topic for this message.
- :param version: (Optional) Override the requested API version in this
- message.
-
- :returns: None. rpc.fanout_cast() does not wait on any return value
- from the remote method.
- """
- self._set_version(msg, version)
- msg['args'] = self._serialize_msg_args(context, msg['args'])
- rpc.fanout_cast(context, self._get_topic(topic), msg)
-
- def cast_to_server(self, context, server_params, msg, topic=None,
- version=None):
- """rpc.cast_to_server() a remote method.
-
- :param context: The request context
- :param server_params: Server parameters. See rpc.cast_to_server() for
- details.
- :param msg: The message to send, including the method and args.
- :param topic: Override the topic for this message.
- :param version: (Optional) Override the requested API version in this
- message.
-
- :returns: None. rpc.cast_to_server() does not wait on any
- return values.
- """
- self._set_version(msg, version)
- msg['args'] = self._serialize_msg_args(context, msg['args'])
- rpc.cast_to_server(context, server_params, self._get_topic(topic), msg)
-
- def fanout_cast_to_server(self, context, server_params, msg, topic=None,
- version=None):
- """rpc.fanout_cast_to_server() a remote method.
-
- :param context: The request context
- :param server_params: Server parameters. See rpc.cast_to_server() for
- details.
- :param msg: The message to send, including the method and args.
- :param topic: Override the topic for this message.
- :param version: (Optional) Override the requested API version in this
- message.
-
- :returns: None. rpc.fanout_cast_to_server() does not wait on any
- return values.
- """
- self._set_version(msg, version)
- msg['args'] = self._serialize_msg_args(context, msg['args'])
- rpc.fanout_cast_to_server(context, server_params,
- self._get_topic(topic), msg)
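A hypothetical client built on this base (the topic, version, and method
names are illustrative), showing the intended pattern: fix the topic and
default version once, then each call site only names the method and
arguments:

from keystone.openstack.common.rpc.proxy import RpcProxy  # removed above

class ComputeAPI(RpcProxy):
    RPC_API_NAMESPACE = 'compute'

    def __init__(self):
        super(ComputeAPI, self).__init__(topic='compute',
                                         default_version='1.0')

    def ping(self, context, arg):
        # make_msg() attaches the namespace; call() adds the version.
        return self.call(context, self.make_msg('ping', arg=arg))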
diff --git a/keystone/openstack/common/rpc/serializer.py b/keystone/openstack/common/rpc/serializer.py
deleted file mode 100644
index 9bc6e2a3a..000000000
--- a/keystone/openstack/common/rpc/serializer.py
+++ /dev/null
@@ -1,54 +0,0 @@
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Provides the definition of an RPC serialization handler"""
-
-import abc
-
-import six
-
-
-@six.add_metaclass(abc.ABCMeta)
-class Serializer(object):
- """Generic (de-)serialization definition base class."""
-
- @abc.abstractmethod
- def serialize_entity(self, context, entity):
- """Serialize something to primitive form.
-
- :param context: Security context
- :param entity: Entity to be serialized
- :returns: Serialized form of entity
- """
- pass
-
- @abc.abstractmethod
- def deserialize_entity(self, context, entity):
- """Deserialize something from primitive form.
-
- :param context: Security context
- :param entity: Primitive to be deserialized
- :returns: Deserialized form of entity
- """
- pass
-
-
-class NoOpSerializer(Serializer):
- """A serializer that does nothing."""
-
- def serialize_entity(self, context, entity):
- return entity
-
- def deserialize_entity(self, context, entity):
- return entity
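A minimal concrete Serializer against the interface above, assuming
JSON-friendly entities (purely illustrative):

import json

from keystone.openstack.common.rpc.serializer import Serializer  # removed above

class JsonSerializer(Serializer):
    def serialize_entity(self, context, entity):
        return json.dumps(entity)   # entity -> primitive form

    def deserialize_entity(self, context, entity):
        return json.loads(entity)   # primitive form -> entity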
diff --git a/keystone/openstack/common/rpc/service.py b/keystone/openstack/common/rpc/service.py
deleted file mode 100644
index f3e8bec8b..000000000
--- a/keystone/openstack/common/rpc/service.py
+++ /dev/null
@@ -1,76 +0,0 @@
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-# Copyright 2011 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from keystone.openstack.common.gettextutils import _ # noqa
-from keystone.openstack.common import log as logging
-from keystone.openstack.common import rpc
-from keystone.openstack.common.rpc import dispatcher as rpc_dispatcher
-from keystone.openstack.common import service
-
-
-LOG = logging.getLogger(__name__)
-
-
-class Service(service.Service):
- """Service object for binaries running on hosts.
-
- A service enables rpc by listening to queues based on topic and host.
- """
- def __init__(self, host, topic, manager=None, serializer=None):
- super(Service, self).__init__()
- self.host = host
- self.topic = topic
- self.serializer = serializer
- if manager is None:
- self.manager = self
- else:
- self.manager = manager
-
- def start(self):
- super(Service, self).start()
-
- self.conn = rpc.create_connection(new=True)
- LOG.debug(_("Creating Consumer connection for Service %s") %
- self.topic)
-
- dispatcher = rpc_dispatcher.RpcDispatcher([self.manager],
- self.serializer)
-
- # Share this same connection for these Consumers
- self.conn.create_consumer(self.topic, dispatcher, fanout=False)
-
- node_topic = '%s.%s' % (self.topic, self.host)
- self.conn.create_consumer(node_topic, dispatcher, fanout=False)
-
- self.conn.create_consumer(self.topic, dispatcher, fanout=True)
-
- # Hook to allow the manager to do other initializations after
- # the rpc connection is created.
- if callable(getattr(self.manager, 'initialize_service_hook', None)):
- self.manager.initialize_service_hook(self)
-
- # Consume from all consumers in a thread
- self.conn.consume_in_thread()
-
- def stop(self):
- # Try to shut the connection down, but if we get any sort of
- # errors, go ahead and ignore them.. as we're shutting down anyway
- try:
- self.conn.close()
- except Exception:
- pass
- super(Service, self).stop()
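Standing up this Service looked roughly like the sketch below (pre-removal
API; the host, topic, and manager are illustrative). The manager's public
methods become rpc endpoints on "topic", "topic.host", and the fanout queue:

from keystone.openstack.common.rpc.service import Service  # removed above

class PingManager(object):
    def ping(self, context):
        return 'pong'

svc = Service(host='node1', topic='keystone', manager=PingManager())
svc.start()  # creates the three consumers and consumes in a green thread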
diff --git a/keystone/openstack/common/rpc/zmq_receiver.py b/keystone/openstack/common/rpc/zmq_receiver.py
deleted file mode 100644
index 70120bc13..000000000
--- a/keystone/openstack/common/rpc/zmq_receiver.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# Copyright 2011 OpenStack Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import eventlet
-eventlet.monkey_patch()
-
-import contextlib
-import sys
-
-from oslo.config import cfg
-
-from keystone.openstack.common import log as logging
-from keystone.openstack.common import rpc
-from keystone.openstack.common.rpc import impl_zmq
-
-CONF = cfg.CONF
-CONF.register_opts(rpc.rpc_opts)
-CONF.register_opts(impl_zmq.zmq_opts)
-
-
-def main():
- CONF(sys.argv[1:], project='oslo')
- logging.setup("oslo")
-
- with contextlib.closing(impl_zmq.ZmqProxy(CONF)) as reactor:
- reactor.consume_in_thread()
- reactor.wait()
diff --git a/keystone/policy/__init__.py b/keystone/policy/__init__.py
index 5e4837545..0756589d4 100644
--- a/keystone/policy/__init__.py
+++ b/keystone/policy/__init__.py
@@ -1,4 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
# flake8: noqa
# Copyright 2012 OpenStack Foundation
diff --git a/keystone/policy/backends/rules.py b/keystone/policy/backends/rules.py
index cc4edf47c..2f3aa0df4 100644
--- a/keystone/policy/backends/rules.py
+++ b/keystone/policy/backends/rules.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright (c) 2011 OpenStack, LLC.
# All Rights Reserved.
#
diff --git a/keystone/policy/backends/sql.py b/keystone/policy/backends/sql.py
index 1823be9c5..e670e6b38 100644
--- a/keystone/policy/backends/sql.py
+++ b/keystone/policy/backends/sql.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -15,8 +13,9 @@
# under the License.
from keystone.common import sql
-from keystone.common.sql import migration
+from keystone.common.sql import migration_helpers
from keystone import exception
+from keystone.openstack.common.db.sqlalchemy import migration
from keystone.openstack.common.db.sqlalchemy import session as db_session
from keystone.policy.backends import rules
@@ -30,10 +29,11 @@ class PolicyModel(sql.ModelBase, sql.DictBase):
extra = sql.Column(sql.JsonBlob())
-class Policy(sql.Base, rules.Policy):
+class Policy(rules.Policy):
# Internal interface to manage the database
def db_sync(self, version=None):
- migration.db_sync(version=version)
+ migration.db_sync(
+ migration_helpers.find_migrate_repo(), version=version)
@sql.handle_conflicts(conflict_type='policy')
def create_policy(self, policy_id, policy):
diff --git a/keystone/policy/controllers.py b/keystone/policy/controllers.py
index a6527491d..44b9a4725 100644
--- a/keystone/policy/controllers.py
+++ b/keystone/policy/controllers.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -35,10 +33,7 @@ class PolicyV3(controller.V3Controller):
@controller.filterprotected('type')
def list_policies(self, context, filters):
hints = PolicyV3.build_driver_hints(context, filters)
- # We don't bother passing the hints in, since this would be
- # a highly unlikely filter to use - wrap_collection() can
- # handle if required.
- refs = self.policy_api.list_policies()
+ refs = self.policy_api.list_policies(hints=hints)
return PolicyV3.wrap_collection(context, refs, hints=hints)
@controller.protected()
diff --git a/keystone/policy/core.py b/keystone/policy/core.py
index 2c4a8b244..a3d85df26 100644
--- a/keystone/policy/core.py
+++ b/keystone/policy/core.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -55,6 +53,13 @@ class Manager(manager.Manager):
except exception.NotFound:
raise exception.PolicyNotFound(policy_id=policy_id)
+ @manager.response_truncated
+ def list_policies(self, hints=None):
+ # NOTE(henry-nash): Since the advantage of filtering or list limiting
+ # of policies at the driver level is minimal, we leave this to the
+ # caller.
+ return self.driver.list_policies()
+
def delete_policy(self, policy_id):
try:
return self.driver.delete_policy(policy_id)
@@ -65,6 +70,9 @@ class Manager(manager.Manager):
@six.add_metaclass(abc.ABCMeta)
class Driver(object):
+ def _get_list_limit(self):
+ return CONF.policy.list_limit or CONF.list_limit
+
@abc.abstractmethod
def enforce(self, context, credentials, action, target):
"""Verify that a user is authorized to perform action.
diff --git a/keystone/policy/routers.py b/keystone/policy/routers.py
index da9c6cb0b..96a524bd7 100644
--- a/keystone/policy/routers.py
+++ b/keystone/policy/routers.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/routers.py b/keystone/routers.py
index 9bf6b0027..73cd47cd0 100644
--- a/keystone/routers.py
+++ b/keystone/routers.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/service.py b/keystone/service.py
index 106845331..bb2dd0baa 100644
--- a/keystone/service.py
+++ b/keystone/service.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -14,6 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import sys
+
import functools
import routes
@@ -75,8 +75,8 @@ def fail_gracefully(f):
# exception message is printed to all logs
LOG.critical(e)
+ sys.exit(1)
- exit(1)
return wrapper
diff --git a/keystone/tests/__init__.py b/keystone/tests/__init__.py
index 9773cf264..ae37851a1 100644
--- a/keystone/tests/__init__.py
+++ b/keystone/tests/__init__.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/tests/_ldap_livetest.py b/keystone/tests/_ldap_livetest.py
index bb0870690..1ab24ddba 100644
--- a/keystone/tests/_ldap_livetest.py
+++ b/keystone/tests/_ldap_livetest.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -60,16 +58,16 @@ class LiveLDAPIdentity(test_backend_ldap.LDAPIdentity):
create_object(CONF.ldap.suffix, tree_dn_attrs)
create_object(CONF.ldap.user_tree_dn,
{'objectclass': 'organizationalUnit',
- 'ou': 'Users'})
+ 'ou': 'Users'})
create_object(CONF.ldap.role_tree_dn,
{'objectclass': 'organizationalUnit',
- 'ou': 'Roles'})
+ 'ou': 'Roles'})
create_object(CONF.ldap.tenant_tree_dn,
{'objectclass': 'organizationalUnit',
- 'ou': 'Projects'})
+ 'ou': 'Projects'})
create_object(CONF.ldap.group_tree_dn,
{'objectclass': 'organizationalUnit',
- 'ou': 'UserGroups'})
+ 'ou': 'UserGroups'})
def _set_config(self):
self.config([tests.dirs.etc('keystone.conf.sample'),
@@ -223,7 +221,7 @@ class LiveLDAPIdentity(test_backend_ldap.LDAPIdentity):
def test_user_enable_attribute_mask(self):
self.opt_in_group(
'ldap',
- user_enable_emulation=False,
+ user_enabled_emulation=False,
user_enabled_attribute='employeeType')
super(LiveLDAPIdentity, self).test_user_enable_attribute_mask()
diff --git a/keystone/tests/_ldap_tls_livetest.py b/keystone/tests/_ldap_tls_livetest.py
index 3d5d01991..02c1b8961 100644
--- a/keystone/tests/_ldap_tls_livetest.py
+++ b/keystone/tests/_ldap_tls_livetest.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
# Copyright 2013 IBM Corp.
#
diff --git a/keystone/tests/_sql_livetest.py b/keystone/tests/_sql_livetest.py
index b6e1a0d5e..ad17d3938 100644
--- a/keystone/tests/_sql_livetest.py
+++ b/keystone/tests/_sql_livetest.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/tests/backend_liveldap.conf b/keystone/tests/backend_liveldap.conf
index 297d96d6a..08901cb0b 100644
--- a/keystone/tests/backend_liveldap.conf
+++ b/keystone/tests/backend_liveldap.conf
@@ -1,6 +1,6 @@
[ldap]
url = ldap://localhost
-user = dc=Manager,dc=openstack,dc=org
+user = cn=Manager,dc=openstack,dc=org
password = test
suffix = dc=openstack,dc=org
group_tree_dn = ou=UserGroups,dc=openstack,dc=org
diff --git a/keystone/tests/contrib/__init__.py b/keystone/tests/contrib/__init__.py
index 8c6d5f866..e69de29bb 100644
--- a/keystone/tests/contrib/__init__.py
+++ b/keystone/tests/contrib/__init__.py
@@ -1,13 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
diff --git a/keystone/tests/contrib/kds/__init__.py b/keystone/tests/contrib/kds/__init__.py
index 8c6d5f866..e69de29bb 100644
--- a/keystone/tests/contrib/kds/__init__.py
+++ b/keystone/tests/contrib/kds/__init__.py
@@ -1,13 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
diff --git a/keystone/tests/contrib/kds/api/__init__.py b/keystone/tests/contrib/kds/api/__init__.py
index 8c6d5f866..e69de29bb 100644
--- a/keystone/tests/contrib/kds/api/__init__.py
+++ b/keystone/tests/contrib/kds/api/__init__.py
@@ -1,13 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
diff --git a/keystone/tests/contrib/kds/api/base.py b/keystone/tests/contrib/kds/api/base.py
index 566abb609..622d7a36d 100644
--- a/keystone/tests/contrib/kds/api/base.py
+++ b/keystone/tests/contrib/kds/api/base.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
diff --git a/keystone/tests/contrib/kds/api/test.py b/keystone/tests/contrib/kds/api/test.py
index b49fc19f5..d67e738e5 100644
--- a/keystone/tests/contrib/kds/api/test.py
+++ b/keystone/tests/contrib/kds/api/test.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@@ -17,6 +15,14 @@ from keystone.tests.contrib.kds.api import base
class SimpleTest(base.BaseTestCase):
- def test_simple(self):
- resp = self.get("/")
- self.assertEqual(resp.json['hello'], 'world')
+ def test_version(self):
+ resp = self.get('/')
+ versions = resp.json['versions']
+ self.assertEqual(resp.status_code, 300)
+
+ host = 'http://localhost' # webtest default
+
+ self.assertEqual(versions[0]['status'], 'stable')
+ self.assertEqual(versions[0]['id'], 'v1.0')
+ self.assertEqual(versions[0]['links'][0]['href'], '%s/v1/' % host)
+ self.assertEqual(versions[0]['links'][0]['rel'], 'self')
diff --git a/keystone/tests/contrib/kds/api/v1/__init__.py b/keystone/tests/contrib/kds/api/v1/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/keystone/tests/contrib/kds/api/v1/__init__.py
diff --git a/keystone/tests/contrib/kds/api/v1/base.py b/keystone/tests/contrib/kds/api/v1/base.py
new file mode 100644
index 000000000..f5d2dee17
--- /dev/null
+++ b/keystone/tests/contrib/kds/api/v1/base.py
@@ -0,0 +1,29 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone.tests.contrib.kds.api import base
+
+
+def v1_url(*args):
+ return base.urljoin('v1', *args)
+
+
+class BaseTestCase(base.BaseTestCase):
+
+ def get(self, url, *args, **kwargs):
+ return super(BaseTestCase, self).get(v1_url(url), *args, **kwargs)
+
+ def post(self, url, *args, **kwargs):
+ return super(BaseTestCase, self).post(v1_url(url), *args, **kwargs)
+
+ def put(self, url, *args, **kwargs):
+ return super(BaseTestCase, self).put(v1_url(url), *args, **kwargs)
diff --git a/keystone/tests/contrib/kds/api/v1/test.py b/keystone/tests/contrib/kds/api/v1/test.py
new file mode 100644
index 000000000..e25158811
--- /dev/null
+++ b/keystone/tests/contrib/kds/api/v1/test.py
@@ -0,0 +1,28 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystone.tests.contrib.kds.api.v1 import base
+
+
+class TestVersion(base.BaseTestCase):
+
+ def test_versions(self):
+ resp = self.get('/')
+ version = resp.json['version']
+ self.assertEqual(resp.status_code, 200)
+
+ host = 'http://localhost' # webtest default
+
+ self.assertEqual(version['id'], 'v1.0')
+ self.assertEqual(version['status'], 'stable')
+ self.assertEqual(version['links'][0]['href'], '%s/v1/' % host)
+ self.assertEqual(version['links'][0]['rel'], 'self')
diff --git a/keystone/tests/contrib/kds/base.py b/keystone/tests/contrib/kds/base.py
index 4cc4e7e56..fd8d509f8 100644
--- a/keystone/tests/contrib/kds/base.py
+++ b/keystone/tests/contrib/kds/base.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
diff --git a/keystone/tests/contrib/kds/db/base.py b/keystone/tests/contrib/kds/db/base.py
index 2c0679452..e27784f9d 100644
--- a/keystone/tests/contrib/kds/db/base.py
+++ b/keystone/tests/contrib/kds/db/base.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
diff --git a/keystone/tests/contrib/kds/db/test_host_key.py b/keystone/tests/contrib/kds/db/test_host_key.py
index 0ac2234ed..c8fa3511e 100644
--- a/keystone/tests/contrib/kds/db/test_host_key.py
+++ b/keystone/tests/contrib/kds/db/test_host_key.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
diff --git a/keystone/tests/contrib/kds/fixture/__init__.py b/keystone/tests/contrib/kds/fixture/__init__.py
index 313ae2796..6e37652b0 100644
--- a/keystone/tests/contrib/kds/fixture/__init__.py
+++ b/keystone/tests/contrib/kds/fixture/__init__.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@@ -18,4 +16,4 @@ from keystone.tests.contrib.kds.fixture import sqlitedb
SqliteDb = sqlitedb.SqliteDb
KvsDb = kvsdb.KvsDb
-__all__ = [SqliteDb, KvsDb]
+__all__ = ['SqliteDb', 'KvsDb']
diff --git a/keystone/tests/contrib/kds/fixture/kvsdb.py b/keystone/tests/contrib/kds/fixture/kvsdb.py
index 1426b281f..c6f2d508c 100644
--- a/keystone/tests/contrib/kds/fixture/kvsdb.py
+++ b/keystone/tests/contrib/kds/fixture/kvsdb.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
diff --git a/keystone/tests/contrib/kds/fixture/sqlitedb.py b/keystone/tests/contrib/kds/fixture/sqlitedb.py
index 660660587..677345d78 100644
--- a/keystone/tests/contrib/kds/fixture/sqlitedb.py
+++ b/keystone/tests/contrib/kds/fixture/sqlitedb.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
diff --git a/keystone/tests/contrib/kds/paths.py b/keystone/tests/contrib/kds/paths.py
index 41b48e5cf..41af315fd 100644
--- a/keystone/tests/contrib/kds/paths.py
+++ b/keystone/tests/contrib/kds/paths.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
diff --git a/keystone/tests/core.py b/keystone/tests/core.py
index ace658504..04ebd11b1 100644
--- a/keystone/tests/core.py
+++ b/keystone/tests/core.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -15,7 +13,7 @@
# under the License.
from __future__ import absolute_import
-
+import atexit
import functools
import os
import re
@@ -31,8 +29,9 @@ from paste import deploy
import six
import testtools
from testtools import testcase
+import webob
-
+from keystone.openstack.common.fixture import mockpatch
from keystone.openstack.common import gettextutils
# NOTE(blk-u):
@@ -51,20 +50,21 @@ from keystone.common import environment
environment.use_eventlet()
from keystone import auth
-from keystone.common import cache
from keystone.common import dependency
from keystone.common import kvs
from keystone.common.kvs import core as kvs_core
from keystone.common import sql
-from keystone.common import utils
-from keystone.common import wsgi
+from keystone.common.sql import migration_helpers
+from keystone.common import utils as common_utils
from keystone import config
from keystone import exception
from keystone import notifications
+from keystone.openstack.common.db.sqlalchemy import migration
from keystone.openstack.common.db.sqlalchemy import session
from keystone.openstack.common import log
from keystone.openstack.common import timeutils
from keystone import service
+from keystone.tests import fixtures as ksfixtures
# NOTE(dstanek): Tests inheriting from TestCase depend on having the
# policy_file command-line option declared before setUp runs. Importing the
@@ -132,12 +132,12 @@ def checkout_vendor(repo, rev):
return revdir
if not os.path.exists(revdir):
- utils.git('clone', repo, revdir)
+ common_utils.git('clone', repo, revdir)
os.chdir(revdir)
- utils.git('checkout', '-q', 'master')
- utils.git('pull', '-q')
- utils.git('checkout', '-q', rev)
+ common_utils.git('checkout', '-q', 'master')
+ common_utils.git('pull', '-q')
+ common_utils.git('checkout', '-q', rev)
# write out a modified time
with open(modcheck, 'w') as fd:
@@ -152,16 +152,27 @@ def setup_database():
db = dirs.tmp('test.db')
pristine = dirs.tmp('test.db.pristine')
- try:
- if os.path.exists(db):
- os.unlink(db)
- if not os.path.exists(pristine):
- sql.migration.db_sync()
- shutil.copyfile(db, pristine)
- else:
- shutil.copyfile(pristine, db)
- except Exception:
- pass
+ if os.path.exists(db):
+ os.unlink(db)
+ if not os.path.exists(pristine):
+ migration.db_sync(migration_helpers.find_migrate_repo())
+ shutil.copyfile(db, pristine)
+ else:
+ shutil.copyfile(pristine, db)
+
+
+def teardown_database():
+ session.cleanup()
+
+
+@atexit.register
+def remove_test_databases():
+ db = dirs.tmp('test.db')
+ if os.path.exists(db):
+ os.unlink(db)
+ pristine = dirs.tmp('test.db.pristine')
+ if os.path.exists(pristine):
+ os.unlink(pristine)
def generate_paste_config(extension_name):
@@ -186,10 +197,6 @@ def remove_generated_paste_config(extension_name):
os.remove(paste_file_to_remove)
-def teardown_database():
- session.cleanup()
-
-
def skip_if_cache_disabled(*sections):
"""This decorator is used to skip a test if caching is disabled either
globally or for the specific section.
@@ -225,6 +232,10 @@ def skip_if_cache_disabled(*sections):
return wrapper
+class UnexpectedExit(Exception):
+ pass
+
+
class TestClient(object):
def __init__(self, app=None, token=None):
self.app = app
@@ -237,7 +248,7 @@ class TestClient(object):
if self.token:
headers.setdefault('X-Auth-Token', self.token)
- req = wsgi.Request.blank(path)
+ req = webob.Request.blank(path)
req.method = method
for k, v in six.iteritems(headers):
req.headers[k] = v
@@ -316,21 +327,17 @@ class TestCase(testtools.TestCase):
self.addCleanup(CONF.reset)
+ self.exit_patch = self.useFixture(mockpatch.PatchObject(sys, 'exit'))
+ self.exit_patch.mock.side_effect = UnexpectedExit
+
self.config([dirs.etc('keystone.conf.sample'),
dirs.tests('test_overrides.conf')])
self.opt(policy_file=dirs.etc('policy.json'))
- # NOTE(morganfainberg): The only way to reconfigure the
- # CacheRegion object on each setUp() call is to remove the
- # .backend property.
- self.addCleanup(delattr, cache.REGION, 'backend')
-
- # ensure the cache region instance is setup
- cache.configure_cache_region(cache.REGION)
-
self.logger = self.useFixture(fixtures.FakeLogger(level=logging.DEBUG))
warnings.filterwarnings('ignore', category=DeprecationWarning)
+ self.useFixture(ksfixtures.Cache())
# Clear the registry of providers so that providers from previous
# tests aren't used.
@@ -342,6 +349,7 @@ class TestCase(testtools.TestCase):
# Ensure Notification subscriptions and resource types are empty
self.addCleanup(notifications.SUBSCRIBERS.clear)
+ self.addCleanup(notifications._reset_notifier)
# Reset the auth-plugin registry
self.addCleanup(self.clear_auth_plugin_registry)
@@ -387,6 +395,14 @@ class TestCase(testtools.TestCase):
for manager_name, manager in six.iteritems(drivers):
setattr(self, manager_name, manager)
+ # The credential backend only supports SQL, so we always have to load
+ # the tables.
+ self.engine = session.get_engine()
+ self.addCleanup(session.cleanup)
+
+ sql.ModelBase.metadata.create_all(bind=self.engine)
+ self.addCleanup(sql.ModelBase.metadata.drop_all, bind=self.engine)
+
def load_fixtures(self, fixtures):
"""Hacky basic and naive fixture loading based on a python module.
@@ -476,8 +492,8 @@ class TestCase(testtools.TestCase):
self.assertTrue(len(l))
def assertDictEqual(self, d1, d2, msg=None):
- self.assertTrue(isinstance(d1, dict), 'First argument is not a dict')
- self.assertTrue(isinstance(d2, dict), 'Second argument is not a dict')
+ self.assertIsInstance(d1, dict)
+ self.assertIsInstance(d2, dict)
self.assertEqual(d1, d2, msg)
def assertRaisesRegexp(self, expected_exception, expected_regexp,
@@ -491,10 +507,10 @@ class TestCase(testtools.TestCase):
expected_regexp = re.compile(expected_regexp)
if isinstance(exc_value.args[0], gettextutils.Message):
- if not expected_regexp.search(unicode(exc_value)):
+ if not expected_regexp.search(six.text_type(exc_value)):
raise self.failureException(
'"%s" does not match "%s"' %
- (expected_regexp.pattern, unicode(exc_value)))
+ (expected_regexp.pattern, six.text_type(exc_value)))
else:
if not expected_regexp.search(str(exc_value)):
raise self.failureException(
@@ -505,7 +521,7 @@ class TestCase(testtools.TestCase):
excName = expected_exception.__name__
else:
excName = str(expected_exception)
- raise self.failureException, "%s not raised" % excName
+ raise self.failureException("%s not raised" % excName)
def assertDictContainsSubset(self, expected, actual, msg=None):
"""Checks whether actual is a superset of expected."""
diff --git a/keystone/tests/default_catalog.templates b/keystone/tests/default_catalog.templates
index f26c949ac..7efe7d55a 100644
--- a/keystone/tests/default_catalog.templates
+++ b/keystone/tests/default_catalog.templates
@@ -1,4 +1,4 @@
-# config for TemplatedCatalog, using camelCase because I don't want to do
+# config for templated.Catalog, using camelCase because I don't want to do
# translations for keystone compat
catalog.RegionOne.identity.publicURL = http://localhost:$(public_port)s/v2.0
catalog.RegionOne.identity.adminURL = http://localhost:$(admin_port)s/v2.0
diff --git a/keystone/tests/default_fixtures.py b/keystone/tests/default_fixtures.py
index 0d47f3acd..f25a85507 100644
--- a/keystone/tests/default_fixtures.py
+++ b/keystone/tests/default_fixtures.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/tests/fakeldap.py b/keystone/tests/fakeldap.py
index f2ec27066..347ad5213 100644
--- a/keystone/tests/fakeldap.py
+++ b/keystone/tests/fakeldap.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
@@ -26,9 +24,9 @@ library to work with nova.
import re
import shelve
+import six
import ldap
-import six
from keystone.common import utils
from keystone.openstack.common import log
@@ -41,6 +39,8 @@ SCOPE_NAMES = {
ldap.SCOPE_SUBTREE: 'SCOPE_SUBTREE',
}
+# http://msdn.microsoft.com/en-us/library/windows/desktop/aa366991(v=vs.85).aspx
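+# OID of the LDAP_SERVER_TREE_DELETE control: a delete carrying it removes the entry and its entire subtree.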
+CONTROL_TREEDELETE = '1.2.840.113556.1.4.805'
LOG = log.getLogger(__name__)
@@ -238,9 +238,16 @@ class FakeLdap(object):
if server_fail:
raise ldap.SERVER_DOWN
- key = '%s%s' % (self.__prefix, dn)
- LOG.debug('delete item: dn=%s', dn)
try:
+ if CONTROL_TREEDELETE in [c.controlType for c in serverctrls]:
+ LOG.debug('FakeLdap subtree_delete item: dn=%s', dn)
+ children = [k for k, v in six.iteritems(self.db)
+ if re.match('%s.*,%s' % (self.__prefix, dn), k)]
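+ # Delete each matched child entry first; the parent dn itself is removed below.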
+ for c in children:
+ del self.db[c]
+
+ LOG.debug('FakeLdap delete item: dn=%s', dn)
+ key = '%s%s' % (self.__prefix, dn)
del self.db[key]
except KeyError:
LOG.debug('delete item failed: dn=%s not found.', dn)
diff --git a/keystone/tests/filtering.py b/keystone/tests/filtering.py
index 419a82633..47deb6911 100644
--- a/keystone/tests/filtering.py
+++ b/keystone/tests/filtering.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/tests/fixtures/__init__.py b/keystone/tests/fixtures/__init__.py
index e69de29bb..d54164c4e 100644
--- a/keystone/tests/fixtures/__init__.py
+++ b/keystone/tests/fixtures/__init__.py
@@ -0,0 +1,16 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
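+# Re-export Cache at the package level; the noqa marker keeps flake8 from flagging the otherwise-unused import.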
+from keystone.tests.fixtures.cache import Cache # flake8: noqa
diff --git a/keystone/tests/fixtures/appserver.py b/keystone/tests/fixtures/appserver.py
index 7d8d90f86..63b86dae4 100644
--- a/keystone/tests/fixtures/appserver.py
+++ b/keystone/tests/fixtures/appserver.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/keystone/tests/fixtures/cache.py b/keystone/tests/fixtures/cache.py
new file mode 100644
index 000000000..b12940779
--- /dev/null
+++ b/keystone/tests/fixtures/cache.py
@@ -0,0 +1,38 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import fixtures
+
+from keystone.common import cache
+
+
+class Cache(fixtures.Fixture):
+ """A fixture for setting up and tearing down the cache between test cases.
+ """
+
+ def setUp(self):
+ # NOTE(dstanek): We must remove the existing cache backend in the
+ # setUp instead of the tearDown because it defaults to a no-op cache
+ # and we want the configure call below to create the correct backend.
+
+ # NOTE(morganfainberg): The only way to reconfigure the CacheRegion
+ # object on each setUp() call is to remove the .backend property.
+ if hasattr(cache.REGION, 'backend'):
+ del cache.REGION.backend
+
+ # ensure the cache region instance is setup
+ cache.configure_cache_region(cache.REGION)
+
+ super(Cache, self).setUp()
diff --git a/keystone/tests/legacy_d5.mysql b/keystone/tests/legacy_d5.mysql
deleted file mode 100644
index 57b31febe..000000000
--- a/keystone/tests/legacy_d5.mysql
+++ /dev/null
@@ -1,281 +0,0 @@
--- MySQL dump 10.13 Distrib 5.1.54, for debian-linux-gnu (x86_64)
---
--- Host: localhost Database: keystone
--- ------------------------------------------------------
--- Server version 5.1.54-1ubuntu4
-
-/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
-/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
-/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
-/*!40101 SET NAMES utf8 */;
-/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
-/*!40103 SET TIME_ZONE='+00:00' */;
-/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
-/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
-/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
-/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
-
---
--- Table structure for table `credentials`
---
-
-DROP TABLE IF EXISTS `credentials`;
-/*!40101 SET @saved_cs_client = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `credentials` (
- `id` int(11) NOT NULL AUTO_INCREMENT,
- `user_id` int(11) DEFAULT NULL,
- `tenant_id` int(11) DEFAULT NULL,
- `type` varchar(20) DEFAULT NULL,
- `key` varchar(255) DEFAULT NULL,
- `secret` varchar(255) DEFAULT NULL,
- PRIMARY KEY (`id`),
- KEY `tenant_id` (`tenant_id`),
- KEY `user_id` (`user_id`)
-) ENGINE=MyISAM AUTO_INCREMENT=3 DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Dumping data for table `credentials`
---
-
-LOCK TABLES `credentials` WRITE;
-/*!40000 ALTER TABLE `credentials` DISABLE KEYS */;
-INSERT INTO `credentials` VALUES (1,1,1,'EC2','admin','secrete'),(2,2,2,'EC2','demo','secrete');
-/*!40000 ALTER TABLE `credentials` ENABLE KEYS */;
-UNLOCK TABLES;
-
---
--- Table structure for table `endpoint_templates`
---
-
-DROP TABLE IF EXISTS `endpoint_templates`;
-/*!40101 SET @saved_cs_client = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `endpoint_templates` (
- `id` int(11) NOT NULL AUTO_INCREMENT,
- `region` varchar(255) DEFAULT NULL,
- `service_id` int(11) DEFAULT NULL,
- `public_url` varchar(2000) DEFAULT NULL,
- `admin_url` varchar(2000) DEFAULT NULL,
- `internal_url` varchar(2000) DEFAULT NULL,
- `enabled` tinyint(1) DEFAULT NULL,
- `is_global` tinyint(1) DEFAULT NULL,
- PRIMARY KEY (`id`),
- KEY `service_id` (`service_id`)
-) ENGINE=MyISAM AUTO_INCREMENT=4 DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Dumping data for table `endpoint_templates`
---
-
-LOCK TABLES `endpoint_templates` WRITE;
-/*!40000 ALTER TABLE `endpoint_templates` DISABLE KEYS */;
-INSERT INTO `endpoint_templates` VALUES (1,'RegionOne',1,'http://10.4.128.10:8774/v1.1/%tenant_id%','http://10.4.128.10:8774/v1.1/%tenant_id%','http://10.4.128.10:8774/v1.1/%tenant_id%',1,1),(2,'RegionOne',2,'http://10.4.128.10:9292/v1.1/%tenant_id%','http://10.4.128.10:9292/v1.1/%tenant_id%','http://10.4.128.10:9292/v1.1/%tenant_id%',1,1),(3,'RegionOne',3,'http://10.4.128.10:5000/v2.0','http://10.4.128.10:35357/v2.0','http://10.4.128.10:5000/v2.0',1,1);
-/*!40000 ALTER TABLE `endpoint_templates` ENABLE KEYS */;
-UNLOCK TABLES;
-
---
--- Table structure for table `endpoints`
---
-
-DROP TABLE IF EXISTS `endpoints`;
-/*!40101 SET @saved_cs_client = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `endpoints` (
- `id` int(11) NOT NULL AUTO_INCREMENT,
- `tenant_id` int(11) DEFAULT NULL,
- `endpoint_template_id` int(11) DEFAULT NULL,
- PRIMARY KEY (`id`),
- UNIQUE KEY `endpoint_template_id` (`endpoint_template_id`,`tenant_id`)
-) ENGINE=MyISAM DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Dumping data for table `endpoints`
---
-
-LOCK TABLES `endpoints` WRITE;
-/*!40000 ALTER TABLE `endpoints` DISABLE KEYS */;
-/*!40000 ALTER TABLE `endpoints` ENABLE KEYS */;
-UNLOCK TABLES;
-
---
--- Table structure for table `roles`
---
-
-DROP TABLE IF EXISTS `roles`;
-/*!40101 SET @saved_cs_client = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `roles` (
- `id` int(11) NOT NULL AUTO_INCREMENT,
- `name` varchar(255) DEFAULT NULL,
- `desc` varchar(255) DEFAULT NULL,
- `service_id` int(11) DEFAULT NULL,
- PRIMARY KEY (`id`),
- UNIQUE KEY `name` (`name`,`service_id`),
- KEY `service_id` (`service_id`)
-) ENGINE=MyISAM AUTO_INCREMENT=5 DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Dumping data for table `roles`
---
-
-LOCK TABLES `roles` WRITE;
-/*!40000 ALTER TABLE `roles` DISABLE KEYS */;
-INSERT INTO `roles` VALUES (1,'Admin',NULL,NULL),(2,'Member',NULL,NULL),(3,'KeystoneAdmin',NULL,NULL),(4,'KeystoneServiceAdmin',NULL,NULL);
-/*!40000 ALTER TABLE `roles` ENABLE KEYS */;
-UNLOCK TABLES;
-
---
--- Table structure for table `services`
---
-
-DROP TABLE IF EXISTS `services`;
-/*!40101 SET @saved_cs_client = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `services` (
- `id` int(11) NOT NULL AUTO_INCREMENT,
- `name` varchar(255) DEFAULT NULL,
- `type` varchar(255) DEFAULT NULL,
- `desc` varchar(255) DEFAULT NULL,
- PRIMARY KEY (`id`),
- UNIQUE KEY `name` (`name`)
-) ENGINE=MyISAM AUTO_INCREMENT=4 DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Dumping data for table `services`
---
-
-LOCK TABLES `services` WRITE;
-/*!40000 ALTER TABLE `services` DISABLE KEYS */;
-INSERT INTO `services` VALUES (1,'nova','compute','Nova Compute Service'),(2,'glance','image','Glance Image Service'),(3,'keystone','identity','Keystone Identity Service');
-/*!40000 ALTER TABLE `services` ENABLE KEYS */;
-UNLOCK TABLES;
-
---
--- Table structure for table `tenants`
---
-
-DROP TABLE IF EXISTS `tenants`;
-/*!40101 SET @saved_cs_client = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `tenants` (
- `id` int(11) NOT NULL AUTO_INCREMENT,
- `name` varchar(255) DEFAULT NULL,
- `desc` varchar(255) DEFAULT NULL,
- `enabled` int(11) DEFAULT NULL,
- PRIMARY KEY (`id`),
- UNIQUE KEY `name` (`name`)
-) ENGINE=MyISAM AUTO_INCREMENT=4 DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Dumping data for table `tenants`
---
-
-LOCK TABLES `tenants` WRITE;
-/*!40000 ALTER TABLE `tenants` DISABLE KEYS */;
-INSERT INTO `tenants` VALUES (1,'admin',NULL,1),(2,'demo',NULL,1),(3,'invisible_to_admin',NULL,1);
-/*!40000 ALTER TABLE `tenants` ENABLE KEYS */;
-UNLOCK TABLES;
-
---
--- Table structure for table `token`
---
-
-DROP TABLE IF EXISTS `token`;
-/*!40101 SET @saved_cs_client = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `token` (
- `id` varchar(255) NOT NULL,
- `user_id` int(11) DEFAULT NULL,
- `tenant_id` int(11) DEFAULT NULL,
- `expires` datetime DEFAULT NULL,
- PRIMARY KEY (`id`),
- UNIQUE KEY `id` (`id`)
-) ENGINE=MyISAM DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Dumping data for table `token`
---
-
-LOCK TABLES `token` WRITE;
-/*!40000 ALTER TABLE `token` DISABLE KEYS */;
-INSERT INTO `token` VALUES ('secrete',1,1,'2015-02-05 00:00:00');
-/*!40000 ALTER TABLE `token` ENABLE KEYS */;
-UNLOCK TABLES;
-
---
--- Table structure for table `user_roles`
---
-
-DROP TABLE IF EXISTS `user_roles`;
-/*!40101 SET @saved_cs_client = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `user_roles` (
- `id` int(11) NOT NULL AUTO_INCREMENT,
- `user_id` int(11) DEFAULT NULL,
- `role_id` int(11) DEFAULT NULL,
- `tenant_id` int(11) DEFAULT NULL,
- PRIMARY KEY (`id`),
- UNIQUE KEY `user_id` (`user_id`,`role_id`,`tenant_id`),
- KEY `tenant_id` (`tenant_id`),
- KEY `role_id` (`role_id`)
-) ENGINE=MyISAM AUTO_INCREMENT=8 DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Dumping data for table `user_roles`
---
-
-LOCK TABLES `user_roles` WRITE;
-/*!40000 ALTER TABLE `user_roles` DISABLE KEYS */;
-INSERT INTO `user_roles` VALUES (1,1,1,1),(2,2,2,2),(3,2,2,3),(4,1,1,2),(5,1,1,NULL),(6,1,3,NULL),(7,1,4,NULL);
-/*!40000 ALTER TABLE `user_roles` ENABLE KEYS */;
-UNLOCK TABLES;
-
---
--- Table structure for table `users`
---
-
-DROP TABLE IF EXISTS `users`;
-/*!40101 SET @saved_cs_client = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `users` (
- `id` int(11) NOT NULL AUTO_INCREMENT,
- `name` varchar(255) DEFAULT NULL,
- `password` varchar(255) DEFAULT NULL,
- `email` varchar(255) DEFAULT NULL,
- `enabled` int(11) DEFAULT NULL,
- `tenant_id` int(11) DEFAULT NULL,
- PRIMARY KEY (`id`),
- UNIQUE KEY `name` (`name`),
- KEY `tenant_id` (`tenant_id`)
-) ENGINE=MyISAM AUTO_INCREMENT=3 DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Dumping data for table `users`
---
-
-LOCK TABLES `users` WRITE;
-/*!40000 ALTER TABLE `users` DISABLE KEYS */;
-INSERT INTO `users` VALUES (1,'admin','secrete',NULL,1,NULL),(2,'demo','secrete',NULL,1,NULL);
-/*!40000 ALTER TABLE `users` ENABLE KEYS */;
-UNLOCK TABLES;
-/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
-
-/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
-/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
-/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
-/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
-/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
-/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
-/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
-
--- Dump completed on 2012-02-14 0:16:40
diff --git a/keystone/tests/legacy_d5.sqlite b/keystone/tests/legacy_d5.sqlite
deleted file mode 100644
index d96dbf40f..000000000
--- a/keystone/tests/legacy_d5.sqlite
+++ /dev/null
@@ -1,277 +0,0 @@
-begin;
--- MySQL dump 10.13 Distrib 5.1.54, for debian-linux-gnu (x86_64)
---
--- Host: localhost Database: keystone
--- ------------------------------------------------------
--- Server version 5.1.54-1ubuntu4
-
-/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
-/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
-/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
-/*!40101 SET NAMES utf8 */;
-/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
-/*!40103 SET TIME_ZONE='+00:00' */;
-/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
-/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
-/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
-/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
-
---
--- Table structure for table `credentials`
---
-
-DROP TABLE IF EXISTS `credentials`;
-/*!40101 SET @saved_cs_client = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `credentials` (
- `id` integer NOT NULL primary key autoincrement,
- `user_id` integer NULL,
- `tenant_id` integer NULL,
- `type` varchar(20) NULL,
- `key` varchar(255) NULL,
- `secret` varchar(255) NULL
-) ;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Dumping data for table `credentials`
---
-
-
-/*!40000 ALTER TABLE `credentials` DISABLE KEYS */;
-INSERT INTO `credentials` VALUES (1,1,1,'EC2','admin','secrete');
-INSERT INTO `credentials` VALUES (2,2,2,'EC2','demo','secrete');
-/*!40000 ALTER TABLE `credentials` ENABLE KEYS */;
-
-
---
--- Table structure for table `endpoint_templates`
---
-
-DROP TABLE IF EXISTS `endpoint_templates`;
-/*!40101 SET @saved_cs_client = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `endpoint_templates` (
- `id` integer NOT NULL primary key autoincrement,
- `region` varchar(255) NULL,
- `service_id` integer NULL,
- `public_url` varchar(2000) NULL,
- `admin_url` varchar(2000) NULL,
- `internal_url` varchar(2000) NULL,
- `enabled` integer NULL,
- `is_global` integer NULL
-) ;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Dumping data for table `endpoint_templates`
---
-
-
-/*!40000 ALTER TABLE `endpoint_templates` DISABLE KEYS */;
-INSERT INTO `endpoint_templates` VALUES (1,'RegionOne',1,'http://10.4.128.10:8774/v1.1/%tenant_id%','http://10.4.128.10:8774/v1.1/%tenant_id%','http://10.4.128.10:8774/v1.1/%tenant_id%',1,1);
-INSERT INTO `endpoint_templates` VALUES (2,'RegionOne',2,'http://10.4.128.10:9292/v1.1/%tenant_id%','http://10.4.128.10:9292/v1.1/%tenant_id%','http://10.4.128.10:9292/v1.1/%tenant_id%',1,1);
-INSERT INTO `endpoint_templates` VALUES (3,'RegionOne',3,'http://10.4.128.10:5000/v2.0','http://10.4.128.10:35357/v2.0','http://10.4.128.10:5000/v2.0',1,1);
-/*!40000 ALTER TABLE `endpoint_templates` ENABLE KEYS */;
-
-
---
--- Table structure for table `endpoints`
---
-
-DROP TABLE IF EXISTS `endpoints`;
-/*!40101 SET @saved_cs_client = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `endpoints` (
- `id` integer NOT NULL primary key autoincrement,
- `tenant_id` integer NULL,
- `endpoint_template_id` integer NULL
-) ;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Dumping data for table `endpoints`
---
-
-
-/*!40000 ALTER TABLE `endpoints` DISABLE KEYS */;
-/*!40000 ALTER TABLE `endpoints` ENABLE KEYS */;
-
-
---
--- Table structure for table `roles`
---
-
-DROP TABLE IF EXISTS `roles`;
-/*!40101 SET @saved_cs_client = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `roles` (
- `id` integer NOT NULL primary key autoincrement,
- `name` varchar(255) NULL,
- `desc` varchar(255) NULL,
- `service_id` integer NULL
-) ;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Dumping data for table `roles`
---
-
-
-/*!40000 ALTER TABLE `roles` DISABLE KEYS */;
-INSERT INTO `roles` VALUES (1,'Admin',NULL,NULL);
-INSERT INTO `roles` VALUES (2,'Member',NULL,NULL);
-INSERT INTO `roles` VALUES (3,'KeystoneAdmin',NULL,NULL);
-INSERT INTO `roles` VALUES (4,'KeystoneServiceAdmin',NULL,NULL);
-/*!40000 ALTER TABLE `roles` ENABLE KEYS */;
-
-
---
--- Table structure for table `services`
---
-
-DROP TABLE IF EXISTS `services`;
-/*!40101 SET @saved_cs_client = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `services` (
- `id` integer NOT NULL primary key autoincrement,
- `name` varchar(255) NULL,
- `type` varchar(255) NULL,
- `desc` varchar(255) NULL
-) ;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Dumping data for table `services`
---
-
-
-/*!40000 ALTER TABLE `services` DISABLE KEYS */;
-INSERT INTO `services` VALUES (1,'nova','compute','Nova Compute Service');
-INSERT INTO `services` VALUES (2,'glance','image','Glance Image Service');
-INSERT INTO `services` VALUES (3,'keystone','identity','Keystone Identity Service');
-/*!40000 ALTER TABLE `services` ENABLE KEYS */;
-
-
---
--- Table structure for table `tenants`
---
-
-DROP TABLE IF EXISTS `tenants`;
-/*!40101 SET @saved_cs_client = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `tenants` (
- `id` integer NOT NULL primary key autoincrement,
- `name` varchar(255) NULL,
- `desc` varchar(255) NULL,
- `enabled` integer NULL
-) ;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Dumping data for table `tenants`
---
-
-
-/*!40000 ALTER TABLE `tenants` DISABLE KEYS */;
-INSERT INTO `tenants` VALUES (1,'admin',NULL,1);
-INSERT INTO `tenants` VALUES (2,'demo',NULL,1);
-INSERT INTO `tenants` VALUES (3,'invisible_to_admin',NULL,1);
-/*!40000 ALTER TABLE `tenants` ENABLE KEYS */;
-
-
---
--- Table structure for table `token`
---
-
-DROP TABLE IF EXISTS `token`;
-/*!40101 SET @saved_cs_client = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `token` (
- `id` varchar(255) NOT NULL,
- `user_id` integer NULL,
- `tenant_id` integer NULL,
- `expires` datetime NULL
-) ;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Dumping data for table `token`
---
-
-
-/*!40000 ALTER TABLE `token` DISABLE KEYS */;
-INSERT INTO `token` VALUES ('secrete',1,1,'2015-02-05 00:00:00');
-/*!40000 ALTER TABLE `token` ENABLE KEYS */;
-
-
---
--- Table structure for table `user_roles`
---
-
-DROP TABLE IF EXISTS `user_roles`;
-/*!40101 SET @saved_cs_client = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `user_roles` (
- `id` integer NOT NULL primary key autoincrement,
- `user_id` integer NULL,
- `role_id` integer NULL,
- `tenant_id` integer NULL
-) ;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Dumping data for table `user_roles`
---
-
-
-/*!40000 ALTER TABLE `user_roles` DISABLE KEYS */;
-INSERT INTO `user_roles` VALUES (1,1,1,1);
-INSERT INTO `user_roles` VALUES (2,2,2,2);
-INSERT INTO `user_roles` VALUES (3,2,2,3);
-INSERT INTO `user_roles` VALUES (4,1,1,2);
-INSERT INTO `user_roles` VALUES (5,1,1,NULL);
-INSERT INTO `user_roles` VALUES (6,1,3,NULL);
-INSERT INTO `user_roles` VALUES (7,1,4,NULL);
-/*!40000 ALTER TABLE `user_roles` ENABLE KEYS */;
-
-
---
--- Table structure for table `users`
---
-
-DROP TABLE IF EXISTS `users`;
-/*!40101 SET @saved_cs_client = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `users` (
- `id` integer NOT NULL primary key autoincrement,
- `name` varchar(255) NULL,
- `password` varchar(255) NULL,
- `email` varchar(255) NULL,
- `enabled` integer NULL,
- `tenant_id` integer NULL
-) ;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Dumping data for table `users`
---
-
-
-/*!40000 ALTER TABLE `users` DISABLE KEYS */;
-INSERT INTO `users` VALUES (1,'admin','secrete',NULL,1,NULL);
-INSERT INTO `users` VALUES (2,'demo','secrete',NULL,1,NULL);
-/*!40000 ALTER TABLE `users` ENABLE KEYS */;
-
-/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
-
-/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
-/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
-/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
-/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
-/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
-/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
-/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
-
--- Dump completed on 2012-02-14 0:16:40
-commit;
diff --git a/keystone/tests/legacy_diablo.mysql b/keystone/tests/legacy_diablo.mysql
deleted file mode 100644
index 543f439f8..000000000
--- a/keystone/tests/legacy_diablo.mysql
+++ /dev/null
@@ -1,281 +0,0 @@
--- MySQL dump 10.13 Distrib 5.1.58, for debian-linux-gnu (x86_64)
---
--- Host: localhost Database: keystone
--- ------------------------------------------------------
--- Server version 5.1.58-1ubuntu1
-
-/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
-/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
-/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
-/*!40101 SET NAMES utf8 */;
-/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
-/*!40103 SET TIME_ZONE='+00:00' */;
-/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
-/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
-/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
-/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
-
---
--- Table structure for table `credentials`
---
-
-DROP TABLE IF EXISTS `credentials`;
-/*!40101 SET @saved_cs_client = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `credentials` (
- `id` int(11) NOT NULL AUTO_INCREMENT,
- `user_id` int(11) DEFAULT NULL,
- `tenant_id` int(11) DEFAULT NULL,
- `type` varchar(20) DEFAULT NULL,
- `key` varchar(255) DEFAULT NULL,
- `secret` varchar(255) DEFAULT NULL,
- PRIMARY KEY (`id`),
- KEY `tenant_id` (`tenant_id`),
- KEY `user_id` (`user_id`)
-) ENGINE=MyISAM AUTO_INCREMENT=3 DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Dumping data for table `credentials`
---
-
-LOCK TABLES `credentials` WRITE;
-/*!40000 ALTER TABLE `credentials` DISABLE KEYS */;
-INSERT INTO `credentials` VALUES (1,1,1,'EC2','admin','secrete'),(2,2,2,'EC2','demo','secrete');
-/*!40000 ALTER TABLE `credentials` ENABLE KEYS */;
-UNLOCK TABLES;
-
---
--- Table structure for table `endpoint_templates`
---
-
-DROP TABLE IF EXISTS `endpoint_templates`;
-/*!40101 SET @saved_cs_client = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `endpoint_templates` (
- `id` int(11) NOT NULL AUTO_INCREMENT,
- `region` varchar(255) DEFAULT NULL,
- `service_id` int(11) DEFAULT NULL,
- `public_url` varchar(2000) DEFAULT NULL,
- `admin_url` varchar(2000) DEFAULT NULL,
- `internal_url` varchar(2000) DEFAULT NULL,
- `enabled` tinyint(1) DEFAULT NULL,
- `is_global` tinyint(1) DEFAULT NULL,
- PRIMARY KEY (`id`),
- KEY `service_id` (`service_id`)
-) ENGINE=MyISAM AUTO_INCREMENT=5 DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Dumping data for table `endpoint_templates`
---
-
-LOCK TABLES `endpoint_templates` WRITE;
-/*!40000 ALTER TABLE `endpoint_templates` DISABLE KEYS */;
-INSERT INTO `endpoint_templates` VALUES (1,'RegionOne',1,'http://192.168.2.10:8774/v1.1/%tenant_id%','http://192.168.2.10:8774/v1.1/%tenant_id%','http://192.168.2.10:8774/v1.1/%tenant_id%',1,1),(2,'RegionOne',2,'http://192.168.2.10:9292/v1','http://192.168.2.10:9292/v1','http://192.168.2.10:9292/v1',1,1),(3,'RegionOne',3,'http://192.168.2.10:5000/v2.0','http://192.168.2.10:35357/v2.0','http://192.168.2.10:5000/v2.0',1,1),(4,'RegionOne',4,'http://192.168.2.10:8080/v1/AUTH_%tenant_id%','http://192.168.2.10:8080/','http://192.168.2.10:8080/v1/AUTH_%tenant_id%',1,1);
-/*!40000 ALTER TABLE `endpoint_templates` ENABLE KEYS */;
-UNLOCK TABLES;
-
---
--- Table structure for table `endpoints`
---
-
-DROP TABLE IF EXISTS `endpoints`;
-/*!40101 SET @saved_cs_client = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `endpoints` (
- `id` int(11) NOT NULL AUTO_INCREMENT,
- `tenant_id` int(11) DEFAULT NULL,
- `endpoint_template_id` int(11) DEFAULT NULL,
- PRIMARY KEY (`id`),
- UNIQUE KEY `endpoint_template_id` (`endpoint_template_id`,`tenant_id`)
-) ENGINE=MyISAM DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Dumping data for table `endpoints`
---
-
-LOCK TABLES `endpoints` WRITE;
-/*!40000 ALTER TABLE `endpoints` DISABLE KEYS */;
-/*!40000 ALTER TABLE `endpoints` ENABLE KEYS */;
-UNLOCK TABLES;
-
---
--- Table structure for table `roles`
---
-
-DROP TABLE IF EXISTS `roles`;
-/*!40101 SET @saved_cs_client = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `roles` (
- `id` int(11) NOT NULL AUTO_INCREMENT,
- `name` varchar(255) DEFAULT NULL,
- `desc` varchar(255) DEFAULT NULL,
- `service_id` int(11) DEFAULT NULL,
- PRIMARY KEY (`id`),
- UNIQUE KEY `name` (`name`,`service_id`),
- KEY `service_id` (`service_id`)
-) ENGINE=MyISAM AUTO_INCREMENT=7 DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Dumping data for table `roles`
---
-
-LOCK TABLES `roles` WRITE;
-/*!40000 ALTER TABLE `roles` DISABLE KEYS */;
-INSERT INTO `roles` VALUES (1,'Admin',NULL,NULL),(2,'Member',NULL,NULL),(3,'KeystoneAdmin',NULL,NULL),(4,'KeystoneServiceAdmin',NULL,NULL),(5,'sysadmin',NULL,NULL),(6,'netadmin',NULL,NULL);
-/*!40000 ALTER TABLE `roles` ENABLE KEYS */;
-UNLOCK TABLES;
-
---
--- Table structure for table `services`
---
-
-DROP TABLE IF EXISTS `services`;
-/*!40101 SET @saved_cs_client = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `services` (
- `id` int(11) NOT NULL AUTO_INCREMENT,
- `name` varchar(255) DEFAULT NULL,
- `type` varchar(255) DEFAULT NULL,
- `desc` varchar(255) DEFAULT NULL,
- PRIMARY KEY (`id`),
- UNIQUE KEY `name` (`name`)
-) ENGINE=MyISAM AUTO_INCREMENT=5 DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Dumping data for table `services`
---
-
-LOCK TABLES `services` WRITE;
-/*!40000 ALTER TABLE `services` DISABLE KEYS */;
-INSERT INTO `services` VALUES (1,'nova','compute','Nova Compute Service'),(2,'glance','image','Glance Image Service'),(3,'keystone','identity','Keystone Identity Service'),(4,'swift','object-store','Swift Service');
-/*!40000 ALTER TABLE `services` ENABLE KEYS */;
-UNLOCK TABLES;
-
---
--- Table structure for table `tenants`
---
-
-DROP TABLE IF EXISTS `tenants`;
-/*!40101 SET @saved_cs_client = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `tenants` (
- `id` int(11) NOT NULL AUTO_INCREMENT,
- `name` varchar(255) DEFAULT NULL,
- `desc` varchar(255) DEFAULT NULL,
- `enabled` int(11) DEFAULT NULL,
- PRIMARY KEY (`id`),
- UNIQUE KEY `name` (`name`)
-) ENGINE=MyISAM AUTO_INCREMENT=4 DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Dumping data for table `tenants`
---
-
-LOCK TABLES `tenants` WRITE;
-/*!40000 ALTER TABLE `tenants` DISABLE KEYS */;
-INSERT INTO `tenants` VALUES (1,'admin',NULL,1),(2,'demo',NULL,1),(3,'invisible_to_admin',NULL,1);
-/*!40000 ALTER TABLE `tenants` ENABLE KEYS */;
-UNLOCK TABLES;
-
---
--- Table structure for table `token`
---
-
-DROP TABLE IF EXISTS `token`;
-/*!40101 SET @saved_cs_client = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `token` (
- `id` varchar(255) NOT NULL,
- `user_id` int(11) DEFAULT NULL,
- `tenant_id` int(11) DEFAULT NULL,
- `expires` datetime DEFAULT NULL,
- PRIMARY KEY (`id`),
- UNIQUE KEY `id` (`id`)
-) ENGINE=MyISAM DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Dumping data for table `token`
---
-
-LOCK TABLES `token` WRITE;
-/*!40000 ALTER TABLE `token` DISABLE KEYS */;
-INSERT INTO `token` VALUES ('secrete',1,1,'2015-02-05 00:00:00');
-/*!40000 ALTER TABLE `token` ENABLE KEYS */;
-UNLOCK TABLES;
-
---
--- Table structure for table `user_roles`
---
-
-DROP TABLE IF EXISTS `user_roles`;
-/*!40101 SET @saved_cs_client = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `user_roles` (
- `id` int(11) NOT NULL AUTO_INCREMENT,
- `user_id` int(11) DEFAULT NULL,
- `role_id` int(11) DEFAULT NULL,
- `tenant_id` int(11) DEFAULT NULL,
- PRIMARY KEY (`id`),
- UNIQUE KEY `user_id` (`user_id`,`role_id`,`tenant_id`),
- KEY `tenant_id` (`tenant_id`),
- KEY `role_id` (`role_id`)
-) ENGINE=MyISAM AUTO_INCREMENT=10 DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Dumping data for table `user_roles`
---
-
-LOCK TABLES `user_roles` WRITE;
-/*!40000 ALTER TABLE `user_roles` DISABLE KEYS */;
-INSERT INTO `user_roles` VALUES (1,1,1,1),(2,2,2,2),(3,2,5,2),(4,2,6,2),(5,2,2,3),(6,1,1,2),(7,1,1,NULL),(8,1,3,NULL),(9,1,4,NULL);
-/*!40000 ALTER TABLE `user_roles` ENABLE KEYS */;
-UNLOCK TABLES;
-
---
--- Table structure for table `users`
---
-
-DROP TABLE IF EXISTS `users`;
-/*!40101 SET @saved_cs_client = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `users` (
- `id` int(11) NOT NULL AUTO_INCREMENT,
- `name` varchar(255) DEFAULT NULL,
- `password` varchar(255) DEFAULT NULL,
- `email` varchar(255) DEFAULT NULL,
- `enabled` int(11) DEFAULT NULL,
- `tenant_id` int(11) DEFAULT NULL,
- PRIMARY KEY (`id`),
- UNIQUE KEY `name` (`name`),
- KEY `tenant_id` (`tenant_id`)
-) ENGINE=MyISAM AUTO_INCREMENT=3 DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Dumping data for table `users`
---
-
-LOCK TABLES `users` WRITE;
-/*!40000 ALTER TABLE `users` DISABLE KEYS */;
-INSERT INTO `users` VALUES (1,'admin','secrete',NULL,1,NULL),(2,'demo','secrete',NULL,1,NULL);
-/*!40000 ALTER TABLE `users` ENABLE KEYS */;
-UNLOCK TABLES;
-/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
-
-/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
-/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
-/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
-/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
-/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
-/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
-/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
-
--- Dump completed on 2012-02-13 17:30:03
diff --git a/keystone/tests/legacy_diablo.sqlite b/keystone/tests/legacy_diablo.sqlite
deleted file mode 100644
index edf15be4c..000000000
--- a/keystone/tests/legacy_diablo.sqlite
+++ /dev/null
@@ -1,283 +0,0 @@
-begin;
--- MySQL dump 10.13 Distrib 5.1.58, for debian-linux-gnu (x86_64)
---
--- Host: localhost Database: keystone
--- ------------------------------------------------------
--- Server version 5.1.58-1ubuntu1
-
-/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
-/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
-/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
-/*!40101 SET NAMES utf8 */;
-/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
-/*!40103 SET TIME_ZONE='+00:00' */;
-/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
-/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
-/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
-/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
-
---
--- Table structure for table `credentials`
---
-
-DROP TABLE IF EXISTS `credentials`;
-/*!40101 SET @saved_cs_client = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `credentials` (
- `id` integer NOT NULL primary key autoincrement,
- `user_id` integer NULL,
- `tenant_id` integer NULL,
- `type` varchar(20) NULL,
- `key` varchar(255) NULL,
- `secret` varchar(255) NULL
-) ;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Dumping data for table `credentials`
---
-
-
-/*!40000 ALTER TABLE `credentials` DISABLE KEYS */;
-INSERT INTO `credentials` VALUES (1,1,1,'EC2','admin','secrete');
-INSERT INTO `credentials` VALUES (2,2,2,'EC2','demo','secrete');
-/*!40000 ALTER TABLE `credentials` ENABLE KEYS */;
-
-
---
--- Table structure for table `endpoint_templates`
---
-
-DROP TABLE IF EXISTS `endpoint_templates`;
-/*!40101 SET @saved_cs_client = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `endpoint_templates` (
- `id` integer NOT NULL primary key autoincrement,
- `region` varchar(255) NULL,
- `service_id` integer NULL,
- `public_url` varchar(2000) NULL,
- `admin_url` varchar(2000) NULL,
- `internal_url` varchar(2000) NULL,
- `enabled` integer NULL,
- `is_global` integer NULL
-) ;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Dumping data for table `endpoint_templates`
---
-
-
-/*!40000 ALTER TABLE `endpoint_templates` DISABLE KEYS */;
-INSERT INTO `endpoint_templates` VALUES (1,'RegionOne',1,'http://192.168.2.10:8774/v1.1/%tenant_id%','http://192.168.2.10:8774/v1.1/%tenant_id%','http://192.168.2.10:8774/v1.1/%tenant_id%',1,1);
-INSERT INTO `endpoint_templates` VALUES (2,'RegionOne',2,'http://192.168.2.10:9292/v1','http://192.168.2.10:9292/v1','http://192.168.2.10:9292/v1',1,1);
-INSERT INTO `endpoint_templates` VALUES (3,'RegionOne',3,'http://192.168.2.10:5000/v2.0','http://192.168.2.10:35357/v2.0','http://192.168.2.10:5000/v2.0',1,1);
-INSERT INTO `endpoint_templates` VALUES (4,'RegionOne',4,'http://192.168.2.10:8080/v1/AUTH_%tenant_id%','http://192.168.2.10:8080/','http://192.168.2.10:8080/v1/AUTH_%tenant_id%',1,1);
-/*!40000 ALTER TABLE `endpoint_templates` ENABLE KEYS */;
-
-
---
--- Table structure for table `endpoints`
---
-
-DROP TABLE IF EXISTS `endpoints`;
-/*!40101 SET @saved_cs_client = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `endpoints` (
- `id` integer NOT NULL primary key autoincrement,
- `tenant_id` integer NULL,
- `endpoint_template_id` integer NULL
-) ;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Dumping data for table `endpoints`
---
-
-
-/*!40000 ALTER TABLE `endpoints` DISABLE KEYS */;
-/*!40000 ALTER TABLE `endpoints` ENABLE KEYS */;
-
-
---
--- Table structure for table `roles`
---
-
-DROP TABLE IF EXISTS `roles`;
-/*!40101 SET @saved_cs_client = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `roles` (
- `id` integer NOT NULL primary key autoincrement,
- `name` varchar(255) NULL,
- `desc` varchar(255) NULL,
- `service_id` integer NULL
-) ;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Dumping data for table `roles`
---
-
-
-/*!40000 ALTER TABLE `roles` DISABLE KEYS */;
-INSERT INTO `roles` VALUES (1,'Admin',NULL,NULL);
-INSERT INTO `roles` VALUES (2,'Member',NULL,NULL);
-INSERT INTO `roles` VALUES (3,'KeystoneAdmin',NULL,NULL);
-INSERT INTO `roles` VALUES (4,'KeystoneServiceAdmin',NULL,NULL);
-INSERT INTO `roles` VALUES (5,'sysadmin',NULL,NULL);
-INSERT INTO `roles` VALUES (6,'netadmin',NULL,NULL);
-/*!40000 ALTER TABLE `roles` ENABLE KEYS */;
-
-
---
--- Table structure for table `services`
---
-
-DROP TABLE IF EXISTS `services`;
-/*!40101 SET @saved_cs_client = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `services` (
- `id` integer NOT NULL primary key autoincrement,
- `name` varchar(255) NULL,
- `type` varchar(255) NULL,
- `desc` varchar(255) NULL
-) ;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Dumping data for table `services`
---
-
-
-/*!40000 ALTER TABLE `services` DISABLE KEYS */;
-INSERT INTO `services` VALUES (1,'nova','compute','Nova Compute Service');
-INSERT INTO `services` VALUES (2,'glance','image','Glance Image Service');
-INSERT INTO `services` VALUES (3,'keystone','identity','Keystone Identity Service');
-INSERT INTO `services` VALUES (4,'swift','object-store','Swift Service');
-/*!40000 ALTER TABLE `services` ENABLE KEYS */;
-
-
---
--- Table structure for table `tenants`
---
-
-DROP TABLE IF EXISTS `tenants`;
-/*!40101 SET @saved_cs_client = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `tenants` (
- `id` integer NOT NULL primary key autoincrement,
- `name` varchar(255) NULL,
- `desc` varchar(255) NULL,
- `enabled` integer NULL
-) ;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Dumping data for table `tenants`
---
-
-
-/*!40000 ALTER TABLE `tenants` DISABLE KEYS */;
-INSERT INTO `tenants` VALUES (1,'admin',NULL,1);
-INSERT INTO `tenants` VALUES (2,'demo',NULL,1);
-INSERT INTO `tenants` VALUES (3,'invisible_to_admin',NULL,1);
-/*!40000 ALTER TABLE `tenants` ENABLE KEYS */;
-
-
---
--- Table structure for table `token`
---
-
-DROP TABLE IF EXISTS `token`;
-/*!40101 SET @saved_cs_client = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `token` (
- `id` varchar(255) NOT NULL,
- `user_id` integer NULL,
- `tenant_id` integer NULL,
- `expires` datetime NULL
-) ;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Dumping data for table `token`
---
-
-
-/*!40000 ALTER TABLE `token` DISABLE KEYS */;
-INSERT INTO `token` VALUES ('secrete',1,1,'2015-02-05 00:00:00');
-/*!40000 ALTER TABLE `token` ENABLE KEYS */;
-
-
---
--- Table structure for table `user_roles`
---
-
-DROP TABLE IF EXISTS `user_roles`;
-/*!40101 SET @saved_cs_client = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `user_roles` (
- `id` integer NOT NULL primary key autoincrement,
- `user_id` integer NULL,
- `role_id` integer NULL,
- `tenant_id` integer NULL
-) ;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Dumping data for table `user_roles`
---
-
-
-/*!40000 ALTER TABLE `user_roles` DISABLE KEYS */;
-INSERT INTO `user_roles` VALUES (1,1,1,1);
-INSERT INTO `user_roles` VALUES (2,2,2,2);
-INSERT INTO `user_roles` VALUES (3,2,5,2);
-INSERT INTO `user_roles` VALUES (4,2,6,2);
-INSERT INTO `user_roles` VALUES (5,2,2,3);
-INSERT INTO `user_roles` VALUES (6,1,1,2);
-INSERT INTO `user_roles` VALUES (7,1,1,NULL);
-INSERT INTO `user_roles` VALUES (8,1,3,NULL);
-INSERT INTO `user_roles` VALUES (9,1,4,NULL);
-/*!40000 ALTER TABLE `user_roles` ENABLE KEYS */;
-
-
---
--- Table structure for table `users`
---
-
-DROP TABLE IF EXISTS `users`;
-/*!40101 SET @saved_cs_client = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `users` (
- `id` integer NOT NULL primary key autoincrement,
- `name` varchar(255) NULL,
- `password` varchar(255) NULL,
- `email` varchar(255) NULL,
- `enabled` integer NULL,
- `tenant_id` integer NULL
-) ;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Dumping data for table `users`
---
-
-
-/*!40000 ALTER TABLE `users` DISABLE KEYS */;
-INSERT INTO `users` VALUES (1,'admin','secrete',NULL,1,NULL);
-INSERT INTO `users` VALUES (2,'demo','secrete',NULL,1,NULL);
-/*!40000 ALTER TABLE `users` ENABLE KEYS */;
-
-/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
-
-/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
-/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
-/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
-/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
-/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
-/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
-/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
-
--- Dump completed on 2012-02-13 17:30:03
-commit;
diff --git a/keystone/tests/legacy_essex.mysql b/keystone/tests/legacy_essex.mysql
deleted file mode 100644
index eade2cbfe..000000000
--- a/keystone/tests/legacy_essex.mysql
+++ /dev/null
@@ -1,309 +0,0 @@
--- MySQL dump 10.13 Distrib 5.1.58, for debian-linux-gnu (x86_64)
---
--- Host: localhost Database: keystone
--- ------------------------------------------------------
--- Server version 5.1.58-1ubuntu1
-
-/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
-/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
-/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
-/*!40101 SET NAMES utf8 */;
-/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
-/*!40103 SET TIME_ZONE='+00:00' */;
-/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
-/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
-/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
-/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
-
---
--- Table structure for table `credentials`
---
-
-DROP TABLE IF EXISTS `credentials`;
-/*!40101 SET @saved_cs_client = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `credentials` (
- `user_id` int(11) DEFAULT NULL,
- `tenant_id` int(11) DEFAULT NULL,
- `secret` varchar(255) DEFAULT NULL,
- `key` varchar(255) DEFAULT NULL,
- `type` varchar(20) DEFAULT NULL,
- `id` int(11) NOT NULL AUTO_INCREMENT,
- PRIMARY KEY (`id`)
-) ENGINE=MyISAM DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Dumping data for table `credentials`
---
-
-LOCK TABLES `credentials` WRITE;
-/*!40000 ALTER TABLE `credentials` DISABLE KEYS */;
-/*!40000 ALTER TABLE `credentials` ENABLE KEYS */;
-UNLOCK TABLES;
-
---
--- Table structure for table `endpoint_templates`
---
-
-DROP TABLE IF EXISTS `endpoint_templates`;
-/*!40101 SET @saved_cs_client = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `endpoint_templates` (
- `is_global` tinyint(1) DEFAULT NULL,
- `region` varchar(255) DEFAULT NULL,
- `public_url` varchar(2000) DEFAULT NULL,
- `enabled` tinyint(1) DEFAULT NULL,
- `internal_url` varchar(2000) DEFAULT NULL,
- `id` int(11) NOT NULL AUTO_INCREMENT,
- `service_id` int(11) DEFAULT NULL,
- `admin_url` varchar(2000) DEFAULT NULL,
- `version_id` varchar(20) DEFAULT NULL,
- `version_list` varchar(2000) DEFAULT NULL,
- `version_info` varchar(500) DEFAULT NULL,
- PRIMARY KEY (`id`)
-) ENGINE=MyISAM AUTO_INCREMENT=6 DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Dumping data for table `endpoint_templates`
---
-
-LOCK TABLES `endpoint_templates` WRITE;
-/*!40000 ALTER TABLE `endpoint_templates` DISABLE KEYS */;
-INSERT INTO `endpoint_templates` VALUES (1,'RegionOne','http://4.2.2.1:8774/v1.1/%tenant_id%',1,'http://4.2.2.1:8774/v1.1/%tenant_id%',1,1,'http://4.2.2.1:8774/v1.1/%tenant_id%',NULL,NULL,NULL),(1,'RegionOne','http://4.2.2.1:8773/services/Cloud',1,'http://4.2.2.1:8773/services/Cloud',2,2,'http://4.2.2.1:8773/services/Admin',NULL,NULL,NULL),(1,'RegionOne','http://4.2.2.1:9292/v1',1,'http://4.2.2.1:9292/v1',3,3,'http://4.2.2.1:9292/v1',NULL,NULL,NULL),(1,'RegionOne','http://4.2.2.1:5000/v2.0',1,'http://4.2.2.1:5000/v2.0',4,4,'http://4.2.2.1:35357/v2.0',NULL,NULL,NULL),(1,'RegionOne','http://4.2.2.1:8080/v1/AUTH_%tenant_id%',1,'http://4.2.2.1:8080/v1/AUTH_%tenant_id%',5,5,'http://4.2.2.1:8080/',NULL,NULL,NULL);
-/*!40000 ALTER TABLE `endpoint_templates` ENABLE KEYS */;
-UNLOCK TABLES;
-
---
--- Table structure for table `endpoints`
---
-
-DROP TABLE IF EXISTS `endpoints`;
-/*!40101 SET @saved_cs_client = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `endpoints` (
- `endpoint_template_id` int(11) DEFAULT NULL,
- `tenant_id` int(11) DEFAULT NULL,
- `id` int(11) NOT NULL AUTO_INCREMENT,
- PRIMARY KEY (`id`),
- UNIQUE KEY `endpoint_template_id` (`endpoint_template_id`,`tenant_id`)
-) ENGINE=MyISAM DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Dumping data for table `endpoints`
---
-
-LOCK TABLES `endpoints` WRITE;
-/*!40000 ALTER TABLE `endpoints` DISABLE KEYS */;
-/*!40000 ALTER TABLE `endpoints` ENABLE KEYS */;
-UNLOCK TABLES;
-
---
--- Table structure for table `migrate_version`
---
-
-DROP TABLE IF EXISTS `migrate_version`;
-/*!40101 SET @saved_cs_client = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `migrate_version` (
- `repository_id` varchar(250) NOT NULL,
- `repository_path` text,
- `version` int(11) DEFAULT NULL,
- PRIMARY KEY (`repository_id`)
-) ENGINE=MyISAM DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Dumping data for table `migrate_version`
---
-
-LOCK TABLES `migrate_version` WRITE;
-/*!40000 ALTER TABLE `migrate_version` DISABLE KEYS */;
-INSERT INTO `migrate_version` VALUES ('Keystone','/opt/stack/keystone/keystone/backends/sqlalchemy/migrate_repo',11);
-/*!40000 ALTER TABLE `migrate_version` ENABLE KEYS */;
-UNLOCK TABLES;
-
---
--- Table structure for table `roles`
---
-
-DROP TABLE IF EXISTS `roles`;
-/*!40101 SET @saved_cs_client = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `roles` (
- `service_id` int(11) DEFAULT NULL,
- `desc` varchar(255) DEFAULT NULL,
- `id` int(11) NOT NULL AUTO_INCREMENT,
- `name` varchar(255) DEFAULT NULL,
- PRIMARY KEY (`id`),
- UNIQUE KEY `name` (`name`,`service_id`)
-) ENGINE=MyISAM AUTO_INCREMENT=7 DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Dumping data for table `roles`
---
-
-LOCK TABLES `roles` WRITE;
-/*!40000 ALTER TABLE `roles` DISABLE KEYS */;
-INSERT INTO `roles` VALUES (NULL,NULL,1,'admin'),(NULL,NULL,2,'Member'),(NULL,NULL,3,'KeystoneAdmin'),(NULL,NULL,4,'KeystoneServiceAdmin'),(NULL,NULL,5,'sysadmin'),(NULL,NULL,6,'netadmin');
-/*!40000 ALTER TABLE `roles` ENABLE KEYS */;
-UNLOCK TABLES;
-
---
--- Table structure for table `services`
---
-
-DROP TABLE IF EXISTS `services`;
-/*!40101 SET @saved_cs_client = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `services` (
- `desc` varchar(255) DEFAULT NULL,
- `type` varchar(255) DEFAULT NULL,
- `id` int(11) NOT NULL AUTO_INCREMENT,
- `name` varchar(255) DEFAULT NULL,
- `owner_id` int(11) DEFAULT NULL,
- PRIMARY KEY (`id`),
- UNIQUE KEY `name` (`name`),
- UNIQUE KEY `name_2` (`name`)
-) ENGINE=MyISAM AUTO_INCREMENT=6 DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Dumping data for table `services`
---
-
-LOCK TABLES `services` WRITE;
-/*!40000 ALTER TABLE `services` DISABLE KEYS */;
-INSERT INTO `services` VALUES ('Nova Compute Service','compute',1,'nova',NULL),('EC2 Compatibility Layer','ec2',2,'ec2',NULL),('Glance Image Service','image',3,'glance',NULL),('Keystone Identity Service','identity',4,'keystone',NULL),('Swift Service','object-store',5,'swift',NULL);
-/*!40000 ALTER TABLE `services` ENABLE KEYS */;
-UNLOCK TABLES;
-
---
--- Table structure for table `tenants`
---
-
-DROP TABLE IF EXISTS `tenants`;
-/*!40101 SET @saved_cs_client = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `tenants` (
- `desc` varchar(255) DEFAULT NULL,
- `enabled` tinyint(1) DEFAULT NULL,
- `id` int(11) NOT NULL AUTO_INCREMENT,
- `name` varchar(255) DEFAULT NULL,
- `uid` varchar(255) NOT NULL,
- PRIMARY KEY (`id`),
- UNIQUE KEY `tenants_uid_key` (`uid`),
- UNIQUE KEY `name` (`name`),
- UNIQUE KEY `name_2` (`name`)
-) ENGINE=MyISAM AUTO_INCREMENT=4 DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Dumping data for table `tenants`
---
-
-LOCK TABLES `tenants` WRITE;
-/*!40000 ALTER TABLE `tenants` DISABLE KEYS */;
-INSERT INTO `tenants` VALUES (NULL,1,1,'admin','182c1fbf7eef44eda162ff3fd30c0a76'),(NULL,1,2,'demo','b1a7ea3a884f4d0685a98cd6e682a5da'),(NULL,1,3,'invisible_to_admin','f4d1eed9bb5d4d35a5f37af934f87574');
-/*!40000 ALTER TABLE `tenants` ENABLE KEYS */;
-UNLOCK TABLES;
-
---
--- Table structure for table `tokens`
---
-
-DROP TABLE IF EXISTS `tokens`;
-/*!40101 SET @saved_cs_client = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `tokens` (
- `tenant_id` int(11) DEFAULT NULL,
- `expires` datetime DEFAULT NULL,
- `user_id` int(11) DEFAULT NULL,
- `id` varchar(255) NOT NULL,
- PRIMARY KEY (`id`),
- UNIQUE KEY `id` (`id`)
-) ENGINE=MyISAM DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Dumping data for table `tokens`
---
-
-LOCK TABLES `tokens` WRITE;
-/*!40000 ALTER TABLE `tokens` DISABLE KEYS */;
-INSERT INTO `tokens` VALUES (1,'2015-02-05 00:00:00',1,'123123123123123123123');
-/*!40000 ALTER TABLE `tokens` ENABLE KEYS */;
-UNLOCK TABLES;
-
---
--- Table structure for table `user_roles`
---
-
-DROP TABLE IF EXISTS `user_roles`;
-/*!40101 SET @saved_cs_client = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `user_roles` (
- `tenant_id` int(11) DEFAULT NULL,
- `user_id` int(11) DEFAULT NULL,
- `id` int(11) NOT NULL AUTO_INCREMENT,
- `role_id` int(11) DEFAULT NULL,
- PRIMARY KEY (`id`),
- UNIQUE KEY `user_id` (`user_id`,`role_id`,`tenant_id`)
-) ENGINE=MyISAM AUTO_INCREMENT=10 DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Dumping data for table `user_roles`
---
-
-LOCK TABLES `user_roles` WRITE;
-/*!40000 ALTER TABLE `user_roles` DISABLE KEYS */;
-INSERT INTO `user_roles` VALUES (1,1,1,1),(2,2,2,2),(2,2,3,5),(2,2,4,6),(3,2,5,2),(2,1,6,1),(NULL,1,7,1),(NULL,1,8,3),(NULL,1,9,4);
-/*!40000 ALTER TABLE `user_roles` ENABLE KEYS */;
-UNLOCK TABLES;
-
---
--- Table structure for table `users`
---
-
-DROP TABLE IF EXISTS `users`;
-/*!40101 SET @saved_cs_client = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `users` (
- `name` varchar(255) DEFAULT NULL,
- `tenant_id` int(11) DEFAULT NULL,
- `enabled` tinyint(1) DEFAULT NULL,
- `id` int(11) NOT NULL AUTO_INCREMENT,
- `password` varchar(255) DEFAULT NULL,
- `email` varchar(255) DEFAULT NULL,
- `uid` varchar(255) NOT NULL,
- PRIMARY KEY (`id`),
- UNIQUE KEY `users_uid_key` (`uid`),
- UNIQUE KEY `name` (`name`),
- UNIQUE KEY `name_2` (`name`)
-) ENGINE=MyISAM AUTO_INCREMENT=3 DEFAULT CHARSET=latin1;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Dumping data for table `users`
---
-
-LOCK TABLES `users` WRITE;
-/*!40000 ALTER TABLE `users` DISABLE KEYS */;
-INSERT INTO `users` VALUES ('admin',NULL,1,1,'$6$rounds=40000$hFXlgBSMi599197d$tmGKBpoGHNRsLB3ruK9f1wPvvtfWWuMEUzdqUAynsmmYXBK6eekyNHTzzhwXTM3mWpnaMHCI4mHPOycqmPJJc0',NULL,'c93b19ea3fa94484824213db8ac0afce'),('demo',NULL,1,2,'$6$rounds=40000$RBsX2ja9fdj2uTNQ$/wJOn510AYKW9BPFAJneVQAjm6TM0Ty11LG.u4.k4RhmoUcXNSjGKmQT6KO0SsvypMM7A.doWgt73V5rNnv5h.',NULL,'04c6697e88ff4667820903fcce05d904');
-/*!40000 ALTER TABLE `users` ENABLE KEYS */;
-UNLOCK TABLES;
-/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
-
-/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
-/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
-/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
-/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
-/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
-/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
-/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
-
--- Dump completed on 2012-02-13 19:23:51
diff --git a/keystone/tests/legacy_essex.sqlite b/keystone/tests/legacy_essex.sqlite
deleted file mode 100644
index 72326d768..000000000
--- a/keystone/tests/legacy_essex.sqlite
+++ /dev/null
@@ -1,313 +0,0 @@
-begin;
--- MySQL dump 10.13 Distrib 5.1.58, for debian-linux-gnu (x86_64)
---
--- Host: localhost Database: keystone
--- ------------------------------------------------------
--- Server version 5.1.58-1ubuntu1
-
-/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
-/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
-/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
-/*!40101 SET NAMES utf8 */;
-/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
-/*!40103 SET TIME_ZONE='+00:00' */;
-/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
-/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
-/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
-/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
-
---
--- Table structure for table `credentials`
---
-
-DROP TABLE IF EXISTS `credentials`;
-/*!40101 SET @saved_cs_client = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `credentials` (
- `user_id` integer NULL,
- `tenant_id` integer NULL,
- `secret` varchar(255) NULL,
- `key` varchar(255) NULL,
- `type` varchar(20) NULL,
- `id` integer NOT NULL primary key autoincrement
-) ;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Dumping data for table `credentials`
---
-
-
-/*!40000 ALTER TABLE `credentials` DISABLE KEYS */;
-/*!40000 ALTER TABLE `credentials` ENABLE KEYS */;
-
-
---
--- Table structure for table `endpoint_templates`
---
-
-DROP TABLE IF EXISTS `endpoint_templates`;
-/*!40101 SET @saved_cs_client = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `endpoint_templates` (
- `is_global` integer NULL,
- `region` varchar(255) NULL,
- `public_url` varchar(2000) NULL,
- `enabled` integer NULL,
- `internal_url` varchar(2000) NULL,
- `id` integer NOT NULL primary key autoincrement,
- `service_id` integer NULL,
- `admin_url` varchar(2000) NULL,
- `version_id` varchar(20) NULL,
- `version_list` varchar(2000) NULL,
- `version_info` varchar(500) NULL
-) ;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Dumping data for table `endpoint_templates`
---
-
-
-/*!40000 ALTER TABLE `endpoint_templates` DISABLE KEYS */;
-INSERT INTO `endpoint_templates` VALUES (1,'RegionOne','http://4.2.2.1:8774/v1.1/%tenant_id%',1,'http://4.2.2.1:8774/v1.1/%tenant_id%',1,1,'http://4.2.2.1:8774/v1.1/%tenant_id%',NULL,NULL,NULL);
-INSERT INTO `endpoint_templates` VALUES (1,'RegionOne','http://4.2.2.1:8773/services/Cloud',1,'http://4.2.2.1:8773/services/Cloud',2,2,'http://4.2.2.1:8773/services/Admin',NULL,NULL,NULL);
-INSERT INTO `endpoint_templates` VALUES (1,'RegionOne','http://4.2.2.1:9292/v1',1,'http://4.2.2.1:9292/v1',3,3,'http://4.2.2.1:9292/v1',NULL,NULL,NULL);
-INSERT INTO `endpoint_templates` VALUES (1,'RegionOne','http://4.2.2.1:5000/v2.0',1,'http://4.2.2.1:5000/v2.0',4,4,'http://4.2.2.1:35357/v2.0',NULL,NULL,NULL);
-INSERT INTO `endpoint_templates` VALUES (1,'RegionOne','http://4.2.2.1:8080/v1/AUTH_%tenant_id%',1,'http://4.2.2.1:8080/v1/AUTH_%tenant_id%',5,5,'http://4.2.2.1:8080/',NULL,NULL,NULL);
-/*!40000 ALTER TABLE `endpoint_templates` ENABLE KEYS */;
-
-
---
--- Table structure for table `endpoints`
---
-
-DROP TABLE IF EXISTS `endpoints`;
-/*!40101 SET @saved_cs_client = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `endpoints` (
- `endpoint_template_id` integer NULL,
- `tenant_id` integer NULL,
- `id` integer NOT NULL primary key autoincrement
-) ;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Dumping data for table `endpoints`
---
-
-
-/*!40000 ALTER TABLE `endpoints` DISABLE KEYS */;
-/*!40000 ALTER TABLE `endpoints` ENABLE KEYS */;
-
-
---
--- Table structure for table `migrate_version`
---
-
-DROP TABLE IF EXISTS `migrate_version`;
-/*!40101 SET @saved_cs_client = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `migrate_version` (
- `repository_id` varchar(250) NOT NULL,
- `repository_path` text,
- `version` integer NULL
-) ;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Dumping data for table `migrate_version`
---
-
-
-/*!40000 ALTER TABLE `migrate_version` DISABLE KEYS */;
-INSERT INTO `migrate_version` VALUES ('Keystone','/opt/stack/keystone/keystone/backends/sqlalchemy/migrate_repo',11);
-/*!40000 ALTER TABLE `migrate_version` ENABLE KEYS */;
-
-
---
--- Table structure for table `roles`
---
-
-DROP TABLE IF EXISTS `roles`;
-/*!40101 SET @saved_cs_client = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `roles` (
- `service_id` integer NULL,
- `desc` varchar(255) NULL,
- `id` integer NOT NULL primary key autoincrement,
- `name` varchar(255) NULL
-) ;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Dumping data for table `roles`
---
-
-
-/*!40000 ALTER TABLE `roles` DISABLE KEYS */;
-INSERT INTO `roles` VALUES (NULL,NULL,1,'admin');
-INSERT INTO `roles` VALUES (NULL,NULL,2,'Member');
-INSERT INTO `roles` VALUES (NULL,NULL,3,'KeystoneAdmin');
-INSERT INTO `roles` VALUES (NULL,NULL,4,'KeystoneServiceAdmin');
-INSERT INTO `roles` VALUES (NULL,NULL,5,'sysadmin');
-INSERT INTO `roles` VALUES (NULL,NULL,6,'netadmin');
-/*!40000 ALTER TABLE `roles` ENABLE KEYS */;
-
-
---
--- Table structure for table `services`
---
-
-DROP TABLE IF EXISTS `services`;
-/*!40101 SET @saved_cs_client = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `services` (
- `desc` varchar(255) NULL,
- `type` varchar(255) NULL,
- `id` integer NOT NULL primary key autoincrement,
- `name` varchar(255) NULL,
- `owner_id` integer NULL
-) ;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Dumping data for table `services`
---
-
-
-/*!40000 ALTER TABLE `services` DISABLE KEYS */;
-INSERT INTO `services` VALUES ('Nova Compute Service','compute',1,'nova',NULL);
-INSERT INTO `services` VALUES ('EC2 Compatibility Layer','ec2',2,'ec2',NULL);
-INSERT INTO `services` VALUES ('Glance Image Service','image',3,'glance',NULL);
-INSERT INTO `services` VALUES ('Keystone Identity Service','identity',4,'keystone',NULL);
-INSERT INTO `services` VALUES ('Swift Service','object-store',5,'swift',NULL);
-/*!40000 ALTER TABLE `services` ENABLE KEYS */;
-
-
---
--- Table structure for table `tenants`
---
-
-DROP TABLE IF EXISTS `tenants`;
-/*!40101 SET @saved_cs_client = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `tenants` (
- `desc` varchar(255) NULL,
- `enabled` integer NULL,
- `id` integer NOT NULL primary key autoincrement,
- `name` varchar(255) NULL,
- `uid` varchar(255) NOT NULL
-) ;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Dumping data for table `tenants`
---
-
-
-/*!40000 ALTER TABLE `tenants` DISABLE KEYS */;
-INSERT INTO `tenants` VALUES (NULL,1,1,'admin','182c1fbf7eef44eda162ff3fd30c0a76');
-INSERT INTO `tenants` VALUES (NULL,1,2,'demo','b1a7ea3a884f4d0685a98cd6e682a5da');
-INSERT INTO `tenants` VALUES (NULL,1,3,'invisible_to_admin','f4d1eed9bb5d4d35a5f37af934f87574');
-/*!40000 ALTER TABLE `tenants` ENABLE KEYS */;
-
-
---
--- Table structure for table `tokens`
---
-
-DROP TABLE IF EXISTS `tokens`;
-/*!40101 SET @saved_cs_client = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `tokens` (
- `tenant_id` integer NULL,
- `expires` datetime NULL,
- `user_id` integer NULL,
- `id` varchar(255) NOT NULL
-) ;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Dumping data for table `tokens`
---
-
-
-/*!40000 ALTER TABLE `tokens` DISABLE KEYS */;
-INSERT INTO `tokens` VALUES (1,'2015-02-05 00:00:00',1,'123123123123123123123');
-/*!40000 ALTER TABLE `tokens` ENABLE KEYS */;
-
-
---
--- Table structure for table `user_roles`
---
-
-DROP TABLE IF EXISTS `user_roles`;
-/*!40101 SET @saved_cs_client = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `user_roles` (
- `tenant_id` integer NULL,
- `user_id` integer NULL,
- `id` integer NOT NULL primary key autoincrement,
- `role_id` integer NULL
-) ;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Dumping data for table `user_roles`
---
-
-
-/*!40000 ALTER TABLE `user_roles` DISABLE KEYS */;
-INSERT INTO `user_roles` VALUES (1,1,1,1);
-INSERT INTO `user_roles` VALUES (2,2,2,2);
-INSERT INTO `user_roles` VALUES (2,2,3,5);
-INSERT INTO `user_roles` VALUES (2,2,4,6);
-INSERT INTO `user_roles` VALUES (3,2,5,2);
-INSERT INTO `user_roles` VALUES (2,1,6,1);
-INSERT INTO `user_roles` VALUES (NULL,1,7,1);
-INSERT INTO `user_roles` VALUES (NULL,1,8,3);
-INSERT INTO `user_roles` VALUES (NULL,1,9,4);
-/*!40000 ALTER TABLE `user_roles` ENABLE KEYS */;
-
-
---
--- Table structure for table `users`
---
-
-DROP TABLE IF EXISTS `users`;
-/*!40101 SET @saved_cs_client = @@character_set_client */;
-/*!40101 SET character_set_client = utf8 */;
-CREATE TABLE `users` (
- `name` varchar(255) NULL,
- `tenant_id` integer NULL,
- `enabled` integer NULL,
- `id` integer NOT NULL primary key autoincrement,
- `password` varchar(255) NULL,
- `email` varchar(255) NULL,
- `uid` varchar(255) NOT NULL
-) ;
-/*!40101 SET character_set_client = @saved_cs_client */;
-
---
--- Dumping data for table `users`
---
-
-
-/*!40000 ALTER TABLE `users` DISABLE KEYS */;
-INSERT INTO `users` VALUES ('admin',NULL,1,1,'$6$rounds=40000$hFXlgBSMi599197d$tmGKBpoGHNRsLB3ruK9f1wPvvtfWWuMEUzdqUAynsmmYXBK6eekyNHTzzhwXTM3mWpnaMHCI4mHPOycqmPJJc0',NULL,'c93b19ea3fa94484824213db8ac0afce');
-INSERT INTO `users` VALUES ('demo',NULL,1,2,'$6$rounds=40000$RBsX2ja9fdj2uTNQ$/wJOn510AYKW9BPFAJneVQAjm6TM0Ty11LG.u4.k4RhmoUcXNSjGKmQT6KO0SsvypMM7A.doWgt73V5rNnv5h.',NULL,'04c6697e88ff4667820903fcce05d904');
-/*!40000 ALTER TABLE `users` ENABLE KEYS */;
-
-/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
-
-/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
-/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
-/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
-/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
-/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
-/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
-/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
-
--- Dump completed on 2012-02-13 19:23:51
-commit;
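
The password column in the deleted fixture above holds SHA-512 crypt strings ($6$rounds=40000$...), the passlib format keystone produced at the time. A minimal verification sketch, assuming passlib is installed; the candidate plaintext is hypothetical:

    # Sketch only: verify a fixture-style SHA-512 crypt hash with passlib.
    from passlib.hash import sha512_crypt

    stored = ('$6$rounds=40000$hFXlgBSMi599197d$tmGKBpoGHNRsLB3ruK9f1wPvvtf'
              'WWuMEUzdqUAynsmmYXBK6eekyNHTzzhwXTM3mWpnaMHCI4mHPOycqmPJJc0')

    def check_password(candidate):
        # Returns True only if candidate is the original plaintext.
        return sha512_crypt.verify(candidate, stored)
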
diff --git a/keystone/tests/mapping_fixtures.py b/keystone/tests/mapping_fixtures.py
index fca05036d..0a343b1ca 100644
--- a/keystone/tests/mapping_fixtures.py
+++ b/keystone/tests/mapping_fixtures.py
@@ -1,6 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@@ -13,13 +10,23 @@
# License for the specific language governing permissions and limitations
# under the License.
+"""Fixtures for Federation Mapping."""
+
+EMPLOYEE_GROUP_ID = "0cd5e9"
+CONTRACTOR_GROUP_ID = "85a868"
+TESTER_GROUP_ID = "123"
+DEVELOPER_GROUP_ID = "xyz"
+
+# Mapping summary:
+# LastName Bo & orgPersonType not Contractor or SubContractor -> group 0cd5e9
+# FirstName Jill & orgPersonType Contractor or SubContractor -> group 85a868
MAPPING_SMALL = {
"rules": [
{
"local": [
{
"group": {
- "id": "0cd5e9"
+ "id": EMPLOYEE_GROUP_ID
}
}
],
@@ -30,6 +37,12 @@ MAPPING_SMALL = {
"Contractor",
"SubContractor"
]
+ },
+ {
+ "type": "LastName",
+ "any_one_of": [
+ "Bo"
+ ]
}
]
},
@@ -37,7 +50,7 @@ MAPPING_SMALL = {
"local": [
{
"group": {
- "id": "85a868"
+ "id": CONTRACTOR_GROUP_ID
}
}
],
@@ -48,20 +61,33 @@ MAPPING_SMALL = {
"Contractor",
"SubContractor"
]
+ },
+ {
+ "type": "FirstName",
+ "any_one_of": [
+ "Jill"
+ ]
}
]
}
]
}
+# Mapping summary:
+# orgPersonType Admin or Big Cheese -> user name {0} {1}, email {2}, group 0cd5e9
+# orgPersonType not Admin/Employee/Contractor/Tester -> user name {0}, email {1}
+# orgPersonType Tester and email matching .*@example.com$ -> groups 123 and xyz
MAPPING_LARGE = {
"rules": [
{
"local": [
{
"user": {
- "name": "$0 $1",
- "email": "$2"
+ "name": "{0} {1}",
+ "email": "{2}"
+ },
+ "group": {
+ "id": EMPLOYEE_GROUP_ID
}
}
],
@@ -76,10 +102,10 @@ MAPPING_LARGE = {
"type": "Email"
},
{
- "type": "Group",
+ "type": "orgPersonType",
"any_one_of": [
"Admin",
- "God"
+ "Big Cheese"
]
}
]
@@ -88,8 +114,8 @@ MAPPING_LARGE = {
"local": [
{
"user": {
- "name": "$0",
- "email": "$1"
+ "name": "{0}",
+ "email": "{1}"
}
}
],
@@ -101,9 +127,12 @@ MAPPING_LARGE = {
"type": "Email"
},
{
- "type": "Group",
- "any_one_of": [
- "Customer"
+ "type": "orgPersonType",
+ "not_any_of": [
+ "Admin",
+ "Employee",
+ "Contractor",
+ "Tester"
]
}
]
@@ -112,26 +141,26 @@ MAPPING_LARGE = {
"local": [
{
"group": {
- "id": "123"
+ "id": TESTER_GROUP_ID
}
},
{
"group": {
- "id": "xyz"
+ "id": DEVELOPER_GROUP_ID
}
}
],
"remote": [
{
- "type": "Group",
+ "type": "orgPersonType",
"any_one_of": [
- "Special"
+ "Tester"
]
},
{
"type": "Email",
"any_one_of": [
- "^@example.com$"
+ ".*@example.com$"
],
"regex": True
}
@@ -214,7 +243,7 @@ MAPPING_WRONG_TYPE = {
{
"local": [
{
- "user": "$1"
+ "user": "{1}"
}
],
"remote": [
@@ -231,7 +260,7 @@ MAPPING_MISSING_TYPE = {
{
"local": [
{
- "user": "$1"
+ "user": "{1}"
}
],
"remote": [
@@ -331,3 +360,51 @@ MAPPING_EXTRA_RULES_PROPS = {
}
]
}
+
+EMPLOYEE_ASSERTION = {
+ 'Email': 'tim@example.com',
+ 'UserName': 'tbo',
+ 'FirstName': 'Tim',
+ 'LastName': 'Bo',
+ 'orgPersonType': 'Employee;BuildingX;'
+}
+
+CONTRACTOR_ASSERTION = {
+ 'Email': 'jill@example.com',
+ 'UserName': 'jsmith',
+ 'FirstName': 'Jill',
+ 'LastName': 'Smith',
+ 'orgPersonType': 'Contractor;Non-Dev;'
+}
+
+ADMIN_ASSERTION = {
+ 'Email': 'bob@example.com',
+ 'UserName': 'bob',
+ 'FirstName': 'Bob',
+ 'LastName': 'Thompson',
+ 'orgPersonType': 'Admin;Chief;'
+}
+
+CUSTOMER_ASSERTION = {
+ 'Email': 'beth@example.com',
+ 'UserName': 'bwilliams',
+ 'FirstName': 'Beth',
+ 'LastName': 'Williams',
+ 'orgPersonType': 'Customer;'
+}
+
+TESTER_ASSERTION = {
+ 'Email': 'testacct@example.com',
+ 'UserName': 'testacct',
+ 'FirstName': 'Test',
+ 'LastName': 'Account',
+ 'orgPersonType': 'Tester;'
+}
+
+BAD_TESTER_ASSERTION = {
+ 'Email': 'eviltester@example.org',
+ 'UserName': 'Evil',
+ 'FirstName': 'Test',
+ 'LastName': 'Account',
+ 'orgPersonType': 'Tester;'
+}
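
Taken together, these fixtures pair mapping rules with sample assertions: a rule fires only when every one of its remote conditions is satisfied. A toy evaluator (not keystone's actual mapping engine) sketching why EMPLOYEE_ASSERTION satisfies the first MAPPING_SMALL rule:

    # Toy evaluator for the fixtures above; assumes MAPPING_SMALL and
    # EMPLOYEE_ASSERTION are imported from this module.
    import re

    def rule_matches(rule, assertion):
        for cond in rule['remote']:
            # fixture values are semicolon-delimited, e.g. 'Employee;BuildingX;'
            values = [v for v in assertion.get(cond['type'], '').split(';') if v]
            if 'any_one_of' in cond:
                allowed = cond['any_one_of']
                if cond.get('regex'):
                    if not any(re.search(p, v) for p in allowed for v in values):
                        return False
                elif not set(values) & set(allowed):
                    return False
            elif 'not_any_of' in cond and set(values) & set(cond['not_any_of']):
                return False
        return True

    # Tim Bo is an Employee (not a Contractor) with LastName 'Bo', so he
    # lands in EMPLOYEE_GROUP_ID under the first rule.
    assert rule_matches(MAPPING_SMALL['rules'][0], EMPLOYEE_ASSERTION)
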
diff --git a/keystone/tests/matchers.py b/keystone/tests/matchers.py
index a0a0b7999..f904d339c 100644
--- a/keystone/tests/matchers.py
+++ b/keystone/tests/matchers.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/tests/rest.py b/keystone/tests/rest.py
index 42010ca19..caedcee0d 100644
--- a/keystone/tests/rest.py
+++ b/keystone/tests/rest.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/tests/test_associate_project_endpoint_extension.py b/keystone/tests/test_associate_project_endpoint_extension.py
index 76c1a2dcc..c5d6c5152 100644
--- a/keystone/tests/test_associate_project_endpoint_extension.py
+++ b/keystone/tests/test_associate_project_endpoint_extension.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -14,11 +12,11 @@
# License for the specific language governing permissions and limitations
# under the License.
-import os
import uuid
-from keystone.common.sql import migration
+from keystone.common.sql import migration_helpers
from keystone import contrib
+from keystone.openstack.common.db.sqlalchemy import migration
from keystone.openstack.common import importutils
from keystone import tests
from keystone.tests import test_v3
@@ -33,23 +31,20 @@ class TestExtensionCase(test_v3.RestfulTestCase):
self.conf_files = super(TestExtensionCase, self).config_files()
self.conf_files.append(
tests.dirs.tests('test_associate_project_endpoint_extension.conf'))
- self.addCleanup(self.conf_files.pop)
super(TestExtensionCase, self).setup_database()
- package_name = "%s.%s.migrate_repo" % (contrib.__name__,
- self.EXTENSION_NAME)
+ package_name = '.'.join((contrib.__name__, self.EXTENSION_NAME))
package = importutils.import_module(package_name)
- self.repo_path = os.path.abspath(
- os.path.dirname(package.__file__))
- migration.db_version_control(version=None, repo_path=self.repo_path)
- migration.db_sync(version=None, repo_path=self.repo_path)
+ abs_path = migration_helpers.find_migrate_repo(package)
+ migration.db_version_control(abs_path)
+ migration.db_sync(abs_path)
def setUp(self):
super(TestExtensionCase, self).setUp()
self.default_request_url = (
'/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
- 'project_id': self.default_domain_project_id,
- 'endpoint_id': self.endpoint_id})
+ 'project_id': self.default_domain_project_id,
+ 'endpoint_id': self.endpoint_id})
class AssociateEndpointProjectFilterCRUDTestCase(TestExtensionCase):
@@ -75,8 +70,8 @@ class AssociateEndpointProjectFilterCRUDTestCase(TestExtensionCase):
"""
self.put('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
- 'project_id': uuid.uuid4().hex,
- 'endpoint_id': self.endpoint_id},
+ 'project_id': uuid.uuid4().hex,
+ 'endpoint_id': self.endpoint_id},
body='',
expected_status=404)
@@ -88,8 +83,8 @@ class AssociateEndpointProjectFilterCRUDTestCase(TestExtensionCase):
"""
self.put('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
- 'project_id': self.default_domain_project_id,
- 'endpoint_id': uuid.uuid4().hex},
+ 'project_id': self.default_domain_project_id,
+ 'endpoint_id': uuid.uuid4().hex},
body='',
expected_status=404)
@@ -115,8 +110,8 @@ class AssociateEndpointProjectFilterCRUDTestCase(TestExtensionCase):
expected_status=204)
self.head('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
- 'project_id': self.default_domain_project_id,
- 'endpoint_id': self.endpoint_id},
+ 'project_id': self.default_domain_project_id,
+ 'endpoint_id': self.endpoint_id},
expected_status=204)
def test_check_endpoint_project_assoc_noproj(self):
@@ -128,8 +123,8 @@ class AssociateEndpointProjectFilterCRUDTestCase(TestExtensionCase):
self.put(self.default_request_url)
self.head('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
- 'project_id': uuid.uuid4().hex,
- 'endpoint_id': self.endpoint_id},
+ 'project_id': uuid.uuid4().hex,
+ 'endpoint_id': self.endpoint_id},
body='',
expected_status=404)
@@ -142,8 +137,8 @@ class AssociateEndpointProjectFilterCRUDTestCase(TestExtensionCase):
self.put(self.default_request_url)
self.head('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
- 'project_id': self.default_domain_project_id,
- 'endpoint_id': uuid.uuid4().hex},
+ 'project_id': self.default_domain_project_id,
+ 'endpoint_id': uuid.uuid4().hex},
body='',
expected_status=404)
@@ -206,8 +201,8 @@ class AssociateEndpointProjectFilterCRUDTestCase(TestExtensionCase):
self.put(self.default_request_url)
self.delete('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
- 'project_id': self.default_domain_project_id,
- 'endpoint_id': self.endpoint_id},
+ 'project_id': self.default_domain_project_id,
+ 'endpoint_id': self.endpoint_id},
expected_status=204)
def test_remove_endpoint_project_assoc_noproj(self):
@@ -219,8 +214,8 @@ class AssociateEndpointProjectFilterCRUDTestCase(TestExtensionCase):
self.put(self.default_request_url)
self.delete('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
- 'project_id': uuid.uuid4().hex,
- 'endpoint_id': self.endpoint_id},
+ 'project_id': uuid.uuid4().hex,
+ 'endpoint_id': self.endpoint_id},
body='',
expected_status=404)
@@ -233,8 +228,8 @@ class AssociateEndpointProjectFilterCRUDTestCase(TestExtensionCase):
self.put(self.default_request_url)
self.delete('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
- 'project_id': self.default_domain_project_id,
- 'endpoint_id': uuid.uuid4().hex},
+ 'project_id': self.default_domain_project_id,
+ 'endpoint_id': uuid.uuid4().hex},
body='',
expected_status=404)
@@ -265,8 +260,8 @@ class AssociateProjectEndpointFilterTokenRequestTestCase(TestExtensionCase):
# add one endpoint to the project
self.put('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
- 'project_id': project['id'],
- 'endpoint_id': self.endpoint_id},
+ 'project_id': project['id'],
+ 'endpoint_id': self.endpoint_id},
body='',
expected_status=204)
@@ -288,8 +283,8 @@ class AssociateProjectEndpointFilterTokenRequestTestCase(TestExtensionCase):
# add one endpoint to default project
self.put('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
- 'project_id': self.project['id'],
- 'endpoint_id': self.endpoint_id},
+ 'project_id': self.project['id'],
+ 'endpoint_id': self.endpoint_id},
body='',
expected_status=204)
@@ -329,8 +324,8 @@ class AssociateProjectEndpointFilterTokenRequestTestCase(TestExtensionCase):
# add one endpoint to the project
self.put('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
- 'project_id': project['id'],
- 'endpoint_id': self.endpoint_id},
+ 'project_id': project['id'],
+ 'endpoint_id': self.endpoint_id},
body='',
expected_status=204)
@@ -352,8 +347,8 @@ class AssociateProjectEndpointFilterTokenRequestTestCase(TestExtensionCase):
# add one endpoint to default project
self.put('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
- 'project_id': self.project['id'],
- 'endpoint_id': self.endpoint_id},
+ 'project_id': self.project['id'],
+ 'endpoint_id': self.endpoint_id},
body='',
expected_status=204)
@@ -422,8 +417,8 @@ class AssociateProjectEndpointFilterTokenRequestTestCase(TestExtensionCase):
# add first endpoint to default project
self.put('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
- 'project_id': self.project['id'],
- 'endpoint_id': self.endpoint_id},
+ 'project_id': self.project['id'],
+ 'endpoint_id': self.endpoint_id},
body='',
expected_status=204)
@@ -438,8 +433,8 @@ class AssociateProjectEndpointFilterTokenRequestTestCase(TestExtensionCase):
# add second endpoint to default project
self.put('/OS-EP-FILTER/projects/%(project_id)s'
'/endpoints/%(endpoint_id)s' % {
- 'project_id': self.project['id'],
- 'endpoint_id': self.endpoint_id2},
+ 'project_id': self.project['id'],
+ 'endpoint_id': self.endpoint_id2},
body='',
expected_status=204)
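
The cases above all drive the same OS-EP-FILTER association resource. As raw HTTP, the three verbs look roughly like this; the endpoint URL, token, and IDs are placeholders, not values from the patch:

    import requests

    BASE = 'http://localhost:35357/v3'         # assumed local admin endpoint
    HEADERS = {'X-Auth-Token': 'ADMIN_TOKEN'}  # placeholder token
    url = '%s/OS-EP-FILTER/projects/%s/endpoints/%s' % (
        BASE, 'PROJECT_ID', 'ENDPOINT_ID')

    requests.put(url, headers=HEADERS)     # create association -> 204
    requests.head(url, headers=HEADERS)    # check association  -> 204 (404 if absent)
    requests.delete(url, headers=HEADERS)  # remove association -> 204
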
diff --git a/keystone/tests/test_auth.py b/keystone/tests/test_auth.py
index c3ecb80ed..76561c166 100644
--- a/keystone/tests/test_auth.py
+++ b/keystone/tests/test_auth.py
@@ -205,8 +205,7 @@ class AuthWithToken(AuthTest):
body_dict = _build_user_auth(username='FOO',
password='foo2')
unscoped_token = self.controller.authenticate({}, body_dict)
- tenant = unscoped_token["access"]["token"].get("tenant", None)
- self.assertEqual(tenant, None)
+ self.assertNotIn('tenant', unscoped_token['access']['token'])
def test_auth_invalid_token(self):
"""Verify exception is raised if invalid token."""
@@ -257,8 +256,8 @@ class AuthWithToken(AuthTest):
tenant = scoped_token["access"]["token"]["tenant"]
roles = scoped_token["access"]["metadata"]["roles"]
- self.assertEqual(tenant["id"], self.tenant_bar['id'])
- self.assertEqual(roles[0], self.role_member['id'])
+ self.assertEqual(self.tenant_bar['id'], tenant["id"])
+ self.assertEqual(self.role_member['id'], roles[0])
def test_auth_token_project_group_role(self):
"""Verify getting a token in a tenant with group roles."""
@@ -290,7 +289,7 @@ class AuthWithToken(AuthTest):
tenant = scoped_token["access"]["token"]["tenant"]
roles = scoped_token["access"]["metadata"]["roles"]
- self.assertEqual(tenant["id"], self.tenant_bar['id'])
+ self.assertEqual(self.tenant_bar['id'], tenant["id"])
self.assertIn(self.role_member['id'], roles)
self.assertIn(self.role_admin['id'], roles)
@@ -343,7 +342,7 @@ class AuthWithToken(AuthTest):
scoped_token = self.controller.authenticate({}, body_dict)
tenant = scoped_token["access"]["token"]["tenant"]
roles = scoped_token["access"]["metadata"]["roles"]
- self.assertEqual(tenant["id"], project1['id'])
+ self.assertEqual(project1['id'], tenant["id"])
self.assertIn(self.role_member['id'], roles)
self.assertIn(self.role_admin['id'], roles)
self.assertNotIn(role_foo_domain1['id'], roles)
@@ -394,7 +393,7 @@ class AuthWithToken(AuthTest):
# the token should have bind information in it
bind = unscoped_token['access']['token']['bind']
- self.assertEqual(bind['kerberos'], 'FOO')
+ self.assertEqual('FOO', bind['kerberos'])
body_dict = _build_user_auth(
token=unscoped_token['access']['token'],
@@ -412,7 +411,7 @@ class AuthWithToken(AuthTest):
# the bind information should be carried over from the original token
bind = scoped_token['access']['token']['bind']
- self.assertEqual(bind['kerberos'], 'FOO')
+ self.assertEqual('FOO', bind['kerberos'])
def test_deleting_role_revokes_token(self):
role_controller = assignment.controllers.Role()
@@ -627,7 +626,7 @@ class AuthWithRemoteUser(AuthTest):
body_dict = _build_user_auth(tenant_name="BAR")
token = self.controller.authenticate(self.context_with_remote_user,
body_dict)
- self.assertEqual(token['access']['token']['bind']['kerberos'], 'FOO')
+ self.assertEqual('FOO', token['access']['token']['bind']['kerberos'])
def test_bind_without_config_opt(self):
self.opt_in_group('token', bind=['x509'])
@@ -707,11 +706,11 @@ class AuthWithTrust(AuthTest):
self.new_trust = None
self.sample_data['roles'] = []
self.create_trust()
- self.assertEqual(self.new_trust['roles'], [])
+ self.assertEqual([], self.new_trust['roles'])
def test_create_trust(self):
- self.assertEqual(self.new_trust['trustor_user_id'], self.trustor['id'])
- self.assertEqual(self.new_trust['trustee_user_id'], self.trustee['id'])
+ self.assertEqual(self.trustor['id'], self.new_trust['trustor_user_id'])
+ self.assertEqual(self.trustee['id'], self.new_trust['trustee_user_id'])
role_ids = [self.role_browser['id'], self.role_member['id']]
self.assertTrue(timeutils.parse_strtime(self.new_trust['expires_at'],
fmt=TIME_FORMAT))
@@ -738,16 +737,16 @@ class AuthWithTrust(AuthTest):
context = {'token_id': self.unscoped_token['access']['token']['id']}
trust = self.trust_controller.get_trust(context,
self.new_trust['id'])['trust']
- self.assertEqual(trust['trustor_user_id'], self.trustor['id'])
- self.assertEqual(trust['trustee_user_id'], self.trustee['id'])
+ self.assertEqual(self.trustor['id'], trust['trustor_user_id'])
+ self.assertEqual(self.trustee['id'], trust['trustee_user_id'])
role_ids = [self.role_browser['id'], self.role_member['id']]
for role in self.new_trust['roles']:
self.assertIn(role['id'], role_ids)
def test_create_trust_no_impersonation(self):
self.create_trust(expires_at=None, impersonation=False)
- self.assertEqual(self.new_trust['trustor_user_id'], self.trustor['id'])
- self.assertEqual(self.new_trust['trustee_user_id'], self.trustee['id'])
+ self.assertEqual(self.trustor['id'], self.new_trust['trustor_user_id'])
+ self.assertEqual(self.trustee['id'], self.new_trust['trustee_user_id'])
self.assertIs(self.new_trust['impersonation'], False)
auth_response = self.fetch_v2_token_from_trust()
token_user = auth_response['access']['user']
@@ -757,8 +756,8 @@ class AuthWithTrust(AuthTest):
def test_create_trust_impersonation(self):
self.create_trust(expires_at=None)
- self.assertEqual(self.new_trust['trustor_user_id'], self.trustor['id'])
- self.assertEqual(self.new_trust['trustee_user_id'], self.trustee['id'])
+ self.assertEqual(self.trustor['id'], self.new_trust['trustor_user_id'])
+ self.assertEqual(self.trustee['id'], self.new_trust['trustee_user_id'])
self.assertIs(self.new_trust['impersonation'], True)
auth_response = self.fetch_v2_token_from_trust()
token_user = auth_response['access']['user']
@@ -809,17 +808,17 @@ class AuthWithTrust(AuthTest):
auth_response = self.fetch_v3_token_from_trust()
trust_token_user = auth_response.json['token']['user']
- self.assertEqual(trust_token_user['id'], self.trustor['id'])
+ self.assertEqual(self.trustor['id'], trust_token_user['id'])
trust_token_trust = auth_response.json['token']['OS-TRUST:trust']
self.assertEqual(trust_token_trust['id'], self.new_trust['id'])
- self.assertEqual(trust_token_trust['trustor_user']['id'],
- self.trustor['id'])
- self.assertEqual(trust_token_trust['trustee_user']['id'],
- self.trustee['id'])
+ self.assertEqual(self.trustor['id'],
+ trust_token_trust['trustor_user']['id'])
+ self.assertEqual(self.trustee['id'],
+ trust_token_trust['trustee_user']['id'])
trust_token_roles = auth_response.json['token']['roles']
- self.assertEqual(len(trust_token_roles), 2)
+ self.assertEqual(2, len(trust_token_roles))
def test_v3_trust_token_get_token_fails(self):
auth_response = self.fetch_v3_token_from_trust()
@@ -838,8 +837,8 @@ class AuthWithTrust(AuthTest):
auth_response = self.fetch_v2_token_from_trust()
self.assertIsNotNone(auth_response)
- self.assertEqual(len(auth_response['access']['metadata']['roles']),
- 2,
+ self.assertEqual(2,
+ len(auth_response['access']['metadata']['roles']),
"user_foo has three roles, but the token should"
" only get the two roles specified in the trust.")
@@ -847,7 +846,7 @@ class AuthWithTrust(AuthTest):
tokens = self.trust_controller.token_api._list_tokens(
self.trustee['id'], trust_id=self.new_trust['id'])
token_count = len(tokens)
- self.assertEqual(token_count, expected_value)
+ self.assertEqual(expected_value, token_count)
def test_delete_tokens_for_user_invalidates_tokens_from_trust(self):
self.assert_token_count_for_trust(0)
@@ -872,11 +871,11 @@ class AuthWithTrust(AuthTest):
trust_id = self.new_trust['id']
tokens = self.token_api._list_tokens(self.trustor['id'],
trust_id=trust_id)
- self.assertEqual(len(tokens), 1)
+ self.assertEqual(1, len(tokens))
self.trust_controller.delete_trust(context, trust_id=trust_id)
tokens = self.token_api._list_tokens(self.trustor['id'],
trust_id=trust_id)
- self.assertEqual(len(tokens), 0)
+ self.assertEqual(0, len(tokens))
def test_token_from_trust_with_no_role_fails(self):
for assigned_role in self.assigned_roles:
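
Most of the churn above is one mechanical change: flipping assertEqual arguments into (expected, observed) order. unittest/testtools build the failure message from argument position, so putting the expected value first keeps a failing diagnostic reading the right way around. A minimal standalone illustration, not taken from the patch:

    import unittest

    class ArgumentOrder(unittest.TestCase):
        def test_expected_first(self):
            observed = 'FOO'
            # On failure this reports 'FOO' != <observed>, with the
            # expected value unambiguously first.
            self.assertEqual('FOO', observed)

    if __name__ == '__main__':
        unittest.main()
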
diff --git a/keystone/tests/test_auth_plugin.py b/keystone/tests/test_auth_plugin.py
index 8001f3c02..d401cd7aa 100644
--- a/keystone/tests/test_auth_plugin.py
+++ b/keystone/tests/test_auth_plugin.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/tests/test_backend.py b/keystone/tests/test_backend.py
index 27b00421d..94a431972 100644
--- a/keystone/tests/test_backend.py
+++ b/keystone/tests/test_backend.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -81,17 +79,20 @@ class IdentityTests(object):
def test_authenticate_bad_user(self):
self.assertRaises(AssertionError,
self.identity_api.authenticate,
+ context={},
user_id=uuid.uuid4().hex,
password=self.user_foo['password'])
def test_authenticate_bad_password(self):
self.assertRaises(AssertionError,
self.identity_api.authenticate,
+ context={},
user_id=self.user_foo['id'],
password=uuid.uuid4().hex)
def test_authenticate(self):
user_ref = self.identity_api.authenticate(
+ context={},
user_id=self.user_sna['id'],
password=self.user_sna['password'])
# NOTE(termie): the password field is left in user_sna to make
@@ -112,6 +113,7 @@ class IdentityTests(object):
self.assignment_api.add_user_to_project(self.tenant_baz['id'],
user['id'])
user_ref = self.identity_api.authenticate(
+ context={},
user_id=user['id'],
password=user['password'])
self.assertNotIn('password', user_ref)
@@ -136,6 +138,7 @@ class IdentityTests(object):
self.assertRaises(AssertionError,
self.identity_api.authenticate,
+ context={},
user_id=id_,
password='password')
@@ -723,12 +726,6 @@ class IdentityTests(object):
uuid.uuid4().hex)
def test_add_role_to_user_and_project_404(self):
- self.assertRaises(exception.UserNotFound,
- self.assignment_api.add_role_to_user_and_project,
- uuid.uuid4().hex,
- self.tenant_bar['id'],
- self.role_admin['id'])
-
self.assertRaises(exception.ProjectNotFound,
self.assignment_api.add_role_to_user_and_project,
self.user_foo['id'],
@@ -741,6 +738,13 @@ class IdentityTests(object):
self.tenant_bar['id'],
uuid.uuid4().hex)
+ def test_add_role_to_user_and_project_no_user(self):
+ # If add_role_to_user_and_project is called with a user that doesn't
+ # exist, no error is raised.
+ user_id_not_exist = uuid.uuid4().hex
+ self.assignment_api.add_role_to_user_and_project(
+ user_id_not_exist, self.tenant_bar['id'], self.role_admin['id'])
+
def test_remove_role_from_user_and_project(self):
self.assignment_api.add_role_to_user_and_project(
self.user_foo['id'], self.tenant_bar['id'], 'member')
@@ -1567,10 +1571,12 @@ class IdentityTests(object):
uuid.uuid4().hex,
self.user_foo['id'])
- self.assertRaises(exception.UserNotFound,
- self.assignment_api.add_user_to_project,
- self.tenant_bar['id'],
- uuid.uuid4().hex)
+ def test_add_user_to_project_no_user(self):
+ # If add_user_to_project is called with a user that doesn't exist,
+ # no error is raised.
+ user_id_not_exist = uuid.uuid4().hex
+ self.assignment_api.add_user_to_project(self.tenant_bar['id'],
+ user_id_not_exist)
def test_remove_user_from_project(self):
self.assignment_api.add_user_to_project(self.tenant_baz['id'],
@@ -1818,10 +1824,12 @@ class IdentityTests(object):
# with a password that is empty string or None
self.assertRaises(AssertionError,
self.identity_api.authenticate,
+ context={},
user_id='fake1',
password='')
self.assertRaises(AssertionError,
self.identity_api.authenticate,
+ context={},
user_id='fake1',
password=None)
@@ -1834,10 +1842,12 @@ class IdentityTests(object):
# with a password that is empty string or None
self.assertRaises(AssertionError,
self.identity_api.authenticate,
+ context={},
user_id='fake1',
password='')
self.assertRaises(AssertionError,
self.identity_api.authenticate,
+ context={},
user_id='fake1',
password=None)
@@ -2895,14 +2905,14 @@ class TokenTests(object):
data_ref = self.token_api.create_token(token_id, data)
expires = data_ref.pop('expires')
data_ref.pop('user_id')
- self.assertTrue(isinstance(expires, datetime.datetime))
+ self.assertIsInstance(expires, datetime.datetime)
data_ref.pop('id')
data.pop('id')
self.assertDictEqual(data_ref, data)
new_data_ref = self.token_api.get_token(token_id)
expires = new_data_ref.pop('expires')
- self.assertTrue(isinstance(expires, datetime.datetime))
+ self.assertIsInstance(expires, datetime.datetime)
new_data_ref.pop('user_id')
new_data_ref.pop('id')
@@ -3348,11 +3358,11 @@ class TrustTests(object):
trust_data = (self.trust_api.create_trust
(new_id,
{'trustor_user_id': self.trustor['id'],
- 'trustee_user_id': self.user_two['id'],
- 'project_id': self.tenant_bar['id'],
- 'expires_at': timeutils.
+ 'trustee_user_id': self.user_two['id'],
+ 'project_id': self.tenant_bar['id'],
+ 'expires_at': timeutils.
parse_isotime('2031-02-18T18:10:00Z'),
- 'impersonation': True},
+ 'impersonation': True},
roles=[{"id": "member"},
{"id": "other"},
{"id": "browser"}]))
@@ -3450,7 +3460,6 @@ class CatalogTests(object):
'description': uuid.uuid4().hex,
}
res = self.catalog_api.create_region(
- region_id,
new_region.copy())
# Ensure that we don't need to have a
# parent_region_id in the original supplied
@@ -3471,7 +3480,6 @@ class CatalogTests(object):
'parent_region_id': parent_region_id
}
self.catalog_api.create_region(
- region_id,
new_region.copy())
# list
@@ -3513,7 +3521,6 @@ class CatalogTests(object):
}
self.assertRaises(exception.RegionNotFound,
self.catalog_api.create_region,
- region_id,
new_region)
def test_service_crud(self):
@@ -4059,7 +4066,7 @@ class FilterTests(filtering.FilterTests):
# SQL backend tests). For production deployments. OpenStack
# assumes a case sensitive database. For these tests, therefore, we
# need to be able to check the sensitivity of the database so as to
- # know whether to run case senstive tests here.
+ # know whether to run case sensitive tests here.
self._delete_test_data('user', user_list)
@@ -4093,3 +4100,76 @@ class FilterTests(filtering.FilterTests):
groups = self.identity_api.list_groups()
self.assertTrue(len(groups) > 0)
+
+
+class LimitTests(filtering.FilterTests):
+ def setUp(self):
+ """Setup for Limit Test Cases."""
+
+ self.domain1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+ self.assignment_api.create_domain(self.domain1['id'], self.domain1)
+ self.addCleanup(self.clean_up_domain)
+
+ self.entity_lists = {}
+ self.domain1_entity_lists = {}
+
+ for entity in ['user', 'group', 'project']:
+ # Create 20 entities, 14 of which are in domain1
+ self.entity_lists[entity] = self._create_test_data(entity, 6)
+ self.domain1_entity_lists[entity] = self._create_test_data(
+ entity, 14, self.domain1['id'])
+ # Make sure we clean up when finished
+ self.addCleanup(self.clean_up_entity, entity)
+
+ def clean_up_domain(self):
+ """Clean up domain test data from Limit Test Cases."""
+
+ self.domain1['enabled'] = False
+ self.assignment_api.update_domain(self.domain1['id'], self.domain1)
+ self.assignment_api.delete_domain(self.domain1['id'])
+
+ def clean_up_entity(self, entity):
+ """Clean up entity test data from Limit Test Cases."""
+
+ self._delete_test_data(entity, self.entity_lists[entity])
+ self._delete_test_data(entity, self.domain1_entity_lists[entity])
+
+ def _test_list_entity_filtered_and_limited(self, entity):
+ self.opt(list_limit=10)
+ # Should get back just 10 entities in domain1
+ hints = driver_hints.Hints()
+ hints.add_filter('domain_id', self.domain1['id'])
+ entities = self._list_entities(entity)(hints=hints)
+ self.assertEqual(len(entities), hints.get_limit()['limit'])
+ self.assertTrue(hints.get_limit()['truncated'])
+ self._match_with_list(entities, self.domain1_entity_lists[entity])
+
+ # Override with driver specific limit
+ if entity == 'project':
+ self.opt_in_group('assignment', list_limit=5)
+ else:
+ self.opt_in_group('identity', list_limit=5)
+
+ # Should get back just 5 users in domain1
+ hints = driver_hints.Hints()
+ hints.add_filter('domain_id', self.domain1['id'])
+ entities = self._list_entities(entity)(hints=hints)
+ self.assertEqual(len(entities), hints.get_limit()['limit'])
+ self._match_with_list(entities, self.domain1_entity_lists[entity])
+
+ # Finally, let's pretend we want to get the full list of entities,
+ # even with the limits set, as part of some internal calculation.
+ # Calling the API without a hints list should achieve this, and
+ # return at least the 20 entries we created (there may be other
+ # entities lying around created by other tests/setup).
+ entities = self._list_entities(entity)()
+ self.assertTrue(len(entities) >= 20)
+
+ def test_list_users_filtered_and_limited(self):
+ self._test_list_entity_filtered_and_limited('user')
+
+ def test_list_groups_filtered_and_limited(self):
+ self._test_list_entity_filtered_and_limited('group')
+
+ def test_list_projects_filtered_and_limited(self):
+ self._test_list_entity_filtered_and_limited('project')
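
LimitTests leans on the driver-hints mechanism: filters and a list limit ride along in a Hints object, and the driver records whether the result was truncated. A sketch of that flow, assuming a loaded identity_api manager and an existing domain_id:

    from keystone.common import driver_hints

    def list_domain_users(identity_api, domain_id):
        hints = driver_hints.Hints()
        hints.add_filter('domain_id', domain_id)
        users = identity_api.list_users(hints=hints)
        limit = hints.get_limit()  # None, or {'limit': N, 'truncated': bool}
        if limit and limit.get('truncated'):
            print('list capped at %(limit)d entries' % limit)
        return users
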
diff --git a/keystone/tests/test_backend_federation_sql.py b/keystone/tests/test_backend_federation_sql.py
index 5c238e350..0ca2d962e 100644
--- a/keystone/tests/test_backend_federation_sql.py
+++ b/keystone/tests/test_backend_federation_sql.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/tests/test_backend_kvs.py b/keystone/tests/test_backend_kvs.py
index 683a538bb..cf600fecb 100644
--- a/keystone/tests/test_backend_kvs.py
+++ b/keystone/tests/test_backend_kvs.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -64,18 +62,6 @@ class KvsIdentity(tests.TestCase, test_backend.IdentityTests):
def test_move_project_between_domains_with_clashing_names_fails(self):
self.skipTest('Blocked by bug 1119770')
- def test_delete_user_grant_no_user(self):
- # See bug 1239476, kvs checks if user exists and sql does not.
- self.assertRaises(
- exception.UserNotFound,
- super(KvsIdentity, self).test_delete_user_grant_no_user)
-
- def test_delete_group_grant_no_group(self):
- # See bug 1239476, kvs checks if group exists and sql does not.
- self.assertRaises(
- exception.GroupNotFound,
- super(KvsIdentity, self).test_delete_group_grant_no_group)
-
class KvsToken(tests.TestCase, test_backend.TokenTests):
def setUp(self):
diff --git a/keystone/tests/test_backend_ldap.py b/keystone/tests/test_backend_ldap.py
index 0d8bc6652..4ae555778 100644
--- a/keystone/tests/test_backend_ldap.py
+++ b/keystone/tests/test_backend_ldap.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
# Copyright 2013 IBM Corp.
#
@@ -112,6 +110,16 @@ class BaseLDAPIdentity(test_backend.IdentityTests):
self.identity_api.delete_user,
self.user_foo['id'])
+ def test_configurable_forbidden_create_existing_user(self):
+ conf = self.get_config(CONF.identity.default_domain_id)
+ conf.ldap.user_allow_create = False
+ self.reload_backends(CONF.identity.default_domain_id)
+
+ self.assertRaises(exception.ForbiddenAction,
+ self.identity_api.create_user,
+ self.user_foo['id'],
+ self.user_foo)
+
def test_user_filter(self):
user_ref = self.identity_api.get_user(self.user_foo['id'])
self.user_foo.pop('password')
@@ -439,6 +447,7 @@ class BaseLDAPIdentity(test_backend.IdentityTests):
self.assertRaises(AssertionError,
self.identity_api.authenticate,
+ context={},
user_id=user['id'],
password=None,
domain_scope=user['domain_id'])
@@ -531,6 +540,40 @@ class LDAPIdentity(tests.TestCase, BaseLDAPIdentity):
self.assignment_api.get_project,
'fake1')
+ def test_configurable_subtree_delete(self):
+ self.opt_in_group('ldap', allow_subtree_delete=True)
+ self.load_backends()
+
+ project1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
+ 'domain_id': CONF.identity.default_domain_id}
+ self.assignment_api.create_project(project1['id'], project1)
+
+ role1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+ self.assignment_api.create_role(role1['id'], role1)
+
+ user1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
+ 'domain_id': CONF.identity.default_domain_id,
+ 'password': uuid.uuid4().hex,
+ 'enabled': True}
+ self.identity_api.create_user(user1['id'], user1)
+
+ self.assignment_api.add_role_to_user_and_project(
+ user_id=user1['id'],
+ tenant_id=project1['id'],
+ role_id=role1['id'])
+
+ self.assignment_api.delete_project(project1['id'])
+ self.assertRaises(exception.ProjectNotFound,
+ self.assignment_api.get_project,
+ project1['id'])
+
+ self.assignment_api.create_project(project1['id'], project1)
+
+ roles = self.assignment_api.get_roles_for_user_and_project(
+ user1['id'],
+ project1['id'])
+ self.assertEqual(0, len(roles))
+
def test_configurable_forbidden_project_actions(self):
CONF.ldap.tenant_allow_create = False
CONF.ldap.tenant_allow_update = False
@@ -1136,7 +1179,7 @@ class LDAPIdentityEnabledEmulation(LDAPIdentity):
"Enabled emulation conflicts with enabled mask")
-class LdapIdentitySqlAssignment(sql.Base, tests.TestCase, BaseLDAPIdentity):
+class LdapIdentitySqlAssignment(tests.TestCase, BaseLDAPIdentity):
def _set_config(self):
self.config([tests.dirs.etc('keystone.conf.sample'),
@@ -1199,7 +1242,7 @@ class LdapIdentitySqlAssignment(sql.Base, tests.TestCase, BaseLDAPIdentity):
self.skipTest('Blocked by bug 1221805')
-class MultiLDAPandSQLIdentity(sql.Base, tests.TestCase, BaseLDAPIdentity):
+class MultiLDAPandSQLIdentity(tests.TestCase, BaseLDAPIdentity):
"""Class to test common SQL plus individual LDAP backends.
We define a set of domains and domain-specific backends:
diff --git a/keystone/tests/test_backend_memcache.py b/keystone/tests/test_backend_memcache.py
deleted file mode 100644
index 9da8bbe4b..000000000
--- a/keystone/tests/test_backend_memcache.py
+++ /dev/null
@@ -1,238 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2012 OpenStack Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import copy
-import datetime
-import uuid
-
-import memcache
-import six
-
-from keystone.common import utils
-from keystone import config
-from keystone import exception
-from keystone.openstack.common import jsonutils
-from keystone.openstack.common import timeutils
-from keystone import tests
-from keystone.tests import test_backend
-from keystone.tests import test_utils
-from keystone import token
-from keystone.token.backends import memcache as token_memcache
-
-CONF = config.CONF
-
-
-class MemcacheClient(object):
- """Replicates a tiny subset of memcached client interface."""
-
- def __init__(self, *args, **kwargs):
- """Ignores the passed in args."""
- self.cache = {}
- self.reject_cas = False
-
- def add(self, key, value):
- if self.get(key):
- return False
- return self.set(key, value)
-
- def append(self, key, value):
- existing_value = self.get(key)
- if existing_value:
- self.set(key, existing_value + value)
- return True
- return False
-
- def check_key(self, key):
- if not isinstance(key, str):
- raise memcache.Client.MemcachedStringEncodingError()
-
- def gets(self, key):
- #Call self.get() since we don't really do 'cas' here.
- return self.get(key)
-
- def get(self, key):
- """Retrieves the value for a key or None."""
- self.check_key(key)
- obj = self.cache.get(key)
- now = utils.unixtime(timeutils.utcnow())
- if obj and (obj[1] == 0 or obj[1] > now):
- # NOTE(morganfainberg): This behaves more like memcache
- # actually does and prevents modification of the passed in
- # reference from affecting the cached back-end data. This makes
- # tests a little easier to write.
- #
- # The back-end store should only change with an explicit
- # set/delete/append/etc
- data_copy = copy.deepcopy(obj[0])
- return data_copy
-
- def set(self, key, value, time=0):
- """Sets the value for a key."""
- self.check_key(key)
- # NOTE(morganfainberg): This behaves more like memcache
- # actually does and prevents modification of the passed in
- # reference from affecting the cached back-end data. This makes
- # tests a little easier to write.
- #
- # The back-end store should only change with an explicit
- # set/delete/append/etc
- data_copy = copy.deepcopy(value)
- self.cache[key] = (data_copy, time)
- return True
-
- def cas(self, key, value, time=0, min_compress_len=0):
- # Call self.set() since we don't really do 'cas' here.
- if self.reject_cas:
- return False
- return self.set(key, value, time=time)
-
- def reset_cas(self):
- #This is a stub for the memcache client reset_cas function.
- pass
-
- def delete(self, key):
- self.check_key(key)
- try:
- del self.cache[key]
- except KeyError:
- #NOTE(bcwaldon): python-memcached always returns the same value
- pass
-
-
-class MemcacheToken(tests.TestCase, test_backend.TokenTests):
- def setUp(self):
- super(MemcacheToken, self).setUp()
- # Use the memcache backend for the token driver.
- self.opt_in_group('token',
- driver='keystone.token.backends.memcache.Token')
- self.load_backends()
- # Override the memcache client with the "dummy" client.
- fake_client = MemcacheClient()
- self.token_man = token.Manager()
- self.token_man.driver = token_memcache.Token(client=fake_client)
- self.token_api = self.token_man
-
- def test_create_unicode_token_id(self):
- token_id = six.text_type(self._create_token_id())
- data = {'id': token_id, 'a': 'b',
- 'user': {'id': 'testuserid'}}
- self.token_api.create_token(token_id, data)
- self.token_api.get_token(token_id)
-
- def test_create_unicode_user_id(self):
- token_id = self._create_token_id()
- user_id = six.text_type(uuid.uuid4().hex)
- data = {'id': token_id, 'a': 'b',
- 'user': {'id': user_id}}
- self.token_api.create_token(token_id, data)
- self.token_api.get_token(token_id)
-
- def test_list_tokens_unicode_user_id(self):
- user_id = six.text_type(uuid.uuid4().hex)
- self.token_api.list_tokens(user_id)
-
- def test_flush_expired_token(self):
- self.assertRaises(exception.NotImplemented,
- self.token_api.flush_expired_tokens)
-
- def test_cleanup_user_index_on_create(self):
- valid_token_id = uuid.uuid4().hex
- second_valid_token_id = uuid.uuid4().hex
- expired_token_id = uuid.uuid4().hex
- user_id = six.text_type(uuid.uuid4().hex)
-
- expire_delta = datetime.timedelta(seconds=CONF.token.expiration)
-
- valid_data = {'id': valid_token_id, 'a': 'b',
- 'user': {'id': user_id}}
- second_valid_data = {'id': second_valid_token_id, 'a': 'b',
- 'user': {'id': user_id}}
- expired_data = {'id': expired_token_id, 'a': 'b',
- 'user': {'id': user_id}}
- self.token_api.create_token(valid_token_id, valid_data)
- self.token_api.create_token(expired_token_id, expired_data)
- # NOTE(morganfainberg): Directly access the data cache since we need to
- # get expired tokens as well as valid tokens. token_api._list_tokens()
- # will not return any expired tokens in the list.
- user_key = self.token_api.driver._prefix_user_id(user_id)
- user_record = self.token_api.driver.client.get(user_key)
- user_token_list = jsonutils.loads('[%s]' % user_record)
- self.assertEqual(len(user_token_list), 2)
- expired_token_ptk = self.token_api.driver._prefix_token_id(
- expired_token_id)
- expired_token = self.token_api.driver.client.get(expired_token_ptk)
- expired_token['expires'] = (timeutils.utcnow() - expire_delta)
- self.token_api.driver.client.set(expired_token_ptk, expired_token)
-
- self.token_api.create_token(second_valid_token_id, second_valid_data)
- user_record = self.token_api.driver.client.get(user_key)
- user_token_list = jsonutils.loads('[%s]' % user_record)
- self.assertEqual(len(user_token_list), 2)
-
- def test_cas_failure(self):
- self.token_api.driver.client.reject_cas = True
- token_id = uuid.uuid4().hex
- user_id = six.text_type(uuid.uuid4().hex)
- user_key = self.token_api.driver._prefix_user_id(user_id)
- token_data = jsonutils.dumps(token_id)
- self.assertRaises(
- exception.UnexpectedError,
- self.token_api.driver._update_user_list_with_cas,
- user_key, token_data)
-
- def test_token_expire_timezone(self):
-
- @test_utils.timezone
- def _create_token(expire_time):
- token_id = uuid.uuid4().hex
- user_id = six.text_type(uuid.uuid4().hex)
- data = {'id': token_id, 'a': 'b', 'user': {'id': user_id},
- 'expires': expire_time
- }
- self.token_api.create_token(token_id, data)
- return data
-
- for d in ['+0', '-11', '-8', '-5', '+5', '+8', '+14']:
- test_utils.TZ = 'UTC' + d
- expire_time = timeutils.utcnow() + \
- datetime.timedelta(minutes=1)
- data_in = _create_token(expire_time)
- data_get = None
- data_get = self.token_api.get_token(data_in['id'])
-
- self.assertIsNotNone(data_get, "TZ=%s" % test_utils.TZ)
- self.assertEqual(data_in['id'], data_get['id'],
- "TZ=%s" % test_utils.TZ)
-
- expire_time_expired = timeutils.utcnow() + \
- datetime.timedelta(minutes=-1)
- data_in = _create_token(expire_time_expired)
- self.assertRaises(exception.TokenNotFound,
- self.token_api.get_token, data_in['id'])
-
-
-class MemcacheTokenCacheInvalidation(tests.TestCase,
- test_backend.TokenCacheInvalidation):
- def setUp(self):
- super(MemcacheTokenCacheInvalidation, self).setUp()
- CONF.token.driver = 'keystone.token.backends.memcache.Token'
- self.load_backends()
- fake_client = MemcacheClient()
- self.token_man = token.Manager()
- self.token_man.driver = token_memcache.Token(client=fake_client)
- self.token_api = self.token_man
- self.token_provider_api.driver.token_api = self.token_api
- self._create_test_data()
diff --git a/keystone/tests/test_backend_pam.py b/keystone/tests/test_backend_pam.py
index cb4906cc3..ec27f7569 100644
--- a/keystone/tests/test_backend_pam.py
+++ b/keystone/tests/test_backend_pam.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/tests/test_backend_sql.py b/keystone/tests/test_backend_sql.py
index 3291056f5..b07956123 100644
--- a/keystone/tests/test_backend_sql.py
+++ b/keystone/tests/test_backend_sql.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -34,7 +32,7 @@ CONF = config.CONF
DEFAULT_DOMAIN_ID = CONF.identity.default_domain_id
-class SqlTests(tests.TestCase, sql.Base):
+class SqlTests(tests.TestCase):
def setUp(self):
super(SqlTests, self).setUp()
@@ -44,14 +42,6 @@ class SqlTests(tests.TestCase, sql.Base):
self.load_backends()
- # create tables and keep an engine reference for cleanup.
- # this must be done after the models are loaded by the managers.
- self.engine = db_session.get_engine()
- self.addCleanup(db_session.cleanup)
-
- sql.ModelBase.metadata.create_all(bind=self.engine)
- self.addCleanup(sql.ModelBase.metadata.drop_all, bind=self.engine)
-
# populate the engine with tables & fixtures
self.load_fixtures(default_fixtures)
#defaulted by the data load
@@ -116,29 +106,13 @@ class SqlModels(SqlTests):
('name', sql.String, 255))
self.assertExpectedSchema('role', cols)
- def test_user_project_metadata_model(self):
- cols = (('user_id', sql.String, 64),
- ('project_id', sql.String, 64),
- ('data', sql.JsonBlob, None))
- self.assertExpectedSchema('user_project_metadata', cols)
-
- def test_user_domain_metadata_model(self):
- cols = (('user_id', sql.String, 64),
- ('domain_id', sql.String, 64),
- ('data', sql.JsonBlob, None))
- self.assertExpectedSchema('user_domain_metadata', cols)
-
- def test_group_project_metadata_model(self):
- cols = (('group_id', sql.String, 64),
- ('project_id', sql.String, 64),
- ('data', sql.JsonBlob, None))
- self.assertExpectedSchema('group_project_metadata', cols)
-
- def test_group_domain_metadata_model(self):
- cols = (('group_id', sql.String, 64),
- ('domain_id', sql.String, 64),
- ('data', sql.JsonBlob, None))
- self.assertExpectedSchema('group_domain_metadata', cols)
+ def test_role_assignment_model(self):
+ cols = (('type', sql.Enum, None),
+ ('actor_id', sql.String, 64),
+ ('target_id', sql.String, 64),
+ ('role_id', sql.String, 64),
+ ('inherited', sql.Boolean, False))
+ self.assertExpectedSchema('assignment', cols)
def test_user_group_membership(self):
cols = (('group_id', sql.String, 64),
@@ -494,3 +468,9 @@ class SqlTokenCacheInvalidation(SqlTests, test_backend.TokenCacheInvalidation):
class SqlFilterTests(SqlTests, test_backend.FilterTests):
pass
+
+
+class SqlLimitTests(SqlTests, test_backend.LimitTests):
+ def setUp(self):
+ super(SqlLimitTests, self).setUp()
+ test_backend.LimitTests.setUp(self)
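
test_role_assignment_model above pins down the unified assignment table that replaces the four *_metadata tables. A rough SQLAlchemy sketch of that schema; the column names and sizes come from the test, while the enum values are an assumption:

    import sqlalchemy as sa

    metadata = sa.MetaData()
    assignment = sa.Table(
        'assignment', metadata,
        # assumed enum values: one per (user|group) x (project|domain) pair
        sa.Column('type', sa.Enum('UserProject', 'GroupProject',
                                  'UserDomain', 'GroupDomain', name='type'),
                  nullable=False),
        sa.Column('actor_id', sa.String(64), nullable=False),   # user or group
        sa.Column('target_id', sa.String(64), nullable=False),  # project or domain
        sa.Column('role_id', sa.String(64), nullable=False),
        sa.Column('inherited', sa.Boolean, default=False, nullable=False),
    )
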
diff --git a/keystone/tests/test_backend_templated.py b/keystone/tests/test_backend_templated.py
index c0c55e129..c2fa559e5 100644
--- a/keystone/tests/test_backend_templated.py
+++ b/keystone/tests/test_backend_templated.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/tests/test_base64utils.py b/keystone/tests/test_base64utils.py
index 27da716b7..1af09c37f 100644
--- a/keystone/tests/test_base64utils.py
+++ b/keystone/tests/test_base64utils.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/tests/test_cache.py b/keystone/tests/test_cache.py
index 12a0508ab..9918f5866 100644
--- a/keystone/tests/test_cache.py
+++ b/keystone/tests/test_cache.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 Metacloud
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -74,9 +72,6 @@ class TestProxyValue(object):
class CacheRegionTest(tests.TestCase):
- def __init__(self, *args, **kwargs):
- super(CacheRegionTest, self).__init__(*args, **kwargs)
- self.region = None
def setUp(self):
super(CacheRegionTest, self).setUp()
@@ -228,9 +223,6 @@ class CacheRegionTest(tests.TestCase):
class CacheNoopBackendTest(tests.TestCase):
- def __init__(self, *args, **kwargs):
- super(CacheNoopBackendTest, self).__init__(*args, **kwargs)
- self.region = None
def setUp(self):
super(CacheNoopBackendTest, self).setUp()
diff --git a/keystone/tests/test_catalog.py b/keystone/tests/test_catalog.py
index 5579b1732..559d95616 100644
--- a/keystone/tests/test_catalog.py
+++ b/keystone/tests/test_catalog.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/tests/test_cert_setup.py b/keystone/tests/test_cert_setup.py
index 4e9e9146c..a672787bf 100644
--- a/keystone/tests/test_cert_setup.py
+++ b/keystone/tests/test_cert_setup.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -22,6 +20,7 @@ from keystone.common import openssl
from keystone import exception
from keystone import tests
from keystone.tests import default_fixtures
+from keystone.tests import rest
from keystone import token
@@ -34,7 +33,7 @@ CERTDIR = os.path.join(SSLDIR, 'certs')
KEYDIR = os.path.join(SSLDIR, 'private')
-class CertSetupTestCase(tests.TestCase):
+class CertSetupTestCase(rest.RestfulTestCase):
def setUp(self):
super(CertSetupTestCase, self).setUp()
@@ -98,3 +97,41 @@ class CertSetupTestCase(tests.TestCase):
self.assertTrue(os.path.exists(CONF.ssl.ca_certs))
self.assertTrue(os.path.exists(CONF.ssl.certfile))
self.assertTrue(os.path.exists(CONF.ssl.keyfile))
+
+ def test_fetch_signing_cert(self):
+ pki = openssl.ConfigurePKI(None, None)
+ pki.run()
+
+ # NOTE(jamielennox): Use request directly because certificate
+ # requests don't have some of the normal information
+ signing_resp = self.request(self.public_app,
+ '/v2.0/certificates/signing',
+ method='GET', expected_status=200)
+
+ cacert_resp = self.request(self.public_app,
+ '/v2.0/certificates/ca',
+ method='GET', expected_status=200)
+
+ with open(CONF.signing.certfile) as f:
+ self.assertEqual(signing_resp.text, f.read())
+
+ with open(CONF.signing.ca_certs) as f:
+ self.assertEqual(cacert_resp.text, f.read())
+
+ # NOTE(jamielennox): This is weird behaviour that we need to enforce.
+ # It doesn't matter what you ask for it's always going to give text
+ # with a text/html content_type.
+
+ for path in ['/v2.0/certificates/signing', '/v2.0/certificates/ca']:
+ for accept in [None, 'text/html', 'application/json', 'text/xml']:
+ headers = {'Accept': accept} if accept else {}
+ resp = self.request(self.public_app, path, method='GET',
+ expected_status=200,
+ headers=headers)
+
+ self.assertEqual(resp.content_type, 'text/html')
+
+ def test_failure(self):
+ for path in ['/v2.0/certificates/signing', '/v2.0/certificates/ca']:
+ self.request(self.public_app, path, method='GET',
+ expected_status=500)
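The new tests above exercise the unauthenticated /v2.0/certificates
endpoints. A minimal client-side sketch of the same behaviour, assuming the
requests library and a keystone listening on localhost:5000 (both are
illustrative assumptions, not part of this change):

    import requests

    # No token is needed; the response body is the PEM text of the cert.
    resp = requests.get('http://localhost:5000/v2.0/certificates/signing')
    assert resp.status_code == 200
    # As the test above enforces, the content_type is always text/html,
    # regardless of the Accept header sent.
    assert resp.headers['content-type'].startswith('text/html')
    signing_pem = resp.text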
diff --git a/keystone/tests/test_config.py b/keystone/tests/test_config.py
index 35d95b8f1..9998c8326 100644
--- a/keystone/tests/test_config.py
+++ b/keystone/tests/test_config.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/tests/test_content_types.py b/keystone/tests/test_content_types.py
index 73dbb56cd..8a2d18594 100644
--- a/keystone/tests/test_content_types.py
+++ b/keystone/tests/test_content_types.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -253,15 +251,14 @@ class CoreApiTests(object):
self.assertValidTenantResponse(r)
def test_get_user_roles(self):
- self.skipTest('Blocked by bug 933565')
+ # The server responds with a 501 Not Implemented. See bug 933565.
token = self.get_scoped_token()
- r = self.admin_request(
+ self.admin_request(
path='/v2.0/users/%(user_id)s/roles' % {
'user_id': self.user_foo['id'],
},
- token=token)
- self.assertValidRoleListResponse(r)
+ token=token, expected_status=501)
def test_get_user_roles_with_tenant(self):
token = self.get_scoped_token()
@@ -1042,7 +1039,7 @@ class JsonTestCase(RestfulTestCase, CoreApiTests, LegacyV2UsernameTests):
if require_service_catalog:
self.assertIsNotNone(serviceCatalog)
if serviceCatalog is not None:
- self.assertTrue(isinstance(serviceCatalog, list))
+ self.assertIsInstance(serviceCatalog, list)
if require_service_catalog:
self.assertNotEmpty(serviceCatalog)
for service in r.result['access']['serviceCatalog']:
diff --git a/keystone/tests/test_contrib_s3_core.py b/keystone/tests/test_contrib_s3_core.py
index 66f8066a9..8d3c56129 100644
--- a/keystone/tests/test_contrib_s3_core.py
+++ b/keystone/tests/test_contrib_s3_core.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/tests/test_contrib_simple_cert.py b/keystone/tests/test_contrib_simple_cert.py
index a71b43499..e81f5ed55 100644
--- a/keystone/tests/test_contrib_simple_cert.py
+++ b/keystone/tests/test_contrib_simple_cert.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
diff --git a/keystone/tests/test_contrib_stats_core.py b/keystone/tests/test_contrib_stats_core.py
index 85356940a..9060f1fd3 100644
--- a/keystone/tests/test_contrib_stats_core.py
+++ b/keystone/tests/test_contrib_stats_core.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/tests/test_driver_hints.py b/keystone/tests/test_driver_hints.py
index 2592ad2d9..bb2cba5c9 100644
--- a/keystone/tests/test_driver_hints.py
+++ b/keystone/tests/test_driver_hints.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -49,3 +47,16 @@ class ListHintsTests(test.TestCase):
hints2.add_filter('t4', 'data1')
hints2.add_filter('t5', 'data2')
self.assertEqual(len(hints.filters()), 2)
+
+ def test_limits(self):
+ hints = driver_hints.Hints()
+ self.assertIsNone(hints.get_limit())
+ hints.set_limit(10)
+ self.assertEqual(hints.get_limit()['limit'], 10)
+ self.assertFalse(hints.get_limit()['truncated'])
+ hints.set_limit(11)
+ self.assertEqual(hints.get_limit()['limit'], 11)
+ self.assertFalse(hints.get_limit()['truncated'])
+ hints.set_limit(10, truncated=True)
+ self.assertEqual(hints.get_limit()['limit'], 10)
+        self.assertTrue(hints.get_limit()['truncated'])
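The limit hint carries both the requested limit and a truncated flag. A
minimal sketch of how a backend might consume it, using only the
set_limit/get_limit calls shown above (list_limited is a hypothetical
helper, not part of this change):

    from keystone.common import driver_hints

    def list_limited(items, hints):
        # Apply a limit hint and record that the result was truncated.
        limit = hints.get_limit()
        if limit is None or len(items) <= limit['limit']:
            return items
        hints.set_limit(limit['limit'], truncated=True)
        return items[:limit['limit']]

    hints = driver_hints.Hints()
    hints.set_limit(2)
    assert list_limited(['a', 'b', 'c'], hints) == ['a', 'b']
    assert hints.get_limit()['truncated']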
diff --git a/keystone/tests/test_exception.py b/keystone/tests/test_exception.py
index d8158f0a6..10a79a648 100644
--- a/keystone/tests/test_exception.py
+++ b/keystone/tests/test_exception.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/tests/test_injection.py b/keystone/tests/test_injection.py
index 34250a11c..acfc87773 100644
--- a/keystone/tests/test_injection.py
+++ b/keystone/tests/test_injection.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/tests/test_ipv6.py b/keystone/tests/test_ipv6.py
index 24838c8e4..02c5bea2d 100644
--- a/keystone/tests/test_ipv6.py
+++ b/keystone/tests/test_ipv6.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/keystone/tests/test_keystoneclient.py b/keystone/tests/test_keystoneclient.py
index cce7c13b1..abce4277c 100644
--- a/keystone/tests/test_keystoneclient.py
+++ b/keystone/tests/test_keystoneclient.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -657,14 +655,12 @@ class KeystoneClientTests(object):
client.users.update,
user=uuid.uuid4().hex)
- def test_user_update_tenant_404(self):
- self.skipTest('N/A')
- from keystoneclient import exceptions as client_exceptions
+ def test_user_update_tenant(self):
client = self.get_client(admin=True)
- self.assertRaises(client_exceptions.NotFound,
- client.users.update,
- user=self.user_foo['id'],
- tenant_id=uuid.uuid4().hex)
+ tenant_id = uuid.uuid4().hex
+ user = client.users.update(user=self.user_foo['id'],
+ tenant_id=tenant_id)
+ self.assertEqual(tenant_id, user.tenant_id)
def test_user_update_password_404(self):
from keystoneclient import exceptions as client_exceptions
@@ -898,14 +894,16 @@ class KeystoneClientTests(object):
self.assertRaises(client_exceptions.NotFound,
client.roles.add_user_role,
tenant=self.tenant_baz['id'],
- user=uuid.uuid4().hex,
- role=self.role_member['id'])
- self.assertRaises(client_exceptions.NotFound,
- client.roles.add_user_role,
- tenant=self.tenant_baz['id'],
user=self.user_foo['id'],
role=uuid.uuid4().hex)
+ def test_user_role_add_no_user(self):
+    # add_user_role does not fail when the user does not exist.
+ client = self.get_client(admin=True)
+ client.roles.add_user_role(tenant=self.tenant_baz['id'],
+ user=uuid.uuid4().hex,
+ role=self.role_member['id'])
+
def test_user_role_remove_404(self):
from keystoneclient import exceptions as client_exceptions
client = self.get_client(admin=True)
diff --git a/keystone/tests/test_keystoneclient_sql.py b/keystone/tests/test_keystoneclient_sql.py
index a8b562abe..b268098ec 100644
--- a/keystone/tests/test_keystoneclient_sql.py
+++ b/keystone/tests/test_keystoneclient_sql.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -19,7 +17,6 @@ import uuid
from keystoneclient.contrib.ec2 import utils as ec2_utils
-from keystone.common import sql
from keystone import config
from keystone import tests
from keystone.tests import test_keystoneclient
@@ -28,7 +25,7 @@ from keystone.tests import test_keystoneclient
CONF = config.CONF
-class KcMasterSqlTestCase(test_keystoneclient.KcMasterTestCase, sql.Base):
+class KcMasterSqlTestCase(test_keystoneclient.KcMasterTestCase):
def config(self, config_files):
super(KcMasterSqlTestCase, self).config([
tests.dirs.etc('keystone.conf.sample'),
diff --git a/keystone/tests/test_kvs.py b/keystone/tests/test_kvs.py
index 93bd5aebf..ebd82104d 100644
--- a/keystone/tests/test_kvs.py
+++ b/keystone/tests/test_kvs.py
@@ -14,14 +14,18 @@
# License for the specific language governing permissions and limitations
# under the License.
+import time
import uuid
from dogpile.cache import api
-from dogpile.cache.backends import memcached as dogpile_memcached
from dogpile.cache import proxy
from dogpile.cache import util
+import mock
+import six
+from testtools import matchers
from keystone.common.kvs.backends import inmemdb
+from keystone.common.kvs.backends import memcached
from keystone.common.kvs import core
from keystone import exception
from keystone import tests
@@ -83,6 +87,68 @@ class RegionProxy2Fixture(proxy.ProxyBackend):
"""A test dogpile.cache proxy that does nothing."""
+class TestMemcacheDriver(api.CacheBackend):
+ """A test dogpile.cache backend that conforms to the mixin-mechanism for
+ overriding set and set_multi methods on dogpile memcached drivers.
+ """
+ class test_client(object):
+ # FIXME(morganfainberg): Convert this test client over to using mock
+ # and/or mock.MagicMock as appropriate
+
+ def __init__(self):
+ self.__name__ = 'TestingMemcacheDriverClientObject'
+ self.set_arguments_passed = None
+ self.keys_values = {}
+ self.lock_set_time = None
+ self.lock_expiry = None
+
+ def set(self, key, value, **set_arguments):
+ self.keys_values.clear()
+ self.keys_values[key] = value
+ self.set_arguments_passed = set_arguments
+
+ def set_multi(self, mapping, **set_arguments):
+ self.keys_values.clear()
+ self.keys_values = mapping
+ self.set_arguments_passed = set_arguments
+
+ def add(self, key, value, expiry_time):
+ # NOTE(morganfainberg): `add` is used in this case for the
+ # memcache lock testing. If further testing is required around the
+ # actual memcache `add` interface, this method should be
+            # expanded to work more like the actual memcache `add` function.
+ if self.lock_expiry is not None and self.lock_set_time is not None:
+ if time.time() - self.lock_set_time < self.lock_expiry:
+ return False
+ self.lock_expiry = expiry_time
+ self.lock_set_time = time.time()
+ return True
+
+ def delete(self, key):
+ # NOTE(morganfainberg): `delete` is used in this case for the
+ # memcache lock testing. If further testing is required around the
+ # actual memcache `delete` interface, this method should be
+ # expanded to work more like the actual memcache `delete` function.
+ self.lock_expiry = None
+ self.lock_set_time = None
+ return True
+
+ def __init__(self, arguments):
+ self.client = self.test_client()
+ self.set_arguments = {}
+ # NOTE(morganfainberg): This is the same logic as the dogpile backend
+ # since we need to mirror that functionality for the `set_argument`
+ # values to appear on the actual backend.
+ if 'memcached_expire_time' in arguments:
+ self.set_arguments['time'] = arguments['memcached_expire_time']
+
+ def set(self, key, value):
+ self.client.set(key, value, **self.set_arguments)
+
+ def set_multi(self, mapping):
+ self.client.set_multi(mapping, **self.set_arguments)
+
+
class KVSTest(tests.TestCase):
def setUp(self):
super(KVSTest, self).setUp()
@@ -90,6 +156,10 @@ class KVSTest(tests.TestCase):
self.value_foo = uuid.uuid4().hex
self.key_bar = 'bar_' + uuid.uuid4().hex
self.value_bar = {'complex_data_structure': uuid.uuid4().hex}
+ self.addCleanup(memcached.VALID_DOGPILE_BACKENDS.pop,
+ 'TestDriver',
+ None)
+ memcached.VALID_DOGPILE_BACKENDS['TestDriver'] = TestMemcacheDriver
def _get_kvs_region(self, name=None):
if name is None:
@@ -136,6 +206,9 @@ class KVSTest(tests.TestCase):
kvs.configure('openstack.kvs.Memory')
self.assertIs(kvs._region.key_mangler, util.sha1_mangle_key)
+ # The backend should also have the keymangler set the same as the
+ # region now.
+ self.assertIs(kvs._region.backend.key_mangler, util.sha1_mangle_key)
def test_kvs_key_mangler_configuration_backend(self):
kvs = self._get_kvs_region()
@@ -156,7 +229,18 @@ class KVSTest(tests.TestCase):
kvs = self._get_kvs_region()
kvs.configure('openstack.kvs.Memory')
- self.assertIs(kvs._region.key_mangler, None)
+ self.assertIsNone(kvs._region.key_mangler)
+ self.assertIsNone(kvs._region.backend.key_mangler)
+
+ def test_kvs_key_mangler_set_on_backend(self):
+ def test_key_mangler(key):
+ return key
+
+ kvs = self._get_kvs_region()
+ kvs.configure('openstack.kvs.Memory')
+ self.assertIs(kvs._region.backend.key_mangler, util.sha1_mangle_key)
+ kvs._set_key_mangler(test_key_mangler)
+ self.assertIs(kvs._region.backend.key_mangler, test_key_mangler)
def test_kvs_basic_get_set_delete(self):
# Test the basic get/set/delete actions on the KVS region
@@ -321,17 +405,180 @@ class KVSTest(tests.TestCase):
kvs.configure('openstack.kvs.Memory')
core._register_backends()
- def test_kvs_memcache_manager_valid_dogpile_memcache_backend(self):
+ def test_kvs_memcached_manager_valid_dogpile_memcached_backend(self):
kvs = self._get_kvs_region()
kvs.configure('openstack.kvs.Memcached',
- dogpile_memcache_backend='MemcachedBackend')
+ memcached_backend='TestDriver')
self.assertIsInstance(kvs._region.backend.driver,
- dogpile_memcached.MemcachedBackend)
+ TestMemcacheDriver)
- def test_kvs_memcache_manager_invalid_dogpile_memcache_backend(self):
+ def test_kvs_memcached_manager_invalid_dogpile_memcached_backend(self):
# Invalid dogpile memcache backend should raise ValueError
kvs = self._get_kvs_region()
self.assertRaises(ValueError,
kvs.configure,
backing_store='openstack.kvs.Memcached',
- dogpile_memcache_backend=uuid.uuid4().hex)
+ memcached_backend=uuid.uuid4().hex)
+
+ def test_kvs_memcache_manager_no_expiry_keys(self):
+ # Make sure the memcache backend recalculates the no-expiry keys
+ # correctly when a key-mangler is set on it.
+
+ def new_mangler(key):
+ return '_mangled_key_' + key
+
+ kvs = self._get_kvs_region()
+ no_expiry_keys = set(['test_key'])
+ kvs.configure('openstack.kvs.Memcached',
+ memcached_backend='TestDriver',
+ no_expiry_keys=no_expiry_keys)
+ calculated_keys = set([kvs._region.key_mangler(key)
+ for key in no_expiry_keys])
+ self.assertIs(kvs._region.backend.key_mangler, util.sha1_mangle_key)
+ self.assertSetEqual(calculated_keys,
+ kvs._region.backend.no_expiry_hashed_keys)
+ self.assertSetEqual(no_expiry_keys,
+ kvs._region.backend.raw_no_expiry_keys)
+ calculated_keys = set([new_mangler(key) for key in no_expiry_keys])
+ kvs._region.backend.key_mangler = new_mangler
+ self.assertSetEqual(calculated_keys,
+ kvs._region.backend.no_expiry_hashed_keys)
+ self.assertSetEqual(no_expiry_keys,
+ kvs._region.backend.raw_no_expiry_keys)
+
+ def test_kvs_memcache_key_mangler_set_to_none(self):
+ kvs = self._get_kvs_region()
+ no_expiry_keys = set(['test_key'])
+ kvs.configure('openstack.kvs.Memcached',
+ memcached_backend='TestDriver',
+ no_expiry_keys=no_expiry_keys)
+ self.assertIs(kvs._region.backend.key_mangler, util.sha1_mangle_key)
+ kvs._region.backend.key_mangler = None
+ self.assertSetEqual(kvs._region.backend.raw_no_expiry_keys,
+ kvs._region.backend.no_expiry_hashed_keys)
+ self.assertIsNone(kvs._region.backend.key_mangler)
+
+ def test_noncallable_key_mangler_set_on_driver_raises_type_error(self):
+ kvs = self._get_kvs_region()
+ kvs.configure('openstack.kvs.Memcached',
+ memcached_backend='TestDriver')
+ self.assertRaises(TypeError,
+ setattr,
+ kvs._region.backend,
+ 'key_mangler',
+ 'Non-Callable')
+
+ def test_kvs_memcache_set_arguments_and_memcache_expires_ttl(self):
+ # Test the "set_arguments" (arguments passed on all set calls) logic
+ # and the no-expiry-key modifications of set_arguments for the explicit
+ # memcache TTL.
+ self.opt_in_group('kvs', enable_key_mangler=False)
+ kvs = self._get_kvs_region()
+ memcache_expire_time = 86400
+
+ expected_set_args = {'time': memcache_expire_time}
+ expected_no_expiry_args = {}
+
+ expected_foo_keys = [self.key_foo]
+ expected_bar_keys = [self.key_bar]
+
+ mapping_foo = dict([(self.key_foo, self.value_foo)])
+ mapping_bar = dict([(self.key_bar, self.value_bar)])
+
+ kvs.configure(backing_store='openstack.kvs.Memcached',
+ memcached_backend='TestDriver',
+ memcached_expire_time=memcache_expire_time,
+ some_other_arg=uuid.uuid4().hex,
+ no_expiry_keys=[self.key_bar])
+ # Ensure the set_arguments are correct
+ self.assertDictEqual(
+ kvs._region.backend._get_set_arguments_driver_attr(),
+ expected_set_args)
+
+ # Set a key that would have an expiry and verify the correct result
+ # occurred and that the correct set_arguments were passed.
+ kvs.set(self.key_foo, self.value_foo)
+ self.assertDictEqual(
+ kvs._region.backend.driver.client.set_arguments_passed,
+ expected_set_args)
+ self.assertEqual(kvs._region.backend.driver.client.keys_values.keys(),
+ expected_foo_keys)
+ self.assertEqual(
+ kvs._region.backend.driver.client.keys_values[self.key_foo][0],
+ self.value_foo)
+
+ # Set a key that would not have an expiry and verify the correct result
+ # occurred and that the correct set_arguments were passed.
+ kvs.set(self.key_bar, self.value_bar)
+ self.assertDictEqual(
+ kvs._region.backend.driver.client.set_arguments_passed,
+ expected_no_expiry_args)
+ self.assertEqual(kvs._region.backend.driver.client.keys_values.keys(),
+ expected_bar_keys)
+ self.assertEqual(
+ kvs._region.backend.driver.client.keys_values[self.key_bar][0],
+ self.value_bar)
+
+ # set_multi a dict that would have an expiry and verify the correct
+ # result occurred and that the correct set_arguments were passed.
+ kvs.set_multi(mapping_foo)
+ self.assertDictEqual(
+ kvs._region.backend.driver.client.set_arguments_passed,
+ expected_set_args)
+ self.assertEqual(kvs._region.backend.driver.client.keys_values.keys(),
+ expected_foo_keys)
+ self.assertEqual(
+ kvs._region.backend.driver.client.keys_values[self.key_foo][0],
+ self.value_foo)
+
+ # set_multi a dict that would not have an expiry and verify the correct
+ # result occurred and that the correct set_arguments were passed.
+ kvs.set_multi(mapping_bar)
+ self.assertDictEqual(
+ kvs._region.backend.driver.client.set_arguments_passed,
+ expected_no_expiry_args)
+ self.assertEqual(kvs._region.backend.driver.client.keys_values.keys(),
+ expected_bar_keys)
+ self.assertEqual(
+ kvs._region.backend.driver.client.keys_values[self.key_bar][0],
+ self.value_bar)
+
+ def test_memcached_lock_max_lock_attempts(self):
+ kvs = self._get_kvs_region()
+ max_lock_attempts = 1
+ test_key = uuid.uuid4().hex
+
+ kvs.configure(backing_store='openstack.kvs.Memcached',
+ memcached_backend='TestDriver',
+ max_lock_attempts=max_lock_attempts)
+
+ self.assertEqual(kvs._region.backend.max_lock_attempts,
+ max_lock_attempts)
+ # Simple Lock success test
+ with kvs.get_lock(test_key) as lock:
+ kvs.set(test_key, 'testing', lock)
+
+ def lock_within_a_lock(key):
+ with kvs.get_lock(key) as first_lock:
+ kvs.set(test_key, 'lock', first_lock)
+ with kvs.get_lock(key) as second_lock:
+ kvs.set(key, 'lock-within-a-lock', second_lock)
+
+ self.assertRaises(exception.UnexpectedError,
+ lock_within_a_lock,
+ key=test_key)
+
+
+class TestMemcachedBackend(tests.TestCase):
+
+ @mock.patch('__builtin__._', six.text_type)
+ def test_invalid_backend_fails_initialization(self):
+ raises_valueerror = matchers.Raises(matchers.MatchesException(
+ ValueError, r'.*FakeBackend.*'))
+
+ options = {
+ 'url': 'needed to get to the focus of this test (the backend)',
+ 'memcached_backend': 'FakeBackend',
+ }
+ self.assertThat(lambda: memcached.MemcachedBackend(options),
+ raises_valueerror)
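For reference, a minimal sketch of the configuration surface these tests
exercise, with illustrative region and key names; the keyword arguments
mirror the configure() calls above, and get_key_value_store is assumed to
be the usual entry point for obtaining a named region:

    from keystone.common import kvs

    store = kvs.get_key_value_store('my-region')  # hypothetical region name
    store.configure(backing_store='openstack.kvs.Memcached',
                    memcached_backend='memcached',  # a registered driver name
                    memcached_expire_time=86400,    # becomes set()'s 'time' arg
                    no_expiry_keys=['revocation-list'],  # stored without a TTL
                    max_lock_attempts=2)
    with store.get_lock('some-key') as lock:
        store.set('some-key', 'some-value', lock)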
diff --git a/keystone/tests/test_matchers.py b/keystone/tests/test_matchers.py
index a8b1b53c0..b1c324a2d 100644
--- a/keystone/tests/test_matchers.py
+++ b/keystone/tests/test_matchers.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/tests/test_middleware.py b/keystone/tests/test_middleware.py
index a2760bfe2..6f3cfaa61 100644
--- a/keystone/tests/test_middleware.py
+++ b/keystone/tests/test_middleware.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/tests/test_no_admin_token_auth.py b/keystone/tests/test_no_admin_token_auth.py
index 5c2817903..9b0bdf48a 100644
--- a/keystone/tests/test_no_admin_token_auth.py
+++ b/keystone/tests/test_no_admin_token_auth.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/tests/test_notifications.py b/keystone/tests/test_notifications.py
index b1befa2f9..da8da5e9d 100644
--- a/keystone/tests/test_notifications.py
+++ b/keystone/tests/test_notifications.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -16,14 +14,18 @@
import uuid
+import mock
+from oslo.config import cfg
+
from keystone.common import dependency
from keystone import notifications
from keystone.openstack.common.fixture import moxstubout
-from keystone.openstack.common.notifier import api as notifier_api
from keystone import tests
from keystone.tests import test_v3
+CONF = cfg.CONF
+
EXP_RESOURCE_TYPE = uuid.uuid4().hex
@@ -37,14 +39,13 @@ class NotificationsWrapperTestCase(tests.TestCase):
self.exp_resource_id = None
self.exp_operation = None
- self.exp_host = None
self.send_notification_called = False
- def fake_notify(operation, resource_type, resource_id, host=None):
+ def fake_notify(operation, resource_type, resource_id,
+ public=True):
self.assertEqual(self.exp_operation, operation)
self.assertEqual(EXP_RESOURCE_TYPE, resource_type)
self.assertEqual(self.exp_resource_id, resource_id)
- self.assertEqual(self.exp_host, host)
self.send_notification_called = True
fixture = self.useFixture(moxstubout.MoxStubout())
@@ -62,7 +63,6 @@ class NotificationsWrapperTestCase(tests.TestCase):
exp_resource_data = {
'id': self.exp_resource_id,
'key': uuid.uuid4().hex}
- self.exp_host = None
self.create_resource(self.exp_resource_id, exp_resource_data)
self.assertTrue(self.send_notification_called)
@@ -77,7 +77,6 @@ class NotificationsWrapperTestCase(tests.TestCase):
exp_resource_data = {
'id': self.exp_resource_id,
'key': uuid.uuid4().hex}
- self.exp_host = None
self.update_resource(self.exp_resource_id, exp_resource_data)
self.assertTrue(self.send_notification_called)
@@ -89,7 +88,6 @@ class NotificationsWrapperTestCase(tests.TestCase):
def test_resource_deleted_notification(self):
self.exp_operation = 'deleted'
self.exp_resource_id = uuid.uuid4().hex
- self.exp_host = None
self.delete_resource(self.exp_resource_id)
self.assertTrue(self.send_notification_called)
@@ -128,15 +126,17 @@ class NotificationsTestCase(tests.TestCase):
fixture = self.useFixture(moxstubout.MoxStubout())
self.stubs = fixture.stubs
+ # these should use self.opt(), but they haven't been registered yet
+ CONF.rpc_backend = 'fake'
+ CONF.notification_driver = ['fake']
+
def test_send_notification(self):
"""Test the private method _send_notification to ensure event_type,
payload, and context are built and passed properly.
"""
-
resource = uuid.uuid4().hex
resource_type = EXP_RESOURCE_TYPE
operation = 'created'
- host = None
# NOTE(ldbragst): Even though notifications._send_notification doesn't
        # contain logic that creates cases, this is supposed to test that
@@ -145,45 +145,80 @@ class NotificationsTestCase(tests.TestCase):
# agreed that context should be empty in Keystone's case, which is
# also noted in the /keystone/notifications.py module. This test
# ensures and maintains these conditions.
- def fake_notify(context, publisher_id, event_type, priority, payload):
- exp_event_type = 'identity.project.created'
- self.assertEqual(exp_event_type, event_type)
- exp_context = {}
- self.assertEqual(exp_context, context)
- exp_payload = {'resource_info': 'some_resource_id'}
- self.assertEqual(exp_payload, payload)
+ expected_args = [
+ {}, # empty context
+ 'identity.%s.created' % resource_type, # event_type
+ {'resource_info': resource}, # payload
+ 'INFO', # priority is always INFO...
+ ]
+
+ with mock.patch.object(notifications._get_notifier(),
+ '_notify') as mocked:
+ notifications._send_notification(operation, resource_type,
+ resource)
+ mocked.assert_called_once_with(*expected_args)
- self.stubs.Set(notifier_api, 'notify', fake_notify)
- notifications._send_notification(resource, resource_type, operation,
- host=host)
+ notifications._send_notification(operation, resource_type, resource)
class NotificationsForEntities(test_v3.RestfulTestCase):
def setUp(self):
super(NotificationsForEntities, self).setUp()
-
- self.exp_resource_id = None
- self.exp_operation = None
- self.exp_resource_type = None
- self.send_notification_called = False
-
- def fake_notify(operation, resource_type, resource_id, host=None):
- self.exp_resource_id = resource_id
- self.exp_operation = operation
- self.exp_resource_type = resource_type
- self.send_notification_called = True
+ self._notifications = []
+
+ def fake_notify(operation, resource_type, resource_id,
+ public=True):
+ note = {
+ 'resource_id': resource_id,
+ 'operation': operation,
+ 'resource_type': resource_type,
+ 'send_notification_called': True,
+ 'public': public}
+ self._notifications.append(note)
fixture = self.useFixture(moxstubout.MoxStubout())
self.stubs = fixture.stubs
self.stubs.Set(notifications, '_send_notification', fake_notify)
- def _assertLastNotify(self, resource_id, operation, resource_type):
- self.assertEqual(self.exp_operation, operation)
- self.assertEqual(self.exp_resource_id, resource_id)
- self.assertEqual(self.exp_resource_type, resource_type)
+    def _assertNotifySeen(self, resource_id, operation, resource_type):
+        seen = self._notifications
+        self.assertIn(operation, [note['operation'] for note in seen])
+        self.assertIn(resource_id, [note['resource_id'] for note in seen])
+        self.assertIn(resource_type,
+                      [note['resource_type'] for note in seen])
+        self.assertTrue(seen)
+ def _assertLastNotify(self, resource_id, operation, resource_type):
+        self.assertTrue(self._notifications)
+ note = self._notifications[-1]
+ self.assertEqual(note['operation'], operation)
+ self.assertEqual(note['resource_id'], resource_id)
+ self.assertEqual(note['resource_type'], resource_type)
+ self.assertTrue(note['send_notification_called'])
+
+ def _assertNotifyNotSent(self, resource_id, operation, resource_type,
+ public=True):
+ unexpected = {
+ 'resource_id': resource_id,
+ 'operation': operation,
+ 'resource_type': resource_type,
+ 'send_notification_called': True,
+ 'public': public}
+ for note in self._notifications:
+ self.assertNotEqual(unexpected, note)
+
+ def _assertNotifySent(self, resource_id, operation, resource_type, public):
+ expected = {
+ 'resource_id': resource_id,
+ 'operation': operation,
+ 'resource_type': resource_type,
+ 'send_notification_called': True,
+ 'public': public}
+ for note in self._notifications:
+ if expected == note:
+ break
+ else:
+ self.fail("Notification not sent.")
+
def test_create_group(self):
group_ref = self.new_group_ref(domain_id=self.domain_id)
self.identity_api.create_group(group_ref['id'], group_ref)
@@ -242,6 +277,13 @@ class NotificationsForEntities(test_v3.RestfulTestCase):
self.identity_api.delete_user(user_ref['id'])
self._assertLastNotify(user_ref['id'], 'deleted', 'user')
+ def test_update_domain(self):
+ domain_ref = self.new_domain_ref()
+ self.assignment_api.create_domain(domain_ref['id'], domain_ref)
+ domain_ref['description'] = uuid.uuid4().hex
+ self.assignment_api.update_domain(domain_ref['id'], domain_ref)
+ self._assertLastNotify(domain_ref['id'], 'updated', 'domain')
+
def test_delete_trust(self):
trustor = self.new_user_ref(domain_id=self.domain_id)
self.identity_api.create_user(trustor['id'], trustor)
@@ -255,6 +297,22 @@ class NotificationsForEntities(test_v3.RestfulTestCase):
self.trust_api.delete_trust(trust_ref['id'])
self._assertLastNotify(trust_ref['id'], 'deleted', 'OS-TRUST:trust')
+ def test_delete_domain(self):
+ domain_ref = self.new_domain_ref()
+ self.assignment_api.create_domain(domain_ref['id'], domain_ref)
+ domain_ref['enabled'] = False
+ self.assignment_api.update_domain(domain_ref['id'], domain_ref)
+ self.assignment_api.delete_domain(domain_ref['id'])
+ self._assertLastNotify(domain_ref['id'], 'deleted', 'domain')
+
+ def test_disable_domain(self):
+ domain_ref = self.new_domain_ref()
+ self.assignment_api.create_domain(domain_ref['id'], domain_ref)
+ domain_ref['enabled'] = False
+ self.assignment_api.update_domain(domain_ref['id'], domain_ref)
+ self._assertNotifySent(domain_ref['id'], 'disabled', 'domain',
+ public=False)
+
def test_update_group(self):
group_ref = self.new_group_ref(domain_id=self.domain_id)
self.identity_api.create_group(group_ref['id'], group_ref)
@@ -265,7 +323,24 @@ class NotificationsForEntities(test_v3.RestfulTestCase):
project_ref = self.new_project_ref(domain_id=self.domain_id)
self.assignment_api.create_project(project_ref['id'], project_ref)
self.assignment_api.update_project(project_ref['id'], project_ref)
+ self._assertNotifySent(project_ref['id'], 'updated', 'project',
+ public=True)
+
+ def test_disable_project(self):
+ project_ref = self.new_project_ref(domain_id=self.domain_id)
+ self.assignment_api.create_project(project_ref['id'], project_ref)
+ project_ref['enabled'] = False
+ self.assignment_api.update_project(project_ref['id'], project_ref)
+ self._assertNotifySent(project_ref['id'], 'disabled', 'project',
+ public=False)
+
+ def test_update_project_does_not_send_disable(self):
+ project_ref = self.new_project_ref(domain_id=self.domain_id)
+ self.assignment_api.create_project(project_ref['id'], project_ref)
+ project_ref['enabled'] = True
+ self.assignment_api.update_project(project_ref['id'], project_ref)
self._assertLastNotify(project_ref['id'], 'updated', 'project')
+ self._assertNotifyNotSent(project_ref['id'], 'disabled', 'project')
def test_update_role(self):
role_ref = self.new_role_ref()
@@ -365,3 +440,68 @@ class TestEventCallbacks(test_v3.RestfulTestCase):
notifications.SUBSCRIBERS = {}
self.assertRaises(ValueError, Foo)
+
+
+class CadfNotificationsWrapperTestCase(test_v3.RestfulTestCase):
+
+ LOCAL_HOST = 'localhost'
+ ACTION = 'authenticate'
+
+ def setUp(self):
+ super(CadfNotificationsWrapperTestCase, self).setUp()
+ self._notifications = []
+
+ def fake_notify(action, initiator, outcome):
+ note = {
+ 'action': action,
+ 'initiator': initiator,
+                    # NOTE(stevemar): outcome has two stages, pending and
+                    # success, so we are ignoring it for now.
+ #'outcome': outcome,
+ 'send_notification_called': True}
+ self._notifications.append(note)
+
+ # TODO(stevemar): Look into using mock instead of mox
+ fixture = self.useFixture(moxstubout.MoxStubout())
+ self.stubs = fixture.stubs
+ self.stubs.Set(notifications, '_send_audit_notification',
+ fake_notify)
+
+ def _assertLastNotify(self, action, user_id):
+ self.assertTrue(self._notifications)
+ note = self._notifications[-1]
+ self.assertEqual(note['action'], action)
+ initiator = note['initiator']
+ self.assertEqual(initiator.name, user_id)
+ self.assertEqual(initiator.host.address, self.LOCAL_HOST)
+ self.assertTrue(note['send_notification_called'])
+
+ def test_v3_authenticate_user_name_and_domain_id(self):
+ user_id = self.user_id
+ user_name = self.user['name']
+ password = self.user['password']
+ domain_id = self.domain_id
+ data = self.build_authentication_request(username=user_name,
+ user_domain_id=domain_id,
+ password=password)
+ self.post('/auth/tokens', body=data)
+ self._assertLastNotify(self.ACTION, user_id)
+
+ def test_v3_authenticate_user_id(self):
+ user_id = self.user_id
+ password = self.user['password']
+ data = self.build_authentication_request(user_id=user_id,
+ password=password)
+ self.post('/auth/tokens', body=data)
+ self._assertLastNotify(self.ACTION, user_id)
+
+ def test_v3_authenticate_user_name_and_domain_name(self):
+ user_id = self.user_id
+ user_name = self.user['name']
+ password = self.user['password']
+ domain_name = self.domain['name']
+ data = self.build_authentication_request(username=user_name,
+ user_domain_name=domain_name,
+ password=password)
+ self.post('/auth/tokens', body=data)
+ self._assertLastNotify(self.ACTION, user_id)
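As the TODO above notes, the same stubbing could be done with mock instead
of mox. A minimal sketch, assuming _send_audit_notification is called
positionally with (action, initiator, outcome) as the fake_notify signature
above implies (the test name is hypothetical):

    import mock

    def test_v3_authenticate_user_id_with_mock(self):
        data = self.build_authentication_request(
            user_id=self.user_id, password=self.user['password'])
        with mock.patch.object(notifications,
                               '_send_audit_notification') as fake:
            self.post('/auth/tokens', body=data)
        action, initiator, outcome = fake.call_args[0]
        self.assertEqual(self.ACTION, action)
        self.assertEqual(initiator.name, self.user_id)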
diff --git a/keystone/tests/test_overrides.conf b/keystone/tests/test_overrides.conf
index b33ad4880..d1f7bae14 100644
--- a/keystone/tests/test_overrides.conf
+++ b/keystone/tests/test_overrides.conf
@@ -5,7 +5,7 @@ crypt_strength = 1000
driver = keystone.identity.backends.kvs.Identity
[catalog]
-driver = keystone.catalog.backends.templated.TemplatedCatalog
+driver = keystone.catalog.backends.templated.Catalog
template_file = default_catalog.templates
[trust]
@@ -27,3 +27,10 @@ ca_certs = examples/pki/certs/cacert.pem
[kvs]
backends = keystone.tests.test_kvs.KVSBackendForcedKeyMangleFixture, keystone.tests.test_kvs.KVSBackendFixture
+
+[auth]
+methods = external,password,token,oauth1
+oauth1 = keystone.auth.plugins.oauth1.OAuth
+
+[paste_deploy]
+config_file = keystone-paste.ini
diff --git a/keystone/tests/test_pemutils.py b/keystone/tests/test_pemutils.py
index adc521f25..5ce654398 100644
--- a/keystone/tests/test_pemutils.py
+++ b/keystone/tests/test_pemutils.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/tests/test_policy.py b/keystone/tests/test_policy.py
index 4878e8076..9a93f25b3 100644
--- a/keystone/tests/test_policy.py
+++ b/keystone/tests/test_policy.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2011 Piston Cloud Computing, Inc.
# All Rights Reserved.
diff --git a/keystone/tests/test_s3_token_middleware.py b/keystone/tests/test_s3_token_middleware.py
index e425ee34e..788df3893 100644
--- a/keystone/tests/test_s3_token_middleware.py
+++ b/keystone/tests/test_s3_token_middleware.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -15,219 +13,26 @@
# under the License.
import testtools
-import webob
-
-from keystone.middleware import s3_token
-from keystone.openstack.common import jsonutils
-
-
-class FakeHTTPResponse(object):
- def __init__(self, status, body):
- self.status = status
- self.body = body
- self.reason = ""
-
- def read(self):
- return self.body
-
-
-class FakeApp(object):
- """This represents a WSGI app protected by the auth_token middleware."""
- def __call__(self, env, start_response):
- resp = webob.Response()
- resp.environ = env
- return resp(env, start_response)
-
-
-class FakeHTTPConnection(object):
- def __init__(self, *args):
- return
- def getresponse(self):
- return self.resp
+from keystoneclient.middleware import s3_token as ksc_s3_token
- def close(self):
- pass
-
- def request(self, method, path, **kwargs):
- pass
+from keystone.middleware import s3_token
class S3TokenMiddlewareTestBase(testtools.TestCase):
- def setUp(self):
- super(S3TokenMiddlewareTestBase, self).setUp()
-
- def start_fake_response(self, status, headers):
- self.response_status = int(status.split(' ', 1)[0])
- self.response_headers = dict(headers)
-
-
-def good_request(cls, method, path, **kwargs):
- cls.status = 201
- ret = {'access': {'token':
- {'id': 'TOKEN_ID',
- 'tenant': {'id': 'TENANT_ID'}}}}
- body = jsonutils.dumps(ret)
- cls.resp = FakeHTTPResponse(cls.status, body)
-
-
-class S3TokenMiddlewareTestGood(S3TokenMiddlewareTestBase):
- def setup_middleware_fake(self):
- self.middleware.http_client_class = FakeHTTPConnection
- self.middleware.http_client_class.request = good_request
-
- def setUp(self):
- self.middleware = s3_token.S3Token(FakeApp(), {})
- self.setup_middleware_fake()
- super(S3TokenMiddlewareTestGood, self).setUp()
-
- # Ignore the request and pass to the next middleware in the
- # pipeline if no path has been specified.
- def test_no_path_request(self):
- req = webob.Request.blank('/')
- self.middleware(req.environ, self.start_fake_response)
- self.assertEqual(self.response_status, 200)
-
- # Ignore the request and pass to the next middleware in the
- # pipeline if no Authorization header has been specified
- def test_without_authorization(self):
- req = webob.Request.blank('/v1/AUTH_cfa/c/o')
- self.middleware(req.environ, self.start_fake_response)
- self.assertEqual(self.response_status, 200)
-
- def test_without_auth_storage_token(self):
- req = webob.Request.blank('/v1/AUTH_cfa/c/o')
- req.headers['Authorization'] = 'badboy'
- self.middleware(req.environ, self.start_fake_response)
- self.assertEqual(self.response_status, 200)
-
- def test_authorized(self):
- req = webob.Request.blank('/v1/AUTH_cfa/c/o')
- req.headers['Authorization'] = 'access:signature'
- req.headers['X-Storage-Token'] = 'token'
- req.get_response(self.middleware)
- self.assertTrue(req.path.startswith('/v1/AUTH_TENANT_ID'))
- self.assertEqual(req.headers['X-Auth-Token'], 'TOKEN_ID')
-
- def test_authorized_http(self):
- self.middleware = (
- s3_token.filter_factory({'auth_protocol': 'http'})(FakeApp()))
- self.setup_middleware_fake()
- req = webob.Request.blank('/v1/AUTH_cfa/c/o')
- req.headers['Authorization'] = 'access:signature'
- req.headers['X-Storage-Token'] = 'token'
- req.get_response(self.middleware)
- self.assertTrue(req.path.startswith('/v1/AUTH_TENANT_ID'))
- self.assertEqual(req.headers['X-Auth-Token'], 'TOKEN_ID')
-
- def test_authorization_nova_toconnect(self):
- req = webob.Request.blank('/v1/AUTH_swiftint/c/o')
- req.headers['Authorization'] = 'access:FORCED_TENANT_ID:signature'
- req.headers['X-Storage-Token'] = 'token'
- req.get_response(self.middleware)
- path = req.environ['PATH_INFO']
- self.assertTrue(path.startswith('/v1/AUTH_FORCED_TENANT_ID'))
-
-
-class S3TokenMiddlewareTestBad(S3TokenMiddlewareTestBase):
- def setUp(self):
- self.middleware = s3_token.S3Token(FakeApp(), {})
- self.middleware.http_client_class = FakeHTTPConnection
- super(S3TokenMiddlewareTestBad, self).setUp()
-
- def test_unauthorized_token(self):
- def request(self, method, path, **kwargs):
- ret = {"error":
- {"message": "EC2 access key not found.",
- "code": 401,
- "title": "Unauthorized"}}
- body = jsonutils.dumps(ret)
- self.status = 403
- self.resp = FakeHTTPResponse(self.status, body)
-
- req = webob.Request.blank('/v1/AUTH_cfa/c/o')
- req.headers['Authorization'] = 'access:signature'
- req.headers['X-Storage-Token'] = 'token'
- self.middleware.http_client_class.request = request
- resp = req.get_response(self.middleware)
- s3_denied_req = self.middleware.deny_request('AccessDenied')
- self.assertEqual(resp.body, s3_denied_req.body)
- self.assertEqual(resp.status_int, s3_denied_req.status_int)
-
- def test_bogus_authorization(self):
- req = webob.Request.blank('/v1/AUTH_cfa/c/o')
- req.headers['Authorization'] = 'badboy'
- req.headers['X-Storage-Token'] = 'token'
- resp = req.get_response(self.middleware)
- self.assertEqual(resp.status_int, 400)
- s3_invalid_req = self.middleware.deny_request('InvalidURI')
- self.assertEqual(resp.body, s3_invalid_req.body)
- self.assertEqual(resp.status_int, s3_invalid_req.status_int)
-
- def test_fail_to_connect_to_keystone(self):
- def request(self, method, path, **kwargs):
- raise s3_token.ServiceError
- self.middleware.http_client_class.request = request
-
- req = webob.Request.blank('/v1/AUTH_cfa/c/o')
- req.headers['Authorization'] = 'access:signature'
- req.headers['X-Storage-Token'] = 'token'
- self.middleware.http_client_class.status = 503
- resp = req.get_response(self.middleware)
- s3_invalid_req = self.middleware.deny_request('InvalidURI')
- self.assertEqual(resp.body, s3_invalid_req.body)
- self.assertEqual(resp.status_int, s3_invalid_req.status_int)
-
- def test_bad_reply(self):
- def request(self, method, path, **kwargs):
- body = "<badreply>"
- self.status = 201
- self.resp = FakeHTTPResponse(self.status, body)
-
- req = webob.Request.blank('/v1/AUTH_cfa/c/o')
- req.headers['Authorization'] = 'access:signature'
- req.headers['X-Storage-Token'] = 'token'
- self.middleware.http_client_class.request = request
- resp = req.get_response(self.middleware)
- s3_invalid_req = self.middleware.deny_request('InvalidURI')
- self.assertEqual(resp.body, s3_invalid_req.body)
- self.assertEqual(resp.status_int, s3_invalid_req.status_int)
-
-
-class S3TokenMiddlewareTestUtil(testtools.TestCase):
- def test_split_path_failed(self):
- self.assertRaises(ValueError, s3_token.split_path, '')
- self.assertRaises(ValueError, s3_token.split_path, '/')
- self.assertRaises(ValueError, s3_token.split_path, '//')
- self.assertRaises(ValueError, s3_token.split_path, '//a')
- self.assertRaises(ValueError, s3_token.split_path, '/a/c')
- self.assertRaises(ValueError, s3_token.split_path, '//c')
- self.assertRaises(ValueError, s3_token.split_path, '/a/c/')
- self.assertRaises(ValueError, s3_token.split_path, '/a//')
- self.assertRaises(ValueError, s3_token.split_path, '/a', 2)
- self.assertRaises(ValueError, s3_token.split_path, '/a', 2, 3)
- self.assertRaises(ValueError, s3_token.split_path, '/a', 2, 3, True)
- self.assertRaises(ValueError, s3_token.split_path, '/a/c/o/r', 3, 3)
- self.assertRaises(ValueError, s3_token.split_path, '/a', 5, 4)
-
- def test_split_path_success(self):
- self.assertEqual(s3_token.split_path('/a'), ['a'])
- self.assertEqual(s3_token.split_path('/a/'), ['a'])
- self.assertEqual(s3_token.split_path('/a/c', 2), ['a', 'c'])
- self.assertEqual(s3_token.split_path('/a/c/o', 3), ['a', 'c', 'o'])
- self.assertEqual(s3_token.split_path('/a/c/o/r', 3, 3, True),
- ['a', 'c', 'o/r'])
- self.assertEqual(s3_token.split_path('/a/c', 2, 3, True),
- ['a', 'c', None])
- self.assertEqual(s3_token.split_path('/a/c/', 2), ['a', 'c'])
- self.assertEqual(s3_token.split_path('/a/c/', 2, 3), ['a', 'c', ''])
-
- def test_split_path_invalid_path(self):
- try:
- s3_token.split_path('o\nn e', 2)
- except ValueError as err:
- self.assertEqual(str(err), 'Invalid path: o%0An%20e')
- try:
- s3_token.split_path('o\nn e', 2, 3, True)
- except ValueError as err:
- self.assertEqual(str(err), 'Invalid path: o%0An%20e')
+ def test_symbols(self):
+ """Verify s3_token middleware symbols.
+
+ Verify that the keystone version of s3_token middleware forwards the
+ public symbols from the keystoneclient version of the s3_token
+ middleware for backwards compatibility.
+
+ """
+
+ self.assertIs(ksc_s3_token.PROTOCOL_NAME, s3_token.PROTOCOL_NAME)
+ self.assertIs(ksc_s3_token.split_path, s3_token.split_path)
+ self.assertIs(ksc_s3_token.ServiceError, s3_token.ServiceError)
+ self.assertIs(ksc_s3_token.filter_factory, s3_token.filter_factory)
+ self.assertTrue(
+ issubclass(s3_token.S3Token, ksc_s3_token.S3Token),
+ 's3_token.S3Token is not subclass of keystoneclient s3_token')
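The forwarding pattern this test verifies can be sketched as follows; this
is an assumed shape of keystone/middleware/s3_token.py after the change,
not a copy of it:

    from keystoneclient.middleware import s3_token as ksc_s3_token

    # Re-export the public symbols for backwards compatibility.
    PROTOCOL_NAME = ksc_s3_token.PROTOCOL_NAME
    split_path = ksc_s3_token.split_path
    ServiceError = ksc_s3_token.ServiceError
    filter_factory = ksc_s3_token.filter_factory

    class S3Token(ksc_s3_token.S3Token):
        """Keystone's s3_token now simply subclasses the client's."""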
diff --git a/keystone/tests/test_serializer.py b/keystone/tests/test_serializer.py
index bda75f8b7..ec2259e46 100644
--- a/keystone/tests/test_serializer.py
+++ b/keystone/tests/test_serializer.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/tests/test_singular_plural.py b/keystone/tests/test_singular_plural.py
index 652a78b63..95733b64b 100644
--- a/keystone/tests/test_singular_plural.py
+++ b/keystone/tests/test_singular_plural.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/tests/test_sizelimit.py b/keystone/tests/test_sizelimit.py
index 5809ef5f1..936005716 100644
--- a/keystone/tests/test_sizelimit.py
+++ b/keystone/tests/test_sizelimit.py
@@ -40,7 +40,7 @@ class TestRequestBodySizeLimiter(tests.TestCase):
self.request.headers['Content-Length'] = MAX_REQUEST_BODY_SIZE
self.request.body = "0" * MAX_REQUEST_BODY_SIZE
response = self.request.get_response(self.middleware)
- self.assertEqual(response.status_int, 200)
+ self.assertEqual(200, response.status_int)
def test_content_length_too_large(self):
self.request.headers['Content-Length'] = MAX_REQUEST_BODY_SIZE + 1
diff --git a/keystone/tests/test_sql_migrate_extensions.py b/keystone/tests/test_sql_migrate_extensions.py
index 604975b88..1e2f19a67 100644
--- a/keystone/tests/test_sql_migrate_extensions.py
+++ b/keystone/tests/test_sql_migrate_extensions.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -140,8 +138,8 @@ class EndpointFilterExtension(test_sql_upgrade.SqlMigrateBase):
class FederationExtension(test_sql_upgrade.SqlMigrateBase):
"""Test class for ensuring the Federation SQL."""
- def __init__(self, *args, **kwargs):
- super(FederationExtension, self).__init__(*args, **kwargs)
+ def setUp(self):
+ super(FederationExtension, self).setUp()
self.identity_provider = 'identity_provider'
self.federation_protocol = 'federation_protocol'
self.mapping = 'mapping'
diff --git a/keystone/tests/test_sql_upgrade.py b/keystone/tests/test_sql_upgrade.py
index a412c1227..3b20b7903 100644
--- a/keystone/tests/test_sql_upgrade.py
+++ b/keystone/tests/test_sql_upgrade.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -38,11 +36,14 @@ import uuid
from migrate.versioning import api as versioning_api
import sqlalchemy
-from keystone.common.sql import migration
+from keystone.assignment.backends import sql as assignment_sql
+from keystone.common import sql
+from keystone.common.sql import migration_helpers
from keystone.common import utils
from keystone import config
from keystone import credential
from keystone import exception
+from keystone.openstack.common.db.sqlalchemy import migration
from keystone.openstack.common.db.sqlalchemy import session
from keystone import tests
from keystone.tests import default_fixtures
@@ -66,7 +67,7 @@ class SqlMigrateBase(tests.TestCase):
return self._config_file_list
def repo_package(self):
- return None
+ return sql
def setUp(self):
super(SqlMigrateBase, self).setUp()
@@ -78,7 +79,8 @@ class SqlMigrateBase(tests.TestCase):
self.Session = session.get_maker(self.engine, autocommit=False)
self.initialize_sql()
- self.repo_path = migration.find_migrate_repo(self.repo_package())
+ self.repo_path = migration_helpers.find_migrate_repo(
+ self.repo_package())
self.schema = versioning_api.ControlledSchema.create(
self.engine,
self.repo_path, 0)
@@ -156,8 +158,8 @@ class SqlUpgradeTests(SqlMigrateBase):
self.assertTableDoesNotExist('user')
def test_start_version_0(self):
- version = migration.db_version()
- self.assertEqual(version, 0, "DB is at version 0")
+ version = migration.db_version(self.repo_path, 0)
+ self.assertEqual(version, 0, "DB is not at version 0")
def test_two_steps_forward_one_step_back(self):
"""You should be able to cleanly undo and re-apply all upgrades.
@@ -1393,7 +1395,7 @@ class SqlUpgradeTests(SqlMigrateBase):
credential_api = credential.Manager()
self.assertNotEmpty(credential_api.
list_credentials(
- user_id=ec2_credential['user_id']))
+ user_id=ec2_credential['user_id']))
self.downgrade(32)
session.commit()
self.assertTableExists('ec2_credential')
@@ -1682,6 +1684,336 @@ class SqlUpgradeTests(SqlMigrateBase):
self.downgrade(36)
self.assertTableDoesNotExist('region')
+ def test_assignment_table_migration(self):
+
+ def create_base_data(session):
+ domain_table = sqlalchemy.Table('domain', self.metadata,
+ autoload=True)
+ user_table = sqlalchemy.Table('user', self.metadata, autoload=True)
+ group_table = sqlalchemy.Table('group', self.metadata,
+ autoload=True)
+ role_table = sqlalchemy.Table('role', self.metadata, autoload=True)
+ project_table = sqlalchemy.Table(
+ 'project', self.metadata, autoload=True)
+
+ base_data = {}
+ # Create a Domain
+ base_data['domain'] = {'id': uuid.uuid4().hex,
+ 'name': uuid.uuid4().hex,
+ 'enabled': True}
+ session.execute(domain_table.insert().values(base_data['domain']))
+
+ # Create another Domain
+ base_data['domain2'] = {'id': uuid.uuid4().hex,
+ 'name': uuid.uuid4().hex,
+ 'enabled': True}
+ session.execute(domain_table.insert().values(base_data['domain2']))
+
+ # Create a Project
+ base_data['project'] = {'id': uuid.uuid4().hex,
+ 'name': uuid.uuid4().hex,
+ 'domain_id': base_data['domain']['id'],
+ 'extra': "{}"}
+ session.execute(
+ project_table.insert().values(base_data['project']))
+
+ # Create another Project
+ base_data['project2'] = {'id': uuid.uuid4().hex,
+ 'name': uuid.uuid4().hex,
+ 'domain_id': base_data['domain']['id'],
+ 'extra': "{}"}
+ session.execute(
+ project_table.insert().values(base_data['project2']))
+
+ # Create a User
+ base_data['user'] = {'id': uuid.uuid4().hex,
+ 'name': uuid.uuid4().hex,
+ 'domain_id': base_data['domain']['id'],
+ 'password': uuid.uuid4().hex,
+ 'enabled': True,
+ 'extra': "{}"}
+ session.execute(user_table.insert().values(base_data['user']))
+
+ # Create a Group
+ base_data['group'] = {'id': uuid.uuid4().hex,
+ 'name': uuid.uuid4().hex,
+ 'domain_id': base_data['domain']['id'],
+ 'extra': "{}"}
+ session.execute(group_table.insert().values(base_data['group']))
+
+ # Create roles
+ base_data['roles'] = []
+ for _ in range(9):
+ role = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
+ session.execute(role_table.insert().values(role))
+ base_data['roles'].append(role)
+
+ return base_data
+
+ def populate_grants(session, base_data):
+
+ user_project_table = sqlalchemy.Table(
+ 'user_project_metadata', self.metadata, autoload=True)
+ user_domain_table = sqlalchemy.Table(
+ 'user_domain_metadata', self.metadata, autoload=True)
+ group_project_table = sqlalchemy.Table(
+ 'group_project_metadata', self.metadata, autoload=True)
+ group_domain_table = sqlalchemy.Table(
+ 'group_domain_metadata', self.metadata, autoload=True)
+
+ # Grant a role to user on project
+ grant = {'user_id': base_data['user']['id'],
+ 'project_id': base_data['project']['id'],
+ 'data': json.dumps(
+ {'roles': [{'id': base_data['roles'][0]['id']}]})}
+ session.execute(user_project_table.insert().values(grant))
+
+ # Grant two roles to user on project2
+ grant = {'user_id': base_data['user']['id'],
+ 'project_id': base_data['project2']['id'],
+ 'data': json.dumps(
+ {'roles': [{'id': base_data['roles'][1]['id']},
+ {'id': base_data['roles'][2]['id']}]})}
+ session.execute(user_project_table.insert().values(grant))
+
+ # Grant role to group on project
+ grant = {'group_id': base_data['group']['id'],
+ 'project_id': base_data['project']['id'],
+ 'data': json.dumps(
+ {'roles': [{'id': base_data['roles'][3]['id']}]})}
+ session.execute(group_project_table.insert().values(grant))
+
+ # Grant two roles to group on project2
+ grant = {'group_id': base_data['group']['id'],
+ 'project_id': base_data['project2']['id'],
+ 'data': json.dumps(
+ {'roles': [{'id': base_data['roles'][4]['id']},
+ {'id': base_data['roles'][5]['id']}]})}
+ session.execute(group_project_table.insert().values(grant))
+
+ # Grant two roles to group on domain, one inherited, one not
+ grant = {'group_id': base_data['group']['id'],
+ 'domain_id': base_data['domain']['id'],
+ 'data': json.dumps(
+ {'roles': [{'id': base_data['roles'][6]['id']},
+ {'id': base_data['roles'][7]['id'],
+ 'inherited_to': 'projects'}]})}
+ session.execute(group_domain_table.insert().values(grant))
+
+ # Grant inherited role to user on domain
+ grant = {'user_id': base_data['user']['id'],
+ 'domain_id': base_data['domain']['id'],
+ 'data': json.dumps(
+ {'roles': [{'id': base_data['roles'][8]['id'],
+ 'inherited_to': 'projects'}]})}
+ session.execute(user_domain_table.insert().values(grant))
+
+ # Grant two non-inherited roles to user on domain2, using roles
+ # that are also assigned to other actors/targets
+ grant = {'user_id': base_data['user']['id'],
+ 'domain_id': base_data['domain2']['id'],
+ 'data': json.dumps(
+ {'roles': [{'id': base_data['roles'][6]['id']},
+ {'id': base_data['roles'][7]['id']}]})}
+ session.execute(user_domain_table.insert().values(grant))
+
+ session.commit()
+
+ def check_grants(session, base_data):
+ user_project_table = sqlalchemy.Table(
+ 'user_project_metadata', self.metadata, autoload=True)
+ user_domain_table = sqlalchemy.Table(
+ 'user_domain_metadata', self.metadata, autoload=True)
+ group_project_table = sqlalchemy.Table(
+ 'group_project_metadata', self.metadata, autoload=True)
+ group_domain_table = sqlalchemy.Table(
+ 'group_domain_metadata', self.metadata, autoload=True)
+
+ s = sqlalchemy.select([user_project_table.c.data]).where(
+ (user_project_table.c.user_id == base_data['user']['id']) &
+ (user_project_table.c.project_id ==
+ base_data['project']['id']))
+ r = session.execute(s)
+ data = json.loads(r.fetchone()['data'])
+ self.assertEqual(len(data['roles']), 1)
+ self.assertIn({'id': base_data['roles'][0]['id']}, data['roles'])
+
+ s = sqlalchemy.select([user_project_table.c.data]).where(
+ (user_project_table.c.user_id == base_data['user']['id']) &
+ (user_project_table.c.project_id ==
+ base_data['project2']['id']))
+ r = session.execute(s)
+ data = json.loads(r.fetchone()['data'])
+ self.assertEqual(len(data['roles']), 2)
+ self.assertIn({'id': base_data['roles'][1]['id']}, data['roles'])
+ self.assertIn({'id': base_data['roles'][2]['id']}, data['roles'])
+
+ s = sqlalchemy.select([group_project_table.c.data]).where(
+ (group_project_table.c.group_id == base_data['group']['id']) &
+ (group_project_table.c.project_id ==
+ base_data['project']['id']))
+ r = session.execute(s)
+ data = json.loads(r.fetchone()['data'])
+ self.assertEqual(len(data['roles']), 1)
+ self.assertIn({'id': base_data['roles'][3]['id']}, data['roles'])
+
+ s = sqlalchemy.select([group_project_table.c.data]).where(
+ (group_project_table.c.group_id == base_data['group']['id']) &
+ (group_project_table.c.project_id ==
+ base_data['project2']['id']))
+ r = session.execute(s)
+ data = json.loads(r.fetchone()['data'])
+ self.assertEqual(len(data['roles']), 2)
+ self.assertIn({'id': base_data['roles'][4]['id']}, data['roles'])
+ self.assertIn({'id': base_data['roles'][5]['id']}, data['roles'])
+
+ s = sqlalchemy.select([group_domain_table.c.data]).where(
+ (group_domain_table.c.group_id == base_data['group']['id']) &
+ (group_domain_table.c.domain_id == base_data['domain']['id']))
+ r = session.execute(s)
+ data = json.loads(r.fetchone()['data'])
+ self.assertEqual(len(data['roles']), 2)
+ self.assertIn({'id': base_data['roles'][6]['id']}, data['roles'])
+ self.assertIn({'id': base_data['roles'][7]['id'],
+ 'inherited_to': 'projects'}, data['roles'])
+
+ s = sqlalchemy.select([user_domain_table.c.data]).where(
+ (user_domain_table.c.user_id == base_data['user']['id']) &
+ (user_domain_table.c.domain_id == base_data['domain']['id']))
+ r = session.execute(s)
+ data = json.loads(r.fetchone()['data'])
+ self.assertEqual(len(data['roles']), 1)
+ self.assertIn({'id': base_data['roles'][8]['id'],
+ 'inherited_to': 'projects'}, data['roles'])
+
+ s = sqlalchemy.select([user_domain_table.c.data]).where(
+ (user_domain_table.c.user_id == base_data['user']['id']) &
+ (user_domain_table.c.domain_id == base_data['domain2']['id']))
+ r = session.execute(s)
+ data = json.loads(r.fetchone()['data'])
+ self.assertEqual(len(data['roles']), 2)
+ self.assertIn({'id': base_data['roles'][6]['id']}, data['roles'])
+ self.assertIn({'id': base_data['roles'][7]['id']}, data['roles'])
+
+ def check_assignments(session, base_data):
+
+ def check_assignment_type(refs, type):
+ for ref in refs:
+ self.assertEqual(ref.type, type)
+
+ assignment_table = sqlalchemy.Table(
+ 'assignment', self.metadata, autoload=True)
+
+ refs = session.query(assignment_table).all()
+ self.assertEqual(len(refs), 11)
+
+ q = session.query(assignment_table)
+ q = q.filter_by(actor_id=base_data['user']['id'])
+ q = q.filter_by(target_id=base_data['project']['id'])
+ refs = q.all()
+ self.assertEqual(len(refs), 1)
+ self.assertEqual(refs[0].role_id, base_data['roles'][0]['id'])
+ self.assertFalse(refs[0].inherited)
+ check_assignment_type(refs,
+ assignment_sql.AssignmentType.USER_PROJECT)
+
+ q = session.query(assignment_table)
+ q = q.filter_by(actor_id=base_data['user']['id'])
+ q = q.filter_by(target_id=base_data['project2']['id'])
+ refs = q.all()
+ self.assertEqual(len(refs), 2)
+ role_ids = [base_data['roles'][1]['id'],
+ base_data['roles'][2]['id']]
+ self.assertIn(refs[0].role_id, role_ids)
+ self.assertIn(refs[1].role_id, role_ids)
+ self.assertFalse(refs[0].inherited)
+ self.assertFalse(refs[1].inherited)
+ check_assignment_type(refs,
+ assignment_sql.AssignmentType.USER_PROJECT)
+
+ q = session.query(assignment_table)
+ q = q.filter_by(actor_id=base_data['group']['id'])
+ q = q.filter_by(target_id=base_data['project']['id'])
+ refs = q.all()
+ self.assertEqual(len(refs), 1)
+ self.assertEqual(refs[0].role_id, base_data['roles'][3]['id'])
+ self.assertFalse(refs[0].inherited)
+ check_assignment_type(refs,
+ assignment_sql.AssignmentType.GROUP_PROJECT)
+
+ q = session.query(assignment_table)
+ q = q.filter_by(actor_id=base_data['group']['id'])
+ q = q.filter_by(target_id=base_data['project2']['id'])
+ refs = q.all()
+ self.assertEqual(len(refs), 2)
+ role_ids = [base_data['roles'][4]['id'],
+ base_data['roles'][5]['id']]
+ self.assertIn(refs[0].role_id, role_ids)
+ self.assertIn(refs[1].role_id, role_ids)
+ self.assertFalse(refs[0].inherited)
+ self.assertFalse(refs[1].inherited)
+ check_assignment_type(refs,
+ assignment_sql.AssignmentType.GROUP_PROJECT)
+
+ q = session.query(assignment_table)
+ q = q.filter_by(actor_id=base_data['group']['id'])
+ q = q.filter_by(target_id=base_data['domain']['id'])
+ refs = q.all()
+ self.assertEqual(len(refs), 2)
+ role_ids = [base_data['roles'][6]['id'],
+ base_data['roles'][7]['id']]
+ self.assertIn(refs[0].role_id, role_ids)
+ self.assertIn(refs[1].role_id, role_ids)
+ if refs[0].role_id == base_data['roles'][7]['id']:
+ self.assertTrue(refs[0].inherited)
+ self.assertFalse(refs[1].inherited)
+ else:
+ self.assertTrue(refs[1].inherited)
+ self.assertFalse(refs[0].inherited)
+ check_assignment_type(refs,
+ assignment_sql.AssignmentType.GROUP_DOMAIN)
+
+ q = session.query(assignment_table)
+ q = q.filter_by(actor_id=base_data['user']['id'])
+ q = q.filter_by(target_id=base_data['domain']['id'])
+ refs = q.all()
+ self.assertEqual(len(refs), 1)
+ self.assertEqual(refs[0].role_id, base_data['roles'][8]['id'])
+ self.assertTrue(refs[0].inherited)
+ check_assignment_type(refs,
+ assignment_sql.AssignmentType.USER_DOMAIN)
+
+ q = session.query(assignment_table)
+ q = q.filter_by(actor_id=base_data['user']['id'])
+ q = q.filter_by(target_id=base_data['domain2']['id'])
+ refs = q.all()
+ self.assertEqual(len(refs), 2)
+ role_ids = [base_data['roles'][6]['id'],
+ base_data['roles'][7]['id']]
+ self.assertIn(refs[0].role_id, role_ids)
+ self.assertIn(refs[1].role_id, role_ids)
+ self.assertFalse(refs[0].inherited)
+ self.assertFalse(refs[1].inherited)
+ check_assignment_type(refs,
+ assignment_sql.AssignmentType.USER_DOMAIN)
+
+ session = self.Session()
+ self.upgrade(37)
+ self.assertTableDoesNotExist('assignment')
+ base_data = create_base_data(session)
+ populate_grants(session, base_data)
+ check_grants(session, base_data)
+ self.upgrade(40)
+ self.assertTableExists('assignment')
+ self.assertTableDoesNotExist('user_project_metadata')
+ self.assertTableDoesNotExist('group_project_metadata')
+ self.assertTableDoesNotExist('user_domain_metadata')
+ self.assertTableDoesNotExist('group_domain_metadata')
+ check_assignments(session, base_data)
+ self.downgrade(37)
+ self.assertTableDoesNotExist('assignment')
+ check_grants(session, base_data)
+
def populate_user_table(self, with_pass_enab=False,
with_pass_enab_domain=False):
# Populate the appropriate fields in the user
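[Editor's note: the migration exercised above (upgrade 37 -> 40) folds the four *_metadata grant tables into a single assignment table keyed by actor, target, and role. A rough sketch of inspecting the migrated table, assuming an existing SQLAlchemy `session` and `metadata` bound to the upgraded schema; `user_id` and `project_id` are placeholders, not values from the test:

    import sqlalchemy

    # Reflect the post-migration table from the live schema.
    assignment_table = sqlalchemy.Table('assignment', metadata, autoload=True)

    # Each row carries role_id, an inherited flag, and a type such as
    # AssignmentType.USER_PROJECT or AssignmentType.GROUP_DOMAIN.
    refs = (session.query(assignment_table)
            .filter_by(actor_id=user_id, target_id=project_id)
            .all())
    role_ids = [ref.role_id for ref in refs]
]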
diff --git a/keystone/tests/test_ssl.py b/keystone/tests/test_ssl.py
index ed38e849a..bd6c678e2 100644
--- a/keystone/tests/test_ssl.py
+++ b/keystone/tests/test_ssl.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -50,7 +48,7 @@ class SSLTestCase(tests.TestCase):
'127.0.0.1', CONF.admin_port)
conn.request('GET', '/')
resp = conn.getresponse()
- self.assertEqual(resp.status, 300)
+ self.assertEqual(300, resp.status)
# Verify Public
with appserver.AppServer(paste_conf, appserver.MAIN, **ssl_kwargs):
@@ -58,7 +56,7 @@ class SSLTestCase(tests.TestCase):
'127.0.0.1', CONF.public_port)
conn.request('GET', '/')
resp = conn.getresponse()
- self.assertEqual(resp.status, 300)
+ self.assertEqual(300, resp.status)
def test_2way_ssl_ok(self):
"""Make sure both public and admin API work with 2-way SSL.
@@ -74,7 +72,7 @@ class SSLTestCase(tests.TestCase):
'127.0.0.1', CONF.admin_port, CLIENT, CLIENT)
conn.request('GET', '/')
resp = conn.getresponse()
- self.assertEqual(resp.status, 300)
+ self.assertEqual(300, resp.status)
# Verify Public
with appserver.AppServer(paste_conf, appserver.MAIN, **ssl_kwargs):
@@ -82,7 +80,7 @@ class SSLTestCase(tests.TestCase):
'127.0.0.1', CONF.public_port, CLIENT, CLIENT)
conn.request('GET', '/')
resp = conn.getresponse()
- self.assertEqual(resp.status, 300)
+ self.assertEqual(300, resp.status)
def test_1way_ssl_with_ipv6_ok(self):
"""Make sure both public and admin API work with 1-way ipv6 & SSL."""
@@ -96,14 +94,14 @@ class SSLTestCase(tests.TestCase):
conn = environment.httplib.HTTPSConnection('::1', CONF.admin_port)
conn.request('GET', '/')
resp = conn.getresponse()
- self.assertEqual(resp.status, 300)
+ self.assertEqual(300, resp.status)
# Verify Public
with appserver.AppServer(paste_conf, appserver.MAIN, **ssl_kwargs):
conn = environment.httplib.HTTPSConnection('::1', CONF.public_port)
conn.request('GET', '/')
resp = conn.getresponse()
- self.assertEqual(resp.status, 300)
+ self.assertEqual(300, resp.status)
def test_2way_ssl_with_ipv6_ok(self):
"""Make sure both public and admin API work with 2-way ipv6 & SSL.
@@ -122,7 +120,7 @@ class SSLTestCase(tests.TestCase):
'::1', CONF.admin_port, CLIENT, CLIENT)
conn.request('GET', '/')
resp = conn.getresponse()
- self.assertEqual(resp.status, 300)
+ self.assertEqual(300, resp.status)
# Verify Public
with appserver.AppServer(paste_conf, appserver.MAIN, **ssl_kwargs):
@@ -130,7 +128,7 @@ class SSLTestCase(tests.TestCase):
'::1', CONF.public_port, CLIENT, CLIENT)
conn.request('GET', '/')
resp = conn.getresponse()
- self.assertEqual(resp.status, 300)
+ self.assertEqual(300, resp.status)
def test_2way_ssl_fail(self):
"""Expect to fail when client does not present proper certificate."""
diff --git a/keystone/tests/test_token_provider.py b/keystone/tests/test_token_provider.py
index b25aa05a3..a3fafbe28 100644
--- a/keystone/tests/test_token_provider.py
+++ b/keystone/tests/test_token_provider.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -644,19 +642,22 @@ SAMPLE_V3_TOKEN_WITH_EMBEDED_VERSION = {
'token_version': 'v3.0'
}
-SAMPLE_V2_TOKEN_VALID = {
- "access": {
- "token": {
- "expires": timeutils.isotime(CURRENT_DATE + FUTURE_DELTA),
- "issued_at": "2013-05-21T00:02:43.941473Z",
- "tenant": {
- "enabled": True,
- "id": "01257",
- "name": "service"
+
+def create_v2_token():
+ return {
+ "access": {
+ "token": {
+ "expires": timeutils.isotime(CURRENT_DATE + FUTURE_DELTA),
+ "issued_at": "2013-05-21T00:02:43.941473Z",
+ "tenant": {
+ "enabled": True,
+ "id": "01257",
+ "name": "service"
+ }
}
}
}
-}
+
SAMPLE_V2_TOKEN_EXPIRED = {
"access": {
@@ -672,12 +673,15 @@ SAMPLE_V2_TOKEN_EXPIRED = {
}
}
-SAMPLE_V3_TOKEN_VALID = {
- "token": {
- "expires_at": timeutils.isotime(CURRENT_DATE + FUTURE_DELTA),
- "issued_at": "2013-05-21T00:02:43.941473Z",
+
+def create_v3_token():
+ return {
+ "token": {
+ "expires_at": timeutils.isotime(CURRENT_DATE + FUTURE_DELTA),
+ "issued_at": "2013-05-21T00:02:43.941473Z",
+ }
}
-}
+
SAMPLE_V3_TOKEN_EXPIRED = {
"token": {
@@ -815,10 +819,10 @@ class TestTokenProvider(tests.TestCase):
SAMPLE_MALFORMED_TOKEN)
self.assertEqual(
None,
- self.token_provider_api._is_valid_token(SAMPLE_V2_TOKEN_VALID))
+ self.token_provider_api._is_valid_token(create_v2_token()))
self.assertEqual(
None,
- self.token_provider_api._is_valid_token(SAMPLE_V3_TOKEN_VALID))
+ self.token_provider_api._is_valid_token(create_v3_token()))
def test_uuid_provider_no_oauth_fails_oauth(self):
self.load_fixtures(default_fixtures)
diff --git a/keystone/tests/test_url_middleware.py b/keystone/tests/test_url_middleware.py
index b27b7d501..180bd24e7 100644
--- a/keystone/tests/test_url_middleware.py
+++ b/keystone/tests/test_url_middleware.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/tests/test_utils.py b/keystone/tests/test_utils.py
index eb18a9378..1426131d5 100644
--- a/keystone/tests/test_utils.py
+++ b/keystone/tests/test_utils.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -35,6 +33,7 @@ import os
import time
from keystone.common import utils
+from keystone import service
from keystone import tests
@@ -150,6 +149,16 @@ class UtilsTestCase(tests.TestCase):
_test_unixtime()
+class ServiceHelperTests(tests.TestCase):
+
+ @service.fail_gracefully
+ def _do_test(self):
+ raise Exception("Test Exc")
+
+ def test_fail_gracefully(self):
+ self.assertRaises(tests.UnexpectedExit, self._do_test)
+
+
class LimitingReaderTests(tests.TestCase):
def test_read_default_value(self):
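[Editor's note: for context on ServiceHelperTests above, a minimal sketch of what a fail_gracefully-style decorator does — not keystone's actual implementation (the real one logs through keystone's logger; the test framework is assumed to translate the resulting SystemExit into tests.UnexpectedExit):

    import functools
    import sys


    def fail_gracefully(f):
        """Exit cleanly on any exception instead of dumping a traceback."""
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            try:
                return f(*args, **kwargs)
            except Exception as e:
                sys.stderr.write('%s\n' % e)  # sketch only; real code logs
                sys.exit(1)
        return wrapper
]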
diff --git a/keystone/tests/test_v2_controller.py b/keystone/tests/test_v2_controller.py
new file mode 100644
index 000000000..f330879b8
--- /dev/null
+++ b/keystone/tests/test_v2_controller.py
@@ -0,0 +1,61 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import uuid
+
+from keystone.assignment import controllers
+from keystone import exception
+from keystone import tests
+from keystone.tests import default_fixtures
+
+
+_ADMIN_CONTEXT = {'is_admin': True}
+
+
+class TenantTestCase(tests.TestCase):
+ """Tests for the V2 Tenant controller.
+
+ These tests exercise :class:`keystone.assignment.controllers.Tenant`.
+
+ """
+
+ def test_get_project_users_no_user(self):
+ """get_project_users when user doesn't exist, raises UserNotFound.
+
+ When a user that's not known to `identity` has a role on a project,
+ then `get_project_users` raises
+ :class:`keystone.exception.UserNotFound`.
+
+ """
+
+ self.load_backends()
+ self.load_fixtures(default_fixtures)
+ tenant_controller = controllers.Tenant()
+ role_controller = controllers.Role()
+
+ # Assign a role on the `bar` project to a user that doesn't exist.
+
+ project_id = self.tenant_bar['id']
+
+ user_id = uuid.uuid4().hex
+ role_controller.add_role_to_user(
+ _ADMIN_CONTEXT, user_id, self.role_other['id'], project_id)
+
+ self.assertRaisesRegexp(exception.UserNotFound,
+ 'Could not find user, %s' % user_id,
+ tenant_controller.get_project_users,
+ _ADMIN_CONTEXT, project_id)
diff --git a/keystone/tests/test_v3.py b/keystone/tests/test_v3.py
index d6eafa0a0..6596b30ce 100644
--- a/keystone/tests/test_v3.py
+++ b/keystone/tests/test_v3.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -155,7 +153,6 @@ class RestfulTestCase(rest.RestfulTestCase):
self.region = self.new_region_ref()
self.region['id'] = self.region_id
self.catalog_api.create_region(
- self.region_id,
self.region.copy())
self.service_id = uuid.uuid4().hex
@@ -492,7 +489,7 @@ class RestfulTestCase(rest.RestfulTestCase):
except Exception:
msg = '%s is not a valid ISO 8601 extended format date time.' % dt
raise AssertionError(msg)
- self.assertTrue(isinstance(dt, datetime.datetime))
+ self.assertIsInstance(dt, datetime.datetime)
def assertValidTokenResponse(self, r, user=None):
self.assertTrue(r.headers.get('X-Subject-Token'))
@@ -573,7 +570,7 @@ class RestfulTestCase(rest.RestfulTestCase):
trust = token.get('OS-TRUST:trust')
self.assertIsNotNone(trust)
self.assertIsNotNone(trust.get('id'))
- self.assertTrue(isinstance(trust.get('impersonation'), bool))
+ self.assertIsInstance(trust.get('impersonation'), bool)
self.assertIsNotNone(trust.get('trustor_user'))
self.assertIsNotNone(trust.get('trustee_user'))
self.assertIsNotNone(trust['trustor_user'].get('id'))
diff --git a/keystone/tests/test_v3_auth.py b/keystone/tests/test_v3_auth.py
index 268729fe9..fbd21168b 100644
--- a/keystone/tests/test_v3_auth.py
+++ b/keystone/tests/test_v3_auth.py
@@ -2025,8 +2025,8 @@ class TestTrustAuth(TestAuthInfo):
self.identity_api.create_user(self.trustee_user_id, self.trustee_user)
def test_create_trust_400(self):
- self.skipTest('Blocked by bug 1133435')
- self.post('/OS-TRUST/trusts', body={'trust': {}}, expected_status=400)
+ # The server returns a 403 Forbidden rather than a 400, see bug 1133435
+ self.post('/OS-TRUST/trusts', body={'trust': {}}, expected_status=403)
def test_create_unscoped_trust(self):
ref = self.new_trust_ref(
diff --git a/keystone/tests/test_v3_catalog.py b/keystone/tests/test_v3_catalog.py
index 8edc45a8b..f36a3da28 100644
--- a/keystone/tests/test_v3_catalog.py
+++ b/keystone/tests/test_v3_catalog.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -20,18 +18,28 @@ from keystone.tests import test_v3
class CatalogTestCase(test_v3.RestfulTestCase):
"""Test service & endpoint CRUD."""
- def setUp(self):
- super(CatalogTestCase, self).setUp()
-
# region crud tests
+ def test_create_region_with_id(self):
+ """Call ``PUT /regions/{region_id}``."""
+ ref = dict(description="my region")
+ region_id = 'myregion'
+ r = self.put(
+ '/regions/%s' % region_id,
+ body={'region': ref}, expected_status=200)
+ self.assertValidRegionResponse(r, ref)
+ # Double-check that the region ID was kept as-is and not
+ # populated with a UUID, as is the case with POST /regions.
+ entity = r.result.get("region")
+ self.assertEqual(region_id, entity['id'])
+
def test_create_region(self):
"""Call ``POST /regions``."""
ref = self.new_region_ref()
r = self.post(
'/regions',
body={'region': ref})
- return self.assertValidRegionResponse(r, ref)
+ self.assertValidRegionResponse(r, ref)
def test_list_regions(self):
"""Call ``GET /regions``."""
@@ -71,7 +79,7 @@ class CatalogTestCase(test_v3.RestfulTestCase):
r = self.post(
'/services',
body={'service': ref})
- return self.assertValidServiceResponse(r, ref)
+ self.assertValidServiceResponse(r, ref)
def test_list_services(self):
"""Call ``GET /services``."""
diff --git a/keystone/tests/test_v3_credential.py b/keystone/tests/test_v3_credential.py
index a2a5bd281..a9083b9b0 100644
--- a/keystone/tests/test_v3_credential.py
+++ b/keystone/tests/test_v3_credential.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/tests/test_v3_federation.py b/keystone/tests/test_v3_federation.py
index 873fdc010..291c866a8 100644
--- a/keystone/tests/test_v3_federation.py
+++ b/keystone/tests/test_v3_federation.py
@@ -1,6 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@@ -13,13 +10,14 @@
# License for the specific language governing permissions and limitations
# under the License.
-import os
import random
import uuid
-from keystone.common.sql import migration
+from keystone.common.sql import migration_helpers
from keystone import config
from keystone import contrib
+from keystone.contrib.federation import utils as mapping_utils
+from keystone.openstack.common.db.sqlalchemy import migration
from keystone.openstack.common import importutils
from keystone.openstack.common import jsonutils
from keystone.openstack.common import log
@@ -42,12 +40,11 @@ class FederationTests(test_v3.RestfulTestCase):
def setup_database(self):
super(FederationTests, self).setup_database()
- package_name = "%s.%s.migrate_repo" % (contrib.__name__,
- self.EXTENSION_NAME)
+ package_name = '.'.join((contrib.__name__, self.EXTENSION_NAME))
package = importutils.import_module(package_name)
- self.repo_path = os.path.abspath(os.path.dirname(package.__file__))
- migration.db_version_control(version=None, repo_path=self.repo_path)
- migration.db_sync(version=None, repo_path=self.repo_path)
+ abs_path = migration_helpers.find_migrate_repo(package)
+ migration.db_version_control(abs_path)
+ migration.db_sync(abs_path)
class FederatedIdentityProviderTests(FederationTests):
@@ -167,7 +164,7 @@ class FederatedIdentityProviderTests(FederationTests):
return r.get('id')
ids = []
- for _ in xrange(iterations):
+ for _ in range(iterations):
id = get_id(self._create_default_idp())
ids.append(id)
ids = set(ids)
@@ -311,7 +308,7 @@ class FederatedIdentityProviderTests(FederationTests):
def test_protocol_composite_pk(self):
"""Test whether Keystone let's add two entities with identical
- names, however attached to diferent IdPs.
+ names, as long as they are attached to different IdPs.
1. Add IdP and assign it protocol with predefined name
2. Add another IdP and assign it a protocol with same name.
@@ -385,7 +382,7 @@ class FederatedIdentityProviderTests(FederationTests):
resp, idp_id, proto = self._assign_protocol_to_idp(expected_status=201)
iterations = random.randint(0, 16)
protocol_ids = []
- for _ in xrange(iterations):
+ for _ in range(iterations):
resp, _, proto = self._assign_protocol_to_idp(idp_id=idp_id,
expected_status=201)
proto_id = self._fetch_attribute_from_response(resp, 'protocol')
@@ -584,3 +581,135 @@ class MappingCRUDTests(FederationTests):
url = self.MAPPING_URL + uuid.uuid4().hex
self.put(url, expected_status=400,
body={'mapping': mapping_fixtures.MAPPING_EXTRA_RULES_PROPS})
+
+
+class MappingRuleEngineTests(FederationTests):
+ """A class for testing the mapping rule engine."""
+
+ def test_rule_engine_any_one_of_and_direct_mapping(self):
+ """Should return user's name and group id EMPLOYEE_GROUP_ID.
+
+ The ADMIN_ASSERTION should successfully have a match in MAPPING_LARGE.
+ This will test the case where `any_one_of` is valid, and there is
+ a direct mapping for the user's name.
+
+ """
+
+ mapping = mapping_fixtures.MAPPING_LARGE
+ assertion = mapping_fixtures.ADMIN_ASSERTION
+ rp = mapping_utils.RuleProcessor(mapping['rules'])
+ values = rp.process(assertion)
+
+ fn = mapping_fixtures.ADMIN_ASSERTION.get('FirstName')
+ ln = mapping_fixtures.ADMIN_ASSERTION.get('LastName')
+ full_name = '%s %s' % (fn, ln)
+
+ group_ids = values.get('group_ids')
+ name = values.get('name')
+
+ self.assertIn(mapping_fixtures.EMPLOYEE_GROUP_ID, group_ids)
+ self.assertEqual(name, full_name)
+
+ def test_rule_engine_no_regex_match(self):
+ """Should return no values, the email of the tester won't match.
+
+ This will not match since the email in the assertion will fail
+ the regex test. It is set to match any @example.com address.
+ But the incoming value is set to eviltester@example.org.
+
+ """
+
+ mapping = mapping_fixtures.MAPPING_LARGE
+ assertion = mapping_fixtures.BAD_TESTER_ASSERTION
+ rp = mapping_utils.RuleProcessor(mapping['rules'])
+ values = rp.process(assertion)
+
+ group_ids = values.get('group_ids')
+ name = values.get('name')
+
+ self.assertIsNone(name)
+ self.assertEqual(group_ids, [])
+
+ def test_rule_engine_any_one_of_many_rules(self):
+ """Should return group CONTRACTOR_GROUP_ID.
+
+ The CONTRACTOR_ASSERTION should successfully have a match in
+ MAPPING_SMALL. This will test the case where many rules
+ must be matched, including an `any_one_of`, and a direct
+ mapping.
+
+ """
+
+ mapping = mapping_fixtures.MAPPING_SMALL
+ assertion = mapping_fixtures.CONTRACTOR_ASSERTION
+ rp = mapping_utils.RuleProcessor(mapping['rules'])
+ values = rp.process(assertion)
+
+ group_ids = values.get('group_ids')
+ name = values.get('name')
+
+ self.assertIsNone(name)
+ self.assertIn(mapping_fixtures.CONTRACTOR_GROUP_ID, group_ids)
+
+ def test_rule_engine_not_any_of_and_direct_mapping(self):
+ """Should return user's name and email.
+
+ The CUSTOMER_ASSERTION should successfully have a match in
+ MAPPING_LARGE. This will test the case where a requirement
+ has `not_any_of`, and direct mapping to a username, no group.
+
+ """
+
+ mapping = mapping_fixtures.MAPPING_LARGE
+ assertion = mapping_fixtures.CUSTOMER_ASSERTION
+ rp = mapping_utils.RuleProcessor(mapping['rules'])
+ values = rp.process(assertion)
+
+ user_name = mapping_fixtures.CUSTOMER_ASSERTION.get('UserName')
+ group_ids = values.get('group_ids')
+ name = values.get('name')
+
+ self.assertEqual(name, user_name)
+ self.assertEqual(group_ids, [])
+
+ def test_rule_engine_not_any_of_many_rules(self):
+ """Should return group EMPLOYEE_GROUP_ID.
+
+ The EMPLOYEE_ASSERTION should successfully have a match in
+ MAPPING_SMALL. This will test the case where many remote
+ rules must be matched, including a `not_any_of`.
+
+ """
+
+ mapping = mapping_fixtures.MAPPING_SMALL
+ assertion = mapping_fixtures.EMPLOYEE_ASSERTION
+ rp = mapping_utils.RuleProcessor(mapping['rules'])
+ values = rp.process(assertion)
+
+ group_ids = values.get('group_ids')
+ name = values.get('name')
+
+ self.assertIsNone(name)
+ self.assertIn(mapping_fixtures.EMPLOYEE_GROUP_ID, group_ids)
+
+ def test_rule_engine_regex_match_and_many_groups(self):
+ """Should return group DEVELOPER_GROUP_ID and TESTER_GROUP_ID.
+
+ The TESTER_ASSERTION should successfully have a match in
+ MAPPING_LARGE. This will test a successful regex match
+ for an `any_one_of` evaluation type, and will have many
+ groups returned.
+
+ """
+
+ mapping = mapping_fixtures.MAPPING_LARGE
+ assertion = mapping_fixtures.TESTER_ASSERTION
+ rp = mapping_utils.RuleProcessor(mapping['rules'])
+ values = rp.process(assertion)
+
+ group_ids = values.get('group_ids')
+ name = values.get('name')
+
+ self.assertIsNone(name)
+ self.assertIn(mapping_fixtures.DEVELOPER_GROUP_ID, group_ids)
+ self.assertIn(mapping_fixtures.TESTER_GROUP_ID, group_ids)
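[Editor's note: for readers without the fixture file at hand, a minimal sketch of the mapping format RuleProcessor consumes; the rule, assertion, and group id below are invented for illustration and are not the MAPPING_SMALL/MAPPING_LARGE fixtures:

    from keystone.contrib.federation import utils as mapping_utils

    rules = [{
        'local': [
            {'user': {'name': '{0}'}},           # direct map of 1st remote match
            {'group': {'id': 'EXAMPLE_GROUP'}},  # hypothetical group id
        ],
        'remote': [
            {'type': 'UserName'},
            {'type': 'orgPersonType', 'any_one_of': ['Contractor']},
        ],
    }]
    assertion = {'UserName': 'jsmith', 'orgPersonType': 'Contractor'}
    values = mapping_utils.RuleProcessor(rules).process(assertion)
    # Under this sketch: values['name'] == 'jsmith' and
    # 'EXAMPLE_GROUP' in values['group_ids'].
]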
diff --git a/keystone/tests/test_v3_filters.py b/keystone/tests/test_v3_filters.py
index 216700a56..3f5b7405e 100644
--- a/keystone/tests/test_v3_filters.py
+++ b/keystone/tests/test_v3_filters.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack LLC
# Copyright 2013 IBM Corp.
#
@@ -31,6 +29,7 @@ CONF = config.CONF
class IdentityTestFilteredCase(filtering.FilterTests,
test_v3.RestfulTestCase):
"""Test filter enforcement on the v3 Identity API."""
+ content_type = 'json'
def setUp(self):
"""Setup for Identity Filter Test Cases."""
@@ -299,3 +298,141 @@ class IdentityTestFilteredCase(filtering.FilterTests,
url_by_name = "/groups"
r = self.get(url_by_name, auth=self.auth)
self.assertTrue(len(r.result.get('groups')) > 0)
+
+
+class IdentityTestListLimitCase(IdentityTestFilteredCase):
+ """Test list limiting enforcement on the v3 Identity API."""
+ content_type = 'json'
+
+ def setUp(self):
+ """Setup for Identity Limit Test Cases."""
+
+ super(IdentityTestListLimitCase, self).setUp()
+
+ self._set_policy({"identity:list_users": [],
+ "identity:list_groups": [],
+ "identity:list_projects": [],
+ "identity:list_services": [],
+ "identity:list_policies": []})
+
+ # Create 10 entries for each of the entities we are going to test
+ self.ENTITY_TYPES = ['user', 'group', 'project']
+ self.entity_lists = {}
+ for entity in self.ENTITY_TYPES:
+ self.entity_lists[entity] = self._create_test_data(entity, 10)
+ # Make sure we clean up when finished
+ self.addCleanup(self.clean_up_entity, entity)
+
+ self.service_list = []
+ self.addCleanup(self.clean_up_service)
+ for _ in range(10):
+ new_entity = {'id': uuid.uuid4().hex, 'type': uuid.uuid4().hex}
+ service = self.catalog_api.create_service(new_entity['id'],
+ new_entity)
+ self.service_list.append(service)
+
+ self.policy_list = []
+ self.addCleanup(self.clean_up_policy)
+ for _ in range(10):
+ new_entity = {'id': uuid.uuid4().hex, 'type': uuid.uuid4().hex,
+ 'blob': uuid.uuid4().hex}
+ policy = self.policy_api.create_policy(new_entity['id'],
+ new_entity)
+ self.policy_list.append(policy)
+
+ def clean_up_entity(self, entity):
+ """Clean up entity test data from Identity Limit Test Cases."""
+
+ self._delete_test_data(entity, self.entity_lists[entity])
+
+ def clean_up_service(self):
+ """Clean up service test data from Identity Limit Test Cases."""
+
+ for service in self.service_list:
+ self.catalog_api.delete_service(service['id'])
+
+ def clean_up_policy(self):
+ """Clean up policy test data from Identity Limit Test Cases."""
+
+ for policy in self.policy_list:
+ self.policy_api.delete_policy(policy['id'])
+
+ def _test_entity_list_limit(self, entity, driver):
+ """GET /<entities> (limited)
+
+ Test Plan:
+
+ - For the specified type of entity:
+ - Update policy so there is no protection on the API
+ - Add a bunch of entities
+ - Set the global list limit to 5, and check that getting all
+   entities only returns 5
+ - Set the driver list_limit to 4, and check that now only 4 are
+   returned
+
+ """
+ if entity == 'policy':
+ plural = 'policies'
+ else:
+ plural = '%ss' % entity
+
+ self.opt(list_limit=5)
+ self.opt_in_group(driver, list_limit=None)
+ r = self.get('/%s' % plural, auth=self.auth)
+ self.assertEqual(len(r.result.get(plural)), 5)
+ self.assertIs(r.result.get('truncated'), True)
+
+ self.opt_in_group(driver, list_limit=4)
+ r = self.get('/%s' % plural, auth=self.auth)
+ self.assertEqual(len(r.result.get(plural)), 4)
+ self.assertIs(r.result.get('truncated'), True)
+
+ def test_users_list_limit(self):
+ self._test_entity_list_limit('user', 'identity')
+
+ def test_groups_list_limit(self):
+ self._test_entity_list_limit('group', 'identity')
+
+ def test_projects_list_limit(self):
+ self._test_entity_list_limit('project', 'assignment')
+
+ def test_services_list_limit(self):
+ self._test_entity_list_limit('service', 'catalog')
+
+ def test_non_driver_list_limit(self):
+ """Check list can be limited without driver level support.
+
+ Policy limiting is not done at the driver level (since it
+ really isn't worth doing there), so use this as a test to
+ ensure that the controller level successfully limits in
+ this case.
+
+ """
+ self._test_entity_list_limit('policy', 'policy')
+
+ def test_no_limit(self):
+ """Check truncated attribute not set when list not limited."""
+
+ r = self.get('/services', auth=self.auth)
+ self.assertEqual(len(r.result.get('services')), 10)
+ self.assertIsNone(r.result.get('truncated'))
+
+ def test_at_limit(self):
+ """Check truncated attribute not set when list at max size."""
+
+ # Test this by overriding the general limit with a higher
+ # driver-specific limit (allowing all entities to be returned
+ # in the collection), which should result in a non-truncated list.
+ self.opt(list_limit=5)
+ self.opt_in_group('catalog', list_limit=10)
+ r = self.get('/services', auth=self.auth)
+ self.assertEqual(len(r.result.get('services')), 10)
+ self.assertIsNone(r.result.get('truncated'))
+
+
+class IdentityTestFilteredCaseXML(IdentityTestFilteredCase):
+ content_type = 'xml'
+
+
+class IdentityTestListLimitCaseXML(IdentityTestListLimitCase):
+ content_type = 'xml'
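[Editor's note: in deployment terms, the two knobs these limit tests toggle (self.opt and self.opt_in_group) correspond to keystone.conf options — a global list_limit and a per-driver override. A sketch, assuming the option names match the test helpers:

    [DEFAULT]
    # Global cap on list responses; longer lists are truncated and the
    # response is flagged truncated.
    list_limit = 5

    [identity]
    # Per-driver override; takes precedence over the global value.
    list_limit = 4
]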
diff --git a/keystone/tests/test_v3_identity.py b/keystone/tests/test_v3_identity.py
index a9046bd1f..2fd761fc1 100644
--- a/keystone/tests/test_v3_identity.py
+++ b/keystone/tests/test_v3_identity.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -739,6 +737,25 @@ class IdentityTestCase(test_v3.RestfulTestCase):
#self.assertValidRoleListResponse(r, expected_length=0)
#self.assertIn(collection_url, r.result['links']['self'])
+ def test_crud_user_project_role_grants_no_user(self):
+ """Grant role on a project to a user that doesn't exist, 404 result.
+
+ When granting a role on a project to a user that doesn't exist, the
+ server returns 404 Not Found for the user.
+
+ """
+
+ user_id = uuid.uuid4().hex
+
+ collection_url = (
+ '/projects/%(project_id)s/users/%(user_id)s/roles' % {
+ 'project_id': self.project['id'], 'user_id': user_id})
+ member_url = '%(collection_url)s/%(role_id)s' % {
+ 'collection_url': collection_url,
+ 'role_id': self.role_id}
+
+ self.put(member_url, expected_status=404)
+
def test_crud_user_domain_role_grants(self):
collection_url = (
'/domains/%(domain_id)s/users/%(user_id)s/roles' % {
@@ -759,6 +776,25 @@ class IdentityTestCase(test_v3.RestfulTestCase):
self.assertValidRoleListResponse(r, expected_length=0)
self.assertIn(collection_url, r.result['links']['self'])
+ def test_crud_user_domain_role_grants_no_user(self):
+ """Grant role on a domain to a user that doesn't exist, 404 result.
+
+ When granting a role on a domain to a user that doesn't exist, the
+ server returns 404 Not Found for the user.
+
+ """
+
+ user_id = uuid.uuid4().hex
+
+ collection_url = (
+ '/domains/%(domain_id)s/users/%(user_id)s/roles' % {
+ 'domain_id': self.domain_id, 'user_id': user_id})
+ member_url = '%(collection_url)s/%(role_id)s' % {
+ 'collection_url': collection_url,
+ 'role_id': self.role_id}
+
+ self.put(member_url, expected_status=404)
+
def test_crud_group_project_role_grants(self):
collection_url = (
'/projects/%(project_id)s/groups/%(group_id)s/roles' % {
@@ -779,6 +815,26 @@ class IdentityTestCase(test_v3.RestfulTestCase):
self.assertValidRoleListResponse(r, expected_length=0)
self.assertIn(collection_url, r.result['links']['self'])
+ def test_crud_group_project_role_grants_no_group(self):
+ """Grant role on a project to a group that doesn't exist, 404 result.
+
+ When granting a role on a project to a group that doesn't exist, the
+ server returns 404 Not Found for the group.
+
+ """
+
+ group_id = uuid.uuid4().hex
+
+ collection_url = (
+ '/projects/%(project_id)s/groups/%(group_id)s/roles' % {
+ 'project_id': self.project_id,
+ 'group_id': group_id})
+ member_url = '%(collection_url)s/%(role_id)s' % {
+ 'collection_url': collection_url,
+ 'role_id': self.role_id}
+
+ self.put(member_url, expected_status=404)
+
def test_crud_group_domain_role_grants(self):
collection_url = (
'/domains/%(domain_id)s/groups/%(group_id)s/roles' % {
@@ -799,6 +855,26 @@ class IdentityTestCase(test_v3.RestfulTestCase):
self.assertValidRoleListResponse(r, expected_length=0)
self.assertIn(collection_url, r.result['links']['self'])
+ def test_crud_group_domain_role_grants_no_group(self):
+ """Grant role on a domain to a group that doesn't exist, 404 result.
+
+ When granting a role on a domain to a group that doesn't exist, the
+ server returns 404 Not Found for the group.
+
+ """
+
+ group_id = uuid.uuid4().hex
+
+ collection_url = (
+ '/domains/%(domain_id)s/groups/%(group_id)s/roles' % {
+ 'domain_id': self.domain_id,
+ 'group_id': group_id})
+ member_url = '%(collection_url)s/%(role_id)s' % {
+ 'collection_url': collection_url,
+ 'role_id': self.role_id}
+
+ self.put(member_url, expected_status=404)
+
def test_get_role_assignments(self):
"""Call ``GET /role_assignments``.
@@ -1154,8 +1230,8 @@ class IdentityTestCase(test_v3.RestfulTestCase):
collection_url = (
'/role_assignments?user.id=%(user_id)s'
'&scope.project.id=%(project_id)s' % {
- 'user_id': self.user1['id'],
- 'project_id': self.project1['id']})
+ 'user_id': self.user1['id'],
+ 'project_id': self.project1['id']})
r = self.get(collection_url)
self.assertValidRoleAssignmentListResponse(r)
self.assertEqual(len(r.result.get('role_assignments')), 1)
@@ -1198,8 +1274,8 @@ class IdentityTestCase(test_v3.RestfulTestCase):
collection_url = (
'/role_assignments?effective&user.id=%(user_id)s'
'&scope.project.id=%(project_id)s' % {
- 'user_id': self.user1['id'],
- 'project_id': self.project1['id']})
+ 'user_id': self.user1['id'],
+ 'project_id': self.project1['id']})
r = self.get(collection_url)
self.assertValidRoleAssignmentListResponse(r)
self.assertEqual(len(r.result.get('role_assignments')), 2)
@@ -1314,8 +1390,8 @@ class IdentityInheritanceTestCase(test_v3.RestfulTestCase):
collection_url = (
'/role_assignments?user.id=%(user_id)s'
'&scope.domain.id=%(domain_id)s' % {
- 'user_id': user1['id'],
- 'domain_id': domain['id']})
+ 'user_id': user1['id'],
+ 'domain_id': domain['id']})
r = self.get(collection_url)
self.assertValidRoleAssignmentListResponse(r)
self.assertEqual(len(r.result.get('role_assignments')), 1)
@@ -1330,8 +1406,8 @@ class IdentityInheritanceTestCase(test_v3.RestfulTestCase):
collection_url = (
'/role_assignments?effective&user.id=%(user_id)s'
'&scope.project.id=%(project_id)s' % {
- 'user_id': user1['id'],
- 'project_id': project1['id']})
+ 'user_id': user1['id'],
+ 'project_id': project1['id']})
r = self.get(collection_url)
self.assertValidRoleAssignmentListResponse(r)
self.assertEqual(len(r.result.get('role_assignments')), 3)
@@ -1408,8 +1484,8 @@ class IdentityInheritanceTestCase(test_v3.RestfulTestCase):
collection_url = (
'/role_assignments?effective&user.id=%(user_id)s'
'&scope.project.id=%(project_id)s' % {
- 'user_id': user1['id'],
- 'project_id': project1['id']})
+ 'user_id': user1['id'],
+ 'project_id': project1['id']})
r = self.get(collection_url)
self.assertValidRoleAssignmentListResponse(r)
self.assertEqual(len(r.result.get('role_assignments')), 3)
@@ -1512,8 +1588,8 @@ class IdentityInheritanceTestCase(test_v3.RestfulTestCase):
collection_url = (
'/role_assignments?group.id=%(group_id)s'
'&scope.domain.id=%(domain_id)s' % {
- 'group_id': group1['id'],
- 'domain_id': domain['id']})
+ 'group_id': group1['id'],
+ 'domain_id': domain['id']})
r = self.get(collection_url)
self.assertValidRoleAssignmentListResponse(r)
self.assertEqual(len(r.result.get('role_assignments')), 1)
@@ -1528,8 +1604,8 @@ class IdentityInheritanceTestCase(test_v3.RestfulTestCase):
collection_url = (
'/role_assignments?effective&user.id=%(user_id)s'
'&scope.project.id=%(project_id)s' % {
- 'user_id': user1['id'],
- 'project_id': project1['id']})
+ 'user_id': user1['id'],
+ 'project_id': project1['id']})
r = self.get(collection_url)
self.assertValidRoleAssignmentListResponse(r)
self.assertEqual(len(r.result.get('role_assignments')), 3)
@@ -1706,22 +1782,22 @@ class TestV3toV2Methods(tests.TestCase):
def test_v3_to_v2_user_method(self):
- updated_user1 = self.identity_api.v3_to_v2_user(self.user1)
+ updated_user1 = controller.V2Controller.v3_to_v2_user(self.user1)
self.assertIs(self.user1, updated_user1)
self.assertDictEqual(self.user1, self.expected_user)
- updated_user2 = self.identity_api.v3_to_v2_user(self.user2)
+ updated_user2 = controller.V2Controller.v3_to_v2_user(self.user2)
self.assertIs(self.user2, updated_user2)
self.assertDictEqual(self.user2, self.expected_user_no_tenant_id)
- updated_user3 = self.identity_api.v3_to_v2_user(self.user3)
+ updated_user3 = controller.V2Controller.v3_to_v2_user(self.user3)
self.assertIs(self.user3, updated_user3)
self.assertDictEqual(self.user3, self.expected_user)
- updated_user4 = self.identity_api.v3_to_v2_user(self.user4)
+ updated_user4 = controller.V2Controller.v3_to_v2_user(self.user4)
self.assertIs(self.user4, updated_user4)
self.assertDictEqual(self.user4, self.expected_user_no_tenant_id)
def test_v3_to_v2_user_method_list(self):
user_list = [self.user1, self.user2, self.user3, self.user4]
- updated_list = self.identity_api.v3_to_v2_user(user_list)
+ updated_list = controller.V2Controller.v3_to_v2_user(user_list)
self.assertEqual(len(updated_list), len(user_list))
@@ -1765,10 +1841,10 @@ class TestV3toV2Methods(tests.TestCase):
self.assertDictEqual(ref, expected_ref)
-class UserChangingPasswordsTestCase(test_v3.RestfulTestCase):
+class UserSelfServiceChangingPasswordsTestCase(test_v3.RestfulTestCase):
def setUp(self):
- super(UserChangingPasswordsTestCase, self).setUp()
+ super(UserSelfServiceChangingPasswordsTestCase, self).setUp()
self.user_ref = self.new_user_ref(domain_id=self.domain['id'])
self.identity_api.create_user(self.user_ref['id'], self.user_ref)
self.token = self.get_request_token(self.user_ref['password'], 201)
diff --git a/keystone/tests/test_v3_oauth1.py b/keystone/tests/test_v3_oauth1.py
index 09664c480..7a01c2a11 100644
--- a/keystone/tests/test_v3_oauth1.py
+++ b/keystone/tests/test_v3_oauth1.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -15,17 +13,17 @@
# under the License.
import copy
-import os
import uuid
from six.moves import urllib
-from keystone.common.sql import migration
+from keystone.common.sql import migration_helpers
from keystone import config
from keystone import contrib
from keystone.contrib import oauth1
from keystone.contrib.oauth1 import controllers
from keystone import exception
+from keystone.openstack.common.db.sqlalchemy import migration
from keystone.openstack.common import importutils
from keystone.tests import test_v3
@@ -40,12 +38,11 @@ class OAuth1Tests(test_v3.RestfulTestCase):
def setup_database(self):
super(OAuth1Tests, self).setup_database()
- package_name = "%s.%s.migrate_repo" % (contrib.__name__,
- self.EXTENSION_NAME)
+ package_name = '.'.join((contrib.__name__, self.EXTENSION_NAME))
package = importutils.import_module(package_name)
- self.repo_path = os.path.abspath(os.path.dirname(package.__file__))
- migration.db_version_control(version=None, repo_path=self.repo_path)
- migration.db_sync(version=None, repo_path=self.repo_path)
+ abs_path = migration_helpers.find_migrate_repo(package)
+ migration.db_version_control(abs_path)
+ migration.db_sync(abs_path)
def setUp(self):
super(OAuth1Tests, self).setUp()
@@ -59,7 +56,7 @@ class OAuth1Tests(test_v3.RestfulTestCase):
resp = self.post(
'/OS-OAUTH1/consumers',
body={'consumer': ref})
- return resp.result.get('consumer')
+ return resp.result['consumer']
def _create_request_token(self, consumer, project_id):
endpoint = '/OS-OAUTH1/request_token'
@@ -105,19 +102,23 @@ class OAuth1Tests(test_v3.RestfulTestCase):
class ConsumerCRUDTests(OAuth1Tests):
- def _consumer_create(self, description=None, description_flag=True):
+ def _consumer_create(self, description=None, description_flag=True,
+ **kwargs):
if description_flag:
ref = {'description': description}
else:
ref = {}
+ if kwargs:
+ ref.update(kwargs)
resp = self.post(
'/OS-OAUTH1/consumers',
body={'consumer': ref})
- consumer = resp.result.get('consumer')
- consumer_id = consumer.get('id')
+ consumer = resp.result['consumer']
+ consumer_id = consumer['id']
self.assertEqual(consumer['description'], description)
self.assertIsNotNone(consumer_id)
- self.assertIsNotNone(consumer.get('secret'))
+ self.assertIsNotNone(consumer['secret'])
+ return consumer
def test_consumer_create(self):
description = uuid.uuid4().hex
@@ -129,72 +130,105 @@ class ConsumerCRUDTests(OAuth1Tests):
def test_consumer_create_none_desc_2(self):
self._consumer_create(description_flag=False)
+ def test_consumer_create_normalize_field(self):
+ # If a consumer is created with a field that has : or - in its name,
+ # the field name is normalized by converting those characters to _.
+ field_name = 'some:weird-field'
+ field_value = uuid.uuid4().hex
+ extra_fields = {field_name: field_value}
+ consumer = self._consumer_create(**extra_fields)
+ normalized_field_name = 'some_weird_field'
+ self.assertEqual(field_value, consumer[normalized_field_name])
+
def test_consumer_delete(self):
consumer = self._create_single_consumer()
- consumer_id = consumer.get('id')
- resp = self.delete('/OS-OAUTH1/consumers/%(consumer_id)s'
- % {'consumer_id': consumer_id})
+ consumer_id = consumer['id']
+ resp = self.delete('/OS-OAUTH1/consumers/%s' % consumer_id)
self.assertResponseStatus(resp, 204)
def test_consumer_get(self):
consumer = self._create_single_consumer()
- consumer_id = consumer.get('id')
- resp = self.get('/OS-OAUTH1/consumers/%(consumer_id)s'
- % {'consumer_id': consumer_id})
- self.assertEqual(resp.result.get('consumer').get('id'), consumer_id)
+ consumer_id = consumer['id']
+ resp = self.get('/OS-OAUTH1/consumers/%s' % consumer_id)
+ self.assertEqual(resp.result['consumer']['id'], consumer_id)
def test_consumer_list(self):
resp = self.get('/OS-OAUTH1/consumers')
- entities = resp.result.get('consumers')
+ entities = resp.result['consumers']
self.assertIsNotNone(entities)
- self.assertValidListLinks(resp.result.get('links'))
+ self.assertValidListLinks(resp.result['links'])
def test_consumer_update(self):
consumer = self._create_single_consumer()
- original_id = consumer.get('id')
- original_description = consumer.get('description')
- update_description = original_description + "_new"
+ original_id = consumer['id']
+ original_description = consumer['description']
+ update_description = original_description + '_new'
update_ref = {'description': update_description}
- update_resp = self.patch('/OS-OAUTH1/consumers/%(consumer_id)s'
- % {'consumer_id': original_id},
+ update_resp = self.patch('/OS-OAUTH1/consumers/%s' % original_id,
body={'consumer': update_ref})
- consumer = update_resp.result.get('consumer')
- self.assertEqual(consumer.get('description'), update_description)
- self.assertEqual(consumer.get('id'), original_id)
+ consumer = update_resp.result['consumer']
+ self.assertEqual(consumer['description'], update_description)
+ self.assertEqual(consumer['id'], original_id)
def test_consumer_update_bad_secret(self):
consumer = self._create_single_consumer()
- original_id = consumer.get('id')
+ original_id = consumer['id']
update_ref = copy.deepcopy(consumer)
update_ref['description'] = uuid.uuid4().hex
update_ref['secret'] = uuid.uuid4().hex
- self.patch('/OS-OAUTH1/consumers/%(consumer_id)s'
- % {'consumer_id': original_id},
+ self.patch('/OS-OAUTH1/consumers/%s' % original_id,
body={'consumer': update_ref},
expected_status=400)
def test_consumer_update_bad_id(self):
consumer = self._create_single_consumer()
- original_id = consumer.get('id')
- original_description = consumer.get('description')
+ original_id = consumer['id']
+ original_description = consumer['description']
update_description = original_description + "_new"
update_ref = copy.deepcopy(consumer)
update_ref['description'] = update_description
update_ref['id'] = update_description
- self.patch('/OS-OAUTH1/consumers/%(consumer_id)s'
- % {'consumer_id': original_id},
+ self.patch('/OS-OAUTH1/consumers/%s' % original_id,
body={'consumer': update_ref},
expected_status=400)
+ def test_consumer_update_normalize_field(self):
+ # If a consumer is updated with a field that has : or - in its name,
+ # the field name is normalized by converting those characters to _.
+ field1_name = 'some:weird-field'
+ field1_orig_value = uuid.uuid4().hex
+
+ extra_fields = {field1_name: field1_orig_value}
+ consumer = self._consumer_create(**extra_fields)
+ consumer_id = consumer['id']
+
+ field1_new_value = uuid.uuid4().hex
+
+ field2_name = 'weird:some-field'
+ field2_value = uuid.uuid4().hex
+
+ update_ref = {field1_name: field1_new_value,
+ field2_name: field2_value}
+
+ update_resp = self.patch('/OS-OAUTH1/consumers/%s' % consumer_id,
+ body={'consumer': update_ref})
+ consumer = update_resp.result['consumer']
+
+ normalized_field1_name = 'some_weird_field'
+ self.assertEqual(field1_new_value, consumer[normalized_field1_name])
+
+ normalized_field2_name = 'weird_some_field'
+ self.assertEqual(field2_value, consumer[normalized_field2_name])
+
def test_consumer_create_no_description(self):
resp = self.post('/OS-OAUTH1/consumers', body={'consumer': {}})
- consumer = resp.result.get('consumer')
- consumer_id = consumer.get('id')
- self.assertEqual(consumer.get('description'), None)
+ consumer = resp.result['consumer']
+ consumer_id = consumer['id']
+ self.assertIsNone(consumer['description'])
self.assertIsNotNone(consumer_id)
- self.assertIsNotNone(consumer.get('secret'))
+ self.assertIsNotNone(consumer['secret'])
def test_consumer_get_bad_id(self):
self.get('/OS-OAUTH1/consumers/%(consumer_id)s'
@@ -206,8 +240,8 @@ class OAuthFlowTests(OAuth1Tests):
def test_oauth_flow(self):
consumer = self._create_single_consumer()
- consumer_id = consumer.get('id')
- consumer_secret = consumer.get('secret')
+ consumer_id = consumer['id']
+ consumer_secret = consumer['secret']
self.consumer = {'key': consumer_id, 'secret': consumer_secret}
self.assertIsNotNone(self.consumer['secret'])
@@ -215,8 +249,8 @@ class OAuthFlowTests(OAuth1Tests):
self.project_id)
content = self.post(url, headers=headers)
credentials = urllib.parse.parse_qs(content.result)
- request_key = credentials.get('oauth_token')[0]
- request_secret = credentials.get('oauth_token_secret')[0]
+ request_key = credentials['oauth_token'][0]
+ request_secret = credentials['oauth_token_secret'][0]
self.request_token = oauth1.Token(request_key, request_secret)
self.assertIsNotNone(self.request_token.key)
@@ -230,16 +264,16 @@ class OAuthFlowTests(OAuth1Tests):
self.request_token)
content = self.post(url, headers=headers)
credentials = urllib.parse.parse_qs(content.result)
- access_key = credentials.get('oauth_token')[0]
- access_secret = credentials.get('oauth_token_secret')[0]
+ access_key = credentials['oauth_token'][0]
+ access_secret = credentials['oauth_token_secret'][0]
self.access_token = oauth1.Token(access_key, access_secret)
self.assertIsNotNone(self.access_token.key)
url, headers, body = self._get_oauth_token(self.consumer,
self.access_token)
content = self.post(url, headers=headers, body=body)
- self.keystone_token_id = content.headers.get('X-Subject-Token')
- self.keystone_token = content.result.get('token')
+ self.keystone_token_id = content.headers['X-Subject-Token']
+ self.keystone_token = content.result['token']
self.assertIsNotNone(self.keystone_token_id)
@@ -253,16 +287,16 @@ class AccessTokenCRUDTests(OAuthFlowTests):
def test_list_no_access_tokens(self):
resp = self.get('/users/%(user_id)s/OS-OAUTH1/access_tokens'
% {'user_id': self.user_id})
- entities = resp.result.get('access_tokens')
- self.assertTrue(len(entities) == 0)
- self.assertValidListLinks(resp.result.get('links'))
+ entities = resp.result['access_tokens']
+ self.assertEqual([], entities)
+ self.assertValidListLinks(resp.result['links'])
def test_get_single_access_token(self):
self.test_oauth_flow()
resp = self.get('/users/%(user_id)s/OS-OAUTH1/access_tokens/%(key)s'
% {'user_id': self.user_id,
'key': self.access_token.key})
- entity = resp.result.get('access_token')
+ entity = resp.result['access_token']
self.assertEqual(entity['id'], self.access_token.key)
self.assertEqual(entity['consumer_id'], self.consumer['key'])
@@ -277,9 +311,9 @@ class AccessTokenCRUDTests(OAuthFlowTests):
resp = self.get('/users/%(id)s/OS-OAUTH1/access_tokens/%(key)s/roles'
% {'id': self.user_id,
'key': self.access_token.key})
- entities = resp.result.get('roles')
- self.assertTrue(len(entities) > 0)
- self.assertValidListLinks(resp.result.get('links'))
+ entities = resp.result['roles']
+ self.assertTrue(entities)
+ self.assertValidListLinks(resp.result['links'])
def test_get_role_in_access_token(self):
self.test_oauth_flow()
@@ -287,7 +321,7 @@ class AccessTokenCRUDTests(OAuthFlowTests):
% {'id': self.user_id, 'key': self.access_token.key,
'role': self.role_id})
resp = self.get(url)
- entity = resp.result.get('role')
+ entity = resp.result['role']
self.assertEqual(entity['id'], self.role_id)
def test_get_role_in_access_token_dne(self):
@@ -302,9 +336,9 @@ class AccessTokenCRUDTests(OAuthFlowTests):
# List access_tokens should be > 0
resp = self.get('/users/%(user_id)s/OS-OAUTH1/access_tokens'
% {'user_id': self.user_id})
- entities = resp.result.get('access_tokens')
- self.assertTrue(len(entities) > 0)
- self.assertValidListLinks(resp.result.get('links'))
+ entities = resp.result['access_tokens']
+ self.assertTrue(entities)
+ self.assertValidListLinks(resp.result['links'])
# Delete access_token
resp = self.delete('/users/%(user)s/OS-OAUTH1/access_tokens/%(auth)s'
@@ -315,9 +349,9 @@ class AccessTokenCRUDTests(OAuthFlowTests):
# List access_token should be 0
resp = self.get('/users/%(user_id)s/OS-OAUTH1/access_tokens'
% {'user_id': self.user_id})
- entities = resp.result.get('access_tokens')
- self.assertTrue(len(entities) == 0)
- self.assertValidListLinks(resp.result.get('links'))
+ entities = resp.result['access_tokens']
+ self.assertEqual([], entities)
+ self.assertValidListLinks(resp.result['links'])
class AuthTokenTests(OAuthFlowTests):
@@ -373,8 +407,8 @@ class AuthTokenTests(OAuthFlowTests):
# List access_token should be 0
resp = self.get('/users/%(user_id)s/OS-OAUTH1/access_tokens'
% {'user_id': self.user_id})
- entities = resp.result.get('access_tokens')
- self.assertEqual(len(entities), 0)
+ entities = resp.result['access_tokens']
+ self.assertEqual([], entities)
# Check Keystone Token no longer exists
headers = {'X-Subject-Token': self.keystone_token_id,
@@ -446,15 +480,15 @@ class MaliciousOAuth1Tests(OAuth1Tests):
def test_bad_consumer_secret(self):
consumer = self._create_single_consumer()
- consumer_id = consumer.get('id')
+ consumer_id = consumer['id']
consumer = {'key': consumer_id, 'secret': uuid.uuid4().hex}
url, headers = self._create_request_token(consumer, self.project_id)
self.post(url, headers=headers, expected_status=401)
def test_bad_request_token_key(self):
consumer = self._create_single_consumer()
- consumer_id = consumer.get('id')
- consumer_secret = consumer.get('secret')
+ consumer_id = consumer['id']
+ consumer_secret = consumer['secret']
consumer = {'key': consumer_id, 'secret': consumer_secret}
url, headers = self._create_request_token(consumer, self.project_id)
self.post(url, headers=headers)
@@ -464,15 +498,15 @@ class MaliciousOAuth1Tests(OAuth1Tests):
def test_bad_verifier(self):
consumer = self._create_single_consumer()
- consumer_id = consumer.get('id')
- consumer_secret = consumer.get('secret')
+ consumer_id = consumer['id']
+ consumer_secret = consumer['secret']
consumer = {'key': consumer_id, 'secret': consumer_secret}
url, headers = self._create_request_token(consumer, self.project_id)
content = self.post(url, headers=headers)
credentials = urllib.parse.parse_qs(content.result)
- request_key = credentials.get('oauth_token')[0]
- request_secret = credentials.get('oauth_token_secret')[0]
+ request_key = credentials['oauth_token'][0]
+ request_secret = credentials['oauth_token_secret'][0]
request_token = oauth1.Token(request_key, request_secret)
url = self._authorize_request_token(request_key)
@@ -487,14 +521,14 @@ class MaliciousOAuth1Tests(OAuth1Tests):
def test_bad_authorizing_roles(self):
consumer = self._create_single_consumer()
- consumer_id = consumer.get('id')
- consumer_secret = consumer.get('secret')
+ consumer_id = consumer['id']
+ consumer_secret = consumer['secret']
consumer = {'key': consumer_id, 'secret': consumer_secret}
url, headers = self._create_request_token(consumer, self.project_id)
content = self.post(url, headers=headers)
credentials = urllib.parse.parse_qs(content.result)
- request_key = credentials.get('oauth_token')[0]
+ request_key = credentials['oauth_token'][0]
self.assignment_api.remove_role_from_user_and_project(
self.user_id, self.project_id, self.role_id)
@@ -507,8 +541,8 @@ class MaliciousOAuth1Tests(OAuth1Tests):
CONF.oauth1.request_token_duration = -1
consumer = self._create_single_consumer()
- consumer_id = consumer.get('id')
- consumer_secret = consumer.get('secret')
+ consumer_id = consumer['id']
+ consumer_secret = consumer['secret']
self.consumer = {'key': consumer_id, 'secret': consumer_secret}
self.assertIsNotNone(self.consumer['key'])
@@ -516,8 +550,8 @@ class MaliciousOAuth1Tests(OAuth1Tests):
self.project_id)
content = self.post(url, headers=headers)
credentials = urllib.parse.parse_qs(content.result)
- request_key = credentials.get('oauth_token')[0]
- request_secret = credentials.get('oauth_token_secret')[0]
+ request_key = credentials['oauth_token'][0]
+ request_secret = credentials['oauth_token_secret'][0]
self.request_token = oauth1.Token(request_key, request_secret)
self.assertIsNotNone(self.request_token.key)
@@ -528,8 +562,8 @@ class MaliciousOAuth1Tests(OAuth1Tests):
def test_expired_creating_keystone_token(self):
CONF.oauth1.access_token_duration = -1
consumer = self._create_single_consumer()
- consumer_id = consumer.get('id')
- consumer_secret = consumer.get('secret')
+ consumer_id = consumer['id']
+ consumer_secret = consumer['secret']
self.consumer = {'key': consumer_id, 'secret': consumer_secret}
self.assertIsNotNone(self.consumer['key'])
@@ -537,8 +571,8 @@ class MaliciousOAuth1Tests(OAuth1Tests):
self.project_id)
content = self.post(url, headers=headers)
credentials = urllib.parse.parse_qs(content.result)
- request_key = credentials.get('oauth_token')[0]
- request_secret = credentials.get('oauth_token_secret')[0]
+ request_key = credentials['oauth_token'][0]
+ request_secret = credentials['oauth_token_secret'][0]
self.request_token = oauth1.Token(request_key, request_secret)
self.assertIsNotNone(self.request_token.key)
@@ -552,8 +586,8 @@ class MaliciousOAuth1Tests(OAuth1Tests):
self.request_token)
content = self.post(url, headers=headers)
credentials = urllib.parse.parse_qs(content.result)
- access_key = credentials.get('oauth_token')[0]
- access_secret = credentials.get('oauth_token_secret')[0]
+ access_key = credentials['oauth_token'][0]
+ access_secret = credentials['oauth_token_secret'][0]
self.access_token = oauth1.Token(access_key, access_secret)
self.assertIsNotNone(self.access_token.key)
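[Editor's note: the consumer normalize-field tests above pin down only observable behavior — ':' and '-' in extra field names become '_'. A minimal sketch of that behavior, not keystone's actual helper:

    def normalize_field_name(name):
        # ':' and '-' are not storable as-is, so both map to '_'.
        return name.replace(':', '_').replace('-', '_')

    assert normalize_field_name('some:weird-field') == 'some_weird_field'
    assert normalize_field_name('weird:some-field') == 'weird_some_field'
]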
diff --git a/keystone/tests/test_v3_policy.py b/keystone/tests/test_v3_policy.py
index b49f4b38c..e8eecaf39 100644
--- a/keystone/tests/test_v3_policy.py
+++ b/keystone/tests/test_v3_policy.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/tests/test_v3_protection.py b/keystone/tests/test_v3_protection.py
index 864cffccf..4d2ec535a 100644
--- a/keystone/tests/test_v3_protection.py
+++ b/keystone/tests/test_v3_protection.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
# Copyright 2013 IBM Corp.
#
diff --git a/keystone/tests/test_versions.py b/keystone/tests/test_versions.py
index 2bbeaca5a..1e8ed4f57 100644
--- a/keystone/tests/test_versions.py
+++ b/keystone/tests/test_versions.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/keystone/tests/test_wsgi.py b/keystone/tests/test_wsgi.py
index 73868b586..97b4e8c63 100644
--- a/keystone/tests/test_wsgi.py
+++ b/keystone/tests/test_wsgi.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -16,7 +14,11 @@
from babel import localedata
import gettext
+import mock
+import socket
+import webob
+from keystone.common import environment
from keystone.common import wsgi
from keystone import exception
from keystone.openstack.common.fixture import moxstubout
@@ -36,7 +38,7 @@ class BaseWSGITest(tests.TestCase):
super(BaseWSGITest, self).setUp()
def _make_request(self, url='/'):
- req = wsgi.Request.blank(url)
+ req = webob.Request.blank(url)
args = {'action': 'index', 'controller': None}
req.environ['wsgiorg.routing_args'] = [None, args]
return req
@@ -217,8 +219,8 @@ class LocalizedResponseTest(tests.TestCase):
def test_request_match_default(self):
# The default language if no Accept-Language is provided is None
- req = wsgi.Request.blank('/')
- self.assertIsNone(req.best_match_language())
+ req = webob.Request.blank('/')
+ self.assertIsNone(wsgi.best_match_language(req))
def test_request_match_language_expected(self):
# If Accept-Language is a supported language, best_match_language()
@@ -226,8 +228,8 @@ class LocalizedResponseTest(tests.TestCase):
self._set_expected_languages(all_locales=['it'])
- req = wsgi.Request.blank('/', headers={'Accept-Language': 'it'})
- self.assertEqual(req.best_match_language(), 'it')
+ req = webob.Request.blank('/', headers={'Accept-Language': 'it'})
+ self.assertEqual(wsgi.best_match_language(req), 'it')
def test_request_match_language_unexpected(self):
# If Accept-Language is a language we do not support,
@@ -235,8 +237,8 @@ class LocalizedResponseTest(tests.TestCase):
self._set_expected_languages(all_locales=['it'])
- req = wsgi.Request.blank('/', headers={'Accept-Language': 'zh'})
- self.assertIsNone(req.best_match_language())
+ req = webob.Request.blank('/', headers={'Accept-Language': 'zh'})
+ self.assertIsNone(wsgi.best_match_language(req))
def test_static_translated_string_is_Message(self):
# Statically created message strings are Message objects so that they
@@ -249,3 +251,62 @@ class LocalizedResponseTest(tests.TestCase):
# are lazy-translated.
self.assertIsInstance(_('The resource could not be found.'),
gettextutils.Message)
+
+
+class ServerTest(tests.TestCase):
+
+ def setUp(self):
+ super(ServerTest, self).setUp()
+ environment.use_eventlet()
+ self.host = '127.0.0.1'
+ self.port = '1234'
+
+ @mock.patch('eventlet.listen')
+ @mock.patch('socket.getaddrinfo')
+ def test_keepalive_unset(self, mock_getaddrinfo, mock_listen):
+ mock_getaddrinfo.return_value = [(1, 2, 3, 4, 5)]
+ mock_sock = mock.Mock()
+ mock_sock.setsockopt = mock.Mock()
+
+ mock_listen.return_value = mock_sock
+ server = environment.Server(mock.MagicMock(), host=self.host,
+ port=self.port)
+ server.start()
+ self.assertTrue(mock_listen.called)
+ self.assertFalse(mock_sock.setsockopt.called)
+
+ @mock.patch('eventlet.listen')
+ @mock.patch('socket.getaddrinfo')
+ def test_keepalive_set(self, mock_getaddrinfo, mock_listen):
+ mock_getaddrinfo.return_value = [(1, 2, 3, 4, 5)]
+ mock_sock = mock.Mock()
+ mock_sock.setsockopt = mock.Mock()
+
+ mock_listen.return_value = mock_sock
+ server = environment.Server(mock.MagicMock(), host=self.host,
+ port=self.port, keepalive=True)
+ server.start()
+ mock_sock.setsockopt.assert_called_once_with(socket.SOL_SOCKET,
+ socket.SO_KEEPALIVE,
+ 1)
+ self.assertTrue(mock_listen.called)
+
+ @mock.patch('eventlet.listen')
+ @mock.patch('socket.getaddrinfo')
+ def test_keepalive_and_keepidle_set(self, mock_getaddrinfo, mock_listen):
+ mock_getaddrinfo.return_value = [(1, 2, 3, 4, 5)]
+ mock_sock = mock.Mock()
+ mock_sock.setsockopt = mock.Mock()
+
+ mock_listen.return_value = mock_sock
+ server = environment.Server(mock.MagicMock(), host=self.host,
+ port=self.port, keepalive=True,
+ keepidle=1)
+ server.start()
+ self.assertEqual(mock_sock.setsockopt.call_count, 2)
+        # Check the last set of call args, i.e. those for keepidle.
+ mock_sock.setsockopt.assert_called_with(socket.IPPROTO_TCP,
+ socket.TCP_KEEPIDLE,
+ 1)
+
+ self.assertTrue(mock_listen.called)
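
The three keepalive tests above pin down the behavior expected from
environment.Server.start(). A minimal sketch of that logic, with the
attribute names (keepalive, keepidle) assumed from the tests rather than
taken from the server code itself:

    import socket
    import eventlet

    def start(self):
        # Bind the listening socket (the tests patch eventlet.listen).
        sock = eventlet.listen((self.host, self.port))
        if self.keepalive:
            # A single setsockopt call when only keepalive is requested.
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
            # TCP_KEEPIDLE is platform-specific (Linux); the tests expect
            # a second call only when keepidle is provided.
            if self.keepidle is not None and hasattr(socket, 'TCP_KEEPIDLE'):
                sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE,
                                self.keepidle)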
diff --git a/keystone/tests/unit/__init__.py b/keystone/tests/unit/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/keystone/tests/unit/__init__.py
diff --git a/keystone/tests/unit/common/__init__.py b/keystone/tests/unit/common/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/keystone/tests/unit/common/__init__.py
diff --git a/keystone/tests/unit/common/test_sql_core.py b/keystone/tests/unit/common/test_sql_core.py
new file mode 100644
index 000000000..871c73aed
--- /dev/null
+++ b/keystone/tests/unit/common/test_sql_core.py
@@ -0,0 +1,52 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from sqlalchemy.ext import declarative
+import testtools
+
+from keystone.common import sql
+from keystone.tests import utils
+
+
+ModelBase = declarative.declarative_base()
+
+
+class TestModel(ModelBase, sql.ModelDictMixin):
+ __tablename__ = 'testmodel'
+ id = sql.Column(sql.String(64), primary_key=True)
+ text = sql.Column(sql.String(64), nullable=False)
+
+
+class TestModelDictMixin(testtools.TestCase):
+
+ def test_creating_a_model_instance_from_a_dict(self):
+ d = {'id': utils.new_uuid(), 'text': utils.new_uuid()}
+ m = TestModel.from_dict(d)
+ self.assertEqual(m.id, d['id'])
+ self.assertEqual(m.text, d['text'])
+
+ def test_creating_a_dict_from_a_model_instance(self):
+ m = TestModel(id=utils.new_uuid(), text=utils.new_uuid())
+ d = m.to_dict()
+ self.assertEqual(m.id, d['id'])
+ self.assertEqual(m.text, d['text'])
+
+ def test_creating_a_model_instance_from_an_invalid_dict(self):
+ d = {'id': utils.new_uuid(), 'text': utils.new_uuid(), 'extra': None}
+ self.assertRaises(TypeError, TestModel.from_dict, d)
+
+ def test_creating_a_dict_from_a_model_instance_that_has_extra_attrs(self):
+ expected = {'id': utils.new_uuid(), 'text': utils.new_uuid()}
+ m = TestModel(id=expected['id'], text=expected['text'])
+ m.extra = 'this should not be in the dictionary'
+ self.assertEqual(m.to_dict(), expected)
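
The four tests above fully specify the ModelDictMixin contract: from_dict()
feeds the dictionary straight into the model constructor (so unexpected keys
raise TypeError), while to_dict() walks only the mapped columns (so ad-hoc
instance attributes are dropped). A plausible implementation, sketched from
the tests rather than copied from keystone.common.sql:

    class ModelDictMixin(object):

        @classmethod
        def from_dict(cls, d):
            """Return a model instance constructed from a dictionary."""
            # Unknown keys raise TypeError via the constructor, which is
            # exactly what the invalid-dict test asserts.
            return cls(**d)

        def to_dict(self):
            """Return the model's mapped columns as a dictionary."""
            # Only mapped columns are included, so extra instance
            # attributes never leak into the result.
            names = [column.name for column in self.__table__.columns]
            return dict((name, getattr(self, name)) for name in names)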
diff --git a/keystone/tests/unit/identity/__init__.py b/keystone/tests/unit/identity/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/keystone/tests/unit/identity/__init__.py
diff --git a/keystone/tests/unit/identity/test_core.py b/keystone/tests/unit/identity/test_core.py
new file mode 100644
index 000000000..2c1bf1193
--- /dev/null
+++ b/keystone/tests/unit/identity/test_core.py
@@ -0,0 +1,61 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Unit tests for core identity behavior."""
+
+import os
+import uuid
+
+import mock
+import testtools
+
+from keystone import config
+from keystone import exception
+from keystone import identity
+from keystone import tests
+
+
+CONF = config.CONF
+
+
+class TestDomainConfigs(testtools.TestCase):
+
+ def setUp(self):
+ super(TestDomainConfigs, self).setUp()
+ self.addCleanup(CONF.reset)
+
+ self.tmp_dir = tests.dirs.tmp()
+ CONF.set_override('domain_config_dir', self.tmp_dir, 'identity')
+
+ def test_config_for_nonexistent_domain(self):
+        """A config file for a non-existent domain is ignored.
+
+        There are no assertions in this test because there are no side
+        effects. If there is a config file for a domain that does not
+        exist, it is simply ignored.
+
+ """
+ domain_id = uuid.uuid4().hex
+ domain_config_filename = os.path.join(self.tmp_dir,
+ 'keystone.%s.conf' % domain_id)
+        self.addCleanup(os.remove, domain_config_filename)
+ with open(domain_config_filename, 'w'):
+            pass  # create an empty config file
+
+ e = exception.DomainNotFound(domain_id=domain_id)
+ mock_assignment_api = mock.Mock()
+ mock_assignment_api.get_domain_by_name.side_effect = e
+
+ domain_config = identity.DomainConfigs()
+ fake_standard_driver = None
+ domain_config.setup_domain_drivers(fake_standard_driver,
+ mock_assignment_api)
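
The test above documents the intended behavior of
identity.DomainConfigs.setup_domain_drivers(): each keystone.<domain>.conf
file found under the [identity] domain_config_dir is mapped back to a domain
via assignment_api.get_domain_by_name(), and a file whose domain cannot be
found is skipped rather than treated as fatal. A rough sketch of that scan,
with the helper _setup_domain_driver invented for illustration:

    import os

    def setup_domain_drivers(self, standard_driver, assignment_api):
        conf_dir = CONF.identity.domain_config_dir
        for fname in sorted(os.listdir(conf_dir)):
            if not (fname.startswith('keystone.') and
                    fname.endswith('.conf')):
                continue
            domain_name = fname[len('keystone.'):-len('.conf')]
            try:
                domain_ref = assignment_api.get_domain_by_name(domain_name)
            except exception.DomainNotFound:
                # A config file for a non-existent domain is ignored.
                continue
            self._setup_domain_driver(conf_dir, fname, domain_ref,
                                      standard_driver)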
diff --git a/keystone/tests/utils.py b/keystone/tests/utils.py
new file mode 100644
index 000000000..b082faa46
--- /dev/null
+++ b/keystone/tests/utils.py
@@ -0,0 +1,20 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Useful utilities for tests."""
+
+import uuid
+
+
+def new_uuid():
+ """Return a string UUID."""
+ return uuid.uuid4().hex
diff --git a/keystone/token/__init__.py b/keystone/token/__init__.py
index a120002c4..3b9c0f69d 100644
--- a/keystone/token/__init__.py
+++ b/keystone/token/__init__.py
@@ -1,4 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
# flake8: noqa
# Copyright 2012 OpenStack Foundation
diff --git a/keystone/token/backends/memcache.py b/keystone/token/backends/memcache.py
index f641e367f..a08e78dfc 100644
--- a/keystone/token/backends/memcache.py
+++ b/keystone/token/backends/memcache.py
@@ -1,5 +1,6 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
+# -*- coding: utf-8 -*-
+# Copyright 2013 Metacloud, Inc.
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -14,219 +15,18 @@
# License for the specific language governing permissions and limitations
# under the License.
-from __future__ import absolute_import
-import copy
-
-import memcache
-
-from keystone.common import utils
-from keystone import config
-from keystone import exception
-from keystone.openstack.common import jsonutils
-from keystone.openstack.common import log
-from keystone.openstack.common import timeutils
-from keystone import token
+from keystone.common import config
+from keystone.token.backends import kvs
CONF = config.CONF
-LOG = log.getLogger(__name__)
-
-
-class Token(token.Driver):
- revocation_key = 'revocation-list'
-
- def __init__(self, client=None):
- self._memcache_client = client
-
- @property
- def client(self):
- return self._memcache_client or self._get_memcache_client()
-
- def _get_memcache_client(self):
- memcache_servers = CONF.memcache.servers
- # NOTE(morganfainberg): The memcache client library for python is NOT
- # thread safe and should not be passed between threads. This is highly
- # specific to the cas() (compare and set) methods and the caching of
- # the previous value(s). It appears greenthread should ensure there is
- # a single data structure per spawned greenthread.
- self._memcache_client = memcache.Client(memcache_servers, debug=0,
- cache_cas=True)
- return self._memcache_client
-
- def _prefix_token_id(self, token_id):
- return 'token-%s' % token_id.encode('utf-8')
-
- def _prefix_user_id(self, user_id):
- return 'usertokens-%s' % user_id.encode('utf-8')
-
- def get_token(self, token_id):
- if token_id is None:
- raise exception.TokenNotFound(token_id='')
- ptk = self._prefix_token_id(token_id)
- token_ref = self.client.get(ptk)
- if token_ref is None:
- raise exception.TokenNotFound(token_id=token_id)
-
- return token_ref
-
- def create_token(self, token_id, data):
- data_copy = copy.deepcopy(data)
- ptk = self._prefix_token_id(token_id)
- if not data_copy.get('expires'):
- data_copy['expires'] = token.default_expire_time()
- if not data_copy.get('user_id'):
- data_copy['user_id'] = data_copy['user']['id']
- kwargs = {}
- if data_copy['expires'] is not None:
- expires_ts = utils.unixtime(data_copy['expires'])
- kwargs['time'] = expires_ts
- self.client.set(ptk, data_copy, **kwargs)
- if 'id' in data['user']:
- token_data = jsonutils.dumps(token_id)
- user_id = data['user']['id']
- user_key = self._prefix_user_id(user_id)
- # Append the new token_id to the token-index-list stored in the
- # user-key within memcache.
- self._update_user_list_with_cas(user_key, token_data)
- return copy.deepcopy(data_copy)
-
- def _update_user_list_with_cas(self, user_key, token_id):
- cas_retry = 0
- max_cas_retry = CONF.memcache.max_compare_and_set_retry
- current_time = timeutils.normalize_time(
- timeutils.parse_isotime(timeutils.isotime()))
-
- self.client.reset_cas()
-
- while cas_retry <= max_cas_retry:
- # NOTE(morganfainberg): cas or "compare and set" is a function of
- # memcache. It will return false if the value has changed since the
- # last call to client.gets(). This is the memcache supported method
- # of avoiding race conditions on set(). Memcache is already atomic
- # on the back-end and serializes operations.
- #
- # cas_retry is for tracking our iterations before we give up (in
- # case memcache is down or something horrible happens we don't
- # iterate forever trying to compare and set the new value.
- cas_retry += 1
- record = self.client.gets(user_key)
- filtered_list = []
-
- if record is not None:
- token_list = jsonutils.loads('[%s]' % record)
- for token_i in token_list:
- ptk = self._prefix_token_id(token_i)
- token_ref = self.client.get(ptk)
- if not token_ref:
- # skip tokens that do not exist in memcache
- continue
-
- if 'expires' in token_ref:
- expires_at = timeutils.normalize_time(
- token_ref['expires'])
- if expires_at < current_time:
- # skip tokens that are expired.
- continue
-
- # Add the still valid token_id to the list.
- filtered_list.append(jsonutils.dumps(token_i))
- # Add the new token_id to the list.
- filtered_list.append(token_id)
-
- # Use compare-and-set (cas) to set the new value for the
- # token-index-list for the user-key. Cas is used to prevent race
- # conditions from causing the loss of valid token ids from this
- # list.
- if self.client.cas(user_key, ','.join(filtered_list)):
- msg = _('Successful set of token-index-list for user-key '
- '"%(user_key)s", #%(count)d records')
- LOG.debug(msg, {'user_key': user_key,
- 'count': len(filtered_list)})
- return filtered_list
-
- # The cas function will return true if it succeeded or false if it
- # failed for any reason, including memcache server being down, cas
- # id changed since gets() called (the data changed between when
- # this loop started and this point, etc.
- error_msg = _('Failed to set token-index-list for user-key '
- '"%(user_key)s". Attempt %(cas_retry)d of '
- '%(cas_retry_max)d')
- LOG.debug(error_msg,
- {'user_key': user_key,
- 'cas_retry': cas_retry,
- 'cas_retry_max': max_cas_retry})
-
- # Exceeded the maximum retry attempts.
- error_msg = _('Unable to add token user list')
- raise exception.UnexpectedError(error_msg)
-
- def _add_to_revocation_list(self, data):
- data_json = jsonutils.dumps(data)
- if not self.client.append(self.revocation_key, ',%s' % data_json):
- if not self.client.add(self.revocation_key, data_json):
- if not self.client.append(self.revocation_key,
- ',%s' % data_json):
- msg = _('Unable to add token to revocation list.')
- raise exception.UnexpectedError(msg)
-
- def delete_token(self, token_id):
- # Test for existence
- data = self.get_token(token_id)
- ptk = self._prefix_token_id(token_id)
- result = self.client.delete(ptk)
- self._add_to_revocation_list(data)
- return result
-
- def delete_tokens(self, user_id, tenant_id=None, trust_id=None,
- consumer_id=None):
- return super(Token, self).delete_tokens(
- user_id=user_id,
- tenant_id=tenant_id,
- trust_id=trust_id,
- consumer_id=consumer_id,
- )
-
- def _list_tokens(self, user_id, tenant_id=None, trust_id=None,
- consumer_id=None):
- tokens = []
- user_key = self._prefix_user_id(user_id)
- user_record = self.client.get(user_key) or ""
- token_list = jsonutils.loads('[%s]' % user_record)
- for token_id in token_list:
- ptk = self._prefix_token_id(token_id)
- token_ref = self.client.get(ptk)
- if token_ref:
- if tenant_id is not None:
- tenant = token_ref.get('tenant')
- if not tenant:
- continue
- if tenant.get('id') != tenant_id:
- continue
- if trust_id is not None:
- trust = token_ref.get('trust_id')
- if not trust:
- continue
- if trust != trust_id:
- continue
- if consumer_id is not None:
- try:
- oauth = token_ref['token_data']['token']['OS-OAUTH1']
- if oauth.get('consumer_id') != consumer_id:
- continue
- except KeyError:
- continue
-
- tokens.append(token_id)
- return tokens
- def list_revoked_tokens(self):
- list_json = self.client.get(self.revocation_key)
- if list_json:
- return jsonutils.loads('[%s]' % list_json)
- return []
+class Token(kvs.Token):
+ kvs_backend = 'openstack.kvs.Memcached'
- def flush_expired_tokens(self):
- """Archive or delete tokens that have expired.
- """
- raise exception.NotImplemented()
+ def __init__(self, *args, **kwargs):
+ kwargs['no_expiry_keys'] = [self.revocation_key]
+ kwargs['memcached_expire_time'] = CONF.token.expiration
+ kwargs['url'] = CONF.memcache.servers
+ super(Token, self).__init__(*args, **kwargs)
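
The NOTE blocks in the removed backend describe memcache's compare-and-set
(cas) pattern: gets() records a cas id for a key, cas() then fails if the
value changed in between, and the caller retries a bounded number of times.
For reference, the bare pattern with python-memcached looks roughly like
this (a sketch of the technique, not keystone code):

    import memcache

    client = memcache.Client(['127.0.0.1:11211'], cache_cas=True)

    def append_with_cas(key, item, max_retries=5):
        for _ in range(max_retries):
            record = client.gets(key)      # remembers the cas id for key
            if record is None:
                # add() only succeeds if the key still does not exist.
                if client.add(key, item):
                    return
            elif client.cas(key, record + ',' + item):
                # cas() returns False if the value changed since gets().
                return
        raise RuntimeError('compare-and-set retries exhausted')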
diff --git a/keystone/token/backends/sql.py b/keystone/token/backends/sql.py
index 89c9cdd97..8597f7bc2 100644
--- a/keystone/token/backends/sql.py
+++ b/keystone/token/backends/sql.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -38,7 +36,7 @@ class TokenModel(sql.ModelBase, sql.DictBase):
)
-class Token(sql.Base, token.Driver):
+class Token(token.Driver):
# Public interface
def get_token(self, token_id):
if token_id is None:
diff --git a/keystone/token/controllers.py b/keystone/token/controllers.py
index 05be38a54..593196862 100644
--- a/keystone/token/controllers.py
+++ b/keystone/token/controllers.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -83,8 +81,6 @@ class Auth(controller.V2Controller):
raise exception.ValidationError(attribute='auth',
target='request body')
- auth_token_data = None
-
if "token" in auth:
# Try to authenticate using a token
auth_info = self._authenticate_token(
@@ -106,7 +102,7 @@ class Auth(controller.V2Controller):
# The user_ref is encoded into the auth_token_data which is returned as
# part of the token data. The token provider doesn't care about the
# format.
- user_ref = self.identity_api.v3_to_v2_user(user_ref)
+ user_ref = self.v3_to_v2_user(user_ref)
if tenant_ref:
tenant_ref = self.filter_domain_id(tenant_ref)
auth_token_data = self._get_auth_token_data(user_ref,
@@ -267,10 +263,11 @@ class Auth(controller.V2Controller):
try:
user_ref = self.identity_api.authenticate(
+ context,
user_id=user_id,
password=password)
except AssertionError as e:
- raise exception.Unauthorized(e)
+ raise exception.Unauthorized(e.args[0])
metadata_ref = {}
tenant_id = self._get_project_id_from_auth(auth)
@@ -345,25 +342,6 @@ class Auth(controller.V2Controller):
raise exception.Unauthorized(e)
return tenant_id
- def _get_domain_id_from_auth(self, auth):
- """Extract domain information from v3 auth dict.
-
- Returns a valid domain_id if it exists, or None if not specified.
- """
- # FIXME(henry-nash): This is a placeholder that needs to be
- # only called in the v3 context, and the auth.get calls
- # converted to the v3 format
- domain_id = auth.get('domainId', None)
- domain_name = auth.get('domainName', None)
- if domain_name:
- try:
- domain_ref = self.assignment_api.get_domain_by_name(
- domain_name)
- domain_id = domain_ref['id']
- except exception.DomainNotFound as e:
- raise exception.Unauthorized(e)
- return domain_id
-
def _get_project_roles_and_ref(self, user_id, tenant_id):
"""Returns the project roles for this user, and the project ref."""
diff --git a/keystone/token/core.py b/keystone/token/core.py
index 829c69725..adfc56d6c 100644
--- a/keystone/token/core.py
+++ b/keystone/token/core.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/token/provider.py b/keystone/token/provider.py
index d6c797ca4..81f7125d8 100644
--- a/keystone/token/provider.py
+++ b/keystone/token/provider.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -175,7 +173,8 @@ class Manager(manager.Manager):
return self.driver.validate_v3_token(token_id)
def _is_valid_token(self, token):
- # Verify the token has not expired.
+        """Verify that the token is well formed and has not expired."""
+
current_time = timeutils.normalize_time(timeutils.utcnow())
try:
@@ -195,11 +194,7 @@ class Manager(manager.Manager):
LOG.exception(_('Unexpected error or malformed token determining '
'token expiry: %s'), token)
- # FIXME(morganfainberg): This error message needs to be updated to
- # reflect the token couldn't be found, but this change needs to wait
- # until Icehouse due to string freeze in Havana. This should be:
- # "Failed to find valid token" or something similar.
- raise exception.TokenNotFound(_('Failed to validate token'))
+ raise exception.TokenNotFound(_("The token is malformed or expired."))
def _token_belongs_to(self, token, belongs_to):
"""Check if the token belongs to the right tenant.
diff --git a/keystone/token/providers/common.py b/keystone/token/providers/common.py
index 48925ac6d..9551720cf 100644
--- a/keystone/token/providers/common.py
+++ b/keystone/token/providers/common.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -139,7 +137,7 @@ class V3TokenDataHelper(object):
self.trust_api = trust.Manager()
def _get_filtered_domain(self, domain_id):
- domain_ref = self.identity_api.get_domain(domain_id)
+ domain_ref = self.assignment_api.get_domain(domain_id)
return {'id': domain_ref['id'], 'name': domain_ref['name']}
def _get_filtered_project(self, project_id):
diff --git a/keystone/token/providers/pki.py b/keystone/token/providers/pki.py
index 4a1763162..72ba009b6 100644
--- a/keystone/token/providers/pki.py
+++ b/keystone/token/providers/pki.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/token/providers/uuid.py b/keystone/token/providers/uuid.py
index a2e94a9d9..5ab4d6b71 100644
--- a/keystone/token/providers/uuid.py
+++ b/keystone/token/providers/uuid.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/token/routers.py b/keystone/token/routers.py
index 51c247bab..b106cb7ac 100644
--- a/keystone/token/routers.py
+++ b/keystone/token/routers.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/trust/__init__.py b/keystone/trust/__init__.py
index aeda21037..327b1cfd2 100644
--- a/keystone/trust/__init__.py
+++ b/keystone/trust/__init__.py
@@ -1,4 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
# flake8: noqa
# Copyright 2012 OpenStack Foundation
diff --git a/keystone/trust/backends/kvs.py b/keystone/trust/backends/kvs.py
index bf49d358a..f8c124237 100644
--- a/keystone/trust/backends/kvs.py
+++ b/keystone/trust/backends/kvs.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/keystone/trust/backends/sql.py b/keystone/trust/backends/sql.py
index beaea6e5d..f79962420 100644
--- a/keystone/trust/backends/sql.py
+++ b/keystone/trust/backends/sql.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -44,7 +42,7 @@ class TrustRole(sql.ModelBase):
role_id = sql.Column(sql.String(64), primary_key=True, nullable=False)
-class Trust(sql.Base, trust.Driver):
+class Trust(trust.Driver):
@sql.handle_conflicts(conflict_type='trust')
def create_trust(self, trust_id, trust, roles):
session = db_session.get_session()
diff --git a/keystone/trust/controllers.py b/keystone/trust/controllers.py
index d4a8831d2..e80d53301 100644
--- a/keystone/trust/controllers.py
+++ b/keystone/trust/controllers.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -55,18 +53,12 @@ class TrustV3(controller.V3Controller):
@classmethod
def base_url(cls, path=None):
- endpoint = CONF.public_endpoint % CONF
-
- # allow a missing trailing slash in the config
- if endpoint[-1] != '/':
- endpoint += '/'
-
- url = endpoint + 'v3/OS-TRUST'
+        """Construct a path and pass it to the V3Controller.base_url method."""
- if path:
- return url + path
- else:
- return url + '/' + cls.collection_name
+ # NOTE(stevemar): Overriding path to /OS-TRUST/trusts so that
+ # V3Controller.base_url handles setting the self link correctly.
+ path = '/OS-TRUST/' + cls.collection_name
+ return controller.V3Controller.base_url(path=path)
def _get_user_id(self, context):
if 'token_id' in context:
@@ -146,9 +138,7 @@ class TrustV3(controller.V3Controller):
user_id = self._get_user_id(context)
_trustor_only(context, trust, user_id)
#confirm that the trustee exists
- trustee_ref = self.identity_api.get_user(trust['trustee_user_id'])
- if not trustee_ref:
- raise exception.UserNotFound(user_id=trust['trustee_user_id'])
+ self.identity_api.get_user(trust['trustee_user_id'])
all_roles = self.assignment_api.list_roles()
clean_roles = self._clean_role_list(context, trust, all_roles)
if trust.get('project_id'):
@@ -242,29 +232,12 @@ class TrustV3(controller.V3Controller):
raise exception.TrustNotFound(trust_id)
user_id = self._get_user_id(context)
_trustor_trustee_only(trust, user_id)
- matching_roles = [x for x in trust['roles']
- if x['id'] == role_id]
- if not matching_roles:
+ if not any(role['id'] == role_id for role in trust['roles']):
raise exception.RoleNotFound(role_id=role_id)
@controller.protected()
def get_role_for_trust(self, context, trust_id, role_id):
- """Checks if a role has been assigned to a trust."""
- trust = self.trust_api.get_trust(trust_id)
- if not trust:
- raise exception.TrustNotFound(trust_id)
-
- user_id = self._get_user_id(context)
- _trustor_trustee_only(trust, user_id)
- matching_roles = [x for x in trust['roles']
- if x['id'] == role_id]
- if not matching_roles:
- raise exception.RoleNotFound(role_id=role_id)
- all_roles = self.assignment_api.list_roles()
- matching_roles = [x for x in all_roles if x['id'] == role_id]
- if matching_roles:
- full_role = (assignment.controllers.
- RoleV3.wrap_member(context, matching_roles[0]))
- return full_role
- else:
- raise exception.RoleNotFound(role_id=role_id)
+ """Get a role that has been assigned to a trust."""
+ self.check_role_for_trust(context, trust_id, role_id)
+ role = self.assignment_api.get_role(role_id)
+ return assignment.controllers.RoleV3.wrap_member(context, role)
diff --git a/keystone/trust/core.py b/keystone/trust/core.py
index 1a2d838c6..0181c732b 100644
--- a/keystone/trust/core.py
+++ b/keystone/trust/core.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -41,11 +39,12 @@ class Manager(manager.Manager):
dynamically calls the backend.
"""
+ _TRUST = "OS-TRUST:trust"
def __init__(self):
super(Manager, self).__init__(CONF.trust.driver)
- @notifications.created('OS-TRUST:trust')
+ @notifications.created(_TRUST)
def create_trust(self, trust_id, trust, roles):
"""Create a new trust.
@@ -53,7 +52,7 @@ class Manager(manager.Manager):
"""
return self.driver.create_trust(trust_id, trust, roles)
- @notifications.deleted('OS-TRUST:trust')
+ @notifications.deleted(_TRUST)
def delete_trust(self, trust_id):
"""Remove a trust.
diff --git a/keystone/trust/routers.py b/keystone/trust/routers.py
index e464a106f..279f740ab 100644
--- a/keystone/trust/routers.py
+++ b/keystone/trust/routers.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/openstack-common.conf b/openstack-common.conf
index 3b4a72669..4b6e6f0d0 100644
--- a/openstack-common.conf
+++ b/openstack-common.conf
@@ -3,6 +3,7 @@
# The list of modules to copy from openstack-common
module=db
module=db.sqlalchemy
+module=config
module=colorizer
module=crypto
module=fixture
diff --git a/requirements.txt b/requirements.txt
index cdb5c697b..217a243b1 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,7 +1,7 @@
# keystone dependencies
-pbr>=0.5.21,<1.0
+pbr>=0.6,<1.0
pam>=0.1.4
-WebOb>=1.2.3,<1.3
+WebOb>=1.2.3
eventlet>=0.13.0
greenlet>=0.3.2
netaddr>=0.7.6
@@ -9,17 +9,19 @@ PasteDeploy>=1.5.0
Paste
Routes>=1.12.3
six>=1.4.1
-SQLAlchemy>=0.7.8,<=0.7.99
+SQLAlchemy>=0.7.8,<=0.8.99
sqlalchemy-migrate>=0.8.2
passlib
lxml>=2.3
iso8601>=0.1.8
-python-keystoneclient>=0.4.1
+python-keystoneclient>=0.5.0
oslo.config>=1.2.0
+oslo.messaging>=1.3.0a4
Babel>=1.3
-oauthlib
+oauthlib>=0.6
dogpile.cache>=0.5.0
-jsonschema>=1.3.0,!=1.4.0
+jsonschema>=2.0.0,<3.0.0
+pycadf>=0.1.9
# KDS exclusive dependencies
diff --git a/setup.cfg b/setup.cfg
index db6c64b2d..1010f9498 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -58,7 +58,8 @@ msgid_bugs_address = https://bugs.launchpad.net/keystone
# NOTE(dstanek): Uncomment the [pbr] section below and remove the ext.apidoc
# Sphinx extension when https://launchpad.net/bugs/1260495 is fixed.
-#[pbr]
+[pbr]
+warnerrors = True
#autodoc_tree_index_modules = True
#autodoc_tree_root = ./keystone
@@ -66,3 +67,6 @@ msgid_bugs_address = https://bugs.launchpad.net/keystone
console_scripts =
kds-api = keystone.contrib.kds.cli.api:main
kds-manage = keystone.contrib.kds.cli.manage:main
+
+oslo.config.opts =
+ keystone = keystone.common.config:list_opts
diff --git a/test-requirements.txt b/test-requirements.txt
index 6d7980383..5adf1fa71 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -24,17 +24,22 @@ sphinx>=1.1.2,<1.2
WebTest>=2.0
discover
-python-subunit
+python-subunit>=0.0.18
testrepository>=0.0.17
-testtools>=0.9.32
+testtools>=0.9.34
testscenarios>=0.4
# for python-keystoneclient
# keystoneclient <0.2.1
-httplib2
+httplib2>=0.7.5
# replaces httplib2 in keystoneclient >=0.2.1
requests>=1.1
keyring>=1.6.1,<2.0,>=2.1
# For documentation
-oslo.sphinx
+oslosphinx
+
+# Used only by oslo
+kombu>=2.4.8
+lockfile>=0.8
+stevedore>=0.14
diff --git a/tools/config/README b/tools/config/README
new file mode 100644
index 000000000..c6079dab0
--- /dev/null
+++ b/tools/config/README
@@ -0,0 +1,38 @@
+This generate_sample.sh tool is used to generate sample config files
+from OpenStack project source trees.
+
+Run it by passing the base directory and package name, e.g.:
+
+ $> generate_sample.sh --base-dir /opt/stack/nova --package-name nova \
+ --output-dir /opt/stack/nova/etc
+ $> generate_sample.sh -b /opt/stack/neutron -p neutron -o /opt/stack/neutron/etc
+
+Optionally, include libraries that register entry points for option
+discovery, such as oslo.messaging:
+
+ $> generate_sample.sh -b /opt/stack/ceilometer -p ceilometer \
+ -o /opt/stack/ceilometer/etc -l oslo.messaging
+
+Watch out for warnings about modules like libvirt, qpid and zmq not
+being found. These warnings are significant because they result in
+options not appearing in the generated config file.
+
+
+
+This check_uptodate.sh tool is used to ensure that the generated sample
+config file in the OpenStack project source tree is continually kept up
+to date with the code itself.
+
+This can be done by adding a hook to tox.ini. For example, if a project
+already has flake8 enabled in a section like this:
+
+    [testenv:pep8]
+ commands =
+ flake8 {posargs}
+
+This section would be changed to:
+
+    [testenv:pep8]
+ commands =
+ flake8 {posargs}
+ {toxinidir}/tools/config/check_uptodate.sh
diff --git a/tools/config/check_uptodate.sh b/tools/config/check_uptodate.sh
new file mode 100755
index 000000000..1ea2c381d
--- /dev/null
+++ b/tools/config/check_uptodate.sh
@@ -0,0 +1,25 @@
+#!/usr/bin/env bash
+
+PROJECT_NAME=${PROJECT_NAME:-keystone}
+CFGFILE_NAME=${PROJECT_NAME}.conf.sample
+
+if [ -e etc/${PROJECT_NAME}/${CFGFILE_NAME} ]; then
+ CFGFILE=etc/${PROJECT_NAME}/${CFGFILE_NAME}
+elif [ -e etc/${CFGFILE_NAME} ]; then
+ CFGFILE=etc/${CFGFILE_NAME}
+else
+    echo "${0##*/}: cannot find config file"
+ exit 1
+fi
+
+TEMPDIR=`mktemp -d /tmp/${PROJECT_NAME}.XXXXXX`
+trap "rm -rf $TEMPDIR" EXIT
+
+tools/config/generate_sample.sh -b ./ -p ${PROJECT_NAME} -o ${TEMPDIR}
+
+if ! diff -u ${TEMPDIR}/${CFGFILE_NAME} ${CFGFILE}
+then
+ echo "${0##*/}: ${PROJECT_NAME}.conf.sample is not up to date."
+ echo "${0##*/}: Please run 'tox -esample_config -r' (see doc/source/developing.rst for more info)"
+ exit 1
+fi
diff --git a/tools/config/generate_sample.sh b/tools/config/generate_sample.sh
new file mode 100755
index 000000000..4a7208478
--- /dev/null
+++ b/tools/config/generate_sample.sh
@@ -0,0 +1,119 @@
+#!/usr/bin/env bash
+
+print_hint() {
+ echo "Try \`${0##*/} --help' for more information." >&2
+}
+
+PARSED_OPTIONS=$(getopt -n "${0##*/}" -o hb:p:m:l:o: \
+ --long help,base-dir:,package-name:,output-dir:,module:,library: -- "$@")
+
+if [ $? != 0 ] ; then print_hint ; exit 1 ; fi
+
+eval set -- "$PARSED_OPTIONS"
+
+while true; do
+ case "$1" in
+ -h|--help)
+ echo "${0##*/} [options]"
+ echo ""
+ echo "options:"
+ echo "-h, --help show brief help"
+ echo "-b, --base-dir=DIR project base directory"
+ echo "-p, --package-name=NAME project package name"
+ echo "-o, --output-dir=DIR file output directory"
+ echo "-m, --module=MOD extra python module to interrogate for options"
+ echo "-l, --library=LIB extra library that registers options for discovery"
+ exit 0
+ ;;
+ -b|--base-dir)
+ shift
+ BASEDIR=`echo $1 | sed -e 's/\/*$//g'`
+ shift
+ ;;
+ -p|--package-name)
+ shift
+ PACKAGENAME=`echo $1`
+ shift
+ ;;
+ -o|--output-dir)
+ shift
+ OUTPUTDIR=`echo $1 | sed -e 's/\/*$//g'`
+ shift
+ ;;
+ -m|--module)
+ shift
+ MODULES="$MODULES -m $1"
+ shift
+ ;;
+ -l|--library)
+ shift
+ LIBRARIES="$LIBRARIES -l $1"
+ shift
+ ;;
+ --)
+ break
+ ;;
+ esac
+done
+
+BASEDIR=${BASEDIR:-`pwd`}
+if ! [ -d $BASEDIR ]
+then
+ echo "${0##*/}: missing project base directory" >&2 ; print_hint ; exit 1
+elif [[ $BASEDIR != /* ]]
+then
+ BASEDIR=$(cd "$BASEDIR" && pwd)
+fi
+
+PACKAGENAME=${PACKAGENAME:-${BASEDIR##*/}}
+TARGETDIR=$BASEDIR/$PACKAGENAME
+if ! [ -d $TARGETDIR ]
+then
+ echo "${0##*/}: invalid project package name" >&2 ; print_hint ; exit 1
+fi
+
+OUTPUTDIR=${OUTPUTDIR:-$BASEDIR/etc}
+# NOTE(bnemec): Some projects put their sample config in etc/,
+# some in etc/$PACKAGENAME/
+if [ -d $OUTPUTDIR/$PACKAGENAME ]
+then
+ OUTPUTDIR=$OUTPUTDIR/$PACKAGENAME
+elif ! [ -d $OUTPUTDIR ]
+then
+ echo "${0##*/}: cannot access \`$OUTPUTDIR': No such file or directory" >&2
+ exit 1
+fi
+
+BASEDIRESC=`echo $BASEDIR | sed -e 's/\//\\\\\//g'`
+find $TARGETDIR -type f -name "*.pyc" -delete
+FILES=$(find $TARGETDIR -type f -name "*.py" ! -path "*/tests/*" \
+ -exec grep -l "Opt(" {} + | sed -e "s/^$BASEDIRESC\///g" | sort -u)
+
+RC_FILE="`dirname $0`/oslo.config.generator.rc"
+if test -r "$RC_FILE"
+then
+ source "$RC_FILE"
+fi
+
+for mod in ${KEYSTONE_CONFIG_GENERATOR_EXTRA_MODULES}; do
+ MODULES="$MODULES -m $mod"
+done
+
+for lib in ${KEYSTONE_CONFIG_GENERATOR_EXTRA_LIBRARIES}; do
+ LIBRARIES="$LIBRARIES -l $lib"
+done
+
+export EVENTLET_NO_GREENDNS=yes
+
+OS_VARS=$(set | sed -n '/^OS_/s/=[^=]*$//gp' | xargs)
+[ "$OS_VARS" ] && eval "unset \$OS_VARS"
+DEFAULT_MODULEPATH=keystone.openstack.common.config.generator
+MODULEPATH=${MODULEPATH:-$DEFAULT_MODULEPATH}
+OUTPUTFILE=$OUTPUTDIR/$PACKAGENAME.conf.sample
+python -m $MODULEPATH $MODULES $LIBRARIES $FILES > $OUTPUTFILE
+
+# Hook to allow projects to append custom config file snippets
+CONCAT_FILES=$(ls $BASEDIR/tools/config/*.conf.sample 2>/dev/null)
+for CONCAT_FILE in $CONCAT_FILES; do
+ cat $CONCAT_FILE >> $OUTPUTFILE
+done
diff --git a/tools/config/oslo.config.generator.rc b/tools/config/oslo.config.generator.rc
new file mode 100644
index 000000000..2f97726a6
--- /dev/null
+++ b/tools/config/oslo.config.generator.rc
@@ -0,0 +1,4 @@
+# Environment variables that affect the automatic sample config generation.
+# Additions to any of these variables are space-delimited. See the
+# "generate_sample.sh" script for the variables that can be used.
+KEYSTONE_CONFIG_GENERATOR_EXTRA_LIBRARIES='keystone oslo.messaging'
diff --git a/tools/install_venv.py b/tools/install_venv.py
index e0f6ba39a..e01ae3f06 100644
--- a/tools/install_venv.py
+++ b/tools/install_venv.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2013 IBM Corp.
# Copyright 2012 OpenStack Foundation
# Copyright 2010 United States Government as represented by the
diff --git a/tox.ini b/tox.ini
index f77b6da4d..00bc5c803 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,21 +1,21 @@
[tox]
minversion = 1.6
skipsdist = True
-envlist = py26,py27,py33,pep8,docs
+envlist = py26,py27,py33,pep8,docs,sample_config
[testenv]
usedevelop = True
install_command = pip install -U {opts} {packages}
setenv = VIRTUAL_ENV={envdir}
- LANG=en_US.UTF-8
- LANGUAGE=en_US:en
- LC_ALL=C
deps = -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
commands = python setup.py testr --testr-args='{posargs}'
[testenv:pep8]
-commands = flake8
+envdir = {toxworkdir}/venv
+commands =
+ flake8 {posargs}
+ {toxinidir}/tools/config/check_uptodate.sh
[tox:jenkins]
downloadcache = ~/cache/pip
@@ -43,5 +43,10 @@ builtins = _
exclude=.venv,.git,.tox,build,dist,doc,*openstack/common*,*lib/python*,*egg,tools,vendor,.update-venv,*.ini
[testenv:docs]
+envdir = {toxworkdir}/venv
commands=
python setup.py build_sphinx
+
+[testenv:sample_config]
+envdir = {toxworkdir}/venv
+commands = {toxinidir}/tools/config/generate_sample.sh