-rw-r--r--  README.rst  5
-rwxr-xr-x  api-ref/source/conf.py  9
-rwxr-xr-x  api-ref/source/parameters.yaml  62
-rw-r--r--  api-ref/source/samples/db-backup-restore-response-json-http.txt  2
-rw-r--r--  api-ref/source/samples/db-backup-restore-response.json  1
-rw-r--r--  api-ref/source/samples/db-create-instance-response-json-http.txt  2
-rw-r--r--  api-ref/source/samples/db-create-instance-response.json  1
-rw-r--r--  api-ref/source/samples/db-instance-status-detail-response-json-http.txt  2
-rw-r--r--  api-ref/source/samples/db-instance-status-detail-response.json  1
-rw-r--r--  api-ref/source/samples/db-instances-index-pagination-response-json-http.txt  2
-rw-r--r--  api-ref/source/samples/db-instances-index-pagination-response.json  2
-rw-r--r--  api-ref/source/samples/db-instances-index-response-json-http.txt  2
-rw-r--r--  api-ref/source/samples/db-instances-index-response.json  1
-rw-r--r--  api-ref/source/samples/db-mgmt-get-instance-details-response-json-http.txt  2
-rw-r--r--  api-ref/source/samples/db-mgmt-get-instance-details-response.json  5
-rw-r--r--  api-ref/source/samples/db-mgmt-instance-index-response-json-http.txt  2
-rw-r--r--  api-ref/source/samples/db-mgmt-instance-index-response.json  5
-rw-r--r--  devstack/plugin.sh  231
-rw-r--r--  devstack/settings  4
-rw-r--r--  doc/source/dev/building_guest_images.rst  117
-rw-r--r--  doc/source/dev/install.rst  68
-rw-r--r--  doc/source/dev/manual_install.rst  25
-rw-r--r--  doc/source/index.rst  4
-rw-r--r--  etc/trove/api-paste.ini  5
-rw-r--r--  etc/trove/api-paste.ini.test  5
-rw-r--r--  etc/trove/policy.json  96
-rw-r--r--  etc/trove/trove.conf.sample  12
-rw-r--r--  install-guide/source/conf.py  8
-rw-r--r--  integration/README.md  11
-rwxr-xr-x  integration/scripts/files/elements/ubuntu-cassandra/install.d/10-cassandra  16
-rwxr-xr-x  integration/scripts/files/elements/ubuntu-couchbase/install.d/10-couchbase  4
-rwxr-xr-x  integration/scripts/files/elements/ubuntu-couchdb/install.d/10-couchdb  6
-rwxr-xr-x  integration/scripts/files/elements/ubuntu-db2/install.d/10-db2  4
-rwxr-xr-x  integration/scripts/files/elements/ubuntu-guest/install.d/05-base-apps  2
-rwxr-xr-x  integration/scripts/files/elements/ubuntu-guest/install.d/15-trove-dep  6
-rwxr-xr-x  integration/scripts/files/elements/ubuntu-guest/pre-install.d/04-baseline-tools  2
-rwxr-xr-x  integration/scripts/files/elements/ubuntu-mongodb/install.d/20-mongodb  2
-rwxr-xr-x  integration/scripts/files/elements/ubuntu-mongodb/install.d/25-trove-mongo-dep  2
-rwxr-xr-x  integration/scripts/files/elements/ubuntu-mongodb/pre-install.d/10-mongodb-apt-key  2
-rwxr-xr-x  integration/scripts/files/elements/ubuntu-mysql/pre-install.d/10-percona-apt-key  2
-rwxr-xr-x  integration/scripts/files/elements/ubuntu-percona/install.d/30-mysql  2
-rwxr-xr-x  integration/scripts/files/elements/ubuntu-postgresql/install.d/30-postgresql (renamed from integration/scripts/files/elements/ubuntu-postgresql/install.d/10-postgresql)  12
-rwxr-xr-x  integration/scripts/files/elements/ubuntu-postgresql/pre-install.d/10-postgresql-repo  4
-rwxr-xr-x  integration/scripts/files/elements/ubuntu-pxc/install.d/30-mysql  2
-rwxr-xr-x  integration/scripts/files/elements/ubuntu-redis/install.d/30-redis (renamed from integration/scripts/files/elements/ubuntu-redis/install.d/10-redis)  6
-rw-r--r--  integration/scripts/files/elements/ubuntu-trusty-cassandra/element-deps  1
-rw-r--r--  integration/scripts/files/elements/ubuntu-trusty-couchbase/element-deps  1
-rw-r--r--  integration/scripts/files/elements/ubuntu-trusty-couchdb/element-deps  1
-rw-r--r--  integration/scripts/files/elements/ubuntu-trusty-db2/element-deps  1
-rw-r--r--  integration/scripts/files/elements/ubuntu-trusty-guest/element-deps  1
-rwxr-xr-x  integration/scripts/files/elements/ubuntu-trusty-guest/extra-data.d/20-guest-upstart (renamed from integration/scripts/files/elements/ubuntu-guest/extra-data.d/20-guest-upstart)  0
-rwxr-xr-x  integration/scripts/files/elements/ubuntu-trusty-guest/install.d/20-etc (renamed from integration/scripts/files/elements/ubuntu-guest/install.d/20-etc)  0
-rwxr-xr-x  integration/scripts/files/elements/ubuntu-trusty-guest/pre-install.d/01-trim-pkgs (renamed from integration/scripts/files/elements/ubuntu-guest/pre-install.d/01-trim-pkgs)  0
-rw-r--r--  integration/scripts/files/elements/ubuntu-trusty-mariadb/element-deps  1
-rwxr-xr-x  integration/scripts/files/elements/ubuntu-trusty-mariadb/install.d/30-mariadb (renamed from integration/scripts/files/elements/ubuntu-mariadb/install.d/30-mariadb)  6
-rw-r--r--  integration/scripts/files/elements/ubuntu-trusty-mongodb/element-deps  1
-rw-r--r--  integration/scripts/files/elements/ubuntu-trusty-mysql/element-deps  1
-rwxr-xr-x  integration/scripts/files/elements/ubuntu-trusty-mysql/install.d/30-mysql (renamed from integration/scripts/files/elements/ubuntu-mysql/install.d/30-mysql)  2
-rw-r--r--  integration/scripts/files/elements/ubuntu-trusty-percona/element-deps  1
-rw-r--r--  integration/scripts/files/elements/ubuntu-trusty-postgresql/element-deps  1
-rw-r--r--  integration/scripts/files/elements/ubuntu-trusty-pxc/element-deps  1
-rw-r--r--  integration/scripts/files/elements/ubuntu-trusty-redis/element-deps  1
-rw-r--r--  integration/scripts/files/elements/ubuntu-trusty-vertica/element-deps  1
-rwxr-xr-x  integration/scripts/files/elements/ubuntu-vertica/install.d/97-vertica  10
-rw-r--r--  integration/scripts/files/elements/ubuntu-xenial-guest/element-deps  1
-rwxr-xr-x  integration/scripts/files/elements/ubuntu-xenial-guest/extra-data.d/20-guest-systemd  22
-rwxr-xr-x  integration/scripts/files/elements/ubuntu-xenial-guest/install.d/20-etc  10
-rwxr-xr-x  integration/scripts/files/elements/ubuntu-xenial-guest/pre-install.d/01-trim-pkgs  90
-rw-r--r--  integration/scripts/files/elements/ubuntu-xenial-mariadb/element-deps  1
-rwxr-xr-x  integration/scripts/files/elements/ubuntu-xenial-mariadb/install.d/30-mariadb  39
-rw-r--r--  integration/scripts/files/elements/ubuntu-xenial-mysql/element-deps  1
-rwxr-xr-x  integration/scripts/files/elements/ubuntu-xenial-mysql/install.d/30-mysql  32
-rw-r--r--  integration/scripts/files/elements/ubuntu-xenial-postgresql/element-deps  1
-rwxr-xr-x  integration/scripts/files/elements/ubuntu-xenial-postgresql/install.d/31-fix-init-script  7
-rw-r--r--  integration/scripts/files/elements/ubuntu-xenial-pxc/element-deps  1
-rwxr-xr-x  integration/scripts/files/elements/ubuntu-xenial-pxc/install.d/31-fix-my-cnf  15
-rw-r--r--  integration/scripts/files/elements/ubuntu-xenial-redis/element-deps  1
-rwxr-xr-x  integration/scripts/files/elements/ubuntu-xenial-redis/install.d/31-fix-init-file  28
-rw-r--r--  integration/scripts/functions  28
-rw-r--r--  integration/scripts/functions_qemu  8
-rw-r--r--  integration/scripts/localrc.rc  6
-rwxr-xr-x  integration/scripts/trovestack  335
-rw-r--r--  integration/scripts/trovestack.rc  21
-rw-r--r--  integration/tests/integration/tests/api/__init__.py  13
-rw-r--r--  integration/tests/integration/tests/util/__init__.py  16
-rw-r--r--  releasenotes/notes/cluster_list_show_all_ips-3547635440.yaml  6
-rw-r--r--  releasenotes/notes/fix-cluster-show-346798b3e3.yaml  5
-rw-r--r--  releasenotes/notes/fix_module_apply-042fc6e61f721540.yaml  6
-rw-r--r--  releasenotes/notes/grow-cluster-nic-az-0e0fe4083666c300.yaml  4
-rw-r--r--  releasenotes/notes/instance-show-comp-vol-id-964db9f52a5ac9c1.yaml  4
-rw-r--r--  releasenotes/notes/mountpoint-detection-096734f0097eb75a.yaml  4
-rw-r--r--  releasenotes/notes/multi-region-cd8da560bfe00de5.yaml  3
-rw-r--r--  releasenotes/notes/post-upgrade-fixes-828811607826d433.yaml  4
-rw-r--r--  releasenotes/notes/use-oslo-policy-bbd1b911e6487c36.yaml  8
-rw-r--r--  releasenotes/source/index.rst  1
-rw-r--r--  releasenotes/source/liberty.rst  6
-rw-r--r--  requirements.txt  25
-rw-r--r--  test-requirements.txt  3
-rw-r--r--  tools/trove-pylint.config  24
-rwxr-xr-x  tools/trove-pylint.py  12
-rw-r--r--  tox.ini  2
-rw-r--r--  trove/backup/service.py  8
-rw-r--r--  trove/cluster/models.py  57
-rw-r--r--  trove/cluster/service.py  32
-rw-r--r--  trove/cluster/views.py  2
-rw-r--r--  trove/cmd/conductor.py  4
-rw-r--r--  trove/cmd/guest.py  4
-rw-r--r--  trove/cmd/taskmanager.py  4
-rw-r--r--  trove/common/apischema.py  20
-rw-r--r--  trove/common/cfg.py  29
-rw-r--r--  trove/common/exception.py  19
-rw-r--r--  trove/common/glance_remote.py  53
-rw-r--r--  trove/common/models.py  17
-rw-r--r--  trove/common/notification.py  24
-rw-r--r--  trove/common/policy.py  260
-rw-r--r--  trove/common/remote.py  20
-rw-r--r--  trove/common/single_tenant_remote.py  11
-rw-r--r--  trove/common/strategies/cluster/experimental/cassandra/api.py  3
-rw-r--r--  trove/common/strategies/cluster/experimental/cassandra/guestagent.py  59
-rw-r--r--  trove/common/strategies/cluster/experimental/galera_common/api.py  18
-rw-r--r--  trove/common/strategies/cluster/experimental/galera_common/guestagent.py  30
-rw-r--r--  trove/common/strategies/cluster/experimental/mongodb/api.py  18
-rw-r--r--  trove/common/strategies/cluster/experimental/mongodb/guestagent.py  55
-rw-r--r--  trove/common/strategies/cluster/experimental/mongodb/taskmanager.py  3
-rw-r--r--  trove/common/strategies/cluster/experimental/redis/api.py  4
-rw-r--r--  trove/common/strategies/cluster/experimental/redis/guestagent.py  37
-rw-r--r--  trove/common/strategies/cluster/experimental/vertica/api.py  6
-rw-r--r--  trove/common/strategies/cluster/experimental/vertica/guestagent.py  39
-rw-r--r--  trove/common/trove_remote.py  56
-rw-r--r--  trove/common/wsgi.py  5
-rw-r--r--  trove/conductor/api.py  51
-rw-r--r--  trove/configuration/service.py  40
-rw-r--r--  trove/datastore/service.py  15
-rw-r--r--  trove/db/sqlalchemy/migrate_repo/versions/039_region.py  35
-rw-r--r--  trove/extensions/common/service.py  38
-rw-r--r--  trove/extensions/mgmt/instances/models.py  6
-rw-r--r--  trove/extensions/mgmt/instances/service.py  22
-rw-r--r--  trove/extensions/mgmt/volume/models.py  4
-rw-r--r--  trove/extensions/mgmt/volume/service.py  4
-rw-r--r--  trove/extensions/mysql/models.py  10
-rw-r--r--  trove/extensions/mysql/service.py  30
-rw-r--r--  trove/extensions/security_group/models.py  53
-rw-r--r--  trove/extensions/security_group/service.py  5
-rw-r--r--  trove/flavor/service.py  5
-rw-r--r--  trove/guestagent/api.py  268
-rw-r--r--  trove/guestagent/common/operating_system.py  20
-rw-r--r--  trove/guestagent/datastore/experimental/mariadb/service.py  2
-rw-r--r--  trove/guestagent/datastore/experimental/postgresql/manager.py  3
-rw-r--r--  trove/guestagent/datastore/experimental/postgresql/service.py  2
-rw-r--r--  trove/guestagent/datastore/mysql_common/manager.py  1
-rw-r--r--  trove/guestagent/datastore/mysql_common/service.py  4
-rw-r--r--  trove/guestagent/datastore/service.py  11
-rw-r--r--  trove/guestagent/guest_log.py  11
-rw-r--r--  trove/guestagent/strategies/backup/experimental/db2_impl.py  2
-rw-r--r--  trove/guestagent/volume.py  197
-rw-r--r--  trove/instance/models.py  148
-rw-r--r--  trove/instance/service.py  68
-rw-r--r--  trove/instance/views.py  7
-rw-r--r--  trove/limits/service.py  3
-rw-r--r--  trove/module/models.py  56
-rw-r--r--  trove/module/service.py  21
-rw-r--r--  trove/network/neutron.py  4
-rw-r--r--  trove/network/nova.py  4
-rw-r--r--  trove/taskmanager/api.py  86
-rw-r--r--  trove/taskmanager/manager.py  19
-rwxr-xr-x  trove/taskmanager/models.py  26
-rw-r--r--  trove/tests/api/backups.py  68
-rw-r--r--  trove/tests/api/configurations.py  8
-rw-r--r--  trove/tests/api/flavors.py  11
-rw-r--r--  trove/tests/api/instances.py  89
-rw-r--r--  trove/tests/api/instances_delete.py  3
-rw-r--r--  trove/tests/api/instances_mysql_down.py  8
-rw-r--r--  trove/tests/api/mgmt/instances.py  3
-rw-r--r--  trove/tests/api/mgmt/instances_actions.py  11
-rw-r--r--  trove/tests/api/replication.py  3
-rw-r--r--  trove/tests/fakes/guestagent.py  3
-rw-r--r--  trove/tests/fakes/nova.py  6
-rw-r--r--  trove/tests/int_tests.py  231
-rw-r--r--  trove/tests/scenario/groups/__init__.py  32
-rw-r--r--  trove/tests/scenario/groups/backup_group.py  50
-rw-r--r--  trove/tests/scenario/groups/cluster_actions_group.py  162
-rw-r--r--  trove/tests/scenario/groups/cluster_group.py  341
-rw-r--r--  trove/tests/scenario/groups/configuration_group.py  13
-rw-r--r--  trove/tests/scenario/groups/database_actions_group.py  12
-rw-r--r--  trove/tests/scenario/groups/instance_actions_group.py  49
-rw-r--r--  trove/tests/scenario/groups/instance_create_group.py  40
-rw-r--r--  trove/tests/scenario/groups/instance_upgrade_group.py  21
-rw-r--r--  trove/tests/scenario/groups/user_actions_group.py  10
-rw-r--r--  trove/tests/scenario/helpers/test_helper.py  28
-rw-r--r--  trove/tests/scenario/runners/__init__.py  1
-rw-r--r--  trove/tests/scenario/runners/backup_runners.py  85
-rw-r--r--  trove/tests/scenario/runners/cluster_runners.py (renamed from trove/tests/scenario/runners/cluster_actions_runners.py)  394
-rw-r--r--  trove/tests/scenario/runners/configuration_runners.py  59
-rw-r--r--  trove/tests/scenario/runners/database_actions_runners.py  50
-rw-r--r--  trove/tests/scenario/runners/guest_log_runners.py  65
-rw-r--r--  trove/tests/scenario/runners/instance_actions_runners.py  33
-rw-r--r--  trove/tests/scenario/runners/instance_create_runners.py  120
-rw-r--r--  trove/tests/scenario/runners/instance_delete_runners.py  5
-rw-r--r--  trove/tests/scenario/runners/instance_error_create_runners.py  21
-rw-r--r--  trove/tests/scenario/runners/instance_force_delete_runners.py  12
-rw-r--r--  trove/tests/scenario/runners/instance_upgrade_runners.py  20
-rw-r--r--  trove/tests/scenario/runners/module_runners.py  118
-rw-r--r--  trove/tests/scenario/runners/negative_cluster_actions_runners.py  3
-rw-r--r--  trove/tests/scenario/runners/replication_runners.py  68
-rw-r--r--  trove/tests/scenario/runners/root_actions_runners.py  26
-rw-r--r--  trove/tests/scenario/runners/test_runners.py  108
-rw-r--r--  trove/tests/scenario/runners/user_actions_runners.py  79
-rw-r--r--  trove/tests/unittests/api/common/test_limits.py  5
-rw-r--r--  trove/tests/unittests/backup/test_backup_models.py  3
-rw-r--r--  trove/tests/unittests/backup/test_backupagent.py  6
-rw-r--r--  trove/tests/unittests/cluster/test_cluster.py  4
-rw-r--r--  trove/tests/unittests/cluster/test_cluster_controller.py  8
-rw-r--r--  trove/tests/unittests/cluster/test_cluster_pxc_controller.py  1
-rw-r--r--  trove/tests/unittests/cluster/test_cluster_redis_controller.py  3
-rw-r--r--  trove/tests/unittests/cluster/test_cluster_vertica_controller.py  1
-rw-r--r--  trove/tests/unittests/cluster/test_cluster_views.py  8
-rw-r--r--  trove/tests/unittests/cluster/test_galera_cluster.py  40
-rw-r--r--  trove/tests/unittests/cluster/test_models.py  10
-rw-r--r--  trove/tests/unittests/common/test_common_extensions.py  58
-rw-r--r--  trove/tests/unittests/common/test_dbmodels.py  4
-rw-r--r--  trove/tests/unittests/common/test_policy.py  53
-rw-r--r--  trove/tests/unittests/common/test_remote.py  42
-rw-r--r--  trove/tests/unittests/guestagent/test_volume.py  307
-rw-r--r--  trove/tests/unittests/instance/test_instance_models.py  3
-rw-r--r--  trove/tests/unittests/instance/test_instance_views.py  28
-rw-r--r--  trove/tests/unittests/network/test_neutron_driver.py  30
-rw-r--r--  trove/tests/unittests/secgroups/test_security_group.py  30
-rw-r--r--  trove/tests/unittests/trove_testtools.py  6
-rw-r--r--  trove/tests/util/mysql.py  3
-rw-r--r--  trove/tests/util/server_connection.py  9
230 files changed, 4852 insertions(+), 2047 deletions(-)
diff --git a/README.rst b/README.rst
index db1a2b47..8696cdf8 100644
--- a/README.rst
+++ b/README.rst
@@ -2,6 +2,9 @@
Trove
=====
+.. image:: http://governance.openstack.org/badges/trove.svg
+ :target: http://governance.openstack.org/reference/tags/index.html
+
Trove is Database as a Service for OpenStack.
Getting Started
@@ -28,7 +31,5 @@ References
http://docs.openstack.org/developer/trove/dev/install.html
* Manual installation docs:
http://docs.openstack.org/developer/trove/dev/manual_install.html
-* Trove integration:
- https://github.com/openstack/trove-integration
* Build guest image:
http://docs.openstack.org/developer/trove/dev/building_guest_images.html
diff --git a/api-ref/source/conf.py b/api-ref/source/conf.py
index 75c55ee1..52e655ca 100755
--- a/api-ref/source/conf.py
+++ b/api-ref/source/conf.py
@@ -114,6 +114,15 @@ show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
+# Config logABug feature
+# source tree
+giturl = (
+ u'http://git.openstack.org/cgit/openstack/trove/tree/api-ref/source')
+# html_context allows us to pass arbitrary values into the html template
+html_context = {'bug_tag': 'api-ref',
+ 'giturl': giturl,
+ 'bug_project': 'trove'}
+
# -- Options for man page output ----------------------------------------------
# Grouping the document tree for man pages.
diff --git a/api-ref/source/parameters.yaml b/api-ref/source/parameters.yaml
index 96f06265..13be71a8 100755
--- a/api-ref/source/parameters.yaml
+++ b/api-ref/source/parameters.yaml
@@ -75,20 +75,6 @@ configuration:
in: body
required: true
type: string
-configuration_1:
- description: |
- ID of the configuration group to attach to the
- instance.
- in: body
- required: false
- type: string
-configuration_2:
- description: |
- To detach a configuration group, set the
- configuration parameter to null.
- in: body
- required: true
- type: string
created:
description: |
The date and time when the resource was created.
@@ -120,13 +106,6 @@ datastore:
in: body
required: true
type: string
-datastore_1:
- description: |
- Name of the datastore to use when creating the
- instance.
- in: body
- required: false
- type: string
datastore_version:
description: |
Name of the datastore version to use when
@@ -168,33 +147,6 @@ name:
in: body
required: true
type: string
-name_1:
- description: |
- A database name. You cannot use the
- ``lost+found``, ``information_schema``, or ``mysql`` database name
- to create a database because these names are reserved for system
- databases. Valid characters in a database name are - Upper and
- lower case letters. - Numbers. - ``@``, ``?``, ``#``, and spaces
- except at the beginning or end of the database name. - ``_`` is
- allowed anywhere in the database name. You cannot use these
- characters in a database name. The maximum length of a database
- name is 64 characters.
- in: body
- required: false
- type: string
-name_2:
- description: |
- The user name for the database on instance
- creation.
- in: body
- required: false
- type: string
-name_3:
- description: |
- New name of the configuration group.
- in: body
- required: true
- type: string
password:
description: |
The password for those users on instance
@@ -215,12 +167,6 @@ replica_of:
in: body
required: false
type: string
-replica_of_1:
- description: |
- To detach a replica, set ``replica_of`` to null.
- in: body
- required: true
- type: string
size:
description: |
The volume size, in gigabytes (GB). A valid value
@@ -276,11 +222,3 @@ values:
in: body
required: true
type: string
-values_1:
- description: |
- Dictionary that lists configuration parameter
- names and associated values.
- in: body
- required: true
- type: object
-
diff --git a/api-ref/source/samples/db-backup-restore-response-json-http.txt b/api-ref/source/samples/db-backup-restore-response-json-http.txt
index 2ce0d753..ce9bfb08 100644
--- a/api-ref/source/samples/db-backup-restore-response-json-http.txt
+++ b/api-ref/source/samples/db-backup-restore-response-json-http.txt
@@ -1,5 +1,5 @@
HTTP/1.1 200 OK
Content-Type: application/json
-Content-Length: 694
+Content-Length: 717
Date: Mon, 18 Mar 2013 19:09:17 GMT
diff --git a/api-ref/source/samples/db-backup-restore-response.json b/api-ref/source/samples/db-backup-restore-response.json
index 005899d1..cafd6197 100644
--- a/api-ref/source/samples/db-backup-restore-response.json
+++ b/api-ref/source/samples/db-backup-restore-response.json
@@ -31,6 +31,7 @@
}
],
"name": "backup_instance",
+ "region": "RegionOne",
"status": "BUILD",
"updated": "2014-10-30T12:30:00",
"volume": {
diff --git a/api-ref/source/samples/db-create-instance-response-json-http.txt b/api-ref/source/samples/db-create-instance-response-json-http.txt
index 21284541..646d02a6 100644
--- a/api-ref/source/samples/db-create-instance-response-json-http.txt
+++ b/api-ref/source/samples/db-create-instance-response-json-http.txt
@@ -1,5 +1,5 @@
HTTP/1.1 200 OK
Content-Type: application/json
-Content-Length: 697
+Content-Length: 720
Date: Mon, 18 Mar 2013 19:09:17 GMT
diff --git a/api-ref/source/samples/db-create-instance-response.json b/api-ref/source/samples/db-create-instance-response.json
index a1bf8e9a..47e75bfe 100644
--- a/api-ref/source/samples/db-create-instance-response.json
+++ b/api-ref/source/samples/db-create-instance-response.json
@@ -31,6 +31,7 @@
}
],
"name": "json_rack_instance",
+ "region": "RegionOne",
"status": "BUILD",
"updated": "2014-10-30T12:30:00",
"volume": {
diff --git a/api-ref/source/samples/db-instance-status-detail-response-json-http.txt b/api-ref/source/samples/db-instance-status-detail-response-json-http.txt
index 0825e835..ab457b30 100644
--- a/api-ref/source/samples/db-instance-status-detail-response-json-http.txt
+++ b/api-ref/source/samples/db-instance-status-detail-response-json-http.txt
@@ -1,5 +1,5 @@
HTTP/1.1 200 OK
Content-Type: application/json
-Content-Length: 712
+Content-Length: 735
Date: Mon, 18 Mar 2013 19:09:17 GMT
diff --git a/api-ref/source/samples/db-instance-status-detail-response.json b/api-ref/source/samples/db-instance-status-detail-response.json
index fa96976a..eacb3fc4 100644
--- a/api-ref/source/samples/db-instance-status-detail-response.json
+++ b/api-ref/source/samples/db-instance-status-detail-response.json
@@ -31,6 +31,7 @@
}
],
"name": "json_rack_instance",
+ "region": "RegionOne",
"status": "ACTIVE",
"updated": "2014-10-30T12:30:00",
"volume": {
diff --git a/api-ref/source/samples/db-instances-index-pagination-response-json-http.txt b/api-ref/source/samples/db-instances-index-pagination-response-json-http.txt
index 0ab8faf1..b1c87ce6 100644
--- a/api-ref/source/samples/db-instances-index-pagination-response-json-http.txt
+++ b/api-ref/source/samples/db-instances-index-pagination-response-json-http.txt
@@ -1,5 +1,5 @@
HTTP/1.1 200 OK
Content-Type: application/json
-Content-Length: 1251
+Content-Length: 1297
Date: Mon, 18 Mar 2013 19:09:17 GMT
diff --git a/api-ref/source/samples/db-instances-index-pagination-response.json b/api-ref/source/samples/db-instances-index-pagination-response.json
index 9dcd8480..85569350 100644
--- a/api-ref/source/samples/db-instances-index-pagination-response.json
+++ b/api-ref/source/samples/db-instances-index-pagination-response.json
@@ -31,6 +31,7 @@
}
],
"name": "The Third Instance",
+ "region": "RegionOne",
"status": "ACTIVE",
"volume": {
"size": 2
@@ -67,6 +68,7 @@
}
],
"name": "json_rack_instance",
+ "region": "RegionOne",
"status": "ACTIVE",
"volume": {
"size": 2
diff --git a/api-ref/source/samples/db-instances-index-response-json-http.txt b/api-ref/source/samples/db-instances-index-response-json-http.txt
index f7a85bd0..dd2cde67 100644
--- a/api-ref/source/samples/db-instances-index-response-json-http.txt
+++ b/api-ref/source/samples/db-instances-index-response-json-http.txt
@@ -1,5 +1,5 @@
HTTP/1.1 200 OK
Content-Type: application/json
-Content-Length: 633
+Content-Length: 656
Date: Mon, 18 Mar 2013 19:09:17 GMT
diff --git a/api-ref/source/samples/db-instances-index-response.json b/api-ref/source/samples/db-instances-index-response.json
index c644e7c8..341308ea 100644
--- a/api-ref/source/samples/db-instances-index-response.json
+++ b/api-ref/source/samples/db-instances-index-response.json
@@ -31,6 +31,7 @@
}
],
"name": "json_rack_instance",
+ "region": "RegionOne",
"status": "ACTIVE",
"volume": {
"size": 2
diff --git a/api-ref/source/samples/db-mgmt-get-instance-details-response-json-http.txt b/api-ref/source/samples/db-mgmt-get-instance-details-response-json-http.txt
index 97dd151e..feb89a88 100644
--- a/api-ref/source/samples/db-mgmt-get-instance-details-response-json-http.txt
+++ b/api-ref/source/samples/db-mgmt-get-instance-details-response-json-http.txt
@@ -1,5 +1,5 @@
HTTP/1.1 200 OK
Content-Type: application/json
-Content-Length: 1533
+Content-Length: 1676
Date: Mon, 18 Mar 2013 19:09:17 GMT
diff --git a/api-ref/source/samples/db-mgmt-get-instance-details-response.json b/api-ref/source/samples/db-mgmt-get-instance-details-response.json
index c58aeb20..203159d6 100644
--- a/api-ref/source/samples/db-mgmt-get-instance-details-response.json
+++ b/api-ref/source/samples/db-mgmt-get-instance-details-response.json
@@ -36,6 +36,7 @@
}
],
"name": "json_rack_instance",
+ "region": "RegionOne",
"root_enabled": "2014-10-30T12:30:00",
"root_enabled_by": "3000",
"server": {
@@ -55,6 +56,7 @@
"status": "ACTIVE",
"tenant_id": "3000"
},
+ "server_id": "44b277eb-39be-4921-be31-3d61b43651d7",
"service_status": "ACTIVE",
"status": "ACTIVE",
"task_description": "No tasks for the instance.",
@@ -74,6 +76,7 @@
"status": "in-use",
"total": 4.0,
"used": 0.16
- }
+ },
+ "volume_id": "VOL_44b277eb-39be-4921-be31-3d61b43651d7"
}
}
diff --git a/api-ref/source/samples/db-mgmt-instance-index-response-json-http.txt b/api-ref/source/samples/db-mgmt-instance-index-response-json-http.txt
index 6eb4f3ca..875f0f20 100644
--- a/api-ref/source/samples/db-mgmt-instance-index-response-json-http.txt
+++ b/api-ref/source/samples/db-mgmt-instance-index-response-json-http.txt
@@ -1,5 +1,5 @@
HTTP/1.1 200 OK
Content-Type: application/json
-Content-Length: 1082
+Content-Length: 1225
Date: Mon, 18 Mar 2013 19:09:17 GMT
diff --git a/api-ref/source/samples/db-mgmt-instance-index-response.json b/api-ref/source/samples/db-mgmt-instance-index-response.json
index 9c6eb89f..5736bb17 100644
--- a/api-ref/source/samples/db-mgmt-instance-index-response.json
+++ b/api-ref/source/samples/db-mgmt-instance-index-response.json
@@ -34,6 +34,7 @@
}
],
"name": "json_rack_instance",
+ "region": "RegionOne",
"server": {
"deleted": false,
"deleted_at": null,
@@ -44,6 +45,7 @@
"status": "ACTIVE",
"tenant_id": "3000"
},
+ "server_id": "44b277eb-39be-4921-be31-3d61b43651d7",
"service_status": "ACTIVE",
"status": "ACTIVE",
"task_description": "No tasks for the instance.",
@@ -51,7 +53,8 @@
"updated": "2014-10-30T12:30:00",
"volume": {
"size": 4
- }
+ },
+ "volume_id": "VOL_44b277eb-39be-4921-be31-3d61b43651d7"
}
]
}
diff --git a/devstack/plugin.sh b/devstack/plugin.sh
index a79a0711..42f6b827 100644
--- a/devstack/plugin.sh
+++ b/devstack/plugin.sh
@@ -54,16 +54,13 @@ function create_trove_accounts {
create_service_user "trove"
- if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
-
- local trove_service=$(get_or_create_service "trove" \
- "database" "Trove Service")
- get_or_create_endpoint $trove_service \
- "$REGION_NAME" \
- "http://$SERVICE_HOST:8779/v1.0/\$(tenant_id)s" \
- "http://$SERVICE_HOST:8779/v1.0/\$(tenant_id)s" \
- "http://$SERVICE_HOST:8779/v1.0/\$(tenant_id)s"
- fi
+ local trove_service=$(get_or_create_service "trove" \
+ "database" "Trove Service")
+ get_or_create_endpoint $trove_service \
+ "$REGION_NAME" \
+ "http://$SERVICE_HOST:8779/v1.0/\$(tenant_id)s" \
+ "http://$SERVICE_HOST:8779/v1.0/\$(tenant_id)s" \
+ "http://$SERVICE_HOST:8779/v1.0/\$(tenant_id)s"
fi
}
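For reference, the get_or_create_service and get_or_create_endpoint helpers used above expand to plain openstack CLI calls along these lines (the interface loop and variable values are illustrative, not taken from this change):

    # Register the database service and one endpoint per interface.
    openstack service create --name trove --description "Trove Service" database
    for iface in public internal admin; do
        openstack endpoint create --region "$REGION_NAME" database $iface \
            'http://'"$SERVICE_HOST"':8779/v1.0/$(tenant_id)s'
    done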
@@ -103,16 +100,52 @@ function iniset_conditional {
}
+# configure_nova_kvm() - update the nova hypervisor configuration if possible
+function configure_nova_kvm {
+ cpu="unknown"
+
+ if [ -e /sys/module/kvm_*/parameters/nested ]; then
+ reconfigure_nova="F"
+
+ if [ -e /sys/module/kvm_intel/parameters/nested ]; then
+ cpu="Intel"
+ if [[ "$(cat /sys/module/kvm_*/parameters/nested)" == "Y" ]]; then
+ reconfigure_nova="Y"
+ fi
+ elif [ -e /sys/module/kvm_amd/parameters/nested ]; then
+ cpu="AMD"
+ if [[ "$(cat /sys/module/kvm_*/parameters/nested)" == "1" ]]; then
+ reconfigure_nova="Y"
+ fi
+ fi
+
+ if [ "${reconfigure_nova}" == "Y" ]; then
+ NOVA_CONF_DIR=${NOVA_CONF_DIR:-/etc/nova}
+ NOVA_CONF=${NOVA_CONF:-${NOVA_CONF_DIR}/nova.conf}
+ iniset $NOVA_CONF libvirt cpu_mode "none"
+ iniset $NOVA_CONF libvirt virt_type "kvm"
+ fi
+ fi
+
+ virt_type=$(iniget $NOVA_CONF libvirt virt_type)
+ echo "configure_nova_kvm: using virt_type: ${virt_type} for cpu: ${cpu}."
+}
+
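The nested-virtualization probe above keys off the kvm_intel/kvm_amd module parameters; a quick way to inspect them by hand on the build host is sketched below (the value reads "Y"/"N" on Intel and "1"/"0" on AMD):

    # Print the nested KVM setting, whichever module is loaded.
    cat /sys/module/kvm_intel/parameters/nested 2>/dev/null \
        || cat /sys/module/kvm_amd/parameters/nested 2>/dev/null \
        || echo "nested KVM parameter not present"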
# configure_trove() - Set config files, create data dirs, etc
function configure_trove {
setup_develop $TROVE_DIR
+ configure_nova_kvm
+
# Create the trove conf dir and cache dirs if they don't exist
sudo install -d -o $STACK_USER ${TROVE_CONF_DIR} ${TROVE_AUTH_CACHE_DIR}
# Copy api-paste file over to the trove conf dir
cp $TROVE_LOCAL_API_PASTE_INI $TROVE_API_PASTE_INI
+ # Copy the default policy file over to the trove conf dir
+ cp $TROVE_LOCAL_POLICY_JSON $TROVE_POLICY_JSON
+
# (Re)create trove conf files
rm -f $TROVE_CONF
rm -f $TROVE_TASKMANAGER_CONF
@@ -163,6 +196,20 @@ function configure_trove {
iniset $TROVE_TASKMANAGER_CONF DEFAULT nova_proxy_admin_tenant_name trove
iniset $TROVE_TASKMANAGER_CONF DEFAULT nova_proxy_admin_pass $RADMIN_USER_PASS
iniset $TROVE_TASKMANAGER_CONF DEFAULT trove_auth_url $TROVE_AUTH_ENDPOINT
+
+ iniset $TROVE_TASKMANAGER_CONF cassandra tcp_ports 22,7000,7001,7199,9042,9160
+ iniset $TROVE_TASKMANAGER_CONF couchbase tcp_ports 22,8091,8092,4369,11209-11211,21100-21199
+ iniset $TROVE_TASKMANAGER_CONF couchdb tcp_ports 22,5984
+ iniset $TROVE_TASKMANAGER_CONF db2 tcp_ports 22,50000
+ iniset $TROVE_TASKMANAGER_CONF mariadb tcp_ports 22,3306,4444,4567,4568
+ iniset $TROVE_TASKMANAGER_CONF mongodb tcp_ports 22,2500,27017,27019
+ iniset $TROVE_TASKMANAGER_CONF mysql tcp_ports 22,3306
+ iniset $TROVE_TASKMANAGER_CONF percona tcp_ports 22,3306
+ iniset $TROVE_TASKMANAGER_CONF postgresql tcp_ports 22,5432
+ iniset $TROVE_TASKMANAGER_CONF pxc tcp_ports 22,3306,4444,4567,4568
+ iniset $TROVE_TASKMANAGER_CONF redis tcp_ports 22,6379,16379
+ iniset $TROVE_TASKMANAGER_CONF vertica tcp_ports 22,5433,5434,5444,5450,4803
+
setup_trove_logging $TROVE_TASKMANAGER_CONF
fi
@@ -179,6 +226,7 @@ function configure_trove {
iniset $TROVE_CONDUCTOR_CONF DEFAULT nova_proxy_admin_pass $RADMIN_USER_PASS
iniset $TROVE_CONDUCTOR_CONF DEFAULT trove_auth_url $TROVE_AUTH_ENDPOINT
iniset $TROVE_CONDUCTOR_CONF DEFAULT control_exchange trove
+
setup_trove_logging $TROVE_CONDUCTOR_CONF
fi
@@ -187,7 +235,6 @@ function configure_trove {
# Set up Guest Agent conf
iniset $TROVE_GUESTAGENT_CONF DEFAULT rpc_backend "rabbit"
-
iniset $TROVE_GUESTAGENT_CONF oslo_messaging_rabbit rabbit_password $RABBIT_PASSWORD
iniset $TROVE_GUESTAGENT_CONF oslo_messaging_rabbit rabbit_userid $RABBIT_USERID
iniset $TROVE_GUESTAGENT_CONF oslo_messaging_rabbit rabbit_hosts $TROVE_HOST_GATEWAY
@@ -200,6 +247,7 @@ function configure_trove {
iniset $TROVE_GUESTAGENT_CONF DEFAULT ignore_users os_admin
iniset $TROVE_GUESTAGENT_CONF DEFAULT log_dir /var/log/trove/
iniset $TROVE_GUESTAGENT_CONF DEFAULT log_file trove-guestagent.log
+
setup_trove_logging $TROVE_GUESTAGENT_CONF
}
@@ -240,7 +288,7 @@ function init_trove {
# The first part mimics the tempest setup, so make sure we have that.
ALT_USERNAME=${ALT_USERNAME:-alt_demo}
ALT_TENANT_NAME=${ALT_TENANT_NAME:-alt_demo}
- get_or_create_project ${ALT_TENANT_NAME} default
+ ALT_TENANT_ID=$(get_or_create_project ${ALT_TENANT_NAME} default)
get_or_create_user ${ALT_USERNAME} "$ADMIN_PASSWORD" "default" "alt_demo@example.com"
get_or_add_user_project_role Member ${ALT_USERNAME} ${ALT_TENANT_NAME}
@@ -248,6 +296,19 @@ function init_trove {
ADMIN_ALT_USERNAME=${ADMIN_ALT_USERNAME:-admin_${ALT_USERNAME}}
get_or_create_user ${ADMIN_ALT_USERNAME} "$ADMIN_PASSWORD" "default" "admin_alt_demo@example.com"
get_or_add_user_project_role admin ${ADMIN_ALT_USERNAME} ${ALT_TENANT_NAME}
+ # Now add these credentials to the clouds.yaml file
+ ADMIN_ALT_DEMO_CLOUD=devstack-alt-admin
+ CLOUDS_YAML=${CLOUDS_YAML:-/etc/openstack/clouds.yaml}
+ $TOP_DIR/tools/update_clouds_yaml.py \
+ --file ${CLOUDS_YAML} \
+ --os-cloud ${ADMIN_ALT_DEMO_CLOUD} \
+ --os-region-name ${REGION_NAME} \
+ --os-identity-api-version 3 \
+ ${CA_CERT_ARG} \
+ --os-auth-url ${KEYSTONE_AUTH_URI} \
+ --os-username ${ADMIN_ALT_USERNAME} \
+ --os-password ${ADMIN_PASSWORD} \
+ --os-project-name ${ALT_TENANT_NAME}
# If no guest image is specified, skip remaining setup
[ -z "$TROVE_GUEST_IMAGE_URL" ] && return 0
@@ -283,31 +344,139 @@ function init_trove {
fi
}
+# Create private IPv4 subnet
+# Note: This was taken from devstack:lib/neutron_plugins/services/l3 and will need to be maintained
+function _create_private_subnet_v4 {
+ local project_id=$1
+ local net_id=$2
+ local name=${3:-$PRIVATE_SUBNET_NAME}
+ local os_cloud=${4:-devstack-admin}
+
+ local subnet_params="--project $project_id "
+ subnet_params+="--ip-version 4 "
+ if [[ -n "$NETWORK_GATEWAY" ]]; then
+ subnet_params+="--gateway $NETWORK_GATEWAY "
+ fi
+ if [ -n $SUBNETPOOL_V4_ID ]; then
+ subnet_params+="--subnet-pool $SUBNETPOOL_V4_ID "
+ else
+ subnet_params+="--subnet-range $FIXED_RANGE "
+ fi
+ subnet_params+="--network $net_id $name"
+ local subnet_id
+ subnet_id=$(openstack --os-cloud $os_cloud --os-region "$REGION_NAME" subnet create $subnet_params | grep ' id ' | get_field 2)
+ die_if_not_set $LINENO subnet_id "Failure creating private IPv4 subnet for $project_id"
+ echo $subnet_id
+}
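With a subnet pool configured, the command this helper assembles ends up roughly as follows (the project, pool, and network values are placeholders):

    # Expanded form of the subnet create issued by _create_private_subnet_v4.
    openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create \
        --project $ALT_TENANT_ID --ip-version 4 \
        --subnet-pool $SUBNETPOOL_V4_ID \
        --network $NEW_NET_ID alt-private-subnet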
+
+# Create private IPv6 subnet
+# Note: This was taken from devstack:lib/neutron_plugins/services/l3 and will need to be maintained
+function _create_private_subnet_v6 {
+ local project_id=$1
+ local net_id=$2
+ local name=${3:-$IPV6_PRIVATE_SUBNET_NAME}
+ local os_cloud=${4:-devstack-admin}
+
+ die_if_not_set $LINENO IPV6_RA_MODE "IPV6 RA Mode not set"
+ die_if_not_set $LINENO IPV6_ADDRESS_MODE "IPV6 Address Mode not set"
+ local ipv6_modes="--ipv6-ra-mode $IPV6_RA_MODE --ipv6-address-mode $IPV6_ADDRESS_MODE"
+ local subnet_params="--project $project_id "
+ subnet_params+="--ip-version 6 "
+ if [[ -n "$IPV6_PRIVATE_NETWORK_GATEWAY" ]]; then
+ subnet_params+="--gateway $IPV6_PRIVATE_NETWORK_GATEWAY "
+ fi
+ if [ -n $SUBNETPOOL_V6_ID ]; then
+ subnet_params+="--subnet-pool $SUBNETPOOL_V6_ID "
+ else
+ subnet_params+="--subnet-range $FIXED_RANGE_V6 $ipv6_modes} "
+ fi
+ subnet_params+="--network $net_id $name "
+ local ipv6_subnet_id
+ ipv6_subnet_id=$(openstack --os-cloud $os_cloud --os-region "$REGION_NAME" subnet create $subnet_params | grep ' id ' | get_field 2)
+ die_if_not_set $LINENO ipv6_subnet_id "Failure creating private IPv6 subnet for $project_id"
+ echo $ipv6_subnet_id
+}
+
+# Set up a network on the alt_demo tenant. Requires ROUTER_ID, REGION_NAME and IP_VERSION to be set
+function set_up_network() {
+ local CLOUD_USER=$1
+ local PROJECT_ID=$2
+ local NET_NAME=$3
+ local SUBNET_NAME=$4
+ local IPV6_SUBNET_NAME=$5
+
+ NEW_NET_ID=$(openstack --os-cloud ${CLOUD_USER} --os-region "$REGION_NAME" network create --project ${PROJECT_ID} "$NET_NAME" | grep ' id ' | get_field 2)
+ if [[ "$IP_VERSION" =~ 4.* ]]; then
+ NEW_SUBNET_ID=$(_create_private_subnet_v4 ${PROJECT_ID} ${NEW_NET_ID} ${SUBNET_NAME} ${CLOUD_USER})
+ openstack --os-cloud ${CLOUD_USER} --os-region "$REGION_NAME" router add subnet $ROUTER_ID $NEW_SUBNET_ID
+ fi
+ if [[ "$IP_VERSION" =~ .*6 ]]; then
+ NEW_IPV6_SUBNET_ID=$(_create_private_subnet_v6 ${PROJECT_ID} ${NEW_NET_ID} ${IPV6_SUBNET_NAME} ${CLOUD_USER})
+ openstack --os-cloud ${CLOUD_USER} --os-region "$REGION_NAME" router add subnet $ROUTER_ID $NEW_IPV6_SUBNET_ID
+ fi
+
+ echo $NEW_NET_ID
+}
+
# finalize_trove_network() - do the last thing(s) before starting Trove
function finalize_trove_network {
- management_network_id=$(neutron --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD net-list | grep $PRIVATE_NETWORK_NAME | awk '{print $2}')
- echo "finalize_trove_network: found network id $management_network_id"
-
- iniset $TROVE_CONF DEFAULT network_label_regex .*
- iniset $TROVE_CONF DEFAULT ip_regex .*
- iniset $TROVE_CONF DEFAULT black_list_regex ^10.0.1.*
- iniset $TROVE_CONF DEFAULT default_neutron_networks $management_network_id
+ echo "Finalizing Neutron networking for Trove"
+ echo "Dumping current network parameters:"
+ echo " SERVICE_HOST: $SERVICE_HOST"
+ echo " BRIDGE_IP: $BRIDGE_IP"
+ echo " PUBLIC_NETWORK_GATEWAY: $PUBLIC_NETWORK_GATEWAY"
+ echo " NETWORK_GATEWAY: $NETWORK_GATEWAY"
+ echo " IPV4_ADDRS_SAFE_TO_USE: $IPV4_ADDRS_SAFE_TO_USE"
+ echo " IPV6_ADDRS_SAFE_TO_USE: $IPV6_ADDRS_SAFE_TO_USE"
+ echo " FIXED_RANGE: $FIXED_RANGE"
+ echo " FLOATING_RANGE: $FLOATING_RANGE"
+ echo " SUBNETPOOL_PREFIX_V4: $SUBNETPOOL_PREFIX_V4"
+ echo " SUBNETPOOL_SIZE_V4: $SUBNETPOOL_SIZE_V4"
+ echo " SUBNETPOOL_V4_ID: $SUBNETPOOL_V4_ID"
+ echo " ROUTER_GW_IP: $ROUTER_GW_IP"
+
+ # Create the net/subnet for the alt_demo tenant so the int-tests have a proper network
+ echo "Creating network/subnets for ${ALT_TENANT_NAME} project"
+ ALT_PRIVATE_NETWORK_NAME=${TROVE_PRIVATE_NETWORK_NAME}
+ ALT_PRIVATE_SUBNET_NAME=${TROVE_PRIVATE_SUBNET_NAME}
+ ALT_PRIVATE_IPV6_SUBNET_NAME=ipv6-${ALT_PRIVATE_SUBNET_NAME}
+ ALT_NET_ID=$(set_up_network $ADMIN_ALT_DEMO_CLOUD $ALT_TENANT_ID $ALT_PRIVATE_NETWORK_NAME $ALT_PRIVATE_SUBNET_NAME $ALT_PRIVATE_IPV6_SUBNET_NAME)
+ echo "Created network ${ALT_PRIVATE_NETWORK_NAME} (${ALT_NET_ID})"
+
+ # Set up a management network to test that functionality
+ ALT_MGMT_NETWORK_NAME=trove-mgmt
+ ALT_MGMT_SUBNET_NAME=${ALT_MGMT_NETWORK_NAME}-subnet
+ ALT_MGMT_IPV6_SUBNET_NAME=ipv6-${ALT_MGMT_SUBNET_NAME}
+ ALT_MGMT_ID=$(set_up_network $ADMIN_ALT_DEMO_CLOUD $ALT_TENANT_ID $ALT_MGMT_NETWORK_NAME $ALT_MGMT_SUBNET_NAME $ALT_MGMT_IPV6_SUBNET_NAME)
+ echo "Created network ${ALT_MGMT_NETWORK_NAME} (${ALT_MGMT_ID})"
+
+ # Make sure we can reach the VMs
+ local replace_range=${SUBNETPOOL_PREFIX_V4}
+ if [[ -z "${SUBNETPOOL_V4_ID}" ]]; then
+ replace_range=${FIXED_RANGE}
+ fi
+ sudo ip route replace $replace_range via $ROUTER_GW_IP
+
+ echo "Neutron network list:"
+ openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network list
+
+ # Now make sure the conf settings are right
+ iniset $TROVE_CONF DEFAULT network_label_regex "${ALT_PRIVATE_NETWORK_NAME}"
+ iniset $TROVE_CONF DEFAULT ip_regex ""
+ iniset $TROVE_CONF DEFAULT black_list_regex ""
+ # Don't use a default network for now, until the neutron issues are figured out
+ #iniset $TROVE_CONF DEFAULT default_neutron_networks "${ALT_MGMT_ID}"
+ iniset $TROVE_CONF DEFAULT default_neutron_networks ""
iniset $TROVE_CONF DEFAULT network_driver trove.network.neutron.NeutronDriver
+ iniset $TROVE_TASKMANAGER_CONF DEFAULT network_label_regex "${ALT_PRIVATE_NETWORK_NAME}"
+ iniset $TROVE_TASKMANAGER_CONF DEFAULT ip_regex ""
+ iniset $TROVE_TASKMANAGER_CONF DEFAULT black_list_regex ""
+ # Don't use a default network for now, until the neutron issues are figured out
+ #iniset $TROVE_TASKMANAGER_CONF DEFAULT default_neutron_networks "${ALT_MGMT_ID}"
+ iniset $TROVE_CONF DEFAULT default_neutron_networks ""
iniset $TROVE_TASKMANAGER_CONF DEFAULT network_driver trove.network.neutron.NeutronDriver
- iniset $TROVE_TASKMANAGER_CONF cassandra tcp_ports 22,7000,7001,7199,9042,9160
- iniset $TROVE_TASKMANAGER_CONF couchbase tcp_ports 22,8091,8092,4369,11209-11211,21100-21199
- iniset $TROVE_TASKMANAGER_CONF couchdb tcp_ports 22,5984
- iniset $TROVE_TASKMANAGER_CONF db2 tcp_ports 22,50000
- iniset $TROVE_TASKMANAGER_CONF mariadb tcp_ports 22,3306,4444,4567,4568
- iniset $TROVE_TASKMANAGER_CONF mongodb tcp_ports 22,2500,27017,27019
- iniset $TROVE_TASKMANAGER_CONF mysql tcp_ports 22,3306
- iniset $TROVE_TASKMANAGER_CONF percona tcp_ports 22,3306
- iniset $TROVE_TASKMANAGER_CONF postgresql tcp_ports 22,5432
- iniset $TROVE_TASKMANAGER_CONF pxc tcp_ports 22,3306,4444,4567,4568
- iniset $TROVE_TASKMANAGER_CONF redis tcp_ports 22,6379,16379
- iniset $TROVE_TASKMANAGER_CONF vertica tcp_ports 22,5433,5434,5444,5450,4803
}
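Two quick checks after finalize_trove_network has run, assuming devstack's ini helper functions are sourced and the default config paths are in use:

    # The host route to the guest subnets should go via the router gateway,
    # and trove.conf should carry the network_label_regex set above.
    ip route show | grep "$ROUTER_GW_IP"
    iniget /etc/trove/trove.conf DEFAULT network_label_regex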
# start_trove() - Start running processes, including screen
diff --git a/devstack/settings b/devstack/settings
index 1fe89543..be3074f4 100644
--- a/devstack/settings
+++ b/devstack/settings
@@ -21,9 +21,11 @@ TROVE_TASKMANAGER_CONF=${TROVE_TASKMANAGER_CONF:-${TROVE_CONF_DIR}/trove-taskman
TROVE_CONDUCTOR_CONF=${TROVE_CONDUCTOR_CONF:-${TROVE_CONF_DIR}/trove-conductor.conf}
TROVE_GUESTAGENT_CONF=${TROVE_GUESTAGENT_CONF:-${TROVE_CONF_DIR}/trove-guestagent.conf}
TROVE_API_PASTE_INI=${TROVE_API_PASTE_INI:-${TROVE_CONF_DIR}/api-paste.ini}
+TROVE_POLICY_JSON=${TROVE_POLICY_JSON:-${TROVE_CONF_DIR}/policy.json}
TROVE_LOCAL_CONF_DIR=${TROVE_LOCAL_CONF_DIR:-${TROVE_DIR}/etc/trove}
TROVE_LOCAL_API_PASTE_INI=${TROVE_LOCAL_API_PASTE_INI:-${TROVE_LOCAL_CONF_DIR}/api-paste.ini}
+TROVE_LOCAL_POLICY_JSON=${TROVE_LOCAL_POLICY_JSON:-${TROVE_LOCAL_CONF_DIR}/policy.json}
TROVE_AUTH_CACHE_DIR=${TROVE_AUTH_CACHE_DIR:-/var/cache/trove}
TROVE_DATASTORE_TYPE=${TROVE_DATASTORE_TYPE:-"mysql"}
TROVE_DATASTORE_VERSION=${TROVE_DATASTORE_VERSION:-"5.6"}
@@ -42,6 +44,8 @@ TROVE_STATE_CHANGE_WAIT_TIME=${TROVE_STATE_CHANGE_WAIT_TIME}
# Set up the host gateway
if is_service_enabled neutron; then
TROVE_HOST_GATEWAY=${PUBLIC_NETWORK_GATEWAY:-172.24.4.1}
+ TROVE_PRIVATE_NETWORK_NAME=${TROVE_PRIVATE_NETWORK_NAME:-alt-private}
+ TROVE_PRIVATE_SUBNET_NAME=${TROVE_PRIVATE_SUBNET_NAME:-${TROVE_PRIVATE_NETWORK_NAME}-subnet}
else
TROVE_HOST_GATEWAY=${NETWORK_GATEWAY:-10.0.0.1}
fi
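The two new network-name settings follow the usual devstack override pattern, so they can be changed from local.conf; the names below are only examples:

    [[local|localrc]]
    TROVE_PRIVATE_NETWORK_NAME=trove-net
    TROVE_PRIVATE_SUBNET_NAME=trove-subnet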
diff --git a/doc/source/dev/building_guest_images.rst b/doc/source/dev/building_guest_images.rst
index 4e617050..1ce3254f 100644
--- a/doc/source/dev/building_guest_images.rst
+++ b/doc/source/dev/building_guest_images.rst
@@ -206,25 +206,35 @@ This command will create a guest image usable by Trove:
.. code-block:: bash
+ # assign a suitable value for each of these environment
+ # variables that change the way the elements behave.
export HOST_USERNAME
export HOST_SCP_USERNAME
export GUEST_USERNAME
- export NETWORK_GATEWAY
- export REDSTACK_SCRIPTS
+ export CONTROLLER_IP
+ export TROVESTACK_SCRIPTS
export SERVICE_TYPE
export PATH_TROVE
export ESCAPED_PATH_TROVE
export SSH_DIR
export GUEST_LOGDIR
export ESCAPED_GUEST_LOGDIR
- export ELEMENTS_PATH=$REDSTACK_SCRIPTS/files/elements:$PATH_TRIPLEO_ELEMENTS/elements
export DIB_CLOUD_INIT_DATASOURCES="ConfigDrive"
- local QEMU_IMG_OPTIONS=$(! $(qemu-img | grep -q 'version 1') && \
- echo "--qemu-img-options compat=0.10")
- ${PATH_DISKIMAGEBUILDER}/bin/disk-image-create -a amd64 -o "${IMAGE_NAME}" \
- -x ${QEMU_IMG_OPTIONS} ${DISTRO} ${EXTRA_ELEMENTS} \
- vm heat-cfntools cloud-init-datasources ${DISTRO}-guest \
- ${DISTRO}-${SERVICE_TYPE}
+ export DATASTORE_PKG_LOCATION
+ export BRANCH_OVERRIDE
+
+ # you typically do not have to change these variables
+ export ELEMENTS_PATH=$TROVESTACK_SCRIPTS/files/elements
+ export ELEMENTS_PATH+=:$PATH_DISKIMAGEBUILDER/elements
+ export ELEMENTS_PATH+=:$PATH_TRIPLEO_ELEMENTS/elements
+ export DIB_APT_CONF_DIR=/etc/apt/apt.conf.d
+ export DIB_CLOUD_INIT_ETC_HOSTS=true
+ local QEMU_IMG_OPTIONS=$(! $(qemu-img | grep -q 'version 1') && echo "--qemu-img-options compat=0.10")
+
+ # run disk-image-create that actually causes the image to be built
+ ${PATH_DISKIMAGEBUILDER}/bin/disk-image-create -a amd64 -o "${VM}" \
+ -x ${QEMU_IMG_OPTIONS} ${DISTRO} ${EXTRA_ELEMENTS} vm heat-cfntools \
+ cloud-init-datasources ${DISTRO}-guest ${DISTRO}-${SERVICE_TYPE}
-----------------------------
Disk Image Builder 'Elements'
@@ -281,14 +291,13 @@ DIB comes with some tools that are located in the elements directory.
In addition, projects like TripleO [5]_ provide elements as well.
-Trove provides a set of elements as part of the trove-integration [6]_
+Trove provides a set of elements as part of the trove [6]_
project which will be described in the next section.
Trove Reference Elements
========================
-Reference elements provided by Trove are part of the trove-integration
-project.
+Reference elements provided by Trove are part of the trove project.
In keeping with the philosophy of making elements 'layered', Trove
provides two sets of elements. The first implements the guest agent
@@ -300,14 +309,14 @@ Provided Reference Elements
---------------------------
The Trove reference elements are located in the
-trove-integration/scripts/files/elements directory. The elements
+trove/integration/scripts/files/elements directory. The elements
[operating-system]-guest provide the Trove Guest capabilities and the
[operating-system]-[database] elements provide support for each
database on the specified database.
.. code-block:: bash
- user@machine:/opt/stack/trove-integration/scripts/files/elements$ ls -l
+ user@machine:/opt/stack/trove/integration/scripts/files/elements$ ls -l
total 56
drwxrwxr-x 5 user group 4096 Jan 7 12:47 fedora-guest
drwxrwxr-x 3 user group 4096 Jan 7 12:47 fedora-mongodb
@@ -323,7 +332,7 @@ database on the specified database.
drwxrwxr-x 4 user group 4096 Jan 7 12:47 ubuntu-percona
drwxrwxr-x 3 user group 4096 Jan 7 12:47 ubuntu-postgresql
drwxrwxr-x 3 user group 4096 Jan 7 12:47 ubuntu-redis
- user@machine:/opt/stack/trove-integration/scripts/files/elements$
+ user@machine:/opt/stack/trove/integration/scripts/files/elements$
With this infrastructure in place, and the elements from DIB and
TripleO accessible to the DIB command, one can generate the (for
@@ -347,7 +356,7 @@ that will allow any user of Trove to be able to build a guest image
for that datastore.
This is typically accomplished by submitting files into the
-trove-integration project, as above.
+trove project, as above.
Getting the Guest Agent Code onto a Trove Guest Instance
========================================================
@@ -505,68 +514,89 @@ the guest image can be created by executing the following:
DATASTORE_PKG_LOCATION=/path/to/new_db.deb ./script_to_call_dib.sh
-Assuming the elements for new_db are available in redstack, this would
-equate to:
+Assuming the elements for new_db are available in the trove
+repository, this would equate to:
.. code-block:: bash
- DATASTORE_PKG_LOCATION=/path/to/new_db.deb ./redstack kick-start new_db
+ DATASTORE_PKG_LOCATION=/path/to/new_db.deb ./trovestack kick-start new_db
Building Guest Images Using Standard Elements
=============================================
A very good reference for how one builds guest images can be found by
-reviewing the redstack script (trove-integration/scripts). Lower level
+reviewing the trovestack script (trove/integration/scripts). Lower level
routines that actually invoke Disk Image Builder can be found in
-trove-integration/scripts/functions_qemu.
+trove/integration/scripts/functions_qemu.
The following block of code illustrates the most basic invocation of
DIB to create a guest image. This code is in
-trove-integration/scripts/functions_qemu as part of the function
+trove/integration/scripts/functions_qemu as part of the function
build_vm(). We look at this section of code in detail below.
.. code-block:: bash
+ # assign a suitable value for each of these environment
+ # variables that change the way the elements behave.
export HOST_USERNAME
export HOST_SCP_USERNAME
export GUEST_USERNAME
- export NETWORK_GATEWAY
- export REDSTACK_SCRIPTS
+ export CONTROLLER_IP
+ export TROVESTACK_SCRIPTS
export SERVICE_TYPE
export PATH_TROVE
export ESCAPED_PATH_TROVE
export SSH_DIR
export GUEST_LOGDIR
export ESCAPED_GUEST_LOGDIR
- export ELEMENTS_PATH=$REDSTACK_SCRIPTS/files/elements:$PATH_TRIPLEO_ELEMENTS/elements
export DIB_CLOUD_INIT_DATASOURCES="ConfigDrive"
- local QEMU_IMG_OPTIONS=$(! $(qemu-img | grep -q 'version 1') && \
- echo "--qemu-img-options compat=0.10")
- ${PATH_DISKIMAGEBUILDER}/bin/disk-image-create -a amd64 -o "${IMAGE_NAME}" \
- -x ${QEMU_IMG_OPTIONS} ${DISTRO} ${EXTRA_ELEMENTS} \
- vm heat-cfntools cloud-init-datasources ${DISTRO}-guest \
- ${DISTRO}-${SERVICE_TYPE}
+ export DATASTORE_PKG_LOCATION
+ export BRANCH_OVERRIDE
+
+ # you typically do not have to change these variables
+ export ELEMENTS_PATH=$TROVESTACK_SCRIPTS/files/elements
+ export ELEMENTS_PATH+=:$PATH_DISKIMAGEBUILDER/elements
+ export ELEMENTS_PATH+=:$PATH_TRIPLEO_ELEMENTS/elements
+ export DIB_APT_CONF_DIR=/etc/apt/apt.conf.d
+ export DIB_CLOUD_INIT_ETC_HOSTS=true
+ local QEMU_IMG_OPTIONS=$(! $(qemu-img | grep -q 'version 1') && echo "--qemu-img-options compat=0.10")
+
+ # run disk-image-create that actually causes the image to be built
+ ${PATH_DISKIMAGEBUILDER}/bin/disk-image-create -a amd64 -o "${VM}" \
+ -x ${QEMU_IMG_OPTIONS} ${DISTRO} ${EXTRA_ELEMENTS} vm heat-cfntools \
+ cloud-init-datasources ${DISTRO}-guest ${DISTRO}-${SERVICE_TYPE}
Several of the environment variables referenced above are referenced
in the course of the Disk Image Building process.
For example, let's look at GUEST_LOGDIR. Looking at the element
-elements/fedora-guest/extra-data.d/20-guest-upstart, we find:
+elements/fedora-guest/extra-data.d/20-guest-systemd, we find:
.. code-block:: bash
- #!/bin/bash
+ #!/bin/bash
+
+ set -e
+ set -o xtrace
- set -e
- set -o xtrace
+ # CONTEXT: HOST prior to IMAGE BUILD as SCRIPT USER
+ # PURPOSE: stages the bootstrap file and upstart conf file while replacing variables so that guest image is properly
+ # configured
- [...]
+ source $_LIB/die
- [ -n "${ESCAPED_GUEST_LOGDIR}" ] || die "ESCAPED_GUEST_LOGDIR must be set to the escaped guest log dir"
+ [ -n "$TMP_HOOKS_PATH" ] || die "Temp hook path not set"
- sed "s/GUEST_USERNAME/${GUEST_USERNAME}/g;s/GUEST_LOGDIR/${ESCAPED_GUEST_LOGDIR}/g;s/HOST_SCP_USERNAME/${HOST_SCP_USERNAME}/g;s/NETWORK_GATEWAY/${NETWORK_GATEWAY}/g;s/PATH_TROVE/${ESCAPED_PATH_TROVE}/g" \
- ${REDSTACK_SCRIPTS}/files/trove-guest.systemd.conf > \
- ${TMP_HOOKS_PATH}/trove-guest.service
+ [ -n "${GUEST_USERNAME}" ] || die "GUEST_USERNAME needs to be set to the user for the guest image"
+ [ -n "${HOST_SCP_USERNAME}" ] || die "HOST_SCP_USERNAME needs to be set to the user for the host instance"
+ [ -n "${CONTROLLER_IP}" ] || die "CONTROLLER_IP needs to be set to the ip address that guests will use to contact the controller"
+ [ -n "${ESCAPED_PATH_TROVE}" ] || die "ESCAPED_PATH_TROVE needs to be set to the path to the trove directory on the trovestack host"
+ [ -n "${TROVESTACK_SCRIPTS}" ] || die "TROVESTACK_SCRIPTS needs to be set to the trove/integration/scripts dir"
+ [ -n "${ESCAPED_GUEST_LOGDIR}" ] || die "ESCAPED_GUEST_LOGDIR must be set to the escaped guest log dir"
+
+ sed "s/GUEST_USERNAME/${GUEST_USERNAME}/g;s/GUEST_LOGDIR/${ESCAPED_GUEST_LOGDIR}/g;s/HOST_SCP_USERNAME/${HOST_SCP_USERNAME}/g;s/CONTROLLER_IP/${CONTROLLER_IP}/g;s/PATH_TROVE/${ESCAPED_PATH_TROVE}/g" \
+ ${TROVESTACK_SCRIPTS}/files/trove-guest.systemd.conf > \
+ ${TMP_HOOKS_PATH}/trove-guest.service
As you can see, the value of GUEST_LOGDIR is used in the extra-data.d
script to appropriately configure the trove-guest.systemd.conf file.
@@ -597,8 +627,8 @@ MySQL. And therefore these would end up being the elements:
vm From diskimage-builder/elements/vm
heat-cfntools From tripleo-image-elements/elements/heat-cfntools
cloud-init-datasources From diskimage-builder/elements/cloud-init-datasources
- ubuntu-guest From trove-integration/scripts/files/elements/ubuntu-guest
- ubuntu-mysql From trove-integration/scripts/files/elements/ubuntu-mysql
+ ubuntu-guest From trove/integration/scripts/files/elements/ubuntu-guest
+ ubuntu-mysql From trove/integration/scripts/files/elements/ubuntu-mysql
References
==========
@@ -608,5 +638,4 @@ References
.. [3] User (especially in the USA) are cautioned about this spelling which once resulted in several sleepless nights.
.. [4] https://git.openstack.org/cgit/openstack/diskimage-builder/tree/README.rst#writing-an-element
.. [5] https://git.openstack.org/cgit/openstack/tripleo-image-elements/tree/elements
-.. [6] https://git.openstack.org/cgit/openstack/trove-integration/tree/scripts/files/elements
-
+.. [6] https://git.openstack.org/cgit/openstack/trove/integration/tree/scripts/files/elements
diff --git a/doc/source/dev/install.rst b/doc/source/dev/install.rst
index e863c7cc..aa98684c 100644
--- a/doc/source/dev/install.rst
+++ b/doc/source/dev/install.rst
@@ -6,15 +6,15 @@ Trove Installation
Trove is constantly under development. The easiest way to install
Trove is using the Trove integration scripts that can be found in
-git in the `Trove Integration Repository`_.
+git in the `Trove Repository`_.
Steps to set up a Trove Developer Environment
=============================================
-----------------------------
-Installing trove-integration
-----------------------------
+----------------
+Installing trove
+----------------
* Install a fresh Ubuntu 14.04 (Trusty Tahr) image (preferably a
virtual machine)
@@ -37,45 +37,36 @@ Installing trove-integration
* Login with ubuntu::
# su ubuntu
- # cd ~
+ # mkdir -p /opt/stack
+ # cd /opt/stack
* Clone this repo::
- # git clone https://git.openstack.org/openstack/trove-integration.git
+ # git clone https://git.openstack.org/openstack/trove.git
* cd into the scripts directory::
- # cd trove-integration/scripts/
+ # cd trove/integration/scripts/
+
+It is important to understand that this process is different now with
+the elements and scripts being part of the trove repository. In the
+past, one could clone trove-integration into the home directory and
+run redstack from there, and it would clone trove in the right
+place. And if you were making changes in trove-integration, it didn't
+really matter where trove-integration was; it could be in home
+directory or /opt/stack, or for that matter, anywhere. This is no
+longer the case. If you are making changes to trove and would like to
+run the trovestack script, you have to be sure that trove is in fact
+cloned in /opt/stack as shown above.
---------------------------------
-Running redstack to install Trove
+Running trovestack to setup Trove
---------------------------------
-Redstack is the core script that allows you to install and interact
-with your developer installation of Trove. Redstack has the following
-options that you can run.
-
-* Get the command list with a short description of each command and
- what it does::
-
- # ./redstack
-
-* Install all the dependencies and then install Trove. This brings up
- trove (tr-api tr-tmgr tr-cond) and initializes the trove database::
-
- # ./redstack install
-
-* Kick start the build/test-init/build-image commands. Add mysql as a
- parameter to set build and add the mysql guest image::
-
- # ./redstack kick-start mysql
-
-* You may need to add this iptables rule, so be sure to save it!::
-
- # sudo iptables -t nat -A POSTROUTING -s 10.0.0.0/24 -o eth0 -j
- MASQUERADE
-
+Now you run trovestack to help setup your development environment. For
+complete details about the trovestack script refer to
+trove/integration/README.md
------------------------
Running the trove client
@@ -96,14 +87,7 @@ Running the nova client
# nova help
+References
+==========
-More information
-================
-
-For more information and help on how to use redstack and other
-trove-integration scripts, please look at the `README documentation`_
-in the `Trove Integration Repository`_.
-
-
-.. _Trove Integration Repository: https://git.openstack.org/cgit/openstack/trove-integration
-.. _README documentation: https://git.openstack.org/cgit/openstack/trove-integration/plain/README.md
+.. _Trove Repository: https://git.openstack.org/cgit/openstack/trove
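As trove/integration/README.md describes, the redstack commands removed above carry over to trovestack under the same names; a typical first run looks like this (mysql is just an example datastore):

    # Run from the integration scripts directory inside the trove checkout.
    cd /opt/stack/trove/integration/scripts
    ./trovestack install
    ./trovestack kick-start mysql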
diff --git a/doc/source/dev/manual_install.rst b/doc/source/dev/manual_install.rst
index 1d8a5963..d94209ab 100644
--- a/doc/source/dev/manual_install.rst
+++ b/doc/source/dev/manual_install.rst
@@ -441,3 +441,28 @@ One possible way to find the network name is to execute the 'nova list' command.
NETWORK_NAME=IP_ADDRESS
+
+Additional information
+======================
+
+Additional information can be found in the OpenStack installation guide for the trove project. This document can be found under the "Installation Tutorials and Guides" section of the OpenStack Documentation.
+
+For the current documentation, visit:
+
+http://docs.openstack.org/index.html#install-guides
+
+Select the link for "Installation Tutorials and Guides"
+
+The installation guides for trove (the Database Service) can be found under the appropriate operating system.
+
+If you are interested in documentation for a specific OpenStack release, visit:
+
+http://docs.openstack.org/<release-code-name>/
+
+For example, the documentation for the Newton release is found at:
+
+http://docs.openstack.org/newton/
+
+and the documentation for the Mitaka release is found at:
+
+http://docs.openstack.org/mitaka/
diff --git a/doc/source/index.rst b/doc/source/index.rst
index 4e0d9514..f803a376 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -26,7 +26,7 @@ Installation And Deployment
Trove is constantly under development. The easiest way to install
Trove is using the Trove integration scripts that can be found in
-git in the `Trove Integration`_ Repository.
+git in the `Trove`_ Repository.
For further details on how to install Trove using the integration
scripts please refer to the :doc:`dev/install` page.
@@ -55,7 +55,6 @@ functionality, the following resources are provided.
* Source Code Repositories
- `Trove`_
- - `Trove Integration`_
- `Trove Client`_
* `Trove Wiki`_ on OpenStack
@@ -81,6 +80,5 @@ Search Trove Documentation
.. _Trove Wiki: https://wiki.openstack.org/wiki/Trove
.. _Trove: https://git.openstack.org/cgit/openstack/trove
-.. _Trove Integration: https://git.openstack.org/cgit/openstack/trove-integration
.. _Trove Client: https://git.openstack.org/cgit/openstack/python-troveclient
.. _Trove API Documentation: http://developer.openstack.org/api-ref-databases-v1.html
diff --git a/etc/trove/api-paste.ini b/etc/trove/api-paste.ini
index ea3612b9..5851c0fd 100644
--- a/etc/trove/api-paste.ini
+++ b/etc/trove/api-paste.ini
@@ -7,7 +7,7 @@ use = call:trove.common.wsgi:versioned_urlmap
paste.app_factory = trove.versions:app_factory
[pipeline:troveapi]
-pipeline = cors faultwrapper osprofiler authtoken authorization contextwrapper ratelimit extensions troveapp
+pipeline = cors http_proxy_to_wsgi faultwrapper osprofiler authtoken authorization contextwrapper ratelimit extensions troveapp
#pipeline = debug extensions troveapp
[filter:extensions]
@@ -41,3 +41,6 @@ paste.app_factory = trove.common.api:app_factory
#Add this filter to log request and response for debugging
[filter:debug]
paste.filter_factory = trove.common.wsgi:Debug
+
+[filter:http_proxy_to_wsgi]
+use = egg:oslo.middleware#http_proxy_to_wsgi
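One quick way to confirm the new http_proxy_to_wsgi filter is taking effect (assuming enable_proxy_headers_parsing is turned on in the [oslo_middleware] section and trove-api listens on its default port 8779; "controller" below is a placeholder for the API host) is to send a forwarded-proto header and check that the version links come back with an https scheme:

    $ curl -s -H 'X-Forwarded-Proto: https' http://controller:8779/ | grep https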
diff --git a/etc/trove/api-paste.ini.test b/etc/trove/api-paste.ini.test
index 7f935402..f2b0bc08 100644
--- a/etc/trove/api-paste.ini.test
+++ b/etc/trove/api-paste.ini.test
@@ -7,7 +7,7 @@ use = call:trove.common.wsgi:versioned_urlmap
paste.app_factory = trove.versions:app_factory
[pipeline:troveapi]
-pipeline = faultwrapper authtoken authorization contextwrapper extensions ratelimit troveapp
+pipeline = faultwrapper http_proxy_to_wsgi authtoken authorization contextwrapper extensions ratelimit troveapp
#pipeline = debug extensions troveapp
[filter:extensions]
@@ -42,3 +42,6 @@ paste.app_factory = trove.common.api:app_factory
#Add this filter to log request and response for debugging
[filter:debug]
paste.filter_factory = trove.common.wsgi:Debug
+
+[filter:http_proxy_to_wsgi]
+use = egg:oslo.middleware#http_proxy_to_wsgi
diff --git a/etc/trove/policy.json b/etc/trove/policy.json
new file mode 100644
index 00000000..370a8f2a
--- /dev/null
+++ b/etc/trove/policy.json
@@ -0,0 +1,96 @@
+{
+ "admin": "role:admin or is_admin:True",
+ "admin_or_owner": "rule:admin or tenant:%(tenant)s",
+ "default": "rule:admin_or_owner",
+
+ "instance:create": "rule:admin_or_owner",
+ "instance:delete": "rule:admin_or_owner",
+ "instance:force_delete": "rule:admin_or_owner",
+ "instance:index": "rule:admin_or_owner",
+ "instance:show": "rule:admin_or_owner",
+ "instance:update": "rule:admin_or_owner",
+ "instance:edit": "rule:admin_or_owner",
+ "instance:restart": "rule:admin_or_owner",
+ "instance:resize_volume": "rule:admin_or_owner",
+ "instance:resize_flavor": "rule:admin_or_owner",
+ "instance:reset_status": "rule:admin",
+ "instance:promote_to_replica_source": "rule:admin_or_owner",
+ "instance:eject_replica_source": "rule:admin_or_owner",
+ "instance:configuration": "rule:admin_or_owner",
+ "instance:guest_log_list": "rule:admin_or_owner",
+ "instance:backups": "rule:admin_or_owner",
+ "instance:module_list": "rule:admin_or_owner",
+ "instance:module_apply": "rule:admin_or_owner",
+ "instance:module_remove": "rule:admin_or_owner",
+
+ "instance:extension:root:create": "rule:admin_or_owner",
+ "instance:extension:root:delete": "rule:admin_or_owner",
+ "instance:extension:root:index": "rule:admin_or_owner",
+
+ "instance:extension:user:create": "rule:admin_or_owner",
+ "instance:extension:user:delete": "rule:admin_or_owner",
+ "instance:extension:user:index": "rule:admin_or_owner",
+ "instance:extension:user:show": "rule:admin_or_owner",
+ "instance:extension:user:update": "rule:admin_or_owner",
+ "instance:extension:user:update_all": "rule:admin_or_owner",
+
+ "instance:extension:user_access:update": "rule:admin_or_owner",
+ "instance:extension:user_access:delete": "rule:admin_or_owner",
+ "instance:extension:user_access:index": "rule:admin_or_owner",
+
+ "instance:extension:database:create": "rule:admin_or_owner",
+ "instance:extension:database:delete": "rule:admin_or_owner",
+ "instance:extension:database:index": "rule:admin_or_owner",
+ "instance:extension:database:show": "rule:admin_or_owner",
+
+ "cluster:create": "rule:admin_or_owner",
+ "cluster:delete": "rule:admin_or_owner",
+ "cluster:force_delete": "rule:admin_or_owner",
+ "cluster:index": "rule:admin_or_owner",
+ "cluster:show": "rule:admin_or_owner",
+ "cluster:show_instance": "rule:admin_or_owner",
+ "cluster:action": "rule:admin_or_owner",
+ "cluster:reset-status": "rule:admin",
+
+ "cluster:extension:root:create": "rule:admin_or_owner",
+ "cluster:extension:root:delete": "rule:admin_or_owner",
+ "cluster:extension:root:index": "rule:admin_or_owner",
+
+ "backup:create": "rule:admin_or_owner",
+ "backup:delete": "rule:admin_or_owner",
+ "backup:index": "rule:admin_or_owner",
+ "backup:show": "rule:admin_or_owner",
+
+ "configuration:create": "rule:admin_or_owner",
+ "configuration:delete": "rule:admin_or_owner",
+ "configuration:index": "rule:admin_or_owner",
+ "configuration:show": "rule:admin_or_owner",
+ "configuration:instances": "rule:admin_or_owner",
+ "configuration:update": "rule:admin_or_owner",
+ "configuration:edit": "rule:admin_or_owner",
+
+ "configuration-parameter:index": "rule:admin_or_owner",
+ "configuration-parameter:show": "rule:admin_or_owner",
+ "configuration-parameter:index_by_version": "rule:admin_or_owner",
+ "configuration-parameter:show_by_version": "rule:admin_or_owner",
+
+ "datastore:index": "",
+ "datastore:show": "",
+ "datastore:version_show": "",
+ "datastore:version_show_by_uuid": "",
+ "datastore:version_index": "",
+ "datastore:list_associated_flavors": "",
+ "datastore:list_associated_volume_types": "",
+
+ "flavor:index": "",
+ "flavor:show": "",
+
+ "limits:index": "rule:admin_or_owner",
+
+ "module:create": "rule:admin_or_owner",
+ "module:delete": "rule:admin_or_owner",
+ "module:index": "rule:admin_or_owner",
+ "module:show": "rule:admin_or_owner",
+ "module:instances": "rule:admin_or_owner",
+ "module:update": "rule:admin_or_owner"
+}
diff --git a/etc/trove/trove.conf.sample b/etc/trove/trove.conf.sample
index 6e4098bf..a64f7522 100644
--- a/etc/trove/trove.conf.sample
+++ b/etc/trove/trove.conf.sample
@@ -309,4 +309,14 @@ api_strategy = trove.common.strategies.cluster.experimental.vertica.api.VerticaA
# Indicate which header field names may be used during the actual request.
# (list value)
-#allow_headers = X-Auth-Token, X-Identity-Status, X-Roles, X-Service-Catalog, X-User-Id, X-Tenant-Id, X-OpenStack-Request-ID \ No newline at end of file
+#allow_headers = X-Auth-Token, X-Identity-Status, X-Roles, X-Service-Catalog, X-User-Id, X-Tenant-Id, X-OpenStack-Request-ID
+
+[oslo_middleware]
+
+#
+# From oslo.middleware
+#
+
+# Whether the application is behind a proxy or not. This determines if the
+# middleware should parse the headers or not. (boolean value)
+#enable_proxy_headers_parsing = false
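When trove-api actually sits behind a TLS-terminating proxy, this option needs to be switched on. One non-interactive way to do that, using devstack's iniset helper purely as an illustration (the /etc/trove/trove.conf path is the conventional install location), is:

    iniset /etc/trove/trove.conf oslo_middleware enable_proxy_headers_parsing true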
diff --git a/install-guide/source/conf.py b/install-guide/source/conf.py
index 2589cd3a..52ebe792 100644
--- a/install-guide/source/conf.py
+++ b/install-guide/source/conf.py
@@ -23,8 +23,8 @@
import os
# import sys
-
import openstackdocstheme
+from trove.version import version_info as trove_version
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
@@ -63,10 +63,8 @@ copyright = u'2016, OpenStack contributors'
# |version| and |release|, also used in various other places throughout the
# built documents.
#
-# The short X.Y version.
-version = '0.1'
-# The full version, including alpha/beta/rc tags.
-release = '0.1'
+version = trove_version.canonical_version_string()
+release = trove_version.version_string_with_vcs()
# A few variables have to be set for the log-a-bug feature.
# giturl: The location of conf.py on Git. Must be set manually.
diff --git a/integration/README.md b/integration/README.md
index dde548ba..593210cb 100644
--- a/integration/README.md
+++ b/integration/README.md
@@ -38,6 +38,15 @@ Install a fresh Ubuntu 14.04 (Trusty Tahr) image ( _We suggest creating a develo
$ mkdir -p /opt/stack
$ cd /opt/stack
+    *Note that it is important to clone the repository here. This is
+    a change from the earlier trove-integration setup, where you
+    could clone trove-integration anywhere you liked (such as HOME)
+    and trove would be cloned for you in the right place. Since
+    trovestack now lives in the trove repository, keep your trove
+    checkout in /opt/stack if you want to test your own changes;
+    otherwise another trove repository will be cloned for you.
+
#### Clone this repo:
$ git clone https://github.com/openstack/trove.git
@@ -46,7 +55,7 @@ Install a fresh Ubuntu 14.04 (Trusty Tahr) image ( _We suggest creating a develo
$ cd trove/integration/scripts/
-#### Running trovestack is the core script:
+#### Running trovestack:
*Run this to get the command list with a short description of each*
$ ./trovestack
diff --git a/integration/scripts/files/elements/ubuntu-cassandra/install.d/10-cassandra b/integration/scripts/files/elements/ubuntu-cassandra/install.d/10-cassandra
index 2bd40181..54e34d68 100755
--- a/integration/scripts/files/elements/ubuntu-cassandra/install.d/10-cassandra
+++ b/integration/scripts/files/elements/ubuntu-cassandra/install.d/10-cassandra
@@ -4,21 +4,21 @@ set -ex
set -o xtrace
export DEBIAN_FRONTEND=noninteractive
-apt-get install -qy curl
+apt-get --allow-unauthenticated install -qy curl
echo "deb http://debian.datastax.com/community stable main" >> /etc/apt/sources.list.d/cassandra.sources.list
curl -L http://debian.datastax.com/debian/repo_key | apt-key add -
apt-get update
-apt-get install -qy openjdk-7-jdk expect python-dev
-apt-get install -qy libxml2-dev ntp mc
-apt-get install -qy libxslt1-dev python-pexpect
-apt-get install -qy python-migrate build-essential
+apt-get --allow-unauthenticated install -qy openjdk-7-jdk expect python-dev
+apt-get --allow-unauthenticated install -qy libxml2-dev ntp mc
+apt-get --allow-unauthenticated install -qy libxslt1-dev python-pexpect
+apt-get --allow-unauthenticated install -qy python-migrate build-essential
-apt-get install dsc21=2.1.* cassandra=2.1.* -qy
+apt-get --allow-unauthenticated install dsc21=2.1.* cassandra=2.1.* -qy
# The Python Driver 2.0 for Apache Cassandra.
-pip install cassandra-driver
+pip2 install cassandra-driver
# Sorted sets support for the Python driver.
-pip install blist
+pip2 install blist
service cassandra stop
rm -rf /var/lib/cassandra/data/system/*
diff --git a/integration/scripts/files/elements/ubuntu-couchbase/install.d/10-couchbase b/integration/scripts/files/elements/ubuntu-couchbase/install.d/10-couchbase
index 0dc67d8b..1303fdfd 100755
--- a/integration/scripts/files/elements/ubuntu-couchbase/install.d/10-couchbase
+++ b/integration/scripts/files/elements/ubuntu-couchbase/install.d/10-couchbase
@@ -2,7 +2,7 @@ set -e
set -o xtrace
export DEBIAN_FRONTEND=noninteractive
-apt-get install -qy curl
-apt-get install -qy libssl0.9.8
+apt-get --allow-unauthenticated install -qy curl
+apt-get --allow-unauthenticated install -qy libssl0.9.8
curl -O http://packages.couchbase.com/releases/2.2.0/couchbase-server-community_2.2.0_x86_64.deb
INSTALL_DONT_START_SERVER=1 dpkg -i couchbase-server-community_2.2.0_x86_64.deb
diff --git a/integration/scripts/files/elements/ubuntu-couchdb/install.d/10-couchdb b/integration/scripts/files/elements/ubuntu-couchdb/install.d/10-couchdb
index 77871d3f..b53f7faa 100755
--- a/integration/scripts/files/elements/ubuntu-couchdb/install.d/10-couchdb
+++ b/integration/scripts/files/elements/ubuntu-couchdb/install.d/10-couchdb
@@ -6,14 +6,14 @@ set -o xtrace
export DEBIAN_FRONTEND=noninteractive
# install the ppa-finding tool for ubuntu 12.0.4 release
-apt-get install -y python-software-properties
+apt-get --allow-unauthenticated install -y python-software-properties
add-apt-repository -y ppa:couchdb/stable
# update cached list of packages
apt-get update -y
# remove any existing couchdb binaries
apt-get remove -yf couchdb couchdb-bin couchdb-common
# install couchdb
-apt-get install -yV couchdb
+apt-get --allow-unauthenticated install -yV couchdb
# install curl to provide a way to interact with CouchDB
# over HTTP REST API
-apt-get install -qy curl
+apt-get --allow-unauthenticated install -qy curl
diff --git a/integration/scripts/files/elements/ubuntu-db2/install.d/10-db2 b/integration/scripts/files/elements/ubuntu-db2/install.d/10-db2
index 63bc3bf7..4495e95a 100755
--- a/integration/scripts/files/elements/ubuntu-db2/install.d/10-db2
+++ b/integration/scripts/files/elements/ubuntu-db2/install.d/10-db2
@@ -19,8 +19,8 @@ echo "127.0.0.1 ${host_name}" >> /etc/hosts
tar -xvzf /tmp/in_target.d/db2.tar.gz
# installing dependencies
-apt-get install libaio1
-apt-get install libstdc++6
+apt-get --allow-unauthenticated install libaio1
+apt-get --allow-unauthenticated install libstdc++6
# start the installation process. Accepts the default installation directory '/opt/ibm/db2/V10.5'
${DB2_PKG_LOCATION}/expc/db2_install -b /opt/ibm/db2/V10.5 -f sysreq -l ${DB2_PKG_LOCATION}/db2_install.log
diff --git a/integration/scripts/files/elements/ubuntu-guest/install.d/05-base-apps b/integration/scripts/files/elements/ubuntu-guest/install.d/05-base-apps
index 5cd392b2..3a8cacfb 100755
--- a/integration/scripts/files/elements/ubuntu-guest/install.d/05-base-apps
+++ b/integration/scripts/files/elements/ubuntu-guest/install.d/05-base-apps
@@ -7,4 +7,4 @@ set -e
set -o xtrace
export DEBIAN_FRONTEND=noninteractive
-apt-get -y install ntp apparmor-utils
+apt-get --allow-unauthenticated -y install ntp apparmor-utils
diff --git a/integration/scripts/files/elements/ubuntu-guest/install.d/15-trove-dep b/integration/scripts/files/elements/ubuntu-guest/install.d/15-trove-dep
index 28d9172e..c79ad877 100755
--- a/integration/scripts/files/elements/ubuntu-guest/install.d/15-trove-dep
+++ b/integration/scripts/files/elements/ubuntu-guest/install.d/15-trove-dep
@@ -7,7 +7,7 @@ set -e
set -o xtrace
export DEBIAN_FRONTEND=noninteractive
-apt-get -y install python-dev libxml2-dev libxslt1-dev python-setuptools \
+apt-get --allow-unauthenticated -y install python-dev libxml2-dev libxslt1-dev python-setuptools \
python-pip python-sqlalchemy python-lxml \
python-routes python-eventlet python-webob \
python-pastedeploy python-paste python-netaddr \
@@ -24,8 +24,8 @@ if [ -f ${TMP_HOOKS_DIR}/upper-constraints.txt ]; then
UPPER_CONSTRAINTS=" -c ${TMP_HOOKS_DIR}/upper-constraints.txt"
fi
-pip install -q --upgrade -r ${TMP_HOOKS_DIR}/requirements.txt ${UPPER_CONSTRAINTS}
+pip2 install -q --upgrade -r ${TMP_HOOKS_DIR}/requirements.txt ${UPPER_CONSTRAINTS}
echo "diagnostic pip freeze output follows"
-pip freeze
+pip2 freeze
echo "diagnostic pip freeze output above"
diff --git a/integration/scripts/files/elements/ubuntu-guest/pre-install.d/04-baseline-tools b/integration/scripts/files/elements/ubuntu-guest/pre-install.d/04-baseline-tools
index 1a8647f5..8360ddfc 100755
--- a/integration/scripts/files/elements/ubuntu-guest/pre-install.d/04-baseline-tools
+++ b/integration/scripts/files/elements/ubuntu-guest/pre-install.d/04-baseline-tools
@@ -4,4 +4,4 @@
set -e
set -o xtrace
-apt-get install -y language-pack-en python-software-properties \ No newline at end of file
+apt-get --allow-unauthenticated install -y language-pack-en python-software-properties \ No newline at end of file
diff --git a/integration/scripts/files/elements/ubuntu-mongodb/install.d/20-mongodb b/integration/scripts/files/elements/ubuntu-mongodb/install.d/20-mongodb
index 6a95d2f0..4ab5c349 100755
--- a/integration/scripts/files/elements/ubuntu-mongodb/install.d/20-mongodb
+++ b/integration/scripts/files/elements/ubuntu-mongodb/install.d/20-mongodb
@@ -5,4 +5,4 @@ set -o xtrace
export DEBIAN_FRONTEND=noninteractive
-apt-get -y install mongodb-org=3.2.6
+apt-get --allow-unauthenticated -y install mongodb-org=3.2.6
diff --git a/integration/scripts/files/elements/ubuntu-mongodb/install.d/25-trove-mongo-dep b/integration/scripts/files/elements/ubuntu-mongodb/install.d/25-trove-mongo-dep
index 7be7ef95..a8b3ddc0 100755
--- a/integration/scripts/files/elements/ubuntu-mongodb/install.d/25-trove-mongo-dep
+++ b/integration/scripts/files/elements/ubuntu-mongodb/install.d/25-trove-mongo-dep
@@ -6,4 +6,4 @@
set -e
set -o xtrace
-pip install pymongo>=3.0.2,!=3.1
+pip2 install pymongo>=3.0.2,!=3.1
diff --git a/integration/scripts/files/elements/ubuntu-mongodb/pre-install.d/10-mongodb-apt-key b/integration/scripts/files/elements/ubuntu-mongodb/pre-install.d/10-mongodb-apt-key
index 1538d61c..1345f508 100755
--- a/integration/scripts/files/elements/ubuntu-mongodb/pre-install.d/10-mongodb-apt-key
+++ b/integration/scripts/files/elements/ubuntu-mongodb/pre-install.d/10-mongodb-apt-key
@@ -5,7 +5,7 @@ set -o xtrace
[ -n "${RELEASE}" ] || die "RELEASE must be set to either Precise or Quantal"
-apt-get -y install software-properties-common
+apt-get --allow-unauthenticated -y install software-properties-common
apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv EA312927
diff --git a/integration/scripts/files/elements/ubuntu-mysql/pre-install.d/10-percona-apt-key b/integration/scripts/files/elements/ubuntu-mysql/pre-install.d/10-percona-apt-key
index cd289250..d62c6058 100755
--- a/integration/scripts/files/elements/ubuntu-mysql/pre-install.d/10-percona-apt-key
+++ b/integration/scripts/files/elements/ubuntu-mysql/pre-install.d/10-percona-apt-key
@@ -7,7 +7,7 @@ set -e
set -o xtrace
[ -n "${GUEST_USERNAME}" ] || die "GUEST_USERNAME needs to be set to the user for the guest image"
-[ -n "${RELEASE}" ] || die "RELEASE must be set to either Precise or Quantal"
+[ -n "${RELEASE}" ] || die "RELEASE must be set to a valid Ubuntu release (e.g. trusty)"
# Add Percona GPG key
mkdir -p /home/${GUEST_USERNAME}/.gnupg
diff --git a/integration/scripts/files/elements/ubuntu-percona/install.d/30-mysql b/integration/scripts/files/elements/ubuntu-percona/install.d/30-mysql
index 5d5b4265..d5a8ac5b 100755
--- a/integration/scripts/files/elements/ubuntu-percona/install.d/30-mysql
+++ b/integration/scripts/files/elements/ubuntu-percona/install.d/30-mysql
@@ -14,4 +14,4 @@ export DEBIAN_FRONTEND=noninteractive
if [[ $BRANCH_OVERRIDE == "stable/kilo" || $BRANCH_OVERRIDE == "stable/liberty" ]]; then
PXB_VERSION_OVERRIDE="-22"
fi
-apt-get -y install percona-toolkit percona-server-common-5.6 percona-server-server-5.6 percona-server-test-5.6 percona-server-client-5.6 percona-xtrabackup${PXB_VERSION_OVERRIDE}
+apt-get --allow-unauthenticated -y install percona-toolkit percona-server-common-5.6 percona-server-server-5.6 percona-server-test-5.6 percona-server-client-5.6 percona-xtrabackup${PXB_VERSION_OVERRIDE}
diff --git a/integration/scripts/files/elements/ubuntu-postgresql/install.d/10-postgresql b/integration/scripts/files/elements/ubuntu-postgresql/install.d/30-postgresql
index cd853256..d5b5f505 100755
--- a/integration/scripts/files/elements/ubuntu-postgresql/install.d/10-postgresql
+++ b/integration/scripts/files/elements/ubuntu-postgresql/install.d/30-postgresql
@@ -31,7 +31,7 @@ exit \$?
_EOF_
-apt-get -y install postgresql-9.4 postgresql-contrib-9.4 postgresql-server-dev-9.4
+apt-get --allow-unauthenticated -y install postgresql-9.4 postgresql-contrib-9.4 postgresql-server-dev-9.4
###########################################
# Hack alert:
@@ -50,19 +50,19 @@ git clone https://github.com/vmware/pg_rewind.git --branch REL9_4_STABLE
dev_pkgs="libreadline-dev libkrb5-dev libssl-dev libpam-dev libxml2-dev libxslt-dev libedit-dev libselinux1-dev bison flex"
-apt-get install $dev_pkgs -y
+apt-get --allow-unauthenticated install $dev_pkgs -y
# Unfortunately, on ubuntu, was not able to get pg_rewind to build
# outside of the pgsql source tree. Configure and compile postgres
# but only call make install against the contrib/pg_rewind directory
# so that support library is accessible to the server
cd $tmpdir/postgres
-./configure
+./configure
make
cd contrib/pg_rewind
make install
-# Make the pg_rewind binary and the library used by the
+# Make the pg_rewind binary and the library used by the
# pg_rewind stored procedures accessible
ln -s /usr/local/pgsql/bin/pg_rewind /usr/bin/pg_rewind
ln -s /usr/local/pgsql/lib/pg_rewind_support.so /usr/lib/postgresql/9.4/lib/pg_rewind_support.so
@@ -75,5 +75,5 @@ apt-get remove -y $dev_pkgs
################################
# Install the native Python client.
-apt-get -y install libpq-dev
-pip install psycopg2
+apt-get --allow-unauthenticated -y install libpq-dev
+pip2 install psycopg2
diff --git a/integration/scripts/files/elements/ubuntu-postgresql/pre-install.d/10-postgresql-repo b/integration/scripts/files/elements/ubuntu-postgresql/pre-install.d/10-postgresql-repo
index 48a25d1d..132bc4f3 100755
--- a/integration/scripts/files/elements/ubuntu-postgresql/pre-install.d/10-postgresql-repo
+++ b/integration/scripts/files/elements/ubuntu-postgresql/pre-install.d/10-postgresql-repo
@@ -3,8 +3,10 @@
set -e
set -o xtrace
+[ -n "${RELEASE}" ] || die "RELEASE must be set to a valid Ubuntu release (e.g. trusty)"
+
cat <<EOL > /etc/apt/sources.list.d/postgresql.list
-deb http://apt.postgresql.org/pub/repos/apt/ trusty-pgdg main
+deb http://apt.postgresql.org/pub/repos/apt/ ${RELEASE}-pgdg main
EOL
wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add -
diff --git a/integration/scripts/files/elements/ubuntu-pxc/install.d/30-mysql b/integration/scripts/files/elements/ubuntu-pxc/install.d/30-mysql
index ae658957..d9f2f427 100755
--- a/integration/scripts/files/elements/ubuntu-pxc/install.d/30-mysql
+++ b/integration/scripts/files/elements/ubuntu-pxc/install.d/30-mysql
@@ -7,7 +7,7 @@ set -e
set -o xtrace
export DEBIAN_FRONTEND=noninteractive
-apt-get -y install percona-xtradb-cluster-server-5.6 percona-xtradb-cluster-client-5.6 percona-xtrabackup
+apt-get --allow-unauthenticated -y install percona-xtradb-cluster-server-5.6 percona-xtradb-cluster-client-5.6 percona-xtrabackup
# Don't auto start mysql (we'll start it up in guest)
update-rc.d mysql defaults
diff --git a/integration/scripts/files/elements/ubuntu-redis/install.d/10-redis b/integration/scripts/files/elements/ubuntu-redis/install.d/30-redis
index 57fcdf90..84b091f1 100755
--- a/integration/scripts/files/elements/ubuntu-redis/install.d/10-redis
+++ b/integration/scripts/files/elements/ubuntu-redis/install.d/30-redis
@@ -34,7 +34,7 @@ _EOF_
add-apt-repository -y ppa:chris-lea/redis-server
apt-get -y update
-apt-get install -y redis-server
+apt-get --allow-unauthenticated install -y redis-server
cat > "/etc/default/redis-server" << _EOF_
# Call ulimit -n with this argument prior to invoking Redis itself.
@@ -45,9 +45,9 @@ ULIMIT=65536
_EOF_
# Install Python driver for Redis ('redis-py').
-pip install redis
+pip2 install redis
# By default, redis-py will attempt to use the HiredisParser if installed.
# Using Hiredis can provide up to a 10x speed improvement in parsing responses
# from the Redis server.
-pip install hiredis
+pip2 install hiredis
diff --git a/integration/scripts/files/elements/ubuntu-trusty-cassandra/element-deps b/integration/scripts/files/elements/ubuntu-trusty-cassandra/element-deps
new file mode 100644
index 00000000..28898cf7
--- /dev/null
+++ b/integration/scripts/files/elements/ubuntu-trusty-cassandra/element-deps
@@ -0,0 +1 @@
+ubuntu-cassandra
diff --git a/integration/scripts/files/elements/ubuntu-trusty-couchbase/element-deps b/integration/scripts/files/elements/ubuntu-trusty-couchbase/element-deps
new file mode 100644
index 00000000..fa85fc7e
--- /dev/null
+++ b/integration/scripts/files/elements/ubuntu-trusty-couchbase/element-deps
@@ -0,0 +1 @@
+ubuntu-couchbase
diff --git a/integration/scripts/files/elements/ubuntu-trusty-couchdb/element-deps b/integration/scripts/files/elements/ubuntu-trusty-couchdb/element-deps
new file mode 100644
index 00000000..abd5561b
--- /dev/null
+++ b/integration/scripts/files/elements/ubuntu-trusty-couchdb/element-deps
@@ -0,0 +1 @@
+ubuntu-couchdb
diff --git a/integration/scripts/files/elements/ubuntu-trusty-db2/element-deps b/integration/scripts/files/elements/ubuntu-trusty-db2/element-deps
new file mode 100644
index 00000000..c5a439cd
--- /dev/null
+++ b/integration/scripts/files/elements/ubuntu-trusty-db2/element-deps
@@ -0,0 +1 @@
+ubuntu-db2
diff --git a/integration/scripts/files/elements/ubuntu-trusty-guest/element-deps b/integration/scripts/files/elements/ubuntu-trusty-guest/element-deps
new file mode 100644
index 00000000..eaa808e1
--- /dev/null
+++ b/integration/scripts/files/elements/ubuntu-trusty-guest/element-deps
@@ -0,0 +1 @@
+ubuntu-guest
diff --git a/integration/scripts/files/elements/ubuntu-guest/extra-data.d/20-guest-upstart b/integration/scripts/files/elements/ubuntu-trusty-guest/extra-data.d/20-guest-upstart
index 5d06bb0b..5d06bb0b 100755
--- a/integration/scripts/files/elements/ubuntu-guest/extra-data.d/20-guest-upstart
+++ b/integration/scripts/files/elements/ubuntu-trusty-guest/extra-data.d/20-guest-upstart
diff --git a/integration/scripts/files/elements/ubuntu-guest/install.d/20-etc b/integration/scripts/files/elements/ubuntu-trusty-guest/install.d/20-etc
index 8ac19f7c..8ac19f7c 100755
--- a/integration/scripts/files/elements/ubuntu-guest/install.d/20-etc
+++ b/integration/scripts/files/elements/ubuntu-trusty-guest/install.d/20-etc
diff --git a/integration/scripts/files/elements/ubuntu-guest/pre-install.d/01-trim-pkgs b/integration/scripts/files/elements/ubuntu-trusty-guest/pre-install.d/01-trim-pkgs
index 8787df7b..8787df7b 100755
--- a/integration/scripts/files/elements/ubuntu-guest/pre-install.d/01-trim-pkgs
+++ b/integration/scripts/files/elements/ubuntu-trusty-guest/pre-install.d/01-trim-pkgs
diff --git a/integration/scripts/files/elements/ubuntu-trusty-mariadb/element-deps b/integration/scripts/files/elements/ubuntu-trusty-mariadb/element-deps
new file mode 100644
index 00000000..5d7756f9
--- /dev/null
+++ b/integration/scripts/files/elements/ubuntu-trusty-mariadb/element-deps
@@ -0,0 +1 @@
+ubuntu-mariadb
diff --git a/integration/scripts/files/elements/ubuntu-mariadb/install.d/30-mariadb b/integration/scripts/files/elements/ubuntu-trusty-mariadb/install.d/30-mariadb
index 065c2f98..eba83c70 100755
--- a/integration/scripts/files/elements/ubuntu-mariadb/install.d/30-mariadb
+++ b/integration/scripts/files/elements/ubuntu-trusty-mariadb/install.d/30-mariadb
@@ -10,7 +10,7 @@ export DEBIAN_FRONTEND=noninteractive
# NOTE(vkmc): Using MariaDB repositories is required
# https://mariadb.com/kb/en/mariadb/installing-mariadb-deb-files/
-apt-get -y install software-properties-common
+apt-get --allow-unauthenticated -y install software-properties-common
apt-key adv --recv-keys --keyserver hkp://keyserver.ubuntu.com:80 0xcbcb082a1bb943db
add-apt-repository 'deb http://ftp.osuosl.org/pub/mariadb/repo/10.1/ubuntu trusty main'
@@ -25,8 +25,8 @@ apt-get -y update
if [[ $BRANCH_OVERRIDE == "stable/kilo" || $BRANCH_OVERRIDE == "stable/liberty" ]]; then
PXB_VERSION_OVERRIDE="-22"
fi
-apt-get -y install socat percona-xtrabackup${PXB_VERSION_OVERRIDE}
-apt-get -y install libmariadbclient18 mariadb-server
+apt-get --allow-unauthenticated -y install socat percona-xtrabackup${PXB_VERSION_OVERRIDE}
+apt-get --allow-unauthenticated -y install libmariadbclient18 mariadb-server
cat >/etc/mysql/conf.d/no_perf_schema.cnf <<_EOF_
[mysqld]
diff --git a/integration/scripts/files/elements/ubuntu-trusty-mongodb/element-deps b/integration/scripts/files/elements/ubuntu-trusty-mongodb/element-deps
new file mode 100644
index 00000000..6a5964ec
--- /dev/null
+++ b/integration/scripts/files/elements/ubuntu-trusty-mongodb/element-deps
@@ -0,0 +1 @@
+ubuntu-mongodb
diff --git a/integration/scripts/files/elements/ubuntu-trusty-mysql/element-deps b/integration/scripts/files/elements/ubuntu-trusty-mysql/element-deps
new file mode 100644
index 00000000..bd3447a6
--- /dev/null
+++ b/integration/scripts/files/elements/ubuntu-trusty-mysql/element-deps
@@ -0,0 +1 @@
+ubuntu-mysql
diff --git a/integration/scripts/files/elements/ubuntu-mysql/install.d/30-mysql b/integration/scripts/files/elements/ubuntu-trusty-mysql/install.d/30-mysql
index d31292ec..887bda2f 100755
--- a/integration/scripts/files/elements/ubuntu-mysql/install.d/30-mysql
+++ b/integration/scripts/files/elements/ubuntu-trusty-mysql/install.d/30-mysql
@@ -15,7 +15,7 @@ export DEBIAN_FRONTEND=noninteractive
if [[ $BRANCH_OVERRIDE == "stable/kilo" || $BRANCH_OVERRIDE == "stable/liberty" ]]; then
PXB_VERSION_OVERRIDE="-22"
fi
-apt-get -y install libmysqlclient18 mysql-server-5.6 percona-xtrabackup${PXB_VERSION_OVERRIDE}
+apt-get --allow-unauthenticated -y install libmysqlclient18 mysql-server-5.6 percona-xtrabackup${PXB_VERSION_OVERRIDE}
cat >/etc/mysql/conf.d/no_perf_schema.cnf <<_EOF_
[mysqld]
diff --git a/integration/scripts/files/elements/ubuntu-trusty-percona/element-deps b/integration/scripts/files/elements/ubuntu-trusty-percona/element-deps
new file mode 100644
index 00000000..bc5f9af6
--- /dev/null
+++ b/integration/scripts/files/elements/ubuntu-trusty-percona/element-deps
@@ -0,0 +1 @@
+ubuntu-percona
diff --git a/integration/scripts/files/elements/ubuntu-trusty-postgresql/element-deps b/integration/scripts/files/elements/ubuntu-trusty-postgresql/element-deps
new file mode 100644
index 00000000..98e1bc19
--- /dev/null
+++ b/integration/scripts/files/elements/ubuntu-trusty-postgresql/element-deps
@@ -0,0 +1 @@
+ubuntu-postgresql
diff --git a/integration/scripts/files/elements/ubuntu-trusty-pxc/element-deps b/integration/scripts/files/elements/ubuntu-trusty-pxc/element-deps
new file mode 100644
index 00000000..7b1a84c9
--- /dev/null
+++ b/integration/scripts/files/elements/ubuntu-trusty-pxc/element-deps
@@ -0,0 +1 @@
+ubuntu-pxc
diff --git a/integration/scripts/files/elements/ubuntu-trusty-redis/element-deps b/integration/scripts/files/elements/ubuntu-trusty-redis/element-deps
new file mode 100644
index 00000000..030d85ba
--- /dev/null
+++ b/integration/scripts/files/elements/ubuntu-trusty-redis/element-deps
@@ -0,0 +1 @@
+ubuntu-redis
diff --git a/integration/scripts/files/elements/ubuntu-trusty-vertica/element-deps b/integration/scripts/files/elements/ubuntu-trusty-vertica/element-deps
new file mode 100644
index 00000000..575094f0
--- /dev/null
+++ b/integration/scripts/files/elements/ubuntu-trusty-vertica/element-deps
@@ -0,0 +1 @@
+ubuntu-vertica
diff --git a/integration/scripts/files/elements/ubuntu-vertica/install.d/97-vertica b/integration/scripts/files/elements/ubuntu-vertica/install.d/97-vertica
index 2d50bc6a..bd2b72ac 100755
--- a/integration/scripts/files/elements/ubuntu-vertica/install.d/97-vertica
+++ b/integration/scripts/files/elements/ubuntu-vertica/install.d/97-vertica
@@ -13,11 +13,11 @@ export DEBIAN_FRONTEND=noninteractive
dd if=/tmp/in_target.d/vertica.deb of=/vertica.deb
# Install base packages
-apt-get install -qy build-essential bc iptables
-apt-get install -qy curl sysstat pstack mcelog
-apt-get install -qy python-dev g++ unixODBC unixODBC-dev dialog
-apt-get install -qy dialog libbz2-dev libboost-all-dev libcurl4-gnutls-dev
-apt-get install -qy openjdk-7-jdk
+apt-get --allow-unauthenticated install -qy build-essential bc iptables
+apt-get --allow-unauthenticated install -qy curl sysstat pstack mcelog
+apt-get --allow-unauthenticated install -qy python-dev g++ unixODBC unixODBC-dev dialog
+apt-get --allow-unauthenticated install -qy dialog libbz2-dev libboost-all-dev libcurl4-gnutls-dev
+apt-get --allow-unauthenticated install -qy openjdk-7-jdk
# Install Vertica package
dpkg -i /vertica.deb
diff --git a/integration/scripts/files/elements/ubuntu-xenial-guest/element-deps b/integration/scripts/files/elements/ubuntu-xenial-guest/element-deps
new file mode 100644
index 00000000..eaa808e1
--- /dev/null
+++ b/integration/scripts/files/elements/ubuntu-xenial-guest/element-deps
@@ -0,0 +1 @@
+ubuntu-guest
diff --git a/integration/scripts/files/elements/ubuntu-xenial-guest/extra-data.d/20-guest-systemd b/integration/scripts/files/elements/ubuntu-xenial-guest/extra-data.d/20-guest-systemd
new file mode 100755
index 00000000..d541078c
--- /dev/null
+++ b/integration/scripts/files/elements/ubuntu-xenial-guest/extra-data.d/20-guest-systemd
@@ -0,0 +1,22 @@
+#!/bin/bash
+
+set -e
+set -o xtrace
+
+# CONTEXT: HOST prior to IMAGE BUILD as SCRIPT USER
+# PURPOSE: stages the trove-guest systemd unit file, replacing variables so that the guest image is
+#          properly configured
+
+source $_LIB/die
+
+[ -n "$TMP_HOOKS_PATH" ] || die "Temp hook path not set"
+
+[ -n "${GUEST_USERNAME}" ] || die "GUEST_USERNAME needs to be set to the user for the guest image"
+[ -n "${HOST_SCP_USERNAME}" ] || die "HOST_SCP_USERNAME needs to be set to the user for the host instance"
+[ -n "${CONTROLLER_IP}" ] || die "CONTROLLER_IP needs to be set to the ip address that guests will use to contact the controller"
+[ -n "${ESCAPED_PATH_TROVE}" ] || die "ESCAPED_PATH_TROVE needs to be set to the path to the trove directory on the trovestack host"
+[ -n "${TROVESTACK_SCRIPTS}" ] || die "TROVESTACK_SCRIPTS needs to be set to the trove/integration/scripts dir"
+[ -n "${ESCAPED_GUEST_LOGDIR}" ] || die "ESCAPED_GUEST_LOGDIR must be set to the escaped guest log dir"
+
+sed "s/GUEST_USERNAME/${GUEST_USERNAME}/g;s/GUEST_LOGDIR/${ESCAPED_GUEST_LOGDIR}/g;s/HOST_SCP_USERNAME/${HOST_SCP_USERNAME}/g;s/CONTROLLER_IP/${CONTROLLER_IP}/g;s/PATH_TROVE/${ESCAPED_PATH_TROVE}/g" ${TROVESTACK_SCRIPTS}/files/trove-guest.systemd.conf > ${TMP_HOOKS_PATH}/trove-guest.service
+
diff --git a/integration/scripts/files/elements/ubuntu-xenial-guest/install.d/20-etc b/integration/scripts/files/elements/ubuntu-xenial-guest/install.d/20-etc
new file mode 100755
index 00000000..1a350153
--- /dev/null
+++ b/integration/scripts/files/elements/ubuntu-xenial-guest/install.d/20-etc
@@ -0,0 +1,10 @@
+#!/bin/sh
+
+# CONTEXT: GUEST during CONSTRUCTION as ROOT
+# PURPOSE: take the "staged" trove-guest.service file and install it as a systemd unit on the guest image
+
+dd if=/tmp/in_target.d/trove-guest.service of=/etc/systemd/system/trove-guest.service
+
+systemctl enable trove-guest.service
+
+
diff --git a/integration/scripts/files/elements/ubuntu-xenial-guest/pre-install.d/01-trim-pkgs b/integration/scripts/files/elements/ubuntu-xenial-guest/pre-install.d/01-trim-pkgs
new file mode 100755
index 00000000..125f6c78
--- /dev/null
+++ b/integration/scripts/files/elements/ubuntu-xenial-guest/pre-install.d/01-trim-pkgs
@@ -0,0 +1,90 @@
+#!/bin/bash
+
+# CONTEXT: GUEST during CONSTRUCTION as ROOT
+# PURPOSE: Install basic services and applications
+
+set -e
+set -o xtrace
+
+export DEBIAN_FRONTEND=noninteractive
+apt-get -y purge acpid\
+ apport\
+ apport-symptoms\
+ apt-transport-https\
+ aptitude\
+ at\
+ bash-completion\
+ bc\
+ bind9-host\
+ bsdmainutils\
+ busybox-static\
+ byobu\
+ command-not-found\
+ command-not-found-data\
+ curl\
+ dbus\
+ dmidecode\
+ dosfstools\
+ ed\
+ fonts-ubuntu-font-family-console\
+ friendly-recovery\
+ ftp\
+ fuse\
+ geoip-database\
+ groff-base\
+ hdparm\
+ info\
+ install-info\
+ iptables\
+ iputils-tracepath\
+ irqbalance\
+ language-selector-common\
+ libaccountsservice0\
+ libevent-2.0-5\
+ libgeoip1\
+ libnfnetlink0\
+ libpcap0.8\
+ libpci3\
+ libpipeline1\
+ libpolkit-gobject-1-0\
+ libsasl2-modules\
+ libusb-1.0-0\
+ lshw\
+ lsof\
+ ltrace\
+ man-db\
+ mlocate\
+ mtr-tiny\
+ nano\
+ ntfs-3g\
+ parted\
+ patch\
+ plymouth-theme-ubuntu-text\
+ popularity-contest\
+ powermgmt-base\
+ ppp\
+ screen\
+ shared-mime-info\
+ strace\
+ tcpdump\
+ telnet\
+ time\
+ tmux\
+ ubuntu-standard\
+ ufw\
+ update-manager-core\
+ update-notifier-common\
+ usbutils\
+ uuid-runtime\
+
+# The following packages cannot be removed as they cause cloud-init to be
+# uninstalled in Ubuntu 14.04
+# gir1.2-glib-2.0
+# libdbus-glib-1-2
+# libgirepository-1.0-1
+# python-chardet
+# python-serial
+# xz-utils
+
+apt-get -y autoremove
+
diff --git a/integration/scripts/files/elements/ubuntu-xenial-mariadb/element-deps b/integration/scripts/files/elements/ubuntu-xenial-mariadb/element-deps
new file mode 100644
index 00000000..5d7756f9
--- /dev/null
+++ b/integration/scripts/files/elements/ubuntu-xenial-mariadb/element-deps
@@ -0,0 +1 @@
+ubuntu-mariadb
diff --git a/integration/scripts/files/elements/ubuntu-xenial-mariadb/install.d/30-mariadb b/integration/scripts/files/elements/ubuntu-xenial-mariadb/install.d/30-mariadb
new file mode 100755
index 00000000..521f3e68
--- /dev/null
+++ b/integration/scripts/files/elements/ubuntu-xenial-mariadb/install.d/30-mariadb
@@ -0,0 +1,39 @@
+#!/bin/sh
+
+# CONTEXT: GUEST during CONSTRUCTION as ROOT
+# PURPOSE: Install controller base required packages
+
+set -e
+set -o xtrace
+
+export DEBIAN_FRONTEND=noninteractive
+
+# NOTE(vkmc): Using MariaDB repositories is required
+# https://mariadb.com/kb/en/mariadb/installing-mariadb-deb-files/
+apt-get -y install software-properties-common
+apt-key adv --recv-keys --keyserver hkp://keyserver.ubuntu.com:80 0xF1656F24C74CD1D8
+add-apt-repository 'deb http://ftp.osuosl.org/pub/mariadb/repo/10.1/ubuntu xenial main'
+
+# Pin MariaDB repository
+printf 'Package: *\nPin: origin ftp.osuosl.org\nPin-Priority: 1000\n' > /etc/apt/preferences.d/mariadb.pref
+
+apt-get -y update
+apt-get -y install socat percona-xtrabackup
+apt-get -y install libmariadbclient18 mariadb-server
+
+cat >/etc/mysql/conf.d/no_perf_schema.cnf <<_EOF_
+[mysqld]
+performance_schema = off
+_EOF_
+
+mv /etc/mysql/my.cnf.fallback /etc/mysql/my.cnf
+chown mysql:mysql /etc/mysql/my.cnf
+cat >/etc/mysql/my.cnf <<_EOF_
+[mysql]
+
+!includedir /etc/mysql/conf.d/
+_EOF_
+
+rm /etc/init.d/mysql
+systemctl daemon-reload
+systemctl enable mariadb
diff --git a/integration/scripts/files/elements/ubuntu-xenial-mysql/element-deps b/integration/scripts/files/elements/ubuntu-xenial-mysql/element-deps
new file mode 100644
index 00000000..bd3447a6
--- /dev/null
+++ b/integration/scripts/files/elements/ubuntu-xenial-mysql/element-deps
@@ -0,0 +1 @@
+ubuntu-mysql
diff --git a/integration/scripts/files/elements/ubuntu-xenial-mysql/install.d/30-mysql b/integration/scripts/files/elements/ubuntu-xenial-mysql/install.d/30-mysql
new file mode 100755
index 00000000..75ccdc66
--- /dev/null
+++ b/integration/scripts/files/elements/ubuntu-xenial-mysql/install.d/30-mysql
@@ -0,0 +1,32 @@
+#!/bin/bash
+
+# CONTEXT: GUEST during CONSTRUCTION as ROOT
+# PURPOSE: Install controller base required packages
+
+set -e
+set -o xtrace
+
+export DEBIAN_FRONTEND=noninteractive
+
+add-apt-repository 'deb http://archive.ubuntu.com/ubuntu trusty universe'
+apt-get -y update
+
+apt-get --allow-unauthenticated -y install mysql-client-5.6 mysql-server-5.6 percona-xtrabackup${PXB_VERSION_OVERRIDE}
+
+cat >/etc/mysql/conf.d/no_perf_schema.cnf <<_EOF_
+[mysqld]
+performance_schema = off
+_EOF_
+
+mv /etc/mysql/my.cnf.fallback /etc/mysql/my.cnf
+chown mysql:mysql /etc/mysql/my.cnf
+cat >/etc/mysql/my.cnf <<_EOF_
+[mysql]
+
+!includedir /etc/mysql/conf.d/
+_EOF_
+
+rm /etc/init/mysql.conf
+systemctl daemon-reload
+systemctl enable mysql
+
diff --git a/integration/scripts/files/elements/ubuntu-xenial-postgresql/element-deps b/integration/scripts/files/elements/ubuntu-xenial-postgresql/element-deps
new file mode 100644
index 00000000..98e1bc19
--- /dev/null
+++ b/integration/scripts/files/elements/ubuntu-xenial-postgresql/element-deps
@@ -0,0 +1 @@
+ubuntu-postgresql
diff --git a/integration/scripts/files/elements/ubuntu-xenial-postgresql/install.d/31-fix-init-script b/integration/scripts/files/elements/ubuntu-xenial-postgresql/install.d/31-fix-init-script
new file mode 100755
index 00000000..c454d83d
--- /dev/null
+++ b/integration/scripts/files/elements/ubuntu-xenial-postgresql/install.d/31-fix-init-script
@@ -0,0 +1,7 @@
+#!/bin/sh
+
+set -e
+set -o xtrace
+
+sed -i "s/PIDFile=\/var\/run\/postgresql\/%i.pid/PIDFile=\/var\/run\/postgresql\/postgresql.pid/" /lib/systemd/system/postgresql@.service
+
diff --git a/integration/scripts/files/elements/ubuntu-xenial-pxc/element-deps b/integration/scripts/files/elements/ubuntu-xenial-pxc/element-deps
new file mode 100644
index 00000000..7b1a84c9
--- /dev/null
+++ b/integration/scripts/files/elements/ubuntu-xenial-pxc/element-deps
@@ -0,0 +1 @@
+ubuntu-pxc
diff --git a/integration/scripts/files/elements/ubuntu-xenial-pxc/install.d/31-fix-my-cnf b/integration/scripts/files/elements/ubuntu-xenial-pxc/install.d/31-fix-my-cnf
new file mode 100755
index 00000000..d3347228
--- /dev/null
+++ b/integration/scripts/files/elements/ubuntu-xenial-pxc/install.d/31-fix-my-cnf
@@ -0,0 +1,15 @@
+#!/bin/sh
+
+# CONTEXT: GUEST during CONSTRUCTION as ROOT
+# PURPOSE: Adjust my.cnf so PXC on xenial reads its settings from conf.d
+
+set -e
+set -o xtrace
+
+mv /etc/mysql/my.cnf.fallback /etc/mysql/my.cnf
+chown mysql:mysql /etc/mysql/my.cnf
+cat >/etc/mysql/my.cnf <<_EOF_
+[mysql]
+
+!includedir /etc/mysql/conf.d/
+_EOF_
diff --git a/integration/scripts/files/elements/ubuntu-xenial-redis/element-deps b/integration/scripts/files/elements/ubuntu-xenial-redis/element-deps
new file mode 100644
index 00000000..030d85ba
--- /dev/null
+++ b/integration/scripts/files/elements/ubuntu-xenial-redis/element-deps
@@ -0,0 +1 @@
+ubuntu-redis
diff --git a/integration/scripts/files/elements/ubuntu-xenial-redis/install.d/31-fix-init-file b/integration/scripts/files/elements/ubuntu-xenial-redis/install.d/31-fix-init-file
new file mode 100755
index 00000000..08442f60
--- /dev/null
+++ b/integration/scripts/files/elements/ubuntu-xenial-redis/install.d/31-fix-init-file
@@ -0,0 +1,28 @@
+#!/bin/sh
+
+set -e
+set -o xtrace
+
+cat > /lib/systemd/system/redis-server.service << '_EOF_'
+[Unit]
+Description=Advanced key-value store
+After=network.target
+
+[Service]
+Type=forking
+PIDFile=/var/run/redis/redis-server.pid
+User=redis
+Group=redis
+
+Environment=statedir=/var/run/redis
+PermissionsStartOnly=true
+ExecStartPre=/bin/mkdir -p ${statedir}
+ExecStartPre=/bin/chown -R redis:redis ${statedir}
+ExecStart=/usr/bin/redis-server /etc/redis/redis.conf
+ExecReload=/bin/kill -USR2 $MAINPID
+ExecStop=/usr/bin/redis-cli shutdown
+Restart=always
+
+[Install]
+WantedBy=multi-user.target
+_EOF_
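To sanity-check the replacement unit on a booted xenial guest, the usual systemd commands apply (shown only as a usage note):

    sudo systemctl daemon-reload
    sudo systemctl restart redis-server
    systemctl status redis-server --no-pager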
diff --git a/integration/scripts/functions b/integration/scripts/functions
index 1a09f552..a1f64c8b 100644
--- a/integration/scripts/functions
+++ b/integration/scripts/functions
@@ -193,30 +193,36 @@ GetOSVersion() {
function GetDistro() {
GetOSVersion
if [[ "$os_VENDOR" =~ (Ubuntu) || "$os_VENDOR" =~ (Debian) ]]; then
- # 'Everyone' refers to Ubuntu / Debian releases by the code name adjective
- DISTRO=$os_CODENAME
+ DISTRO_NAME=ubuntu
+ DISTRO_RELEASE=$os_CODENAME
elif [[ "$os_VENDOR" =~ (Fedora) ]]; then
- # For Fedora, just use 'f' and the release
- DISTRO="f$os_RELEASE"
+ DISTRO_NAME=fedora
+ DISTRO_RELEASE=$os_RELEASE
elif [[ "$os_VENDOR" =~ (openSUSE) ]]; then
- DISTRO="opensuse-$os_RELEASE"
+ DISTRO_NAME=opensuse
+ DISTRO_RELEASE=$os_RELEASE
elif [[ "$os_VENDOR" =~ (SUSE LINUX) ]]; then
+ DISTRO_NAME=sle
# For SLE, also use the service pack
if [[ -z "$os_UPDATE" ]]; then
- DISTRO="sle${os_RELEASE}"
+ DISTRO_RELEASE=$os_RELEASE
else
- DISTRO="sle${os_RELEASE}sp${os_UPDATE}"
+ DISTRO_RELEASE="${os_RELEASE}sp${os_UPDATE}"
fi
elif [[ "$os_VENDOR" =~ (Red Hat) || "$os_VENDOR" =~ (CentOS) ]]; then
# Drop the . release as we assume it's compatible
- DISTRO="rhel${os_RELEASE::1}"
+ DISTRO_NAME=rhel
+ DISTRO_RELEASE=${os_RELEASE::1}
elif [[ "$os_VENDOR" =~ (XenServer) ]]; then
- DISTRO="xs$os_RELEASE"
+ DISTRO_NAME=xs
+ DISTRO_RELEASE=$os_RELEASE
else
# Catch-all for now is Vendor + Release + Update
- DISTRO="$os_VENDOR-$os_RELEASE.$os_UPDATE"
+ DISTRO_NAME=$os_VENDOR
+ DISTRO_RELEASE=$os_RELEASE.$os_UPDATE
fi
- export DISTRO
+ export DISTRO_NAME
+ export DISTRO_RELEASE
}
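A minimal way to see what the reworked GetDistro exports on a given build host (a sketch; run from a trove checkout, and note that sourcing the functions file pulls in its other helpers as well):

    source integration/scripts/functions
    GetDistro
    echo "${DISTRO_NAME} ${DISTRO_RELEASE}"   # e.g. "ubuntu xenial"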
diff --git a/integration/scripts/functions_qemu b/integration/scripts/functions_qemu
index e797bcd8..ac91efc8 100644
--- a/integration/scripts/functions_qemu
+++ b/integration/scripts/functions_qemu
@@ -19,14 +19,8 @@ function build_vm() {
manage_ssh_keys
if [ $DISTRO == 'ubuntu' ]; then
- export RELEASE=trusty
export DIB_RELEASE=$RELEASE
export DIB_CLOUD_IMAGES=cloud-images.ubuntu.com
- # Use the apt sources.list on the build host, its almost always preferred
- if [ -f /etc/apt/sources.list ]; then
- export DIB_APT_SOURCES=/etc/apt/sources.list
- EXTRA_ELEMENTS="apt-sources apt-conf-dir"
- fi
fi
if [ $DISTRO == 'fedora' ]; then
EXTRA_ELEMENTS=selinux-permissive
@@ -54,7 +48,7 @@ function build_vm() {
local QEMU_IMG_OPTIONS=$(! $(qemu-img | grep -q 'version 1') && echo "--qemu-img-options compat=0.10")
${PATH_DISKIMAGEBUILDER}/bin/disk-image-create -a amd64 -o "${VM}" \
-x ${QEMU_IMG_OPTIONS} ${DISTRO} ${EXTRA_ELEMENTS} vm heat-cfntools \
- cloud-init-datasources ${DISTRO}-guest ${DISTRO}-${SERVICE_TYPE}
+ cloud-init-datasources ${DISTRO}-${RELEASE}-guest ${DISTRO}-${RELEASE}-${SERVICE_TYPE}
}
function build_guest_image() {
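With the per-release elements added elsewhere in this change, the element list now encodes both the distro and the release. With DISTRO=ubuntu, RELEASE=xenial and SERVICE_TYPE=mysql (values assumed for illustration), the call expands to roughly:

    disk-image-create -a amd64 -o "${VM}" \
        ubuntu vm heat-cfntools cloud-init-datasources \
        ubuntu-xenial-guest ubuntu-xenial-mysql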
diff --git a/integration/scripts/localrc.rc b/integration/scripts/localrc.rc
index 0477dd49..d3914b4b 100644
--- a/integration/scripts/localrc.rc
+++ b/integration/scripts/localrc.rc
@@ -5,15 +5,12 @@ SERVICE_TOKEN=$SERVICE_TOKEN
ADMIN_PASSWORD=$ADMIN_PASSWORD
SERVICE_PASSWORD=$SERVICE_PASSWORD
-PUBLIC_INTERFACE=eth0
IP_VERSION=4
TROVE_LOGDIR=$TROVE_LOGDIR
TROVE_AUTH_CACHE_DIR=$TROVE_AUTH_CACHE_DIR
# Enable the Trove plugin for devstack
-if [[ $USE_DEVSTACK_TROVE_PLUGIN = true ]]; then
- enable_plugin trove $TROVE_REPO $TROVE_BRANCH
-fi
+enable_plugin trove $TROVE_REPO $TROVE_BRANCH
# Enable Trove, Swift, and Heat
ENABLED_SERVICES+=,trove,tr-api,tr-tmgr,tr-cond
@@ -24,6 +21,7 @@ if [[ $ENABLE_NEUTRON = true ]]; then
ENABLED_SERVICES+=,neutron,q-svc,q-agt,q-dhcp,q-l3,q-meta
disable_service n-net
else
+ PUBLIC_INTERFACE=eth0
enable_service n-net
disable_service neutron q-svc q-agt q-dhcp q-l3 q-meta
fi
diff --git a/integration/scripts/trovestack b/integration/scripts/trovestack
index f1ca8b74..4f4ebfb9 100755
--- a/integration/scripts/trovestack
+++ b/integration/scripts/trovestack
@@ -9,8 +9,9 @@
# #
###############################################################################
-PATH_TROVE=${PATH_TROVE:=$(readlink -f $(dirname $0)/../..)}
-TROVESTACK_SCRIPTS=${TROVESTACK_SCRIPTS:=$(readlink -f $(dirname $0))}
+SCRIPT_DIRNAME=$(dirname "$0")
+PATH_TROVE=${PATH_TROVE:=$(readlink -f "${SCRIPT_DIRNAME}"/../..)}
+TROVESTACK_SCRIPTS=${TROVESTACK_SCRIPTS:=$(readlink -f "${SCRIPT_DIRNAME}")}
TROVESTACK_TESTS=$TROVESTACK_SCRIPTS/../tests/
DEFAULT_LOCAL_CONF=local.conf.rc
DEFAULT_LOCALRC=localrc.rc
@@ -18,6 +19,18 @@ LOCAL_CONF=local.conf
LOCALRC=localrc
LOCALRC_AUTO=.localrc.auto
USER_LOCAL_CONF_NAME=.devstack.$LOCAL_CONF
+CLOUD_ADMIN_ARG="--os-cloud=devstack-admin"
+
+# Make sure we're not affected by the local environment
+# by unsetting all the 'OS_' variables
+while read -r ENV_VAR; do unset "${ENV_VAR}"; done < <(env|grep "OS_"|awk -F= '{print $1}')
+
+# Now grab the admin credentials from devstack if it's set up.
+# This is to facilitate setting the ADMIN_PASSWORD correctly
+# for gate runs.
+if [ -f $DEST/devstack/accrc/admin/admin ]; then
+ source $DEST/devstack/accrc/admin/admin
+fi
USERHOME=$HOME
# Load options not checked into VCS.
@@ -40,7 +53,7 @@ set -e
# Get default host ip from interface
function get_default_host_ip() {
- host_iface=$(ip route | sed -n '/^default/{ s/.*dev \(\w\+\)\s\+.*/\1/; p; }' | head -1)
+ host_iface=$(ip route | grep default | awk '{print $5}' | head -1)
echo `LC_ALL=C ip -f inet addr show ${host_iface} | awk '/inet/ {split($2,parts,"/"); print parts[1]}' | head -1`
}
@@ -48,6 +61,12 @@ function get_default_host_ip() {
. $TROVESTACK_SCRIPTS/functions
. $TROVESTACK_SCRIPTS/functions_qemu
+# Pre-set DISTRO and RELEASE variables based on host OS
+# Can be overridden by env vars DISTRO and RELEASE
+GetDistro
+export DISTRO=${DISTRO:-$DISTRO_NAME}
+export RELEASE=${RELEASE:-$DISTRO_RELEASE}
+
# Load global configuration variables.
. $TROVESTACK_SCRIPTS/trovestack.rc
. $TROVESTACK_SCRIPTS/reviews.rc
@@ -56,15 +75,6 @@ function get_default_host_ip() {
[[ -f $PATH_DEVSTACK_SRC/functions-common ]] && source $PATH_DEVSTACK_SRC/functions-common
[[ -f $PATH_DEVSTACK_SRC/functions ]] && source $PATH_DEVSTACK_SRC/functions
-# Source the old-style localrc, or new-style .local.auto - only one should exist.
-# Note: The devstack localrc's have references to 'enable_plugin' which causes
-# errors when sourcing them in the stable/juno and stable/kilo branches.
-# These errors are safe to ignore when sourcing these files.
-set +e
-[[ -f $PATH_DEVSTACK_SRC/$LOCALRC ]] && source $PATH_DEVSTACK_SRC/$LOCALRC
-[[ -f $PATH_DEVSTACK_SRC/$LOCALRC_AUTO ]] && source $PATH_DEVSTACK_SRC/$LOCALRC_AUTO
-set -e
-
# Set up variables for the CONF files - this has to happen after loading trovestack.rc, since
# TROVE_CONF_DIR is defined there - these will be used by devstack too
export TROVE_CONF=$TROVE_CONF_DIR/trove.conf
@@ -85,7 +95,6 @@ KEYSTONE_AUTH_PROTOCOL=${KEYSTONE_AUTH_PROTOCOL:-$SERVICE_PROTOCOL}
KEYSTONE_AUTH_PORT=${KEYSTONE_AUTH_PORT:-35357}
GLANCE_HOSTPORT=${GLANCE_HOSTPORT:-$SERVICE_HOST:9292}
GLANCE_SERVICE_PROTOCOL=${GLANCE_SERVICE_PROTOCOL:-http}
-TROVE_AUTH_ENDPOINT=$KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v2.0
# The following depends on whether neutron is used or nova-network
# neutron uses a bridge, nova-network does not
@@ -97,6 +106,8 @@ ESCAPED_PATH_TROVE=$(echo $PATH_TROVE | sed 's/\//\\\//g')
ESCAPED_TROVESTACK_SCRIPTS=$(echo $TROVESTACK_SCRIPTS | sed 's/\//\\\//g')
TROVE_AUTH_CACHE_DIR=${TROVE_AUTH_CACHE_DIR:-/var/cache/trove}
TROVE_LOGDIR=${TROVE_LOGDIR:-$DEST/logs}
+TROVE_DEVSTACK_SETTINGS="$DEST/trove/devstack/settings"
+TROVE_DEVSTACK_PLUGIN="$DEST/trove/devstack/plugin.sh"
# DATASTORE_PKG_LOCATION defines the location from where the datastore packages
# can be accessed by the DIB elements. This is applicable only for datastores
# that do not have a public repository from where their packages can be accessed.
@@ -111,11 +122,6 @@ else
TROVE_BIN_DIR=$(get_python_exec_prefix)
fi
-# Allow sourcing config values from env.rc for overrides
-if [ -f /tmp/integration/env.rc ]; then
- source /tmp/integration/env.rc
-fi
-
# set up respective package managers
if is_fedora; then
PKG_INSTALL_OPTS=""
@@ -145,27 +151,6 @@ function exclaim () {
echo "*******************************************************************************"
}
-# Set the location of the Trove setup commands file for devstack - either in
-# the devstack repo, or the Trove one. Also sets the flag which activates the
-# devstack trove plugin, if required. We'll use the devstack version if it
-# exists, otherwise we assume the plugin method.
-function set_trove_plugin_vars () {
- FAIL_IF_MISSING=${1:-true}
-
- TROVE_SETUP_CMD_FILE="$PATH_DEVSTACK_SRC/lib/trove"
- if [ -f "$TROVE_SETUP_CMD_FILE" ]; then
- USE_DEVSTACK_TROVE_PLUGIN=false
- else
- TROVE_SETUP_CMD_FILE="$DEST/trove/devstack/plugin.sh"
- USE_DEVSTACK_TROVE_PLUGIN=true
- fi
- # Only complain if we have a devstack directory and are told to
- if [ "$FAIL_IF_MISSING" = "true" ] && [ -d "$PATH_DEVSTACK_SRC" ] && [ ! -f "$TROVE_SETUP_CMD_FILE" ]; then
- exclaim "${COLOR_RED}Trove setup file '${TROVE_SETUP_CMD_FILE}' not found!${COLOR_NONE}"
- exit 1
- fi
-}
-
function pkg_install () {
echo Installing $@...
sudo -E $PKG_INSTALL_OPTS $HTTP_PROXY $PKG_MGR $PKG_GET_ARGS $PKG_INSTALL_ARG $@
@@ -176,11 +161,6 @@ function pkg_update () {
sudo -E $PKG_INSTALL_OPTS $HTTP_PROXY $PKG_MGR $PKG_GET_ARGS $PKG_UPDATE_ARG $@
}
-function set_home_dir() {
- exclaim "set_home_dir has been nooped."
- exit 1
-}
-
function set_http_proxy() {
if [ ! "${http_proxy}" = '' ]; then
HTTP_PROXY="http_proxy=$http_proxy https_proxy=$https_proxy"
@@ -197,6 +177,30 @@ function ip_chunk() {
get_ip_for_device $1 | cut -d. -f$2
}
+function dump_env() {
+ # Print out the environment for debug purposes
+ if [[ -n ${TROVESTACK_DUMP_ENV} ]]; then
+ set +e
+ exclaim "Dumping configuration, starting with env vars:"
+ env | sort
+ CLOUDS_YAML=${CLOUDS_YAML:-/etc/openstack/clouds.yaml}
+ for filename in "${TEST_CONF}" "${CLOUDS_YAML}" "${TROVE_CONF}" "${PATH_DEVSTACK_SRC}/${LOCALRC}" "${PATH_DEVSTACK_SRC}/${LOCALRC_AUTO}"; do
+ if [[ -f ${filename} ]]; then
+ exclaim "Dumping contents of '${filename}':"
+ cat ${filename}
+ else
+ exclaim "File '${filename}' not found"
+ fi
+ done
+ exclaim "Dumping pip modules:"
+ pip freeze | sort
+ exclaim "Dumping domain list:"
+ openstack --os-cloud=devstack-admin domain list
+ exclaim "Dumping configuration completed"
+ set -e
+ fi
+}
+
# Add a flavor and a corresponding flavor.resize
# (flavor.resize adds 16 to the memory and one more vcpu)
function add_flavor() {
@@ -205,10 +209,10 @@ function add_flavor() {
local FLAVOR_MEMORY_MB=$3
local FLAVOR_ROOT_GB=$4
local FLAVOR_VCPUS=$5
+ local FLAVOR_SKIP_RESIZE=${6:-""}
- credentials="--os-username=admin --os-password=$ADMIN_PASSWORD --os-tenant-name=admin --os-auth-url=$TROVE_AUTH_ENDPOINT"
if [[ -z "$FLAVOR_LIST_FOR_ADD" ]]; then
- FLAVOR_LIST_FOR_ADD=$(nova $credentials flavor-list | cut -d'|' -f3 | sed -e's/ /,/g')
+ FLAVOR_LIST_FOR_ADD=$(openstack $CLOUD_ADMIN_ARG flavor list | cut -d'|' -f3 | sed -e's/ /,/g')
fi
base_id=${FLAVOR_ID}
@@ -226,30 +230,24 @@ function add_flavor() {
memory=${FLAVOR_MEMORY_MB}
vcpus=${FLAVOR_VCPUS}
if [[ $ephemeral != 0 ]]; then
- if [[ $BRANCH_OVERRIDE == "stable/juno" || $BRANCH_OVERRIDE == "stable/kilo" ]]; then
- id=1${id}
- else
- id=${id}e
- fi
+ id=${id}e
fi
if [[ $name == ${resize_name} ]]; then
- if [[ $BRANCH_OVERRIDE == "stable/juno" || $BRANCH_OVERRIDE == "stable/kilo" ]]; then
- id=2${id}
- else
- id=${id}r
- fi
+ id=${id}r
memory=$((${FLAVOR_MEMORY_MB} + 16))
vcpus=$((${FLAVOR_VCPUS} + 1))
fi
if [[ $FLAVOR_LIST_FOR_ADD != *",$name,"* ]]; then
- nova $credentials flavor-create $name $id $memory $FLAVOR_ROOT_GB $vcpus --ephemeral $ephemeral
+ if [[ -z ${FLAVOR_SKIP_RESIZE} || ${name} == ${reg_name} ]]; then
+ openstack $CLOUD_ADMIN_ARG flavor create $name --id $id --ram $memory --disk $FLAVOR_ROOT_GB --vcpus $vcpus --ephemeral $ephemeral
+ fi
fi
done
done
}
function get_attribute_id() {
- openstack --os-auth-url=$TROVE_AUTH_ENDPOINT --os-username=admin --os-password=$ADMIN_PASSWORD --os-project-name=admin $1 list | grep " $2" | get_field $3
+ openstack --os-cloud=devstack-admin $1 list | grep " $2" | get_field $3
}
@@ -266,7 +264,8 @@ function install_prep_packages() {
if is_fedora; then
pkg_install git gettext
else
- pkg_install git-core kvm-ipxe gettext
+ #pkg_install git-core kvm-ipxe gettext
+ pkg_install git-core gettext
fi
sudo -H $HTTP_PROXY pip install --upgrade pip dib-utils
}
@@ -415,7 +414,6 @@ function run_devstack() {
USER_OPTS_TAG_END="$MARKER_TOKEN End Of User Specified Options $MARKER_TOKEN"
ADD_OPTS_TAG="$MARKER_TOKEN Additional Options $MARKER_TOKEN"
ADD_OPTS_TAG_END="$MARKER_TOKEN End Of Additional Options $MARKER_TOKEN"
- set_trove_plugin_vars false
pushd "$PATH_DEVSTACK_SRC"
DEVSTACK_LOCAL_CONF=$LOCAL_CONF
@@ -544,8 +542,10 @@ function set_mysql_pkg() {
function cmd_set_datastore() {
- IMAGEID=$1
- DATASTORE_TYPE=$2
+ local IMAGEID=$1
+ local DATASTORE_TYPE=$2
+ local RESTART_TROVE=${3:-$(get_bool RESTART_TROVE "true")}
+
# rd_manage datastore_update <datastore_name> <default_version>
rd_manage datastore_update "$DATASTORE_TYPE" ""
PACKAGES=${PACKAGES:-""}
@@ -605,10 +605,14 @@ function cmd_set_datastore() {
rd_manage db_load_datastore_config_parameters "$DATASTORE_TYPE" "$VERSION" "$PATH_TROVE"/trove/templates/$DATASTORE_TYPE/validation-rules.json
fi
- cmd_stop
+ if [[ "${RESTART_TROVE}" == true ]]; then
+ cmd_stop
+ fi
iniset $TROVE_CONF DEFAULT default_datastore "$DATASTORE_TYPE"
sleep 1.5
- cmd_start
+ if [[ "${RESTART_TROVE}" == true ]]; then
+ cmd_start
+ fi
}
###############################################################################
@@ -659,7 +663,19 @@ function rd_manage() {
}
function install_test_packages() {
+ DATASTORE_TYPE=$1
+
sudo -H $HTTP_PROXY pip install openstack.nose_plugin proboscis pexpect
+ if [ "$DATASTORE_TYPE" = "couchbase" ]; then
+ if [ "$DISTRO" == "ubuntu" ]; then
+ # Install Couchbase SDK for scenario tests.
+ sudo -H $HTTP_PROXY curl http://packages.couchbase.com/ubuntu/couchbase.key | sudo apt-key add -
+ echo "deb http://packages.couchbase.com/ubuntu trusty trusty/main" | sudo tee /etc/apt/sources.list.d/couchbase-csdk.list
+ sudo -H $HTTP_PROXY apt-get update
+ sudo -H $HTTP_PROXY apt-get --allow-unauthenticated -y install libcouchbase-dev
+ sudo -H $HTTP_PROXY pip install --upgrade couchbase
+ fi
+ fi
}
function mod_confs() {
@@ -704,14 +720,15 @@ function mod_confs() {
# Enable neutron tests if needed
sed -i "s/%neutron_enabled%/$ENABLE_NEUTRON/g" $TEST_CONF
- # If neutron is enabled, we create a shared network and write this info to the
- # confs so that the integration tests can use it.
+ # If neutron is enabled, the devstack plugin will have created an alt_demo
+ # network - write this info to the confs so that the integration tests can
+ # use it.
if [[ $ENABLE_NEUTRON = true ]]; then
- management_network_id=$(neutron --os-username=admin --os-password=$ADMIN_PASSWORD --os-tenant-name=admin --os-auth-url=$TROVE_AUTH_ENDPOINT net-list | awk '/ alt-private / {print $2}')
- management_subnet=$(neutron --os-username=admin --os-password=$ADMIN_PASSWORD --os-tenant-name=admin --os-auth-url=$TROVE_AUTH_ENDPOINT subnet-list | awk '/ alt-private-subnet / {print $2}')
- echo "Using neutron network $management_network_id and subnet $management_subnet"
- sed -i "s,%shared_network%,$management_network_id,g" $TEST_CONF
- sed -i "s,%shared_network_subnet%,$management_subnet,g" $TEST_CONF
+ TROVE_NET_ID=$(openstack $CLOUD_ADMIN_ARG network list | grep " $TROVE_PRIVATE_NETWORK_NAME " | awk '{print $2}')
+ TROVE_SUBNET_ID=$(openstack $CLOUD_ADMIN_ARG subnet list | grep " $TROVE_PRIVATE_SUBNET_NAME " | awk '{print $2}')
+ echo "Using network ${TROVE_PRIVATE_NETWORK_NAME} (${TROVE_NET_ID}): ${TROVE_PRIVATE_SUBNET_NAME} (${TROVE_SUBNET_ID})"
+ sed -i "s,%shared_network%,$TROVE_NET_ID,g" $TEST_CONF
+ sed -i "s,%shared_network_subnet%,$TROVE_SUBNET_ID,g" $TEST_CONF
else
# do not leave invalid keys in the configuration when using Nova for networking
sed -i "/%shared_network%/d" $TEST_CONF
@@ -759,19 +776,26 @@ function add_test_flavors() {
add_flavor 'large' 27 2048 15 1
# This will allow Nova to create an instance, but without enough disk to boot the image
- add_flavor 'fault_1' 30 512 1 1
+ add_flavor 'fault_1' 30 512 1 1 'skip_resize'
# This should be enough memory to cause Nova to fail entirely due to too much allocation
- add_flavor 'fault_2' 31 131072 5 1
+ add_flavor 'fault_2' 31 131072 5 1 'skip_resize'
}
function cmd_test_init() {
+ local DATASTORE_TYPE=$1
+
+ if [ -z "${DATASTORE_TYPE}" ]; then
+ exclaim "${COLOR_RED}Datastore argument was not specified.${COLOR_NONE}"
+ exit 1
+ fi
+
exclaim 'Initializing Configuration for Running Tests...'
exclaim "Installing python test packages."
- install_test_packages
+ install_test_packages "${DATASTORE_TYPE}"
exclaim "Modifying test.conf and guest.conf with appropriate values."
- mod_confs $1
+ mod_confs "${DATASTORE_TYPE}"
exclaim "Creating Test Flavors."
add_test_flavors
@@ -786,10 +810,10 @@ function cmd_test_init() {
}
function cmd_build_image() {
- IMAGE_DATASTORE_TYPE=${1:-'mysql'}
- ESCAPED_PATH_TROVE=${2:-'\/opt\/stack\/trove'}
- HOST_SCP_USERNAME=${3:-'ubuntu'}
- GUEST_USERNAME=${4:-'ubuntu'}
+ local IMAGE_DATASTORE_TYPE=${1:-'mysql'}
+ local ESCAPED_PATH_TROVE=${2:-'\/opt\/stack\/trove'}
+ local HOST_SCP_USERNAME=${3:-'ubuntu'}
+ local GUEST_USERNAME=${4:-'ubuntu'}
exclaim "Ensuring we have all packages needed to build image."
sudo $HTTP_PROXY $PKG_MGR $PKG_GET_ARGS update
@@ -806,6 +830,14 @@ function cmd_build_image() {
}
function cmd_build_and_upload_image() {
+ local DATASTORE_TYPE=$1
+ local RESTART_TROVE=${2:-$(get_bool RESTART_TROVE "true")}
+
+ if [ -z "${DATASTORE_TYPE}" ]; then
+ exclaim "${COLOR_RED}Datastore argument was not specified.${COLOR_NONE}"
+ exit 1
+ fi
+
local IMAGE_URL=""
# Use /tmp as file_cache
FILES=/tmp
@@ -814,26 +846,21 @@ function cmd_build_and_upload_image() {
IMAGE_URL=$IMAGE_DOWNLOAD_URL
else
exclaim "Trying to build image"
- build_guest_image $1
+ build_guest_image "${DATASTORE_TYPE}"
QCOW_IMAGE=`find $VM_PATH -name '*.qcow2'`
IMAGE_URL="file://$QCOW_IMAGE"
fi
- # The devstack openrc has references to 'enable_plugin' which causes errors
- # in the stable/juno and stable/kilo branches. These are safe to ignore.
- set +e; source $PATH_DEVSTACK_SRC/openrc admin admin; set -e
- TOKEN=$(openstack token issue | grep ' id ' | get_field 2)
- GLANCE_IMAGEIDS=$(glance image-list | grep $(basename $IMAGE_URL .qcow2) | get_field 1)
+ GLANCE_IMAGEIDS=$(openstack $CLOUD_ADMIN_ARG image list | grep $(basename $IMAGE_URL .qcow2) | get_field 1)
if [[ -n $GLANCE_IMAGEIDS ]]; then
- glance image-delete $GLANCE_IMAGEIDS
+ openstack $CLOUD_ADMIN_ARG image delete $GLANCE_IMAGEIDS
fi
- GLANCE_IMAGEID=`get_glance_id upload_image $IMAGE_URL $TOKEN`
- set +e; source $PATH_DEVSTACK_SRC/openrc demo demo; set -e
+ GLANCE_IMAGEID=`get_glance_id upload_image $IMAGE_URL`
[[ -z "$GLANCE_IMAGEID" ]] && echo "Glance upload failed!" && exit 1
echo "IMAGE ID: $GLANCE_IMAGEID"
exclaim "Updating Datastores"
- cmd_set_datastore $GLANCE_IMAGEID $1
+ cmd_set_datastore "${GLANCE_IMAGEID}" "${DATASTORE_TYPE}" "${RESTART_TROVE}"
}
@@ -911,7 +938,6 @@ function init_fake_mode() {
function cmd_start() {
if screen -ls | grep -q stack; then
- set_trove_plugin_vars
USE_SCREEN=True
TOP_DIR=$PATH_DEVSTACK_SRC
LOGDIR=$TROVE_LOGDIR
@@ -919,7 +945,8 @@ function cmd_start() {
if [[ "$RUNNING" =~ " tr-" ]]; then
exclaim "${COLOR_RED}WARNING: Trove services appear to be running. Please run 'stop' or 'restart'${COLOR_NONE}"
else
- source /dev/stdin < <(sed -n '/^function start_trove\(\)/,/^}/p' "$TROVE_SETUP_CMD_FILE")
+ source "$TROVE_DEVSTACK_SETTINGS"
+ source /dev/stdin < <(sed -n '/^function start_trove\(\)/,/^}/p' "$TROVE_DEVSTACK_PLUGIN")
start_trove
fi
else
@@ -950,10 +977,10 @@ function cmd_run_fake() {
function cmd_stop() {
if screen -ls | grep -q stack; then
- set_trove_plugin_vars
rm -f $DEST/status/stack/tr-*
USE_SCREEN=True
- source /dev/stdin < <(sed -n '/^function stop_trove\(\)/,/^}/p' "$TROVE_SETUP_CMD_FILE")
+ source "$TROVE_DEVSTACK_SETTINGS"
+ source /dev/stdin < <(sed -n '/^function stop_trove\(\)/,/^}/p' "$TROVE_DEVSTACK_PLUGIN")
MAX_RETRY=5
COUNT=1
while true; do
@@ -991,11 +1018,12 @@ function cmd_int_tests() {
args="$@"
fi
+ dump_env
# -- verbose makes it prettier.
# -- logging-clear-handlers keeps the novaclient and other things from
# spewing logs to stdout.
args="$INT_TEST_OPTIONS -B $TROVESTACK_TESTS/integration/int_tests.py --verbose --logging-clear-handlers $args"
- echo "python $args"
+ echo "Running: python $args"
python $args
}
@@ -1094,15 +1122,23 @@ function cmd_vagrant_ssh() {
function cmd_run_ci() {
+ local DATASTORE_TYPE=$1
+ local RESTART_TROVE=${2:-$(get_bool RESTART_TROVE "true")}
+
+ if [ -z "${DATASTORE_TYPE}" ]; then
+ exclaim "${COLOR_RED}Datastore argument was not specified.${COLOR_NONE}"
+ exit 1
+ fi
+
exclaim "Running CI suite..."
set +e
cmd_stop_deps
cmd_stop
set -e
cmd_install
- cmd_test_init $1
+ cmd_test_init "${DATASTORE_TYPE}"
# The arg will be the image type
- cmd_build_and_upload_image $1
+ cmd_build_and_upload_image "${DATASTORE_TYPE}" "${RESTART_TROVE}"
# Test in fake mode.
exclaim "Testing in fake mode."
@@ -1172,7 +1208,10 @@ function exec_cmd_on_output() {
}
function cmd_clean() {
- echo "Cleaning up project '${OS_PROJECT_NAME}'"
+ local project_name=${1:-alt_demo}
+
+ exclaim "Cleaning up project '${COLOR_BLUE}${project_name}${COLOR_NONE}'"
+
# reset any stuck backups
mysql_trove "update backups set state='COMPLETED'"
# clear out any DS version metadata
@@ -1186,7 +1225,11 @@ function cmd_clean() {
# mark all instance modules as deleted
mysql_trove "update instance_modules set deleted=1"
- source "${PATH_DEVSTACK_SRC}"/openrc admin "${OS_PROJECT_NAME}"
+ if [[ ! -f "${PATH_DEVSTACK_SRC}"/accrc/${project_name}/admin ]]; then
+ echo "Could not find credentials file for project '${project_name}'"
+ exit 1
+ fi
+ source "${PATH_DEVSTACK_SRC}"/accrc/${project_name}/admin
# delete any trove clusters
exec_cmd_on_output "trove cluster-list" "trove cluster-delete" 20
# delete any trove instances
@@ -1194,41 +1237,47 @@ function cmd_clean() {
# delete any backups
exec_cmd_on_output "trove backup-list" "trove backup-delete"
# clean up any remaining nova instances or cinder volumes
- exec_cmd_on_output "nova list" "nova delete" 5
- exec_cmd_on_output "cinder list" "cinder delete" 1
+ exec_cmd_on_output "openstack $CLOUD_ADMIN_ARG server list" "openstack $CLOUD_ADMIN_ARG server delete" 5
+ exec_cmd_on_output "openstack $CLOUD_ADMIN_ARG volume list" "openstack $CLOUD_ADMIN_ARG volume delete" 1
# delete any config groups since all instances should be gone now
exec_cmd_on_output "trove configuration-list" "trove configuration-delete"
# delete any modules too
exec_cmd_on_output "trove module-list" "trove module-delete"
# make sure that security groups are also gone, except the default
- exec_cmd_on_output "openstack security group list" "nova security group delete" 0 "default"
+ exec_cmd_on_output "openstack $CLOUD_ADMIN_ARG security group list" "openstack $CLOUD_ADMIN_ARG security group delete" 0 "default"
# delete server groups
- exec_cmd_on_output "nova server-group-list" "nova server-group-delete"
+ exec_cmd_on_output "openstack $CLOUD_ADMIN_ARG server group list" "openstack $CLOUD_ADMIN_ARG server group delete"
}
function cmd_kick_start() {
- cmd_test_init $1
- cmd_build_and_upload_image $1
+ local DATASTORE_TYPE=$1
+ local RESTART_TROVE=${2:-$(get_bool RESTART_TROVE "true")}
+
+ if [ -z "${DATASTORE_TYPE}" ]; then
+ exclaim "${COLOR_RED}Datastore argument was not specified.${COLOR_NONE}"
+ exit 1
+ fi
+
+ exclaim "Running kick-start for $DATASTORE_TYPE (restart trove: $RESTART_TROVE)"
+ dump_env
+ cmd_test_init "${DATASTORE_TYPE}"
+ cmd_build_and_upload_image "${DATASTORE_TYPE}" "${RESTART_TROVE}"
}
function cmd_dsvm_gate_tests() {
+ ACTUAL_HOSTNAME=$(hostname -I | sed 's/[0-9]*\.[0-9]*\.[0-9]*\.1\b//g' | sed 's/[0-9a-z][0-9a-z]*:.*:[0-9a-z][0-9a-z]*//g' | sed 's/ /\n/g' | sed '/^$/d' | sort -bu | head -1)
+
+ local DATASTORE_TYPE=${1:-'mysql'}
+ local TEST_GROUP=${2:-${DATASTORE_TYPE}}
+ local HOST_SCP_USERNAME=${3:-'jenkins'}
+ local GUEST_USERNAME=${4:-'ubuntu'}
+ local CONTROLLER_IP=${5:-$ACTUAL_HOSTNAME}
+ local ESCAPED_PATH_TROVE=${6:-'\/opt\/stack\/new\/trove'}
+
exclaim "Running cmd_dsvm_gate_tests ..."
- DATASTORE_TYPE=${1:-'mysql'}
- TEST_GROUP=${2:-${DATASTORE_TYPE}}
- HOST_SCP_USERNAME=${3:-'jenkins'}
- GUEST_USERNAME=${4:-'ubuntu'}
- CONTROLLER_IP=${5:-'10.1.0.1'}
- ESCAPED_PATH_TROVE=${6:-'\/opt\/stack\/new\/trove'}
export REPORT_DIRECTORY=${REPORT_DIRECTORY:=$HOME/dsvm-report/}
export TROVE_REPORT_DIR=$HOME/dsvm-report/
-
- if [[ $BRANCH_OVERRIDE == "stable/liberty" ]]; then
- # Devstack in liberty doesn't copy the clouds.yaml file to /etc so we need to
- # ensure we have access to the clouds.yaml file set up by devstack-gate
- sudo mkdir -p ~/.config/openstack
- sudo ln -s $DEST/.config/openstack/clouds.yaml ~/.config/openstack/clouds.yaml
- sudo chown -R $(whoami) ~/.config
- fi
+ TROVESTACK_DUMP_ENV=true
# Devstack vm-gate runs as the jenkins user, but needs to connect to the guest image as ubuntu
echo "User=ubuntu" >> /home/jenkins/.ssh/config
@@ -1242,25 +1291,15 @@ function cmd_dsvm_gate_tests() {
cd $TROVESTACK_SCRIPTS
sudo -H $HTTP_PROXY pip install --upgrade pip dib-utils
- cmd_kick_start $DATASTORE_TYPE
+ local RESTART_TROVE=false
+ cmd_kick_start "${DATASTORE_TYPE}" "${RESTART_TROVE}"
# Update the local swift endpoint in the catalog to use the CONTROLLER_IP instead of 127.0.0.1
- source $DEST/devstack/accrc/admin/admin
- # NOTE(mriedem): We have to treat stable branches before liberty special
- # due to constraints with older versions of python-openstackclient.
- if [[ $BRANCH_OVERRIDE == "stable/juno" || $BRANCH_OVERRIDE == "stable/kilo" ]]; then
- SWIFT_ENDPOINT=$(openstack endpoint list | grep 'swift' | get_field 1)
- openstack endpoint create swift --region RegionOne --publicurl 'http://'$CONTROLLER_IP':8080/v1/AUTH_$(tenant_id)s' \
- --internalurl 'http://'$CONTROLLER_IP':8080/v1/AUTH_$(tenant_id)s' --adminurl 'http://'$CONTROLLER_IP':8080'
- openstack endpoint delete $SWIFT_ENDPOINT
- else
- OS_CLIENT_ARGS="--os-auth-type v3password --os-auth-url $KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:5000/v3 --os-identity-api-version=3"
- SWIFT_ENDPOINTS=$(openstack endpoint list $OS_CLIENT_ARGS --service swift -c ID -f value)
- openstack endpoint create $OS_CLIENT_ARGS swift public 'http://'$CONTROLLER_IP':8080/v1/AUTH_$(tenant_id)s' --region RegionOne
- openstack endpoint create $OS_CLIENT_ARGS swift internal 'http://'$CONTROLLER_IP':8080/v1/AUTH_$(tenant_id)s' --region RegionOne
- openstack endpoint create $OS_CLIENT_ARGS swift admin 'http://'$CONTROLLER_IP':8080' --region RegionOne
- echo $SWIFT_ENDPOINTS | xargs -n 1 openstack endpoint delete $OS_CLIENT_ARGS
- fi
+ SWIFT_ENDPOINTS=$(openstack $CLOUD_ADMIN_ARG endpoint list --service swift -c ID -f value)
+ openstack $CLOUD_ADMIN_ARG endpoint create swift public 'http://'$CONTROLLER_IP':8080/v1/AUTH_$(tenant_id)s' --region RegionOne
+ openstack $CLOUD_ADMIN_ARG endpoint create swift internal 'http://'$CONTROLLER_IP':8080/v1/AUTH_$(tenant_id)s' --region RegionOne
+ openstack $CLOUD_ADMIN_ARG endpoint create swift admin 'http://'$CONTROLLER_IP':8080' --region RegionOne
+ echo $SWIFT_ENDPOINTS | xargs -n 1 openstack $CLOUD_ADMIN_ARG endpoint delete
cmd_int_tests --group=$TEST_GROUP
}
@@ -1270,8 +1309,9 @@ function cmd_reset_task() {
}
function cmd_clone_projects() {
- UPDATE_PROJECTS=$1
- PROJECT_LIST_FILES=${@:2}
+ local UPDATE_PROJECTS=$1
+ local PROJECT_LIST_FILES=${@:2}
+
for project in $(cat $PROJECT_LIST_FILES); do
if [ ! -d $PATH_DEVSTACK_OUTPUT/$project ]; then
echo "Creating a new clone of $project..."
@@ -1318,7 +1358,6 @@ function cmd_repl() {
INT_TEST_OPTIONS=-i cmd_int_tests_white_box --repl --group=_does_not_exist_ $@
}
-
###############################################################################
# Process the user provided command and run the appropriate command
###############################################################################
@@ -1332,9 +1371,7 @@ fi
# Set this to exit immediately on error
set -o errexit
-# set_home_dir
set_http_proxy
-set_trove_plugin_vars false
function print_usage() {
echo "Usage: $0 [command]"
@@ -1354,7 +1391,6 @@ function print_usage() {
--helper for environment--
kick-start - kick start the setup of trove.
(trovestack test-init/build-image in one step)
- [mysql no-clean] no clean avoids rebuilding packages from scratch
- Set REBUILD_IMAGE=True to force rebuild (won't use cached image)
--trove dependency services--
@@ -1362,13 +1398,19 @@ function print_usage() {
stop-deps - Kill daemons Trove depends on.
--trove services--
- start - Start or resume daemons Trove depends on.
- stop - Kill daemons Trove depends on.
+ start - Start or resume Trove daemons.
+ stop - Kill Trove daemons.
restart - Runs stop then start for Trove services.
--tests--
unit-tests - Run the unit tests.
int-tests - Runs the integration tests (requires all daemons).
+                   See trove/tests/int_tests.py for the list of registered groups.
+ Examples:
+ Run original MySQL tests: ./trovestack int-tests
+ Run all MySQL scenario tests: ./trovestack int-tests --group=mysql-supported
+ Run single Redis scenario tests: ./trovestack int-tests --group=redis-supported-single
+ Run specific functional tests: ./trovestack int-tests --group=module-create --group=configuration-create
simple-tests - Runs the simple integration tests (requires all daemons).
dsvm-gate-tests - Configures and runs the int-tests in a devstack vm-gate environment.
@@ -1378,7 +1420,8 @@ function print_usage() {
rd-sql - Opens the Trove MySQL database.
vagrant-ssh - Runs a command from the host on the server.
clear - Destroy instances and rabbit queues.
- clean - Clean up resources created by a failed test run.
+ clean - Clean up resources created by a failed test run. Takes
+ project_name as an optional parameter (defaults to alt_demo).
run - Starts RD but not in a screen.
run-fake - Runs the server in fake mode.
update-projects - Git pull on all the projects Trove depends on.
@@ -1396,7 +1439,7 @@ function run_command() {
case "$1" in
"install" ) cmd_install;;
- "test-init" ) cmd_test_init $@;;
+ "test-init" ) shift; cmd_test_init $@;;
"build-image" ) shift; cmd_build_image $@;;
"initialize" ) cmd_initialize;;
"unit-tests" ) cmd_unit_tests;;
@@ -1414,7 +1457,7 @@ function run_command() {
"run-ci" ) shift; cmd_run_ci $@;;
"vagrant-ssh" ) shift; cmd_vagrant_ssh $@;;
"debug" ) shift; echo "Enabling debugging."; \
- set -o xtrace; run_command $@;;
+ set -o xtrace; TROVESTACK_DUMP_ENV=true; run_command $@;;
"clear" ) shift; cmd_clear $@;;
"clean" ) shift; cmd_clean $@;;
"run" ) shift; cmd_run $@;;
diff --git a/integration/scripts/trovestack.rc b/integration/scripts/trovestack.rc
index f7c8297a..8982c2fa 100644
--- a/integration/scripts/trovestack.rc
+++ b/integration/scripts/trovestack.rc
@@ -10,17 +10,8 @@
# Try REGION_NAME then OS_REGION_NAME then RegionOne (the devstack default)
REGION_NAME=${REGION_NAME:-${OS_REGION_NAME:-RegionOne}}
-# Enable neutron instead of nova-network
-# Note: Until a few key changesets land, we can't enable Neutron properly.
-# See: https://review.openstack.org/#/c/356026
-# https://review.openstack.org/#/c/356763
-# https://review.openstack.org/#/c/356701
-# NEUTRON_DEFAULT=true
-NEUTRON_DEFAULT=false
-if [[ $BRANCH_OVERRIDE == "stable/liberty" || $BRANCH_OVERRIDE == "stable/mitaka" ]]; then
- NEUTRON_DEFAULT=false
-fi
-ENABLE_NEUTRON=$(get_bool ENABLE_NEUTRON $NEUTRON_DEFAULT)
+# Enable Neutron
+ENABLE_NEUTRON=$(get_bool ENABLE_NEUTRON true)
# Enable osprofiler - note: Enables Ceilometer as well
ENABLE_PROFILER=$(get_bool ENABLE_PROFILER false)
@@ -78,6 +69,9 @@ TROVE_CLIENT_BRANCH=${TROVE_CLIENT_BRANCH:-${TROVECLIENT_BRANCH:-master}}
TROVE_DASHBOARD_REPO=${TROVE_DASHBOARD_REPO:-${TROVEDASHBOARD_REPO:-${GIT_OPENSTACK}/trove-dashboard.git}}
TROVE_DASHBOARD_DIR=${TROVE_DASHBOARD_DIR:-${TROVEDASHBOARD_DIR:-${PATH_TROVE_DASHBOARD}}}
TROVE_DASHBOARD_BRANCH=${TROVE_DASHBOARD_BRANCH:-${TROVEDASHBOARD_BRANCH:-master}}
+# Trove specific networking options
+TROVE_PRIVATE_NETWORK_NAME=alt-private
+TROVE_PRIVATE_SUBNET_NAME=alt-private-subnet
# Destination for working data
DATA_DIR=${DEST}/data
@@ -93,7 +87,7 @@ VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-51200M}
MYSQL_PASSWORD=e1a2c042c828d3566d0a
RABBIT_PASSWORD=f7999d1955c5014aa32c
SERVICE_TOKEN=be19c524ddc92109a224
-ADMIN_PASSWORD=3de4922d8b6ac5a1aad9
+ADMIN_PASSWORD=${ADMIN_PASSWORD:-${OS_PASSWORD:-3de4922d8b6ac5a1aad9}}
SERVICE_PASSWORD=7de4162d826bc5a11ad9
# Swift hash used by devstack.
@@ -103,9 +97,6 @@ SWIFT_HASH=12go358snjw24501
SWIFT_DATA_DIR=${DATA_DIR}/swift
SWIFT_DISK_IMAGE=${SWIFT_DATA_DIR}/drives/images/swift.img
-DISTRO=${DISTRO:-ubuntu}
-#DISTRO=fedora
-
# The following values can be used to tweak how devstack sets
# up Trove. If not explicitly set, the defaults in the code are used.
# To make changes without modifying the repo, add these variables
diff --git a/integration/tests/integration/tests/api/__init__.py b/integration/tests/integration/tests/api/__init__.py
index 40d014dd..e69de29b 100644
--- a/integration/tests/integration/tests/api/__init__.py
+++ b/integration/tests/integration/tests/api/__init__.py
@@ -1,13 +0,0 @@
-# Copyright 2011 OpenStack LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
diff --git a/integration/tests/integration/tests/util/__init__.py b/integration/tests/integration/tests/util/__init__.py
index 671d3c17..e69de29b 100644
--- a/integration/tests/integration/tests/util/__init__.py
+++ b/integration/tests/integration/tests/util/__init__.py
@@ -1,16 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2011 OpenStack, LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
diff --git a/releasenotes/notes/cluster_list_show_all_ips-3547635440.yaml b/releasenotes/notes/cluster_list_show_all_ips-3547635440.yaml
new file mode 100644
index 00000000..cb6ec4b2
--- /dev/null
+++ b/releasenotes/notes/cluster_list_show_all_ips-3547635440.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+  - The payload for cluster GET now returns IPs
+ for all networks, not just the first one
+ found for each instance.
+ Bug 1642695
diff --git a/releasenotes/notes/fix-cluster-show-346798b3e3.yaml b/releasenotes/notes/fix-cluster-show-346798b3e3.yaml
new file mode 100644
index 00000000..7034e4b9
--- /dev/null
+++ b/releasenotes/notes/fix-cluster-show-346798b3e3.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+  - Fix a race condition in cluster-show that returned
+    an erroneous 'not found' error.
+ Bug 1643002
diff --git a/releasenotes/notes/fix_module_apply-042fc6e61f721540.yaml b/releasenotes/notes/fix_module_apply-042fc6e61f721540.yaml
new file mode 100644
index 00000000..661c694d
--- /dev/null
+++ b/releasenotes/notes/fix_module_apply-042fc6e61f721540.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+  - Fixed a case where a new instance_modules record was
+    written for each apply. This issue could potentially
+    have made it impossible to delete a module.
+ Bug 1640010
diff --git a/releasenotes/notes/grow-cluster-nic-az-0e0fe4083666c300.yaml b/releasenotes/notes/grow-cluster-nic-az-0e0fe4083666c300.yaml
new file mode 100644
index 00000000..78406abf
--- /dev/null
+++ b/releasenotes/notes/grow-cluster-nic-az-0e0fe4083666c300.yaml
@@ -0,0 +1,4 @@
+---
+fixes:
+  - Pass instance nic and az to cluster grow. Add a
+    mongodb-specific fix to use the instance nic and az.
diff --git a/releasenotes/notes/instance-show-comp-vol-id-964db9f52a5ac9c1.yaml b/releasenotes/notes/instance-show-comp-vol-id-964db9f52a5ac9c1.yaml
new file mode 100644
index 00000000..d704ea25
--- /dev/null
+++ b/releasenotes/notes/instance-show-comp-vol-id-964db9f52a5ac9c1.yaml
@@ -0,0 +1,4 @@
+---
+other:
+ - Add Compute ID (server_id) and Volume ID (volume_id)
+ to trove show output for admin users. Bug #1633581
diff --git a/releasenotes/notes/mountpoint-detection-096734f0097eb75a.yaml b/releasenotes/notes/mountpoint-detection-096734f0097eb75a.yaml
new file mode 100644
index 00000000..0971bbfb
--- /dev/null
+++ b/releasenotes/notes/mountpoint-detection-096734f0097eb75a.yaml
@@ -0,0 +1,4 @@
+---
+fixes:
+ - Improved mountpoint detection by running it as root. This prevents guests
+ that have undiscoverable mount points from failing to unmount.
diff --git a/releasenotes/notes/multi-region-cd8da560bfe00de5.yaml b/releasenotes/notes/multi-region-cd8da560bfe00de5.yaml
new file mode 100644
index 00000000..49a9dbf5
--- /dev/null
+++ b/releasenotes/notes/multi-region-cd8da560bfe00de5.yaml
@@ -0,0 +1,3 @@
+features:
+ - Adds a region property to the instance model and table. This is the
+ first step in multi-region support.
diff --git a/releasenotes/notes/post-upgrade-fixes-828811607826d433.yaml b/releasenotes/notes/post-upgrade-fixes-828811607826d433.yaml
new file mode 100644
index 00000000..ca276a51
--- /dev/null
+++ b/releasenotes/notes/post-upgrade-fixes-828811607826d433.yaml
@@ -0,0 +1,4 @@
+---
+fixes:
+  - Fixed an issue where the guestagent was left in an inconsistent state
+    after an upgrade. This became apparent when restarting or resizing the
+    instance after upgrading.
diff --git a/releasenotes/notes/use-oslo-policy-bbd1b911e6487c36.yaml b/releasenotes/notes/use-oslo-policy-bbd1b911e6487c36.yaml
new file mode 100644
index 00000000..5e6138d7
--- /dev/null
+++ b/releasenotes/notes/use-oslo-policy-bbd1b911e6487c36.yaml
@@ -0,0 +1,8 @@
+---
+features:
+ - Add RBAC (role-based access control)
+ enforcement on all trove APIs.
+    Allows a role-based access rule to be defined
+    for every trove API call
+ (rule definitions are available in
+ /etc/trove/policy.json).
diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst
index 34b7b865..5774d3a6 100644
--- a/releasenotes/source/index.rst
+++ b/releasenotes/source/index.rst
@@ -8,4 +8,3 @@
unreleased
newton
mitaka
- liberty
diff --git a/releasenotes/source/liberty.rst b/releasenotes/source/liberty.rst
deleted file mode 100644
index 36217be8..00000000
--- a/releasenotes/source/liberty.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-==============================
- Liberty Series Release Notes
-==============================
-
-.. release-notes::
- :branch: origin/stable/liberty
diff --git a/requirements.txt b/requirements.txt
index d16cd825..f7e4a85e 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,10 +1,10 @@
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
-pbr>=1.6 # Apache-2.0
+pbr>=1.8 # Apache-2.0
SQLAlchemy<1.1.0,>=1.0.10 # MIT
eventlet!=0.18.3,>=0.18.2 # MIT
-keystonemiddleware!=4.5.0,>=4.2.0 # Apache-2.0
+keystonemiddleware>=4.12.0 # Apache-2.0
Routes!=2.0,!=2.1,!=2.3.0,>=1.12.3;python_version=='2.7' # MIT
Routes!=2.0,!=2.3.0,>=1.12.3;python_version!='2.7' # MIT
WebOb>=1.6.0 # MIT
@@ -14,15 +14,16 @@ sqlalchemy-migrate>=0.9.6 # Apache-2.0
netaddr!=0.7.16,>=0.7.13 # BSD
netifaces>=0.10.4 # MIT
httplib2>=0.7.5 # MIT
-lxml>=2.3 # BSD
-passlib>=1.6 # BSD
-python-heatclient>=1.5.0 # Apache-2.0
+lxml!=3.7.0,>=2.3 # BSD
+passlib>=1.7.0 # BSD
+python-heatclient>=1.6.1 # Apache-2.0
python-novaclient!=2.33.0,>=2.29.0 # Apache-2.0
python-cinderclient!=1.7.0,!=1.7.1,>=1.6.0 # Apache-2.0
-python-keystoneclient>=3.6.0 # Apache-2.0
-python-swiftclient>=2.2.0 # Apache-2.0
+python-keystoneclient>=3.8.0 # Apache-2.0
+python-swiftclient>=3.2.0 # Apache-2.0
python-designateclient>=1.5.0 # Apache-2.0
python-neutronclient>=5.1.0 # Apache-2.0
+python-glanceclient>=2.5.0 # Apache-2.0
iso8601>=0.1.11 # MIT
jsonschema!=2.5.0,<3.0.0,>=2.0.0 # MIT
Jinja2>=2.8 # BSD License (3 clause)
@@ -33,15 +34,17 @@ oslo.i18n>=2.1.0 # Apache-2.0
oslo.middleware>=3.0.0 # Apache-2.0
oslo.serialization>=1.10.0 # Apache-2.0
oslo.service>=1.10.0 # Apache-2.0
-oslo.utils>=3.17.0 # Apache-2.0
+oslo.utils>=3.18.0 # Apache-2.0
oslo.concurrency>=3.8.0 # Apache-2.0
-PyMySQL!=0.7.7,>=0.6.2 # MIT License
+PyMySQL>=0.7.6 # MIT License
Babel>=2.3.4 # BSD
six>=1.9.0 # MIT
stevedore>=1.17.1 # Apache-2.0
-oslo.messaging>=5.2.0 # Apache-2.0
+oslo.messaging>=5.14.0 # Apache-2.0
osprofiler>=1.4.0 # Apache-2.0
oslo.log>=3.11.0 # Apache-2.0
-oslo.db!=4.13.1,!=4.13.2,>=4.10.0 # Apache-2.0
+oslo.db>=4.13.3 # Apache-2.0
enum34;python_version=='2.7' or python_version=='2.6' or python_version=='3.3' # BSD
xmltodict>=0.10.1 # MIT
+pycrypto>=2.6 # Public Domain
+oslo.policy>=1.17.0 # Apache-2.0
diff --git a/test-requirements.txt b/test-requirements.txt
index 865e6fa9..db7b528b 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -2,7 +2,7 @@
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
# Hacking already pins down pep8, pyflakes and flake8
-hacking<0.11,>=0.10.0
+hacking<0.13,>=0.12.0 # Apache-2.0
bandit>=1.1.0 # Apache-2.0
sphinx!=1.3b1,<1.4,>=1.2.1 # BSD
os-api-ref>=1.0.0 # Apache-2.0
@@ -25,7 +25,6 @@ pymongo!=3.1,>=3.0.2 # Apache-2.0
redis>=2.10.0 # MIT
psycopg2>=2.5 # LGPL/ZPL
cassandra-driver!=3.6.0,>=2.1.4 # Apache-2.0
-pycrypto>=2.6 # Public Domain
couchdb>=0.8 # Apache-2.0
os-testr>=0.8.0 # Apache-2.0
astroid<1.4.0 # LGPLv2.1 # breaks pylint 1.4.4
diff --git a/tools/trove-pylint.config b/tools/trove-pylint.config
index ba9a6f6b..ea041c5d 100644
--- a/tools/trove-pylint.config
+++ b/tools/trove-pylint.config
@@ -694,6 +694,30 @@
"upgrade"
],
[
+ "trove/db/sqlalchemy/migrate_repo/versions/039_region.py",
+ "E1101",
+ "Instance of 'Table' has no 'create_column' member",
+ "upgrade"
+ ],
+ [
+ "trove/db/sqlalchemy/migrate_repo/versions/039_region.py",
+ "E1120",
+ "No value for argument 'dml' in method call",
+ "upgrade"
+ ],
+ [
+ "trove/db/sqlalchemy/migrate_repo/versions/039_region.py",
+ "no-member",
+ "Instance of 'Table' has no 'create_column' member",
+ "upgrade"
+ ],
+ [
+ "trove/db/sqlalchemy/migrate_repo/versions/039_region.py",
+ "no-value-for-parameter",
+ "No value for argument 'dml' in method call",
+ "upgrade"
+ ],
+ [
"trove/db/sqlalchemy/migration.py",
"E0611",
"No name 'exceptions' in module 'migrate.versioning'",
diff --git a/tools/trove-pylint.py b/tools/trove-pylint.py
index c2d7f3dc..a31d9db9 100755
--- a/tools/trove-pylint.py
+++ b/tools/trove-pylint.py
@@ -79,16 +79,8 @@ class Config(object):
indent=2, separators=(',', ': '))
def load(self, filename=DEFAULT_CONFIG_FILE):
- self.config = self.default_config
-
- try:
- with open(filename) as fp:
- _c = json.load(fp, encoding="utf-8")
-
- self.config = _c
- except Exception:
- print("An error occured loading configuration, using default.")
- return self
+ with open(filename) as fp:
+ self.config = json.load(fp, encoding="utf-8")
def get(self, attribute):
return self.config[attribute]
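
With the silent fallback removed, Config.load() now raises if the configuration file is missing or is not valid JSON, instead of quietly reverting to the default config. A minimal sketch of how a caller might surface that failure (the path and error handling here are illustrative, not part of this change):

    import sys

    config = Config()
    try:
        # load() simply opens the file and json.load()s it now, so any
        # IOError or JSON decoding error propagates to the caller.
        config.load("tools/trove-pylint.config")
    except (IOError, ValueError) as err:
        sys.exit("Could not load the pylint config: %s" % err)
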
diff --git a/tox.ini b/tox.ini
index 73e4b114..f80d24df 100644
--- a/tox.ini
+++ b/tox.ini
@@ -5,6 +5,7 @@ skipsdist = True
[testenv]
setenv = VIRTUAL_ENV={envdir}
+ PYTHONWARNINGS=default::DeprecationWarning
usedevelop = True
install_command = pip install \
-c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt} \
@@ -72,6 +73,7 @@ show-source = True
# H301 is ignored on purpose.
# The rest of the ignores are TODOs.
ignore = F821,H301,H404,H405,H501
+enable-extensions = H203,H106
builtins = _
exclude=.venv,.tox,.git,dist,doc,*egg,tools,etc,build,*.po,*.pot,integration
filename=*.py,trove-*
diff --git a/trove/backup/service.py b/trove/backup/service.py
index 4d505f54..bb14b6bb 100644
--- a/trove/backup/service.py
+++ b/trove/backup/service.py
@@ -22,6 +22,7 @@ from trove.common.i18n import _
from trove.common import notification
from trove.common.notification import StartNotification
from trove.common import pagination
+from trove.common import policy
from trove.common import wsgi
LOG = logging.getLogger(__name__)
@@ -40,6 +41,7 @@ class BackupController(wsgi.Controller):
LOG.debug("Listing backups for tenant %s" % tenant_id)
datastore = req.GET.get('datastore')
context = req.environ[wsgi.CONTEXT_KEY]
+ policy.authorize_on_tenant(context, 'backup:index')
backups, marker = Backup.list(context, datastore)
view = views.BackupViews(backups)
paged = pagination.SimplePaginatedDataView(req.url, 'backups', view,
@@ -52,11 +54,14 @@ class BackupController(wsgi.Controller):
% (tenant_id, id))
context = req.environ[wsgi.CONTEXT_KEY]
backup = Backup.get_by_id(context, id)
+ policy.authorize_on_target(context, 'backup:show',
+ {'tenant': backup.tenant_id})
return wsgi.Result(views.BackupView(backup).data(), 200)
def create(self, req, body, tenant_id):
LOG.info(_("Creating a backup for tenant %s"), tenant_id)
context = req.environ[wsgi.CONTEXT_KEY]
+ policy.authorize_on_tenant(context, 'backup:create')
data = body['backup']
instance = data['instance']
name = data['name']
@@ -76,6 +81,9 @@ class BackupController(wsgi.Controller):
'ID: %(backup_id)s') %
{'tenant_id': tenant_id, 'backup_id': id})
context = req.environ[wsgi.CONTEXT_KEY]
+ backup = Backup.get_by_id(context, id)
+ policy.authorize_on_target(context, 'backup:delete',
+ {'tenant': backup.tenant_id})
context.notification = notification.DBaaSBackupDelete(context,
request=req)
with StartNotification(context, backup_id=id):
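
The enforcement pattern added here is the same one used across the controllers: collection-level calls are authorized against the caller's own tenant, while calls on a specific object are authorized against the tenant that owns the loaded object. A minimal sketch of the two forms, assuming a request context and a loaded backup as in the hunk above:

    from trove.common import policy

    # Collection-level rule: only the caller's tenant is relevant.
    policy.authorize_on_tenant(context, 'backup:index')

    # Object-level rule: evaluated against the owner of the object, so the
    # default 'admin_or_owner' rule passes for admins and the owning tenant.
    policy.authorize_on_target(context, 'backup:show',
                               {'tenant': backup.tenant_id})
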
diff --git a/trove/cluster/models.py b/trove/cluster/models.py
index 26a4e8d8..82ed5fa5 100644
--- a/trove/cluster/models.py
+++ b/trove/cluster/models.py
@@ -21,8 +21,9 @@ from trove.cluster.tasks import ClusterTasks
from trove.common import cfg
from trove.common import exception
from trove.common.i18n import _
-from trove.common.notification import (DBaaSClusterGrow, DBaaSClusterShrink,
- DBaaSClusterResetStatus)
+from trove.common.notification import DBaaSClusterGrow, DBaaSClusterShrink
+from trove.common.notification import DBaaSClusterResetStatus
+from trove.common.notification import DBaaSClusterUpgrade
from trove.common.notification import StartNotification
from trove.common import remote
from trove.common import server_group as srv_grp
@@ -297,6 +298,11 @@ class Cluster(object):
instance['volume_size'] = int(node['volume']['size'])
if 'modules' in node:
instance['modules'] = node['modules']
+ if 'nics' in node:
+ instance['nics'] = node['nics']
+ if 'availability_zone' in node:
+ instance['availability_zone'] = (
+ node['availability_zone'])
instances.append(instance)
return self.grow(instances)
elif action == 'shrink':
@@ -310,14 +316,24 @@ class Cluster(object):
with StartNotification(context, cluster_id=self.id):
return self.reset_status()
+ elif action == 'upgrade':
+ context.notification = DBaaSClusterUpgrade(context, request=req)
+ dv_id = param['datastore_version']
+ dv = datastore_models.DatastoreVersion.load(self.datastore, dv_id)
+ with StartNotification(context, cluster_id=self.id,
+ datastore_version=dv.id):
+ return self.upgrade(dv)
else:
raise exception.BadRequest(_("Action %s not supported") % action)
def grow(self, instances):
- raise exception.BadRequest(_("Action 'grow' not supported"))
+ raise exception.BadRequest(_("Action 'grow' not supported"))
def shrink(self, instance_ids):
- raise exception.BadRequest(_("Action 'shrink' not supported"))
+ raise exception.BadRequest(_("Action 'shrink' not supported"))
+
+ def upgrade(self, datastore_version):
+ raise exception.BadRequest(_("Action 'upgrade' not supported"))
@staticmethod
def load_instance(context, cluster_id, instance_id):
@@ -341,23 +357,26 @@ def is_cluster_deleting(context, cluster_id):
def validate_instance_flavors(context, instances,
volume_enabled, ephemeral_enabled):
- """Load and validate flavors for given instance definitions."""
- flavors = dict()
- nova_client = remote.create_nova_client(context)
+ """Validate flavors for given instance definitions."""
+ nova_cli_cache = dict()
for instance in instances:
+ region_name = instance.get('region_name')
flavor_id = instance['flavor_id']
- if flavor_id not in flavors:
- try:
- flavor = nova_client.flavors.get(flavor_id)
- if (not volume_enabled and
- (ephemeral_enabled and flavor.ephemeral == 0)):
- raise exception.LocalStorageNotSpecified(
- flavor=flavor_id)
- flavors[flavor_id] = flavor
- except nova_exceptions.NotFound:
- raise exception.FlavorNotFound(uuid=flavor_id)
-
- return flavors
+ try:
+ if region_name in nova_cli_cache:
+ nova_client = nova_cli_cache[region_name]
+ else:
+ nova_client = remote.create_nova_client(
+ context, region_name)
+ nova_cli_cache[region_name] = nova_client
+
+ flavor = nova_client.flavors.get(flavor_id)
+ if (not volume_enabled and
+ (ephemeral_enabled and flavor.ephemeral == 0)):
+ raise exception.LocalStorageNotSpecified(
+ flavor=flavor_id)
+ except nova_exceptions.NotFound:
+ raise exception.FlavorNotFound(uuid=flavor_id)
def get_required_volume_size(instances, volume_enabled):
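
The rewritten validate_instance_flavors keeps one nova client per region so that instance definitions targeting the same region do not each create a new client. The memoization pattern in isolation looks roughly like this (make_client stands in for remote.create_nova_client and is an assumption of the sketch):

    def client_for_region(cache, region_name, make_client):
        # One client per region; None is a valid key for the default region.
        if region_name not in cache:
            cache[region_name] = make_client(region_name)
        return cache[region_name]

    # Usage sketch:
    #   cache = {}
    #   nova = client_for_region(
    #       cache, instance.get('region_name'),
    #       lambda region: remote.create_nova_client(context, region))
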
diff --git a/trove/cluster/service.py b/trove/cluster/service.py
index e1fb5ddc..67e7a24a 100644
--- a/trove/cluster/service.py
+++ b/trove/cluster/service.py
@@ -25,6 +25,7 @@ from trove.common.i18n import _
from trove.common import notification
from trove.common.notification import StartNotification
from trove.common import pagination
+from trove.common import policy
from trove.common import utils
from trove.common import wsgi
from trove.datastore import models as datastore_models
@@ -40,6 +41,11 @@ class ClusterController(wsgi.Controller):
schemas = apischema.cluster.copy()
@classmethod
+ def authorize_cluster_action(cls, context, cluster_rule_name, cluster):
+ policy.authorize_on_target(context, 'cluster:%s' % cluster_rule_name,
+ {'tenant': cluster.tenant_id})
+
+ @classmethod
def get_action_schema(cls, body, action_schema):
action_type = list(body.keys())[0]
return action_schema.get(action_type, {})
@@ -58,15 +64,25 @@ class ClusterController(wsgi.Controller):
{"req": req, "id": id, "tenant_id": tenant_id})
if not body:
raise exception.BadRequest(_("Invalid request body."))
+
if len(body) != 1:
raise exception.BadRequest(_("Action request should have exactly"
" one action specified in body"))
context = req.environ[wsgi.CONTEXT_KEY]
cluster = models.Cluster.load(context, id)
+ if ('reset-status' in body and
+ 'force_delete' not in body['reset-status']):
+ self.authorize_cluster_action(context, 'reset-status', cluster)
+ elif ('reset-status' in body and
+ 'force_delete' in body['reset-status']):
+ self.authorize_cluster_action(context, 'force_delete', cluster)
+ else:
+ self.authorize_cluster_action(context, 'action', cluster)
cluster.action(context, req, *next(iter(body.items())))
view = views.load_view(cluster, req=req, load_servers=False)
wsgi_result = wsgi.Result(view.data(), 202)
+
return wsgi_result
def show(self, req, tenant_id, id):
@@ -77,6 +93,7 @@ class ClusterController(wsgi.Controller):
context = req.environ[wsgi.CONTEXT_KEY]
cluster = models.Cluster.load(context, id)
+ self.authorize_cluster_action(context, 'show', cluster)
return wsgi.Result(views.load_view(cluster, req=req).data(), 200)
def show_instance(self, req, tenant_id, cluster_id, instance_id):
@@ -92,6 +109,7 @@ class ClusterController(wsgi.Controller):
context = req.environ[wsgi.CONTEXT_KEY]
cluster = models.Cluster.load(context, cluster_id)
+ self.authorize_cluster_action(context, 'show_instance', cluster)
instance = models.Cluster.load_instance(context, cluster.id,
instance_id)
return wsgi.Result(views.ClusterInstanceDetailView(
@@ -105,6 +123,7 @@ class ClusterController(wsgi.Controller):
context = req.environ[wsgi.CONTEXT_KEY]
cluster = models.Cluster.load(context, id)
+ self.authorize_cluster_action(context, 'delete', cluster)
context.notification = notification.DBaaSClusterDelete(context,
request=req)
with StartNotification(context, cluster_id=id):
@@ -118,9 +137,19 @@ class ClusterController(wsgi.Controller):
"tenant_id": tenant_id})
context = req.environ[wsgi.CONTEXT_KEY]
+
+        # This theoretically allows the Admin tenant to list clusters for
+        # only one particular tenant, as opposed to listing all clusters
+        # for all tenants.
+ # * As far as I can tell this is the only call which actually uses the
+ # passed-in 'tenant_id' for anything.
if not context.is_admin and context.tenant != tenant_id:
raise exception.TroveOperationAuthError(tenant_id=context.tenant)
+ # The rule checks that the currently authenticated tenant can perform
+ # the 'cluster-list' action.
+ policy.authorize_on_tenant(context, 'cluster:index')
+
# load all clusters and instances for the tenant
clusters, marker = models.Cluster.load_all(context, tenant_id)
view = views.ClustersView(clusters, req=req)
@@ -134,6 +163,8 @@ class ClusterController(wsgi.Controller):
{"tenant_id": tenant_id, "req": req, "body": body})
context = req.environ[wsgi.CONTEXT_KEY]
+ policy.authorize_on_tenant(context, 'cluster:create')
+
name = body['cluster']['name']
datastore_args = body['cluster'].get('datastore', {})
datastore, datastore_version = (
@@ -174,6 +205,7 @@ class ClusterController(wsgi.Controller):
"volume_type": volume_type,
"nics": nics,
"availability_zone": availability_zone,
+ 'region_name': node.get('region_name'),
"modules": modules})
locality = body['cluster'].get('locality')
diff --git a/trove/cluster/views.py b/trove/cluster/views.py
index 6b2856c7..f60ac0b7 100644
--- a/trove/cluster/views.py
+++ b/trove/cluster/views.py
@@ -88,7 +88,7 @@ class ClusterView(object):
if self.load_servers and instance_ips:
instance_dict["ip"] = instance_ips
if instance.type in ip_to_be_published_for:
- ip_list.append(instance_ips[0])
+ ip_list.extend(instance_ips)
if instance.type in instance_dict_to_be_published_for:
instances.append(instance_dict)
ip_list.sort()
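
This is the change referenced by the cluster_list_show_all_ips release note above: using extend() publishes every address of a qualifying instance rather than just the first. A short illustration of the difference in plain Python (addresses are placeholders):

    instance_ips = ['10.0.0.5', '172.24.4.10']
    old_style = []
    old_style.append(instance_ips[0])   # -> ['10.0.0.5']
    new_style = []
    new_style.extend(instance_ips)      # -> ['10.0.0.5', '172.24.4.10']
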
diff --git a/trove/cmd/conductor.py b/trove/cmd/conductor.py
index 66499190..daff5df4 100644
--- a/trove/cmd/conductor.py
+++ b/trove/cmd/conductor.py
@@ -16,13 +16,13 @@ from oslo_concurrency import processutils
from oslo_service import service as openstack_service
from trove.cmd.common import with_initialize
+from trove.conductor import api as conductor_api
@with_initialize
def main(conf):
from trove.common import notification
from trove.common.rpc import service as rpc_service
- from trove.common.rpc import version as rpc_version
from trove.instance import models as inst_models
notification.DBaaSAPINotification.register_notify_callback(
@@ -30,7 +30,7 @@ def main(conf):
topic = conf.conductor_queue
server = rpc_service.RpcService(
manager=conf.conductor_manager, topic=topic,
- rpc_api_version=rpc_version.RPC_API_VERSION)
+ rpc_api_version=conductor_api.API.API_LATEST_VERSION)
workers = conf.trove_conductor_workers or processutils.get_worker_count()
launcher = openstack_service.launch(conf, server, workers=workers)
launcher.wait()
diff --git a/trove/cmd/guest.py b/trove/cmd/guest.py
index 9f2b1807..ccb33563 100644
--- a/trove/cmd/guest.py
+++ b/trove/cmd/guest.py
@@ -25,6 +25,7 @@ from oslo_service import service as openstack_service
from trove.common import cfg
from trove.common import debug_utils
from trove.common.i18n import _LE
+from trove.guestagent import api as guest_api
CONF = cfg.CONF
# The guest_id opt definition must match the one in common/cfg.py
@@ -57,11 +58,10 @@ def main():
rpc.init(CONF)
from trove.common.rpc import service as rpc_service
- from trove.common.rpc import version as rpc_version
server = rpc_service.RpcService(
topic="guestagent.%s" % CONF.guest_id,
manager=manager, host=CONF.guest_id,
- rpc_api_version=rpc_version.RPC_API_VERSION)
+ rpc_api_version=guest_api.API.API_LATEST_VERSION)
launcher = openstack_service.launch(CONF, server)
launcher.wait()
diff --git a/trove/cmd/taskmanager.py b/trove/cmd/taskmanager.py
index 58f7ed12..aaef017c 100644
--- a/trove/cmd/taskmanager.py
+++ b/trove/cmd/taskmanager.py
@@ -16,6 +16,7 @@ from oslo_config import cfg as openstack_cfg
from oslo_service import service as openstack_service
from trove.cmd.common import with_initialize
+from trove.taskmanager import api as task_api
extra_opts = [openstack_cfg.StrOpt('taskmanager_manager')]
@@ -24,14 +25,13 @@ extra_opts = [openstack_cfg.StrOpt('taskmanager_manager')]
def startup(conf, topic):
from trove.common import notification
from trove.common.rpc import service as rpc_service
- from trove.common.rpc import version as rpc_version
from trove.instance import models as inst_models
notification.DBaaSAPINotification.register_notify_callback(
inst_models.persist_instance_fault)
server = rpc_service.RpcService(
manager=conf.taskmanager_manager, topic=topic,
- rpc_api_version=rpc_version.RPC_API_VERSION)
+ rpc_api_version=task_api.API.API_LATEST_VERSION)
launcher = openstack_service.launch(conf, server)
launcher.wait()
diff --git a/trove/common/apischema.py b/trove/common/apischema.py
index 484e8176..4f424107 100644
--- a/trove/common/apischema.py
+++ b/trove/common/apischema.py
@@ -250,6 +250,7 @@ cluster = {
"nics": nics,
"availability_zone": non_empty_string,
"modules": module_list,
+ "region_name": non_empty_string
}
}
},
@@ -287,7 +288,8 @@ cluster = {
"availability_zone": non_empty_string,
"modules": module_list,
"related_to": non_empty_string,
- "type": non_empty_string
+ "type": non_empty_string,
+ "region_name": non_empty_string
}
}
}
@@ -310,6 +312,21 @@ cluster = {
}
}
}
+ },
+ "upgrade": {
+ "type": "object",
+ "required": ["upgrade"],
+ "additionalProperties": True,
+ "properties": {
+ "upgrade": {
+ "type": "object",
+ "required": ["datastore_version"],
+ "additionalProperties": True,
+ "properties": {
+ "datastore_version": non_empty_string
+ }
+ }
+ }
}
}
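
A request body accepted by the new 'upgrade' action schema is simply an object wrapping a non-empty datastore_version string. The sketch below validates such a body against a stripped-down copy of the schema (the inline schema and version string are illustrative; the real definition uses the shared non_empty_string helper):

    import jsonschema

    upgrade_schema = {
        "type": "object",
        "required": ["upgrade"],
        "properties": {
            "upgrade": {
                "type": "object",
                "required": ["datastore_version"],
                "properties": {
                    # Rough stand-in for apischema's non_empty_string.
                    "datastore_version": {"type": "string", "minLength": 1}
                }
            }
        }
    }

    body = {"upgrade": {"datastore_version": "5.6"}}
    jsonschema.validate(body, upgrade_schema)  # raises ValidationError if malformed
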
@@ -349,6 +366,7 @@ instance = {
},
"nics": nics,
"modules": module_list,
+ "region_name": non_empty_string,
"locality": non_empty_string
}
}
diff --git a/trove/common/cfg.py b/trove/common/cfg.py
index 908d0a59..72d7030f 100644
--- a/trove/common/cfg.py
+++ b/trove/common/cfg.py
@@ -92,8 +92,18 @@ common_opts = [
help='Service type to use when searching catalog.'),
cfg.StrOpt('swift_endpoint_type', default='publicURL',
help='Service endpoint type to use when searching catalog.'),
+ cfg.URIOpt('glance_url', help='URL ending in ``AUTH_``.'),
+ cfg.StrOpt('glance_service_type', default='image',
+ help='Service type to use when searching catalog.'),
+ cfg.StrOpt('glance_endpoint_type', default='publicURL',
+ help='Service endpoint type to use when searching catalog.'),
cfg.URIOpt('trove_auth_url', default='http://0.0.0.0:5000/v2.0',
help='Trove authentication URL.'),
+ cfg.StrOpt('trove_url', help='URL without the tenant segment.'),
+ cfg.StrOpt('trove_service_type', default='database',
+ help='Service type to use when searching catalog.'),
+ cfg.StrOpt('trove_endpoint_type', default='publicURL',
+ help='Service endpoint type to use when searching catalog.'),
cfg.IPOpt('host', default='0.0.0.0',
help='Host to listen for RPC messages.'),
cfg.IntOpt('report_interval', default=30,
@@ -328,11 +338,17 @@ common_opts = [
cfg.StrOpt('remote_swift_client',
default='trove.common.remote.swift_client',
help='Client to send Swift calls to.'),
+ cfg.StrOpt('remote_trove_client',
+ default='trove.common.trove_remote.trove_client',
+ help='Client to send Trove calls to.'),
+ cfg.StrOpt('remote_glance_client',
+ default='trove.common.glance_remote.glance_client',
+ help='Client to send Glance calls to.'),
cfg.StrOpt('exists_notification_transformer',
help='Transformer for exists notifications.'),
cfg.IntOpt('exists_notification_interval', default=3600,
help='Seconds to wait between pushing events.'),
- cfg.IntOpt('quota_notification_interval', default=3600,
+ cfg.IntOpt('quota_notification_interval',
help='Seconds to wait between pushing events.'),
cfg.DictOpt('notification_service_id',
default={'mysql': '2f3ff068-2bfb-4f70-9a9d-a6bb65bc084b',
@@ -1449,18 +1465,19 @@ mariadb_opts = [
upgrade_levels = cfg.OptGroup(
'upgrade_levels',
title='RPC upgrade levels group for handling versions',
- help='Contains the support version caps for each RPC API')
+    help='Contains the support version caps (OpenStack release) for '
+ 'each RPC API')
rpcapi_cap_opts = [
cfg.StrOpt(
- 'taskmanager', default="icehouse",
+ 'taskmanager', default='latest',
help='Set a version cap for messages sent to taskmanager services'),
cfg.StrOpt(
- 'guestagent', default="icehouse",
+ 'guestagent', default='latest',
help='Set a version cap for messages sent to guestagent services'),
cfg.StrOpt(
- 'conductor', default="icehouse",
- help='Set a version cap for messages sent to conductor services'),
+ 'conductor', default='latest',
+        help='Set OpenStack release compatibility for conductor services'),
]
CONF = cfg.CONF
diff --git a/trove/common/exception.py b/trove/common/exception.py
index dc57a589..020a9b48 100644
--- a/trove/common/exception.py
+++ b/trove/common/exception.py
@@ -236,11 +236,6 @@ class UnprocessableEntity(TroveError):
message = _("Unable to process the contained request.")
-class UnauthorizedRequest(TroveError):
-
- message = _("Unauthorized request.")
-
-
class CannotResizeToSameSize(TroveError):
message = _("No change was requested in the size of the instance.")
@@ -309,6 +304,11 @@ class Forbidden(TroveError):
message = _("User does not have admin privileges.")
+class PolicyNotAuthorized(Forbidden):
+
+ message = _("Policy doesn't allow %(action)s to be performed.")
+
+
class InvalidModelError(TroveError):
message = _("The following values are invalid: %(errors)s.")
@@ -538,6 +538,10 @@ class ModuleInvalid(Forbidden):
message = _("The module is invalid: %(reason)s")
+class InstanceNotFound(NotFound):
+ message = _("Instance '%(instance)s' cannot be found.")
+
+
class ClusterNotFound(NotFound):
message = _("Cluster '%(cluster)s' cannot be found.")
@@ -622,3 +626,8 @@ class ImageNotFound(NotFound):
class DatastoreVersionAlreadyExists(BadRequest):
message = _("A datastore version with the name '%(name)s' already exists.")
+
+
+class LogAccessForbidden(Forbidden):
+
+ message = _("You must be admin to %(action)s log '%(log)s'.")
diff --git a/trove/common/glance_remote.py b/trove/common/glance_remote.py
new file mode 100644
index 00000000..0bcde97b
--- /dev/null
+++ b/trove/common/glance_remote.py
@@ -0,0 +1,53 @@
+# Copyright 2016 Tesora Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from keystoneauth1.identity import v3
+from keystoneauth1 import session as ka_session
+
+from oslo_utils.importutils import import_class
+
+from trove.common import cfg
+from trove.common.remote import get_endpoint
+from trove.common.remote import normalize_url
+
+from glanceclient import Client
+
+CONF = cfg.CONF
+
+
+def glance_client(context, region_name=None):
+
+ # We should allow glance to get the endpoint from the service
+ # catalog, but to do so we would need to be able to specify
+    # the endpoint_filter on the API calls, which glance
+    # doesn't currently allow. As a result, we must
+ # specify the endpoint explicitly.
+ if CONF.glance_url:
+ endpoint_url = '%(url)s%(tenant)s' % {
+ 'url': normalize_url(CONF.glance_url),
+ 'tenant': context.tenant}
+ else:
+ endpoint_url = get_endpoint(
+ context.service_catalog, service_type=CONF.glance_service_type,
+ endpoint_region=region_name or CONF.os_region_name,
+ endpoint_type=CONF.glance_endpoint_type)
+
+ auth = v3.Token(CONF.trove_auth_url, context.auth_token)
+ session = ka_session.Session(auth=auth)
+
+ return Client('2', endpoint=endpoint_url, session=session)
+
+
+create_glance_client = import_class(CONF.remote_glance_client)
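
A usage sketch for the new helper, assuming a request context carrying a valid auth token and service catalog (the region and image id below are placeholders):

    from trove.common import glance_remote

    # create_glance_client resolves to whatever remote_glance_client points at,
    # trove.common.glance_remote.glance_client by default.
    glance = glance_remote.create_glance_client(context, region_name='RegionOne')

    # Standard glanceclient v2 calls are then available, e.g. looking up the
    # guest image record by id.
    image = glance.images.get('11111111-2222-3333-4444-555555555555')
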
diff --git a/trove/common/models.py b/trove/common/models.py
index 84fbcc95..78e5e9a2 100644
--- a/trove/common/models.py
+++ b/trove/common/models.py
@@ -103,21 +103,28 @@ class NetworkRemoteModelBase(RemoteModelBase):
network_driver = None
@classmethod
- def get_driver(cls, context):
+ def get_driver(cls, context, region_name):
if not cls.network_driver:
cls.network_driver = import_class(CONF.network_driver)
- return cls.network_driver(context)
+ return cls.network_driver(context, region_name)
class NovaRemoteModelBase(RemoteModelBase):
@classmethod
- def get_client(cls, context):
- return remote.create_nova_client(context)
+ def get_client(cls, context, region_name):
+ return remote.create_nova_client(context, region_name)
class SwiftRemoteModelBase(RemoteModelBase):
@classmethod
+ def get_client(cls, context, region_name):
+ return remote.create_swift_client(context, region_name)
+
+
+class CinderRemoteModelBase(RemoteModelBase):
+
+ @classmethod
def get_client(cls, context):
- return remote.create_swift_client(context)
+ return remote.create_cinder_client(context)
diff --git a/trove/common/notification.py b/trove/common/notification.py
index be7c96bf..c23923a2 100644
--- a/trove/common/notification.py
+++ b/trove/common/notification.py
@@ -436,7 +436,7 @@ class DBaaSInstanceCreate(DBaaSAPINotification):
def required_start_traits(self):
return ['name', 'flavor_id', 'datastore', 'datastore_version',
- 'image_id', 'availability_zone']
+ 'image_id', 'availability_zone', 'region_name']
def optional_start_traits(self):
return ['databases', 'users', 'volume_size', 'restore_point',
@@ -564,6 +564,17 @@ class DBaaSClusterCreate(DBaaSAPINotification):
return ['cluster_id']
+class DBaaSClusterUpgrade(DBaaSAPINotification):
+
+ @abc.abstractmethod
+ def event_type(self):
+ return 'cluster_upgrade'
+
+ @abc.abstractmethod
+ def required_start_traits(self):
+ return ['cluster_id', 'datastore_version']
+
+
class DBaaSClusterDelete(DBaaSAPINotification):
@abc.abstractmethod
@@ -789,3 +800,14 @@ class DBaaSInstanceUpgrade(DBaaSAPINotification):
@abc.abstractmethod
def required_start_traits(self):
return ['instance_id', 'datastore_version_id']
+
+
+class DBaaSInstanceMigrate(DBaaSAPINotification):
+
+ @abc.abstractmethod
+ def event_type(self):
+ return 'migrate'
+
+ @abc.abstractmethod
+ def required_start_traits(self):
+ return ['host']
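
The new notification classes plug into the existing StartNotification flow in the same way as the cluster upgrade wiring earlier in this change. A hedged sketch for the migrate notification, assuming a request context (context, req) as in the controller code; the final call is a placeholder for whatever actually performs the migration:

    from trove.common import notification
    from trove.common.notification import StartNotification

    context.notification = notification.DBaaSInstanceMigrate(context,
                                                             request=req)
    # 'host' is the single required start trait declared above.
    with StartNotification(context, host=target_host):
        do_migrate(target_host)  # placeholder for the real migrate call
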
diff --git a/trove/common/policy.py b/trove/common/policy.py
new file mode 100644
index 00000000..9304f309
--- /dev/null
+++ b/trove/common/policy.py
@@ -0,0 +1,260 @@
+# Copyright 2016 Tesora Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from oslo_config import cfg
+from oslo_policy import policy
+
+from trove.common import exception as trove_exceptions
+
+CONF = cfg.CONF
+_ENFORCER = None
+
+
+base_rules = [
+ policy.RuleDefault(
+ 'admin',
+ 'role:admin or is_admin:True',
+ description='Must be an administrator.'),
+ policy.RuleDefault(
+ 'admin_or_owner',
+ 'rule:admin or tenant:%(tenant)s',
+ description='Must be an administrator or owner of the object.'),
+ policy.RuleDefault(
+ 'default',
+ 'rule:admin_or_owner',
+ description='Must be an administrator or owner of the object.')
+]
+
+instance_rules = [
+ policy.RuleDefault(
+ 'instance:create', 'rule:admin_or_owner'),
+ policy.RuleDefault(
+ 'instance:delete', 'rule:admin_or_owner'),
+ policy.RuleDefault(
+ 'instance:force_delete', 'rule:admin_or_owner'),
+ policy.RuleDefault(
+ 'instance:index', 'rule:admin_or_owner'),
+ policy.RuleDefault(
+ 'instance:show', 'rule:admin_or_owner'),
+ policy.RuleDefault(
+ 'instance:update', 'rule:admin_or_owner'),
+ policy.RuleDefault(
+ 'instance:edit', 'rule:admin_or_owner'),
+ policy.RuleDefault(
+ 'instance:restart', 'rule:admin_or_owner'),
+ policy.RuleDefault(
+ 'instance:resize_volume', 'rule:admin_or_owner'),
+ policy.RuleDefault(
+ 'instance:resize_flavor', 'rule:admin_or_owner'),
+ policy.RuleDefault(
+ 'instance:reset_status', 'rule:admin'),
+ policy.RuleDefault(
+ 'instance:promote_to_replica_source', 'rule:admin_or_owner'),
+ policy.RuleDefault(
+ 'instance:eject_replica_source', 'rule:admin_or_owner'),
+ policy.RuleDefault(
+ 'instance:configuration', 'rule:admin_or_owner'),
+ policy.RuleDefault(
+ 'instance:guest_log_list', 'rule:admin_or_owner'),
+ policy.RuleDefault(
+ 'instance:backups', 'rule:admin_or_owner'),
+ policy.RuleDefault(
+ 'instance:module_list', 'rule:admin_or_owner'),
+ policy.RuleDefault(
+ 'instance:module_apply', 'rule:admin_or_owner'),
+ policy.RuleDefault(
+ 'instance:module_remove', 'rule:admin_or_owner'),
+
+ policy.RuleDefault(
+ 'instance:extension:root:create', 'rule:admin_or_owner'),
+ policy.RuleDefault(
+ 'instance:extension:root:delete', 'rule:admin_or_owner'),
+ policy.RuleDefault(
+ 'instance:extension:root:index', 'rule:admin_or_owner'),
+
+ policy.RuleDefault(
+ 'instance:extension:user:create', 'rule:admin_or_owner'),
+ policy.RuleDefault(
+ 'instance:extension:user:delete', 'rule:admin_or_owner'),
+ policy.RuleDefault(
+ 'instance:extension:user:index', 'rule:admin_or_owner'),
+ policy.RuleDefault(
+ 'instance:extension:user:show', 'rule:admin_or_owner'),
+ policy.RuleDefault(
+ 'instance:extension:user:update', 'rule:admin_or_owner'),
+ policy.RuleDefault(
+ 'instance:extension:user:update_all', 'rule:admin_or_owner'),
+
+ policy.RuleDefault(
+ 'instance:extension:user_access:update', 'rule:admin_or_owner'),
+ policy.RuleDefault(
+ 'instance:extension:user_access:delete', 'rule:admin_or_owner'),
+ policy.RuleDefault(
+ 'instance:extension:user_access:index', 'rule:admin_or_owner'),
+
+ policy.RuleDefault(
+ 'instance:extension:database:create', 'rule:admin_or_owner'),
+ policy.RuleDefault(
+ 'instance:extension:database:delete', 'rule:admin_or_owner'),
+ policy.RuleDefault(
+ 'instance:extension:database:index', 'rule:admin_or_owner'),
+ policy.RuleDefault(
+ 'instance:extension:database:show', 'rule:admin_or_owner'),
+
+ policy.RuleDefault(
+ 'cluster:create', 'rule:admin_or_owner'),
+ policy.RuleDefault(
+ 'cluster:delete', 'rule:admin_or_owner'),
+ policy.RuleDefault(
+ 'cluster:force_delete', 'rule:admin_or_owner'),
+ policy.RuleDefault(
+ 'cluster:index', 'rule:admin_or_owner'),
+ policy.RuleDefault(
+ 'cluster:show', 'rule:admin_or_owner'),
+ policy.RuleDefault(
+ 'cluster:show_instance', 'rule:admin_or_owner'),
+ policy.RuleDefault(
+ 'cluster:action', 'rule:admin_or_owner'),
+ policy.RuleDefault(
+ 'cluster:reset-status', 'rule:admin'),
+
+ policy.RuleDefault(
+ 'cluster:extension:root:create', 'rule:admin_or_owner'),
+ policy.RuleDefault(
+ 'cluster:extension:root:delete', 'rule:admin_or_owner'),
+ policy.RuleDefault(
+ 'cluster:extension:root:index', 'rule:admin_or_owner'),
+
+ policy.RuleDefault(
+ 'backup:create', 'rule:admin_or_owner'),
+ policy.RuleDefault(
+ 'backup:delete', 'rule:admin_or_owner'),
+ policy.RuleDefault(
+ 'backup:index', 'rule:admin_or_owner'),
+ policy.RuleDefault(
+ 'backup:show', 'rule:admin_or_owner'),
+
+ policy.RuleDefault(
+ 'configuration:create', 'rule:admin_or_owner'),
+ policy.RuleDefault(
+ 'configuration:delete', 'rule:admin_or_owner'),
+ policy.RuleDefault(
+ 'configuration:index', 'rule:admin_or_owner'),
+ policy.RuleDefault(
+ 'configuration:show', 'rule:admin_or_owner'),
+ policy.RuleDefault(
+ 'configuration:instances', 'rule:admin_or_owner'),
+ policy.RuleDefault(
+ 'configuration:update', 'rule:admin_or_owner'),
+ policy.RuleDefault(
+ 'configuration:edit', 'rule:admin_or_owner'),
+
+ policy.RuleDefault(
+ 'configuration-parameter:index', 'rule:admin_or_owner'),
+ policy.RuleDefault(
+ 'configuration-parameter:show', 'rule:admin_or_owner'),
+ policy.RuleDefault(
+ 'configuration-parameter:index_by_version', 'rule:admin_or_owner'),
+ policy.RuleDefault(
+ 'configuration-parameter:show_by_version', 'rule:admin_or_owner'),
+
+ policy.RuleDefault(
+ 'datastore:index', ''),
+ policy.RuleDefault(
+ 'datastore:show', ''),
+ policy.RuleDefault(
+ 'datastore:version_show', ''),
+ policy.RuleDefault(
+ 'datastore:version_show_by_uuid', ''),
+ policy.RuleDefault(
+ 'datastore:version_index', ''),
+ policy.RuleDefault(
+ 'datastore:list_associated_flavors', ''),
+ policy.RuleDefault(
+ 'datastore:list_associated_volume_types', ''),
+
+ policy.RuleDefault(
+ 'flavor:index', ''),
+ policy.RuleDefault(
+ 'flavor:show', ''),
+
+ policy.RuleDefault(
+ 'limits:index', 'rule:admin_or_owner'),
+
+ policy.RuleDefault(
+ 'module:create', 'rule:admin_or_owner'),
+ policy.RuleDefault(
+ 'module:delete', 'rule:admin_or_owner'),
+ policy.RuleDefault(
+ 'module:index', 'rule:admin_or_owner'),
+ policy.RuleDefault(
+ 'module:show', 'rule:admin_or_owner'),
+ policy.RuleDefault(
+ 'module:instances', 'rule:admin_or_owner'),
+ policy.RuleDefault(
+ 'module:update', 'rule:admin_or_owner'),
+]
+
+
+def get_enforcer():
+ global _ENFORCER
+ if not _ENFORCER:
+ _ENFORCER = policy.Enforcer(CONF)
+ _ENFORCER.register_defaults(base_rules)
+ _ENFORCER.register_defaults(instance_rules)
+ return _ENFORCER
+
+
+def authorize_on_tenant(context, rule):
+ return __authorize(context, rule, target=None)
+
+
+def authorize_on_target(context, rule, target):
+ if target:
+ return __authorize(context, rule, target=target)
+ raise trove_exceptions.TroveError(
+ "BUG: Target must not evaluate to False.")
+
+
+def __authorize(context, rule, target=None):
+ """Checks authorization of a rule against the target in this context.
+
+ * This function is not to be called directly.
+ Calling the function with a target that evaluates to None may
+ result in policy bypass.
+ Use 'authorize_on_*' calls instead.
+
+ :param context: Trove context.
+ :type context: Context.
+
+ :param rule: The rule to evaluate.
+ e.g. ``instance:create``,
+ ``instance:resize_volume``
+
+ :param target: As much information about the object being operated on
+ as possible.
+ For object creation (target=None) this should be a
+ dictionary representing the location of the object
+ e.g. ``{'project_id': context.project_id}``
+ :type target: dict
+
+ :raises: :class:`PolicyNotAuthorized` if verification fails.
+
+ """
+ target = target or {'tenant': context.tenant}
+ return get_enforcer().authorize(
+ rule, target, context.to_dict(), do_raise=True,
+ exc=trove_exceptions.PolicyNotAuthorized, action=rule)
diff --git a/trove/common/remote.py b/trove/common/remote.py
index 70867bba..76b9335a 100644
--- a/trove/common/remote.py
+++ b/trove/common/remote.py
@@ -87,7 +87,7 @@ def guest_client(context, id, manager=None):
return clazz(context, id)
-def nova_client(context):
+def nova_client(context, region_name=None):
if CONF.nova_compute_url:
url = '%(nova_url)s%(tenant)s' % {
'nova_url': normalize_url(CONF.nova_compute_url),
@@ -95,7 +95,7 @@ def nova_client(context):
else:
url = get_endpoint(context.service_catalog,
service_type=CONF.nova_compute_service_type,
- endpoint_region=CONF.os_region_name,
+ endpoint_region=region_name or CONF.os_region_name,
endpoint_type=CONF.nova_compute_endpoint_type)
client = Client(CONF.nova_client_version, context.user, context.auth_token,
@@ -116,7 +116,7 @@ def create_admin_nova_client(context):
return client
-def cinder_client(context):
+def cinder_client(context, region_name=None):
if CONF.cinder_url:
url = '%(cinder_url)s%(tenant)s' % {
'cinder_url': normalize_url(CONF.cinder_url),
@@ -124,7 +124,7 @@ def cinder_client(context):
else:
url = get_endpoint(context.service_catalog,
service_type=CONF.cinder_service_type,
- endpoint_region=CONF.os_region_name,
+ endpoint_region=region_name or CONF.os_region_name,
endpoint_type=CONF.cinder_endpoint_type)
client = CinderClient.Client(context.user, context.auth_token,
@@ -135,7 +135,7 @@ def cinder_client(context):
return client
-def heat_client(context):
+def heat_client(context, region_name=None):
if CONF.heat_url:
url = '%(heat_url)s%(tenant)s' % {
'heat_url': normalize_url(CONF.heat_url),
@@ -143,7 +143,7 @@ def heat_client(context):
else:
url = get_endpoint(context.service_catalog,
service_type=CONF.heat_service_type,
- endpoint_region=CONF.os_region_name,
+ endpoint_region=region_name or CONF.os_region_name,
endpoint_type=CONF.heat_endpoint_type)
client = HeatClient.Client(token=context.auth_token,
@@ -152,7 +152,7 @@ def heat_client(context):
return client
-def swift_client(context):
+def swift_client(context, region_name=None):
if CONF.swift_url:
# swift_url has a different format so doesn't need to be normalized
url = '%(swift_url)s%(tenant)s' % {'swift_url': CONF.swift_url,
@@ -160,7 +160,7 @@ def swift_client(context):
else:
url = get_endpoint(context.service_catalog,
service_type=CONF.swift_service_type,
- endpoint_region=CONF.os_region_name,
+ endpoint_region=region_name or CONF.os_region_name,
endpoint_type=CONF.swift_endpoint_type)
client = Connection(preauthurl=url,
@@ -170,7 +170,7 @@ def swift_client(context):
return client
-def neutron_client(context):
+def neutron_client(context, region_name=None):
from neutronclient.v2_0 import client as NeutronClient
if CONF.neutron_url:
# neutron endpoint url / publicURL does not include tenant segment
@@ -178,7 +178,7 @@ def neutron_client(context):
else:
url = get_endpoint(context.service_catalog,
service_type=CONF.neutron_service_type,
- endpoint_region=CONF.os_region_name,
+ endpoint_region=region_name or CONF.os_region_name,
endpoint_type=CONF.neutron_endpoint_type)
client = NeutronClient.Client(token=context.auth_token,
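The new optional region_name argument is backward compatible: existing callers still resolve the endpoint region from CONF.os_region_name, while multi-region callers can pass an explicit region. A minimal usage sketch, assuming a valid request context; the region string is made up.

from trove.common import remote


def example_clients(context):
    # Unchanged call sites keep the old behaviour: the endpoint region
    # falls back to CONF.os_region_name.
    nova = remote.create_nova_client(context)

    # New call sites can pin a region, e.g. the region_name stored on an
    # instance record ('RegionTwo' is only a placeholder here).
    cinder = remote.create_cinder_client(context, region_name='RegionTwo')
    return nova, cinder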
diff --git a/trove/common/single_tenant_remote.py b/trove/common/single_tenant_remote.py
index ede29306..87f93756 100644
--- a/trove/common/single_tenant_remote.py
+++ b/trove/common/single_tenant_remote.py
@@ -52,7 +52,7 @@ remote_neutron_client = \
PROXY_AUTH_URL = CONF.trove_auth_url
-def nova_client_trove_admin(context=None):
+def nova_client_trove_admin(context, region_name=None, compute_url=None):
"""
Returns a nova client object with the trove admin credentials
:param context: original context from user request
@@ -60,16 +60,19 @@ def nova_client_trove_admin(context=None):
:return novaclient: novaclient with trove admin credentials
:rtype: novaclient.v1_1.client.Client
"""
+
+ compute_url = compute_url or CONF.nova_compute_url
+
client = NovaClient(CONF.nova_proxy_admin_user,
CONF.nova_proxy_admin_pass,
CONF.nova_proxy_admin_tenant_name,
auth_url=PROXY_AUTH_URL,
service_type=CONF.nova_compute_service_type,
- region_name=CONF.os_region_name)
+ region_name=region_name or CONF.os_region_name)
- if CONF.nova_compute_url and CONF.nova_proxy_admin_tenant_id:
+ if compute_url and CONF.nova_proxy_admin_tenant_id:
client.client.management_url = "%s/%s/" % (
- normalize_url(CONF.nova_compute_url),
+ normalize_url(compute_url),
CONF.nova_proxy_admin_tenant_id)
return client
diff --git a/trove/common/strategies/cluster/experimental/cassandra/api.py b/trove/common/strategies/cluster/experimental/cassandra/api.py
index 3a7cdfb8..41c3f229 100644
--- a/trove/common/strategies/cluster/experimental/cassandra/api.py
+++ b/trove/common/strategies/cluster/experimental/cassandra/api.py
@@ -155,8 +155,9 @@ class CassandraCluster(models.Cluster):
availability_zone=instance_az,
configuration_id=None,
cluster_config=member_config,
+ modules=instance.get('modules'),
locality=locality,
- modules=instance.get('modules'))
+ region_name=instance.get('region_name'))
new_instances.append(new_instance)
diff --git a/trove/common/strategies/cluster/experimental/cassandra/guestagent.py b/trove/common/strategies/cluster/experimental/cassandra/guestagent.py
index 6bdd8bc2..c8b89482 100644
--- a/trove/common/strategies/cluster/experimental/cassandra/guestagent.py
+++ b/trove/common/strategies/cluster/experimental/cassandra/guestagent.py
@@ -30,65 +30,100 @@ class CassandraGuestAgentStrategy(base.BaseGuestAgentStrategy):
class CassandraGuestAgentAPI(guest_api.API):
+ """Cluster Specific Datastore Guest API
+
+ **** VERSION CONTROLLED API ****
+
+ The methods in this class are subject to version control as
+ coordinated by guestagent/api.py. Whenever a change is made to
+ any API method in this class, add a version number and comment
+ to the top of guestagent/api.py and use the version number as
+ appropriate in this file.
+ """
def get_data_center(self):
LOG.debug("Retrieving the data center for node: %s" % self.id)
+ version = guest_api.API.API_BASE_VERSION
+
return self._call("get_data_center", guest_api.AGENT_LOW_TIMEOUT,
- self.version_cap)
+ version=version)
def get_rack(self):
LOG.debug("Retrieving the rack for node: %s" % self.id)
+ version = guest_api.API.API_BASE_VERSION
+
return self._call("get_rack", guest_api.AGENT_LOW_TIMEOUT,
- self.version_cap)
+ version=version)
def set_seeds(self, seeds):
LOG.debug("Configuring the gossip seeds for node: %s" % self.id)
+ version = guest_api.API.API_BASE_VERSION
+
return self._call("set_seeds", guest_api.AGENT_LOW_TIMEOUT,
- self.version_cap, seeds=seeds)
+ version=version, seeds=seeds)
def get_seeds(self):
LOG.debug("Retrieving the gossip seeds for node: %s" % self.id)
+ version = guest_api.API.API_BASE_VERSION
+
return self._call("get_seeds", guest_api.AGENT_LOW_TIMEOUT,
- self.version_cap)
+ version=version)
def set_auto_bootstrap(self, enabled):
LOG.debug("Setting the auto-bootstrap to '%s' for node: %s"
% (enabled, self.id))
+ version = guest_api.API.API_BASE_VERSION
+
return self._call("set_auto_bootstrap", guest_api.AGENT_LOW_TIMEOUT,
- self.version_cap, enabled=enabled)
+ version=version, enabled=enabled)
def cluster_complete(self):
LOG.debug("Sending a setup completion notification for node: %s"
% self.id)
+ version = guest_api.API.API_BASE_VERSION
+
return self._call("cluster_complete", guest_api.AGENT_HIGH_TIMEOUT,
- self.version_cap)
+ version=version)
def node_cleanup_begin(self):
LOG.debug("Signaling the node to prepare for cleanup: %s" % self.id)
+ version = guest_api.API.API_BASE_VERSION
+
return self._call("node_cleanup_begin", guest_api.AGENT_LOW_TIMEOUT,
- self.version_cap)
+ version=version)
def node_cleanup(self):
LOG.debug("Running cleanup on node: %s" % self.id)
- return self._cast('node_cleanup', self.version_cap)
+ version = guest_api.API.API_BASE_VERSION
+
+ return self._cast('node_cleanup', version=version)
def node_decommission(self):
LOG.debug("Decommission node: %s" % self.id)
- return self._cast("node_decommission", self.version_cap)
+ version = guest_api.API.API_BASE_VERSION
+
+ return self._cast("node_decommission", version=version)
def cluster_secure(self, password):
LOG.debug("Securing the cluster via node: %s" % self.id)
+ version = guest_api.API.API_BASE_VERSION
+
return self._call(
"cluster_secure", guest_api.AGENT_HIGH_TIMEOUT,
- self.version_cap, password=password)
+ version=version, password=password)
def get_admin_credentials(self):
LOG.debug("Retrieving the admin credentials from node: %s" % self.id)
+ version = guest_api.API.API_BASE_VERSION
+
return self._call("get_admin_credentials", guest_api.AGENT_LOW_TIMEOUT,
- self.version_cap)
+ version=version)
def store_admin_credentials(self, admin_credentials):
LOG.debug("Storing the admin credentials on node: %s" % self.id)
+ version = guest_api.API.API_BASE_VERSION
+
return self._call("store_admin_credentials",
- guest_api.AGENT_LOW_TIMEOUT, self.version_cap,
+ guest_api.AGENT_LOW_TIMEOUT,
+ version=version,
admin_credentials=admin_credentials)
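To make the version-control note in these cluster guest APIs concrete: a method added after a future minor bump would pass the newer version instead of API_BASE_VERSION, along the lines of the purely hypothetical method body below (meant to live inside one of these API classes), while existing methods keep the base version.

    def get_cluster_name(self):
        # Hypothetical method introduced after a bump to 1.1, recorded in
        # the version history at the top of guestagent/api.py.
        LOG.debug("Retrieving the cluster name for node: %s" % self.id)
        version = '1.1'  # newer than API_BASE_VERSION: needs a 1.1+ guest

        return self._call("get_cluster_name", guest_api.AGENT_LOW_TIMEOUT,
                          version=version)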
diff --git a/trove/common/strategies/cluster/experimental/galera_common/api.py b/trove/common/strategies/cluster/experimental/galera_common/api.py
index 1c0f6b71..edaf9a38 100644
--- a/trove/common/strategies/cluster/experimental/galera_common/api.py
+++ b/trove/common/strategies/cluster/experimental/galera_common/api.py
@@ -120,8 +120,9 @@ class GaleraCommonCluster(cluster_models.Cluster):
nics=instance.get('nics', None),
configuration_id=None,
cluster_config=member_config,
+ modules=instance.get('modules'),
locality=locality,
- modules=instance.get('modules')
+ region_name=instance.get('region_name')
)
for instance in instances]
@@ -146,14 +147,6 @@ class GaleraCommonCluster(cluster_models.Cluster):
return cls(context, db_info, datastore, datastore_version)
- def _get_cluster_network_interfaces(self):
- nova_client = remote.create_nova_client(self.context)
- nova_instance_id = self.db_instances[0].compute_instance_id
- interfaces = nova_client.virtual_interfaces.list(nova_instance_id)
- ret = [{"net-id": getattr(interface, 'net_id')}
- for interface in interfaces]
- return ret
-
def grow(self, instances):
LOG.debug("Growing cluster %s." % self.id)
@@ -166,11 +159,6 @@ class GaleraCommonCluster(cluster_models.Cluster):
db_info.update(task_status=ClusterTasks.GROWING_CLUSTER)
try:
- # Get the network of the existing cluster instances.
- interface_ids = self._get_cluster_network_interfaces()
- for instance in instances:
- instance["nics"] = interface_ids
-
locality = srv_grp.ServerGroup.convert_to_hint(self.server_group)
new_instances = self._create_instances(
context, db_info, datastore, datastore_version, instances,
@@ -180,6 +168,7 @@ class GaleraCommonCluster(cluster_models.Cluster):
db_info.id, [instance.id for instance in new_instances])
except Exception:
db_info.update(task_status=ClusterTasks.NONE)
+ raise
return self.__class__(context, db_info,
datastore, datastore_version)
@@ -203,6 +192,7 @@ class GaleraCommonCluster(cluster_models.Cluster):
for instance in removal_instances])
except Exception:
self.db_info.update(task_status=ClusterTasks.NONE)
+ raise
return self.__class__(self.context, self.db_info,
self.ds, self.ds_version)
diff --git a/trove/common/strategies/cluster/experimental/galera_common/guestagent.py b/trove/common/strategies/cluster/experimental/galera_common/guestagent.py
index 7510e29a..1161c38d 100644
--- a/trove/common/strategies/cluster/experimental/galera_common/guestagent.py
+++ b/trove/common/strategies/cluster/experimental/galera_common/guestagent.py
@@ -31,39 +31,59 @@ class GaleraCommonGuestAgentStrategy(cluster_base.BaseGuestAgentStrategy):
class GaleraCommonGuestAgentAPI(guest_api.API):
+ """Cluster Specific Datastore Guest API
+
+ **** VERSION CONTROLLED API ****
+
+ The methods in this class are subject to version control as
+ coordinated by guestagent/api.py. Whenever a change is made to
+ any API method in this class, add a version number and comment
+ to the top of guestagent/api.py and use the version number as
+ appropriate in this file.
+ """
def install_cluster(self, replication_user, cluster_configuration,
bootstrap):
"""Install the cluster."""
LOG.debug("Installing Galera cluster.")
+ version = guest_api.API.API_BASE_VERSION
+
self._call("install_cluster", CONF.cluster_usage_timeout,
- self.version_cap,
+ version=version,
replication_user=replication_user,
cluster_configuration=cluster_configuration,
bootstrap=bootstrap)
def reset_admin_password(self, admin_password):
"""Store this password on the instance as the admin password."""
+ version = guest_api.API.API_BASE_VERSION
+
self._call("reset_admin_password", CONF.cluster_usage_timeout,
- self.version_cap,
+ version=version,
admin_password=admin_password)
def cluster_complete(self):
"""Set the status that the cluster is build is complete."""
LOG.debug("Notifying cluster install completion.")
+ version = guest_api.API.API_BASE_VERSION
+
return self._call("cluster_complete", guest_api.AGENT_HIGH_TIMEOUT,
- self.version_cap)
+ version=version)
def get_cluster_context(self):
"""Get the context of the cluster."""
LOG.debug("Getting the cluster context.")
+ version = guest_api.API.API_BASE_VERSION
+
return self._call("get_cluster_context", guest_api.AGENT_HIGH_TIMEOUT,
- self.version_cap)
+ version=version)
def write_cluster_configuration_overrides(self, cluster_configuration):
"""Write an updated the cluster configuration."""
LOG.debug("Writing an updated the cluster configuration.")
+ version = guest_api.API.API_BASE_VERSION
+
self._call("write_cluster_configuration_overrides",
guest_api.AGENT_HIGH_TIMEOUT,
- self.version_cap,
+ version=version,
cluster_configuration=cluster_configuration)
diff --git a/trove/common/strategies/cluster/experimental/mongodb/api.py b/trove/common/strategies/cluster/experimental/mongodb/api.py
index a1ffba47..9c4ea4f7 100644
--- a/trove/common/strategies/cluster/experimental/mongodb/api.py
+++ b/trove/common/strategies/cluster/experimental/mongodb/api.py
@@ -93,6 +93,9 @@ class MongoDbCluster(models.Cluster):
azs = [instance.get('availability_zone', None)
for instance in instances]
+ regions = [instance.get('region_name', None)
+ for instance in instances]
+
db_info = models.DBCluster.create(
name=name, tenant_id=context.tenant,
datastore_version_id=datastore_version.id,
@@ -129,8 +132,9 @@ class MongoDbCluster(models.Cluster):
nics=nics[i],
configuration_id=None,
cluster_config=member_config,
+ modules=instances[i].get('modules'),
locality=locality,
- modules=instances[i].get('modules'))
+ region_name=regions[i])
for i in range(1, num_configsvr + 1):
instance_name = "%s-%s-%s" % (name, "configsvr", str(i))
@@ -144,7 +148,8 @@ class MongoDbCluster(models.Cluster):
nics=None,
configuration_id=None,
cluster_config=configsvr_config,
- locality=locality)
+ locality=locality,
+ region_name=regions[i])
for i in range(1, num_mongos + 1):
instance_name = "%s-%s-%s" % (name, "mongos", str(i))
@@ -158,7 +163,8 @@ class MongoDbCluster(models.Cluster):
nics=None,
configuration_id=None,
cluster_config=mongos_config,
- locality=locality)
+ locality=locality,
+ region_name=regions[i])
task_api.load(context, datastore_version.manager).create_cluster(
db_info.id)
@@ -193,6 +199,8 @@ class MongoDbCluster(models.Cluster):
'query_router'])
name = _check_option('name')
related_to = _check_option('related_to')
+ nics = _check_option('nics')
+ availability_zone = _check_option('availability_zone')
unused_keys = list(set(item.keys()).difference(set(used_keys)))
if unused_keys:
@@ -208,6 +216,10 @@ class MongoDbCluster(models.Cluster):
instance['name'] = name
if related_to:
instance['related_to'] = related_to
+ if nics:
+ instance['nics'] = nics
+ if availability_zone:
+ instance['availability_zone'] = availability_zone
return instance
def action(self, context, req, action, param):
diff --git a/trove/common/strategies/cluster/experimental/mongodb/guestagent.py b/trove/common/strategies/cluster/experimental/mongodb/guestagent.py
index 1566f34f..08e1a8a9 100644
--- a/trove/common/strategies/cluster/experimental/mongodb/guestagent.py
+++ b/trove/common/strategies/cluster/experimental/mongodb/guestagent.py
@@ -33,6 +33,16 @@ class MongoDbGuestAgentStrategy(base.BaseGuestAgentStrategy):
class MongoDbGuestAgentAPI(guest_api.API):
+ """Cluster Specific Datastore Guest API
+
+ **** VERSION CONTROLLED API ****
+
+ The methods in this class are subject to version control as
+ coordinated by guestagent/api.py. Whenever a change is made to
+ any API method in this class, add a version number and comment
+ to the top of guestagent/api.py and use the version number as
+ appropriate in this file.
+ """
def add_shard(self, replica_set_name, replica_set_member):
LOG.debug("Adding shard with replSet %(replica_set_name)s and member "
@@ -40,66 +50,89 @@ class MongoDbGuestAgentAPI(guest_api.API):
"%(id)s" % {'replica_set_name': replica_set_name,
'replica_set_member': replica_set_member,
'id': self.id})
+ version = guest_api.API.API_BASE_VERSION
+
return self._call("add_shard", guest_api.AGENT_HIGH_TIMEOUT,
- self.version_cap,
+ version=version,
replica_set_name=replica_set_name,
replica_set_member=replica_set_member)
def add_members(self, members):
LOG.debug("Adding members %(members)s on instance %(id)s" % {
'members': members, 'id': self.id})
+ version = guest_api.API.API_BASE_VERSION
+
return self._call("add_members", ADD_MEMBERS_TIMEOUT,
- self.version_cap, members=members)
+ version=version, members=members)
def add_config_servers(self, config_servers):
LOG.debug("Adding config servers %(config_servers)s for instance "
"%(id)s" % {'config_servers': config_servers,
'id': self.id})
+ version = guest_api.API.API_BASE_VERSION
+
return self._call("add_config_servers", guest_api.AGENT_HIGH_TIMEOUT,
- self.version_cap, config_servers=config_servers)
+ version=version,
+ config_servers=config_servers)
def cluster_complete(self):
LOG.debug("Notify regarding cluster install completion")
+ version = guest_api.API.API_BASE_VERSION
+
return self._call("cluster_complete", guest_api.AGENT_HIGH_TIMEOUT,
- self.version_cap)
+ version=version)
def get_key(self):
LOG.debug("Requesting cluster key from guest")
+ version = guest_api.API.API_BASE_VERSION
+
return self._call("get_key", guest_api.AGENT_LOW_TIMEOUT,
- self.version_cap)
+ version=version)
def prep_primary(self):
LOG.debug("Preparing member to be primary member.")
+ version = guest_api.API.API_BASE_VERSION
+
return self._call("prep_primary", guest_api.AGENT_HIGH_TIMEOUT,
- self.version_cap)
+ version=version)
def create_admin_user(self, password):
LOG.debug("Creating admin user")
+ version = guest_api.API.API_BASE_VERSION
+
return self._call("create_admin_user", guest_api.AGENT_HIGH_TIMEOUT,
- self.version_cap, password=password)
+ version=version, password=password)
def store_admin_password(self, password):
LOG.debug("Storing admin password")
+ version = guest_api.API.API_BASE_VERSION
+
return self._call("store_admin_password",
guest_api.AGENT_LOW_TIMEOUT,
- self.version_cap,
+ version=version,
password=password)
def get_replica_set_name(self):
LOG.debug("Querying member for its replica set name")
+ version = guest_api.API.API_BASE_VERSION
+
return self._call("get_replica_set_name",
guest_api.AGENT_HIGH_TIMEOUT,
- self.version_cap)
+ version=version)
def get_admin_password(self):
LOG.debug("Querying instance for its admin password")
+ version = guest_api.API.API_BASE_VERSION
+
return self._call("get_admin_password",
guest_api.AGENT_LOW_TIMEOUT,
- self.version_cap)
+ version=version)
def is_shard_active(self, replica_set_name):
LOG.debug("Checking if replica set %s is active" % replica_set_name)
+ version = guest_api.API.API_BASE_VERSION
+
return self._call("is_shard_active",
guest_api.AGENT_HIGH_TIMEOUT,
- self.version_cap,
+ version=version,
replica_set_name=replica_set_name)
diff --git a/trove/common/strategies/cluster/experimental/mongodb/taskmanager.py b/trove/common/strategies/cluster/experimental/mongodb/taskmanager.py
index 66273d7c..c4a3c1f9 100644
--- a/trove/common/strategies/cluster/experimental/mongodb/taskmanager.py
+++ b/trove/common/strategies/cluster/experimental/mongodb/taskmanager.py
@@ -379,7 +379,8 @@ class MongoDbTaskManagerAPI(task_api.API):
def mongodb_add_shard_cluster(self, cluster_id, shard_id,
replica_set_name):
LOG.debug("Making async call to add shard cluster %s " % cluster_id)
- cctxt = self.client.prepare(version=self.version_cap)
+ version = task_api.API.API_BASE_VERSION
+ cctxt = self.client.prepare(version=version)
cctxt.cast(self.context,
"add_shard_cluster",
cluster_id=cluster_id,
diff --git a/trove/common/strategies/cluster/experimental/redis/api.py b/trove/common/strategies/cluster/experimental/redis/api.py
index 3799f932..28186c3f 100644
--- a/trove/common/strategies/cluster/experimental/redis/api.py
+++ b/trove/common/strategies/cluster/experimental/redis/api.py
@@ -88,8 +88,10 @@ class RedisCluster(models.Cluster):
cluster_config={
"id": db_info.id,
"instance_type": "member"},
+ modules=instance.get('modules'),
locality=locality,
- modules=instance.get('modules')
+ region_name=instance.get(
+ 'region_name')
)
for instance in instances]
diff --git a/trove/common/strategies/cluster/experimental/redis/guestagent.py b/trove/common/strategies/cluster/experimental/redis/guestagent.py
index d8ec3f3b..1f8f2c4d 100644
--- a/trove/common/strategies/cluster/experimental/redis/guestagent.py
+++ b/trove/common/strategies/cluster/experimental/redis/guestagent.py
@@ -28,34 +28,59 @@ class RedisGuestAgentStrategy(base.BaseGuestAgentStrategy):
class RedisGuestAgentAPI(guest_api.API):
+ """Cluster Specific Datastore Guest API
+
+ **** VERSION CONTROLLED API ****
+
+ The methods in this class are subject to version control as
+ coordinated by guestagent/api.py. Whenever a change is made to
+ any API method in this class, add a version number and comment
+ to the top of guestagent/api.py and use the version number as
+ appropriate in this file.
+ """
def get_node_ip(self):
LOG.debug("Retrieve ip info from node.")
+ version = guest_api.API.API_BASE_VERSION
+
return self._call("get_node_ip",
- guest_api.AGENT_HIGH_TIMEOUT, self.version_cap)
+ guest_api.AGENT_HIGH_TIMEOUT,
+ version=version)
def get_node_id_for_removal(self):
LOG.debug("Validating cluster node removal.")
+ version = guest_api.API.API_BASE_VERSION
+
return self._call("get_node_id_for_removal",
- guest_api.AGENT_HIGH_TIMEOUT, self.version_cap)
+ guest_api.AGENT_HIGH_TIMEOUT,
+ version=version)
def remove_nodes(self, node_ids):
LOG.debug("Removing nodes from cluster.")
+ version = guest_api.API.API_BASE_VERSION
+
return self._call("remove_nodes", guest_api.AGENT_HIGH_TIMEOUT,
- self.version_cap, node_ids=node_ids)
+ version=version, node_ids=node_ids)
def cluster_meet(self, ip, port):
LOG.debug("Joining node to cluster.")
+ version = guest_api.API.API_BASE_VERSION
+
return self._call("cluster_meet", guest_api.AGENT_HIGH_TIMEOUT,
- self.version_cap, ip=ip, port=port)
+ version=version, ip=ip, port=port)
def cluster_addslots(self, first_slot, last_slot):
LOG.debug("Adding slots %s-%s to cluster.", first_slot, last_slot)
+ version = guest_api.API.API_BASE_VERSION
+
return self._call("cluster_addslots",
- guest_api.AGENT_HIGH_TIMEOUT, self.version_cap,
+ guest_api.AGENT_HIGH_TIMEOUT,
+ version=version,
first_slot=first_slot, last_slot=last_slot)
def cluster_complete(self):
LOG.debug("Notifying cluster install completion.")
+ version = guest_api.API.API_BASE_VERSION
+
return self._call("cluster_complete", guest_api.AGENT_HIGH_TIMEOUT,
- self.version_cap)
+ version=version)
diff --git a/trove/common/strategies/cluster/experimental/vertica/api.py b/trove/common/strategies/cluster/experimental/vertica/api.py
index 67cce94f..a5f7de23 100644
--- a/trove/common/strategies/cluster/experimental/vertica/api.py
+++ b/trove/common/strategies/cluster/experimental/vertica/api.py
@@ -103,6 +103,9 @@ class VerticaCluster(models.Cluster):
azs = [instance.get('availability_zone', None)
for instance in instances]
+ regions = [instance.get('region_name', None)
+ for instance in instances]
+
# Creating member instances
minstances = []
for i in range(0, num_instances):
@@ -119,7 +122,8 @@ class VerticaCluster(models.Cluster):
datastore_version, volume_size, None,
nics=nics[i], availability_zone=azs[i],
configuration_id=None, cluster_config=member_config,
- locality=locality, modules=instances[i].get('modules'))
+ modules=instances[i].get('modules'), locality=locality,
+ region_name=regions[i])
)
return minstances
diff --git a/trove/common/strategies/cluster/experimental/vertica/guestagent.py b/trove/common/strategies/cluster/experimental/vertica/guestagent.py
index 33c75ff5..ef6354b4 100644
--- a/trove/common/strategies/cluster/experimental/vertica/guestagent.py
+++ b/trove/common/strategies/cluster/experimental/vertica/guestagent.py
@@ -30,39 +30,64 @@ class VerticaGuestAgentStrategy(base.BaseGuestAgentStrategy):
class VerticaGuestAgentAPI(guest_api.API):
+ """Cluster Specific Datastore Guest API
+
+ **** VERSION CONTROLLED API ****
+
+ The methods in this class are subject to version control as
+ coordinated by guestagent/api.py. Whenever a change is made to
+ any API method in this class, add a version number and comment
+ to the top of guestagent/api.py and use the version number as
+ appropriate in this file
+ """
def get_public_keys(self, user):
LOG.debug("Getting public keys for user: %s." % user)
+ version = guest_api.API.API_BASE_VERSION
+
return self._call("get_public_keys", guest_api.AGENT_HIGH_TIMEOUT,
- self.version_cap, user=user)
+ version=version, user=user)
def authorize_public_keys(self, user, public_keys):
LOG.debug("Authorizing public keys for user: %s." % user)
+ version = guest_api.API.API_BASE_VERSION
+
return self._call("authorize_public_keys",
- guest_api.AGENT_HIGH_TIMEOUT, self.version_cap,
+ guest_api.AGENT_HIGH_TIMEOUT,
+ version=version,
user=user, public_keys=public_keys)
def install_cluster(self, members):
LOG.debug("Installing Vertica cluster on members: %s." % members)
+ version = guest_api.API.API_BASE_VERSION
+
return self._call("install_cluster", CONF.cluster_usage_timeout,
- self.version_cap, members=members)
+ version=version, members=members)
def grow_cluster(self, members):
LOG.debug("Growing Vertica cluster with members: %s." % members)
+ version = guest_api.API.API_BASE_VERSION
+
return self._call("grow_cluster", CONF.cluster_usage_timeout,
- self.version_cap, members=members)
+ version=version, members=members)
def shrink_cluster(self, members):
LOG.debug("Shrinking Vertica cluster with members: %s." % members)
+ version = guest_api.API.API_BASE_VERSION
+
return self._call("shrink_cluster", CONF.cluster_usage_timeout,
- self.version_cap, members=members)
+ version=version, members=members)
def mark_design_ksafe(self, k):
LOG.debug("Setting vertica k-safety level to : %s." % k)
+ version = guest_api.API.API_BASE_VERSION
+
return self._call("mark_design_ksafe", CONF.cluster_usage_timeout,
- self.version_cap, k=k)
+ version=version, k=k)
def cluster_complete(self):
LOG.debug("Notifying cluster install completion.")
+ version = guest_api.API.API_BASE_VERSION
+
return self._call("cluster_complete", guest_api.AGENT_HIGH_TIMEOUT,
- self.version_cap)
+ version=version)
diff --git a/trove/common/trove_remote.py b/trove/common/trove_remote.py
new file mode 100644
index 00000000..01b4141e
--- /dev/null
+++ b/trove/common/trove_remote.py
@@ -0,0 +1,56 @@
+# Copyright 2016 Tesora Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_utils.importutils import import_class
+
+from trove.common import cfg
+from trove.common.remote import get_endpoint
+from trove.common.remote import normalize_url
+
+from troveclient.v1 import client as TroveClient
+
+CONF = cfg.CONF
+
+PROXY_AUTH_URL = CONF.trove_auth_url
+
+
+"""
+NOTE(mwj, Apr 2016):
+This module is separated from remote.py because remote.py is used
+on the Trove guest, but the trove client is not installed on the guest,
+so the imports here would fail.
+"""
+
+
+def trove_client(context, region_name=None):
+ if CONF.trove_url:
+ url = '%(url)s%(tenant)s' % {
+ 'url': normalize_url(CONF.trove_url),
+ 'tenant': context.tenant}
+ else:
+ url = get_endpoint(context.service_catalog,
+ service_type=CONF.trove_service_type,
+ endpoint_region=region_name or CONF.os_region_name,
+ endpoint_type=CONF.trove_endpoint_type)
+
+ client = TroveClient.Client(context.user, context.auth_token,
+ project_id=context.tenant,
+ auth_url=PROXY_AUTH_URL)
+ client.client.auth_token = context.auth_token
+ client.client.management_url = url
+ return client
+
+
+create_trove_client = import_class(CONF.remote_trove_client)
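A short usage sketch for the new factory, assuming the default remote_trove_client setting points at trove_client() above; the region string is only an example.

from trove.common import trove_remote


def example_trove_client(context):
    # Endpoint comes from CONF.trove_url when set, otherwise from the
    # service catalog for the default region.
    local = trove_remote.create_trove_client(context)

    # Passing a region overrides CONF.os_region_name during endpoint
    # lookup ('RegionTwo' is a placeholder).
    remote_region = trove_remote.create_trove_client(context,
                                                     region_name='RegionTwo')
    return local, remote_region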
diff --git a/trove/common/wsgi.py b/trove/common/wsgi.py
index bfec6c00..c087f6c8 100644
--- a/trove/common/wsgi.py
+++ b/trove/common/wsgi.py
@@ -322,6 +322,8 @@ class Controller(object):
exception.BackupTooLarge,
exception.ModuleAccessForbidden,
exception.ModuleAppliedToInstance,
+ exception.PolicyNotAuthorized,
+ exception.LogAccessForbidden,
],
webob.exc.HTTPBadRequest: [
exception.InvalidModelError,
@@ -548,7 +550,8 @@ class ContextMiddleware(base_wsgi.Middleware):
is_admin=is_admin,
limit=limits.get('limit'),
marker=limits.get('marker'),
- service_catalog=service_catalog)
+ service_catalog=service_catalog,
+ roles=roles)
request.environ[CONTEXT_KEY] = context
@classmethod
diff --git a/trove/conductor/api.py b/trove/conductor/api.py
index d83aef5c..757416b2 100644
--- a/trove/conductor/api.py
+++ b/trove/conductor/api.py
@@ -16,28 +16,50 @@ from oslo_log import log as logging
import oslo_messaging as messaging
from trove.common import cfg
-from trove.common.rpc import version as rpc_version
from trove.common.serializable_notification import SerializableNotification
from trove import rpc
-
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class API(object):
- """API for interacting with trove conductor."""
+ """API for interacting with trove conductor.
+
+ API version history:
+ * 1.0 - Initial version.
+
+ When updating this API, also update API_LATEST_VERSION
+ """
+
+ # API_LATEST_VERSION should bump the minor number each time
+ # a method signature is added or changed
+ API_LATEST_VERSION = '1.0'
+
+ # API_BASE_VERSION should only change on major version upgrade
+ API_BASE_VERSION = '1.0'
+
+ VERSION_ALIASES = {
+ 'icehouse': '1.0',
+ 'juno': '1.0',
+ 'kilo': '1.0',
+ 'liberty': '1.0',
+ 'mitaka': '1.0',
+ 'newton': '1.0',
+
+ 'latest': API_LATEST_VERSION
+ }
def __init__(self, context):
self.context = context
super(API, self).__init__()
+ version_cap = self.VERSION_ALIASES.get(
+ CONF.upgrade_levels.conductor, CONF.upgrade_levels.conductor)
target = messaging.Target(topic=CONF.conductor_queue,
- version=rpc_version.RPC_API_VERSION)
+ version=version_cap)
- self.version_cap = rpc_version.VERSION_ALIASES.get(
- CONF.upgrade_levels.conductor)
- self.client = self.get_client(target, self.version_cap)
+ self.client = self.get_client(target, version_cap)
def get_client(self, target, version_cap, serializer=None):
return rpc.get_client(target,
@@ -47,8 +69,9 @@ class API(object):
def heartbeat(self, instance_id, payload, sent=None):
LOG.debug("Making async call to cast heartbeat for instance: %s"
% instance_id)
+ version = self.API_BASE_VERSION
- cctxt = self.client.prepare(version=self.version_cap)
+ cctxt = self.client.prepare(version=version)
cctxt.cast(self.context, "heartbeat",
instance_id=instance_id,
sent=sent,
@@ -58,8 +81,9 @@ class API(object):
**backup_fields):
LOG.debug("Making async call to cast update_backup for instance: %s"
% instance_id)
+ version = self.API_BASE_VERSION
- cctxt = self.client.prepare(version=self.version_cap)
+ cctxt = self.client.prepare(version=version)
cctxt.cast(self.context, "update_backup",
instance_id=instance_id,
backup_id=backup_id,
@@ -69,14 +93,16 @@ class API(object):
def report_root(self, instance_id, user):
LOG.debug("Making async call to cast report_root for instance: %s"
% instance_id)
- cctxt = self.client.prepare(version=self.version_cap)
+ version = self.API_BASE_VERSION
+ cctxt = self.client.prepare(version=version)
cctxt.cast(self.context, "report_root",
instance_id=instance_id,
user=user)
def notify_end(self, **notification_args):
LOG.debug("Making async call to cast end notification")
- cctxt = self.client.prepare(version=self.version_cap)
+ version = self.API_BASE_VERSION
+ cctxt = self.client.prepare(version=version)
context = self.context
serialized = SerializableNotification.serialize(context,
context.notification)
@@ -86,7 +112,8 @@ class API(object):
def notify_exc_info(self, message, exception):
LOG.debug("Making async call to cast error notification")
- cctxt = self.client.prepare(version=self.version_cap)
+ version = self.API_BASE_VERSION
+ cctxt = self.client.prepare(version=version)
context = self.context
serialized = SerializableNotification.serialize(context,
context.notification)
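To illustrate the versioning scheme the new docstring describes, this is roughly how a later, hypothetical 1.1 change would look; the made-up 'region' argument, the 1.1 entry, and the compatibility guard are all illustrative and not part of this patch.

    # Hypothetical future revision:
    #   * 1.0 - Initial version.
    #   * 1.1 - Adds a (made-up) 'region' argument to heartbeat.
    API_LATEST_VERSION = '1.1'
    API_BASE_VERSION = '1.0'

    def heartbeat(self, instance_id, payload, sent=None, region=None):
        LOG.debug("Making async call to cast heartbeat for instance: %s"
                  % instance_id)
        if region is not None:
            # New-style call: requires a conductor that understands 1.1.
            cctxt = self.client.prepare(version='1.1')
            cctxt.cast(self.context, "heartbeat", instance_id=instance_id,
                       sent=sent, payload=payload, region=region)
        else:
            # Old-style call remains compatible with 1.0 conductors.
            cctxt = self.client.prepare(version=self.API_BASE_VERSION)
            cctxt.cast(self.context, "heartbeat", instance_id=instance_id,
                       sent=sent, payload=payload)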
diff --git a/trove/configuration/service.py b/trove/configuration/service.py
index 19dd51c3..485e6e17 100644
--- a/trove/configuration/service.py
+++ b/trove/configuration/service.py
@@ -25,6 +25,7 @@ from trove.common.i18n import _
from trove.common import notification
from trove.common.notification import StartNotification, EndNotification
from trove.common import pagination
+from trove.common import policy
from trove.common import wsgi
from trove.configuration import models
from trove.configuration.models import DBConfigurationParameter
@@ -41,9 +42,16 @@ class ConfigurationsController(wsgi.Controller):
schemas = apischema.configuration
+ @classmethod
+ def authorize_config_action(cls, context, config_rule_name, config):
+ policy.authorize_on_target(
+ context, 'configuration:%s' % config_rule_name,
+ {'tenant': config.tenant_id})
+
def index(self, req, tenant_id):
context = req.environ[wsgi.CONTEXT_KEY]
configs, marker = models.Configurations.load(context)
+ policy.authorize_on_tenant(context, 'configuration:index')
view = views.ConfigurationsView(configs)
paged = pagination.SimplePaginatedDataView(req.url, 'configurations',
view, marker)
@@ -54,6 +62,7 @@ class ConfigurationsController(wsgi.Controller):
% {"tenant": tenant_id, "id": id})
context = req.environ[wsgi.CONTEXT_KEY]
configuration = models.Configuration.load(context, id)
+ self.authorize_config_action(context, 'show', configuration)
configuration_items = models.Configuration.load_items(context, id)
configuration.instance_count = instances_models.DBInstance.find_all(
@@ -68,6 +77,7 @@ class ConfigurationsController(wsgi.Controller):
def instances(self, req, tenant_id, id):
context = req.environ[wsgi.CONTEXT_KEY]
configuration = models.Configuration.load(context, id)
+ self.authorize_config_action(context, 'instances', configuration)
instances = instances_models.DBInstance.find_all(
tenant_id=context.tenant,
configuration_id=configuration.id,
@@ -89,6 +99,7 @@ class ConfigurationsController(wsgi.Controller):
LOG.debug("body : '%s'\n\n" % req)
context = req.environ[wsgi.CONTEXT_KEY]
+ policy.authorize_on_tenant(context, 'configuration:create')
context.notification = notification.DBaaSConfigurationCreate(
context, request=req)
name = body['configuration']['name']
@@ -137,10 +148,11 @@ class ConfigurationsController(wsgi.Controller):
LOG.info(msg % {"tenant_id": tenant_id, "cfg_id": id})
context = req.environ[wsgi.CONTEXT_KEY]
+ group = models.Configuration.load(context, id)
+ self.authorize_config_action(context, 'delete', group)
context.notification = notification.DBaaSConfigurationDelete(
context, request=req)
with StartNotification(context, configuration_id=id):
- group = models.Configuration.load(context, id)
instances = instances_models.DBInstance.find_all(
tenant_id=context.tenant,
configuration_id=id,
@@ -157,6 +169,15 @@ class ConfigurationsController(wsgi.Controller):
context = req.environ[wsgi.CONTEXT_KEY]
group = models.Configuration.load(context, id)
+ # Note that changing the configuration group will also
+ # indirectly affect all the instances to which it is attached.
+ #
+ # The Trove instance itself won't be changed (the same group is still
+ # attached) but the configuration values will.
+ #
+ # The operator needs to keep this in mind when defining the related
+ # policies.
+ self.authorize_config_action(context, 'update', group)
# if name/description are provided in the request body, update the
# model with these values as well.
@@ -181,10 +202,11 @@ class ConfigurationsController(wsgi.Controller):
def edit(self, req, body, tenant_id, id):
context = req.environ[wsgi.CONTEXT_KEY]
+ group = models.Configuration.load(context, id)
+ self.authorize_config_action(context, 'edit', group)
context.notification = notification.DBaaSConfigurationEdit(
context, request=req)
with StartNotification(context, configuration_id=id):
- group = models.Configuration.load(context, id)
items = self._configuration_items_list(group,
body['configuration'])
models.Configuration.save(group, items)
@@ -329,7 +351,18 @@ class ConfigurationsController(wsgi.Controller):
class ParametersController(wsgi.Controller):
+ @classmethod
+ def authorize_request(cls, req, rule_name):
+ """Parameters (configuration templates) bind to a datastore.
+ Datastores are not owned by any particular tenant so we only check
+ the current tenant is allowed to perform the action.
+ """
+ context = req.environ[wsgi.CONTEXT_KEY]
+ policy.authorize_on_tenant(context, 'configuration-parameter:%s'
+ % rule_name)
+
def index(self, req, tenant_id, datastore, id):
+ self.authorize_request(req, 'index')
ds, ds_version = ds_models.get_datastore_version(
type=datastore, version=id)
rules = models.DatastoreConfigurationParameters.load_parameters(
@@ -338,6 +371,7 @@ class ParametersController(wsgi.Controller):
200)
def show(self, req, tenant_id, datastore, id, name):
+ self.authorize_request(req, 'show')
ds, ds_version = ds_models.get_datastore_version(
type=datastore, version=id)
rule = models.DatastoreConfigurationParameters.load_parameter_by_name(
@@ -345,6 +379,7 @@ class ParametersController(wsgi.Controller):
return wsgi.Result(views.ConfigurationParameterView(rule).data(), 200)
def index_by_version(self, req, tenant_id, version):
+ self.authorize_request(req, 'index_by_version')
ds_version = ds_models.DatastoreVersion.load_by_uuid(version)
rules = models.DatastoreConfigurationParameters.load_parameters(
ds_version.id)
@@ -352,6 +387,7 @@ class ParametersController(wsgi.Controller):
200)
def show_by_version(self, req, tenant_id, version, name):
+ self.authorize_request(req, 'show_by_version')
ds_models.DatastoreVersion.load_by_uuid(version)
rule = models.DatastoreConfigurationParameters.load_parameter_by_name(
version, name)
diff --git a/trove/datastore/service.py b/trove/datastore/service.py
index 6a04a1ce..0f69c029 100644
--- a/trove/datastore/service.py
+++ b/trove/datastore/service.py
@@ -16,6 +16,7 @@
# under the License.
#
+from trove.common import policy
from trove.common import wsgi
from trove.datastore import models, views
from trove.flavor import views as flavor_views
@@ -23,7 +24,16 @@ from trove.flavor import views as flavor_views
class DatastoreController(wsgi.Controller):
+ @classmethod
+ def authorize_request(cls, req, rule_name):
+ """Datastores are not owned by any particular tenant so we only check
+ the current tenant is allowed to perform the action.
+ """
+ context = req.environ[wsgi.CONTEXT_KEY]
+ policy.authorize_on_tenant(context, 'datastore:%s' % rule_name)
+
def show(self, req, tenant_id, id):
+ self.authorize_request(req, 'show')
datastore = models.Datastore.load(id)
datastore_versions = (models.DatastoreVersions.load(datastore.id))
return wsgi.Result(views.
@@ -31,6 +41,7 @@ class DatastoreController(wsgi.Controller):
req).data(), 200)
def index(self, req, tenant_id):
+ self.authorize_request(req, 'index')
context = req.environ[wsgi.CONTEXT_KEY]
only_active = True
if context.is_admin:
@@ -42,17 +53,20 @@ class DatastoreController(wsgi.Controller):
req).data(), 200)
def version_show(self, req, tenant_id, datastore, id):
+ self.authorize_request(req, 'version_show')
datastore = models.Datastore.load(datastore)
datastore_version = models.DatastoreVersion.load(datastore, id)
return wsgi.Result(views.DatastoreVersionView(datastore_version,
req).data(), 200)
def version_show_by_uuid(self, req, tenant_id, uuid):
+ self.authorize_request(req, 'version_show_by_uuid')
datastore_version = models.DatastoreVersion.load_by_uuid(uuid)
return wsgi.Result(views.DatastoreVersionView(datastore_version,
req).data(), 200)
def version_index(self, req, tenant_id, datastore):
+ self.authorize_request(req, 'version_index')
context = req.environ[wsgi.CONTEXT_KEY]
only_active = True
if context.is_admin:
@@ -70,6 +84,7 @@ class DatastoreController(wsgi.Controller):
one or more entries are found in datastore_version_metadata,
in which case only those are returned.
"""
+ self.authorize_request(req, 'list_associated_flavors')
context = req.environ[wsgi.CONTEXT_KEY]
flavors = (models.DatastoreVersionMetadata.
list_datastore_version_flavor_associations(
diff --git a/trove/db/sqlalchemy/migrate_repo/versions/039_region.py b/trove/db/sqlalchemy/migrate_repo/versions/039_region.py
new file mode 100644
index 00000000..eda38f56
--- /dev/null
+++ b/trove/db/sqlalchemy/migrate_repo/versions/039_region.py
@@ -0,0 +1,35 @@
+# Copyright 2016 Tesora Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_log import log as logging
+from sqlalchemy.schema import Column
+from sqlalchemy.schema import MetaData
+
+from trove.common import cfg
+from trove.db.sqlalchemy.migrate_repo.schema import String
+from trove.db.sqlalchemy.migrate_repo.schema import Table
+
+
+CONF = cfg.CONF
+logger = logging.getLogger('trove.db.sqlalchemy.migrate_repo.schema')
+
+meta = MetaData()
+
+
+def upgrade(migrate_engine):
+ meta.bind = migrate_engine
+ instances = Table('instances', meta, autoload=True)
+ instances.create_column(Column('region_id', String(255)))
+ instances.update().values(region_id=CONF.os_region_name).execute()
diff --git a/trove/extensions/common/service.py b/trove/extensions/common/service.py
index 8120b48c..78669266 100644
--- a/trove/extensions/common/service.py
+++ b/trove/extensions/common/service.py
@@ -21,14 +21,17 @@ from oslo_log import log as logging
from oslo_utils import importutils
import six
+from trove.cluster import models as cluster_models
from trove.cluster.models import DBCluster
from trove.common import cfg
from trove.common import exception
from trove.common.i18n import _LI
+from trove.common import policy
from trove.common import wsgi
from trove.datastore import models as datastore_models
from trove.extensions.common import models
from trove.extensions.common import views
+from trove.instance import models as instance_models
from trove.instance.models import DBInstance
@@ -37,8 +40,30 @@ import_class = importutils.import_class
CONF = cfg.CONF
+class ExtensionController(wsgi.Controller):
+
+ @classmethod
+ def authorize_target_action(cls, context, target_rule_name,
+ target_id, is_cluster=False):
+ target = None
+ if is_cluster:
+ target = cluster_models.Cluster.load(context, target_id)
+ else:
+ target = instance_models.Instance.load(context, target_id)
+
+ if not target:
+ if is_cluster:
+ raise exception.ClusterNotFound(cluster=target_id)
+ raise exception.InstanceNotFound(instance=target_id)
+
+ target_type = 'cluster' if is_cluster else 'instance'
+ policy.authorize_on_target(
+ context, '%s:extension:%s' % (target_type, target_rule_name),
+ {'tenant': target.tenant_id})
+
+
@six.add_metaclass(abc.ABCMeta)
-class BaseDatastoreRootController(wsgi.Controller):
+class BaseDatastoreRootController(ExtensionController):
"""Base class that defines the contract for root controllers."""
@abc.abstractmethod
@@ -174,13 +199,16 @@ class ClusterRootController(DefaultRootController):
return single_instance_id, instance_ids
-class RootController(wsgi.Controller):
+class RootController(ExtensionController):
"""Controller for instance functionality."""
def index(self, req, tenant_id, instance_id):
"""Returns True if root is enabled; False otherwise."""
datastore_manager, is_cluster = self._get_datastore(tenant_id,
instance_id)
+ context = req.environ[wsgi.CONTEXT_KEY]
+ self.authorize_target_action(context, 'root:index', instance_id,
+ is_cluster=is_cluster)
root_controller = self.load_root_controller(datastore_manager)
return root_controller.root_index(req, tenant_id, instance_id,
is_cluster)
@@ -189,6 +217,9 @@ class RootController(wsgi.Controller):
"""Enable the root user for the db instance."""
datastore_manager, is_cluster = self._get_datastore(tenant_id,
instance_id)
+ context = req.environ[wsgi.CONTEXT_KEY]
+ self.authorize_target_action(context, 'root:create', instance_id,
+ is_cluster=is_cluster)
root_controller = self.load_root_controller(datastore_manager)
if root_controller is not None:
return root_controller.root_create(req, body, tenant_id,
@@ -199,6 +230,9 @@ class RootController(wsgi.Controller):
def delete(self, req, tenant_id, instance_id):
datastore_manager, is_cluster = self._get_datastore(tenant_id,
instance_id)
+ context = req.environ[wsgi.CONTEXT_KEY]
+ self.authorize_target_action(context, 'root:delete', instance_id,
+ is_cluster=is_cluster)
root_controller = self.load_root_controller(datastore_manager)
if root_controller is not None:
return root_controller.root_delete(req, tenant_id,
diff --git a/trove/extensions/mgmt/instances/models.py b/trove/extensions/mgmt/instances/models.py
index c3912c15..3c0a5bbc 100644
--- a/trove/extensions/mgmt/instances/models.py
+++ b/trove/extensions/mgmt/instances/models.py
@@ -33,7 +33,7 @@ CONF = cfg.CONF
def load_mgmt_instances(context, deleted=None, client=None,
include_clustered=None):
if not client:
- client = remote.create_nova_client(context)
+ client = remote.create_nova_client(context, CONF.os_region_name)
try:
mgmt_servers = client.rdservers.list()
except AttributeError:
@@ -56,7 +56,7 @@ def load_mgmt_instance(cls, context, id, include_deleted):
try:
instance = load_instance(cls, context, id, needs_server=True,
include_deleted=include_deleted)
- client = remote.create_nova_client(context)
+ client = remote.create_nova_client(context, CONF.os_region_name)
try:
server = client.rdservers.get(instance.server_id)
except AttributeError:
@@ -169,7 +169,7 @@ def _load_servers(instances, find_server):
server = find_server(db.id, db.compute_instance_id)
instance.server = server
except Exception as ex:
- LOG.error(ex)
+ LOG.exception(ex)
return instances
diff --git a/trove/extensions/mgmt/instances/service.py b/trove/extensions/mgmt/instances/service.py
index 280cb20d..a1bd8c07 100644
--- a/trove/extensions/mgmt/instances/service.py
+++ b/trove/extensions/mgmt/instances/service.py
@@ -22,6 +22,8 @@ import trove.common.apischema as apischema
from trove.common.auth import admin_context
from trove.common import exception
from trove.common.i18n import _
+from trove.common import notification
+from trove.common.notification import StartNotification
from trove.common import wsgi
from trove.extensions.mgmt.instances import models
from trove.extensions.mgmt.instances import views
@@ -63,7 +65,7 @@ class MgmtInstanceController(InstanceController):
instances = models.load_mgmt_instances(
context, deleted=deleted, include_clustered=include_clustered)
except nova_exceptions.ClientException as e:
- LOG.error(e)
+ LOG.exception(e)
return wsgi.Result(str(e), 403)
view_cls = views.MgmtInstancesView
@@ -118,28 +120,32 @@ class MgmtInstanceController(InstanceController):
raise exception.BadRequest(msg)
if selected_action:
- return selected_action(context, instance, body)
+ return selected_action(context, instance, req, body)
else:
raise exception.BadRequest(_("Invalid request body."))
- def _action_stop(self, context, instance, body):
+ def _action_stop(self, context, instance, req, body):
LOG.debug("Stopping MySQL on instance %s." % instance.id)
instance.stop_db()
return wsgi.Result(None, 202)
- def _action_reboot(self, context, instance, body):
+ def _action_reboot(self, context, instance, req, body):
LOG.debug("Rebooting instance %s." % instance.id)
instance.reboot()
return wsgi.Result(None, 202)
- def _action_migrate(self, context, instance, body):
+ def _action_migrate(self, context, instance, req, body):
LOG.debug("Migrating instance %s." % instance.id)
LOG.debug("body['migrate']= %s" % body['migrate'])
host = body['migrate'].get('host', None)
- instance.migrate(host)
+
+ context.notification = notification.DBaaSInstanceMigrate(context,
+ request=req)
+ with StartNotification(context, host=host):
+ instance.migrate(host)
return wsgi.Result(None, 202)
- def _action_reset_task_status(self, context, instance, body):
+ def _action_reset_task_status(self, context, instance, req, body):
LOG.debug("Setting Task-Status to NONE on instance %s." %
instance.id)
instance.reset_task_status()
@@ -163,7 +169,7 @@ class MgmtInstanceController(InstanceController):
try:
instance_models.Instance.load(context=context, id=id)
except exception.TroveError as e:
- LOG.error(e)
+ LOG.exception(e)
return wsgi.Result(str(e), 404)
rhv = views.RootHistoryView(id)
reh = mysql_models.RootHistory.load(context=context, instance_id=id)
diff --git a/trove/extensions/mgmt/volume/models.py b/trove/extensions/mgmt/volume/models.py
index 0f8b4eb8..a7abc90a 100644
--- a/trove/extensions/mgmt/volume/models.py
+++ b/trove/extensions/mgmt/volume/models.py
@@ -41,8 +41,8 @@ class StorageDevice(object):
class StorageDevices(object):
@staticmethod
- def load(context):
- client = create_cinder_client(context)
+ def load(context, region_name):
+ client = create_cinder_client(context, region_name)
rdstorages = client.rdstorage.list()
for rdstorage in rdstorages:
LOG.debug("rdstorage=" + str(rdstorage))
diff --git a/trove/extensions/mgmt/volume/service.py b/trove/extensions/mgmt/volume/service.py
index c6b1b992..3f9b4733 100644
--- a/trove/extensions/mgmt/volume/service.py
+++ b/trove/extensions/mgmt/volume/service.py
@@ -17,11 +17,13 @@
from oslo_log import log as logging
from trove.common.auth import admin_context
+from trove.common import cfg
from trove.common.i18n import _
from trove.common import wsgi
from trove.extensions.mgmt.volume import models
from trove.extensions.mgmt.volume import views
+CONF = cfg.CONF
LOG = logging.getLogger(__name__)
@@ -34,5 +36,5 @@ class StorageController(wsgi.Controller):
LOG.info(_("req : '%s'\n\n") % req)
LOG.info(_("Indexing storage info for tenant '%s'") % tenant_id)
context = req.environ[wsgi.CONTEXT_KEY]
- storages = models.StorageDevices.load(context)
+ storages = models.StorageDevices.load(context, CONF.os_region_name)
return wsgi.Result(views.StoragesView(storages).data(), 200)
diff --git a/trove/extensions/mysql/models.py b/trove/extensions/mysql/models.py
index 71eb939d..3071d30f 100644
--- a/trove/extensions/mysql/models.py
+++ b/trove/extensions/mysql/models.py
@@ -261,3 +261,13 @@ class Schemas(object):
mysql_schema.collate,
mysql_schema.character_set))
return model_schemas, next_marker
+
+ @classmethod
+ def find(cls, context, instance_id, schema_id):
+ load_and_verify(context, instance_id)
+ client = create_guest_client(context, instance_id)
+ model_schemas, _ = cls.load_with_client(client, 1, schema_id, True)
+ if model_schemas and model_schemas[0].name == schema_id:
+ return model_schemas[0]
+
+ return None
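The new Schemas.find() helper returns the matching schema model, or None when the instance has no schema with that exact name. A tiny illustrative caller (the function name is made up):

from trove.extensions.mysql import models


def schema_exists(context, instance_id, schema_id):
    # Illustrative only: loads at most one schema through the guest and
    # checks that the name matched exactly.
    return models.Schemas.find(context, instance_id, schema_id) is not None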
diff --git a/trove/extensions/mysql/service.py b/trove/extensions/mysql/service.py
index 15dc4e86..fb444ede 100644
--- a/trove/extensions/mysql/service.py
+++ b/trove/extensions/mysql/service.py
@@ -30,6 +30,7 @@ from trove.common import pagination
from trove.common.utils import correct_id_with_req
from trove.common import wsgi
from trove.extensions.common.service import DefaultRootController
+from trove.extensions.common.service import ExtensionController
from trove.extensions.mysql.common import populate_users
from trove.extensions.mysql.common import populate_validated_databases
from trove.extensions.mysql.common import unquote_user_host
@@ -42,7 +43,7 @@ import_class = importutils.import_class
CONF = cfg.CONF
-class UserController(wsgi.Controller):
+class UserController(ExtensionController):
"""Controller for instance functionality."""
schemas = apischema.user
@@ -60,6 +61,7 @@ class UserController(wsgi.Controller):
"req : '%(req)s'\n\n") %
{"id": instance_id, "req": req})
context = req.environ[wsgi.CONTEXT_KEY]
+ self.authorize_target_action(context, 'user:index', instance_id)
users, next_marker = models.Users.load(context, instance_id)
view = views.UsersView(users)
paged = pagination.SimplePaginatedDataView(req.url, 'users', view,
@@ -75,6 +77,7 @@ class UserController(wsgi.Controller):
"req": strutils.mask_password(req),
"body": strutils.mask_password(body)})
context = req.environ[wsgi.CONTEXT_KEY]
+ self.authorize_target_action(context, 'user:create', instance_id)
context.notification = notification.DBaaSUserCreate(context,
request=req)
users = body['users']
@@ -94,6 +97,7 @@ class UserController(wsgi.Controller):
"req : '%(req)s'\n\n") %
{"id": instance_id, "req": req})
context = req.environ[wsgi.CONTEXT_KEY]
+ self.authorize_target_action(context, 'user:delete', instance_id)
id = correct_id_with_req(id, req)
username, host = unquote_user_host(id)
user = None
@@ -122,6 +126,7 @@ class UserController(wsgi.Controller):
"req : '%(req)s'\n\n") %
{"id": instance_id, "req": req})
context = req.environ[wsgi.CONTEXT_KEY]
+ self.authorize_target_action(context, 'user:show', instance_id)
id = correct_id_with_req(id, req)
username, host = unquote_user_host(id)
user = None
@@ -141,6 +146,7 @@ class UserController(wsgi.Controller):
"req : '%(req)s'\n\n") %
{"id": instance_id, "req": strutils.mask_password(req)})
context = req.environ[wsgi.CONTEXT_KEY]
+ self.authorize_target_action(context, 'user:update', instance_id)
id = correct_id_with_req(id, req)
username, hostname = unquote_user_host(id)
user = None
@@ -171,6 +177,7 @@ class UserController(wsgi.Controller):
"req : '%(req)s'\n\n") %
{"id": instance_id, "req": strutils.mask_password(req)})
context = req.environ[wsgi.CONTEXT_KEY]
+ self.authorize_target_action(context, 'user:update_all', instance_id)
context.notification = notification.DBaaSUserChangePassword(
context, request=req)
users = body['users']
@@ -203,7 +210,7 @@ class UserController(wsgi.Controller):
return wsgi.Result(None, 202)
-class UserAccessController(wsgi.Controller):
+class UserAccessController(ExtensionController):
"""Controller for adding and removing database access for a user."""
schemas = apischema.user
@@ -232,6 +239,8 @@ class UserAccessController(wsgi.Controller):
{"id": instance_id, "req": req})
context = req.environ[wsgi.CONTEXT_KEY]
+ self.authorize_target_action(
+ context, 'user_access:index', instance_id)
# Make sure this user exists.
user_id = correct_id_with_req(user_id, req)
user = self._get_user(context, instance_id, user_id)
@@ -249,6 +258,8 @@ class UserAccessController(wsgi.Controller):
"req : '%(req)s'\n\n") %
{"id": instance_id, "req": req})
context = req.environ[wsgi.CONTEXT_KEY]
+ self.authorize_target_action(
+ context, 'user_access:update', instance_id)
context.notification = notification.DBaaSUserGrant(
context, request=req)
user_id = correct_id_with_req(user_id, req)
@@ -270,6 +281,8 @@ class UserAccessController(wsgi.Controller):
"req : '%(req)s'\n\n") %
{"id": instance_id, "req": req})
context = req.environ[wsgi.CONTEXT_KEY]
+ self.authorize_target_action(
+ context, 'user_access:delete', instance_id)
context.notification = notification.DBaaSUserRevoke(
context, request=req)
user_id = correct_id_with_req(user_id, req)
@@ -288,7 +301,7 @@ class UserAccessController(wsgi.Controller):
return wsgi.Result(None, 202)
-class SchemaController(wsgi.Controller):
+class SchemaController(ExtensionController):
"""Controller for instance functionality."""
schemas = apischema.dbschema
@@ -299,6 +312,8 @@ class SchemaController(wsgi.Controller):
{"id": instance_id, "req": req})
context = req.environ[wsgi.CONTEXT_KEY]
+ self.authorize_target_action(
+ context, 'database:index', instance_id)
schemas, next_marker = models.Schemas.load(context, instance_id)
view = views.SchemasView(schemas)
paged = pagination.SimplePaginatedDataView(req.url, 'databases', view,
@@ -315,6 +330,8 @@ class SchemaController(wsgi.Controller):
"body": body})
context = req.environ[wsgi.CONTEXT_KEY]
+ self.authorize_target_action(
+ context, 'database:create', instance_id)
schemas = body['databases']
context.notification = notification.DBaaSDatabaseCreate(context,
request=req)
@@ -334,12 +351,16 @@ class SchemaController(wsgi.Controller):
"req : '%(req)s'\n\n") %
{"id": instance_id, "req": req})
context = req.environ[wsgi.CONTEXT_KEY]
+ self.authorize_target_action(
+ context, 'database:delete', instance_id)
context.notification = notification.DBaaSDatabaseDelete(
context, request=req)
with StartNotification(context, instance_id=instance_id, dbname=id):
try:
schema = guest_models.MySQLSchema(name=id)
schema.check_delete()
+ if not models.Schemas.find(context, instance_id, id):
+ raise exception.DatabaseNotFound(uuid=id)
models.Schema.delete(context, instance_id, schema.serialize())
except (ValueError, AttributeError) as e:
raise exception.BadRequest(_("Database delete error: %(e)s")
@@ -347,6 +368,9 @@ class SchemaController(wsgi.Controller):
return wsgi.Result(None, 202)
def show(self, req, tenant_id, instance_id, id):
+ context = req.environ[wsgi.CONTEXT_KEY]
+ self.authorize_target_action(
+ context, 'database:show', instance_id)
raise webob.exc.HTTPNotImplemented()
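
Every controller method above now runs a named policy check (for example 'user:index' or 'database:delete') against the target instance before doing any work; in Trove this goes through ExtensionController.authorize_target_action and oslo.policy. A simplified, dictionary-based sketch of the idea, not the real enforcer:

    class PolicyNotAuthorized(Exception):
        pass

    # Owner of each instance; stands in for a database lookup.
    INSTANCE_OWNERS = {'inst-1': 'tenant-a'}

    def authorize_target_action(context, rule_name, instance_id):
        # A real deployment delegates to oslo.policy rules such as
        # 'user:index'; this sketch only allows admins and the owner.
        if context['is_admin'] or context['tenant'] == INSTANCE_OWNERS.get(instance_id):
            return True
        raise PolicyNotAuthorized(rule_name)

    authorize_target_action({'tenant': 'tenant-a', 'is_admin': False},
                            'user:index', 'inst-1')
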
diff --git a/trove/extensions/security_group/models.py b/trove/extensions/security_group/models.py
index 1d311319..892e8f65 100644
--- a/trove/extensions/security_group/models.py
+++ b/trove/extensions/security_group/models.py
@@ -49,11 +49,10 @@ class SecurityGroup(DatabaseModelBase):
.get_instance_id_by_security_group_id(self.id)
@classmethod
- def create_sec_group(cls, name, description, context):
+ def create_sec_group(cls, name, description, context, region_name):
try:
- remote_sec_group = RemoteSecurityGroup.create(name,
- description,
- context)
+ remote_sec_group = RemoteSecurityGroup.create(
+ name, description, context, region_name)
if not remote_sec_group:
raise exception.SecurityGroupCreationError(
@@ -71,11 +70,12 @@ class SecurityGroup(DatabaseModelBase):
raise
@classmethod
- def create_for_instance(cls, instance_id, context):
+ def create_for_instance(cls, instance_id, context, region_name):
# Create a new security group
name = "%s_%s" % (CONF.trove_security_group_name_prefix, instance_id)
description = _("Security Group for %s") % instance_id
- sec_group = cls.create_sec_group(name, description, context)
+ sec_group = cls.create_sec_group(name, description, context,
+ region_name)
# Currently this is locked down by default, since we don't create any
# default security group rules for the security group.
@@ -101,14 +101,14 @@ class SecurityGroup(DatabaseModelBase):
return SecurityGroupRule.find_all(group_id=self.id,
deleted=False)
- def delete(self, context):
+ def delete(self, context, region_name):
try:
sec_group_rules = self.get_rules()
if sec_group_rules:
for rule in sec_group_rules:
- rule.delete(context)
+ rule.delete(context, region_name)
- RemoteSecurityGroup.delete(self.id, context)
+ RemoteSecurityGroup.delete(self.id, context, region_name)
super(SecurityGroup, self).delete()
except exception.TroveError:
@@ -116,7 +116,7 @@ class SecurityGroup(DatabaseModelBase):
raise exception.TroveError("Failed to delete Security Group")
@classmethod
- def delete_for_instance(cls, instance_id, context):
+ def delete_for_instance(cls, instance_id, context, region_name):
try:
association = SecurityGroupInstanceAssociation.find_by(
instance_id=instance_id,
@@ -124,7 +124,7 @@ class SecurityGroup(DatabaseModelBase):
if association:
sec_group = association.get_security_group()
if sec_group:
- sec_group.delete(context)
+ sec_group.delete(context, region_name)
association.delete()
except (exception.ModelNotFoundError,
exception.TroveError):
@@ -140,7 +140,7 @@ class SecurityGroupRule(DatabaseModelBase):
@classmethod
def create_sec_group_rule(cls, sec_group, protocol, from_port,
- to_port, cidr, context):
+ to_port, cidr, context, region_name):
try:
remote_rule_id = RemoteSecurityGroup.add_rule(
sec_group_id=sec_group['id'],
@@ -148,7 +148,8 @@ class SecurityGroupRule(DatabaseModelBase):
from_port=from_port,
to_port=to_port,
cidr=cidr,
- context=context)
+ context=context,
+ region_name=region_name)
if not remote_rule_id:
raise exception.SecurityGroupRuleCreationError(
@@ -172,10 +173,10 @@ class SecurityGroupRule(DatabaseModelBase):
tenant_id=tenant_id,
deleted=False)
- def delete(self, context):
+ def delete(self, context, region_name):
try:
# Delete Remote Security Group Rule
- RemoteSecurityGroup.delete_rule(self.id, context)
+ RemoteSecurityGroup.delete_rule(self.id, context, region_name)
super(SecurityGroupRule, self).delete()
except exception.TroveError:
LOG.exception(_('Failed to delete security group.'))
@@ -210,42 +211,44 @@ class RemoteSecurityGroup(NetworkRemoteModelBase):
_data_fields = ['id', 'name', 'description', 'rules']
- def __init__(self, security_group=None, id=None, context=None):
+ def __init__(self, security_group=None, id=None, context=None,
+ region_name=None):
if id is None and security_group is None:
msg = _("Security Group does not have id defined!")
raise exception.InvalidModelError(msg)
elif security_group is None:
- driver = self.get_driver(context)
+ driver = self.get_driver(context,
+ region_name or CONF.os_region_name)
self._data_object = driver.get_sec_group_by_id(group_id=id)
else:
self._data_object = security_group
@classmethod
- def create(cls, name, description, context):
+ def create(cls, name, description, context, region_name):
"""Creates a new Security Group."""
- driver = cls.get_driver(context)
+ driver = cls.get_driver(context, region_name)
sec_group = driver.create_security_group(
name=name, description=description)
return RemoteSecurityGroup(security_group=sec_group)
@classmethod
- def delete(cls, sec_group_id, context):
+ def delete(cls, sec_group_id, context, region_name):
"""Deletes a Security Group."""
- driver = cls.get_driver(context)
+ driver = cls.get_driver(context, region_name)
driver.delete_security_group(sec_group_id)
@classmethod
def add_rule(cls, sec_group_id, protocol, from_port,
- to_port, cidr, context):
+ to_port, cidr, context, region_name):
"""Adds a new rule to an existing security group."""
- driver = cls.get_driver(context)
+ driver = cls.get_driver(context, region_name)
sec_group_rule = driver.add_security_group_rule(
sec_group_id, protocol, from_port, to_port, cidr)
return sec_group_rule.id
@classmethod
- def delete_rule(cls, sec_group_rule_id, context):
+ def delete_rule(cls, sec_group_rule_id, context, region_name):
"""Deletes a rule from an existing security group."""
- driver = cls.get_driver(context)
+ driver = cls.get_driver(context, region_name)
driver.delete_security_group_rule(sec_group_rule_id)
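
With region_name threaded through every classmethod, a single Trove control plane can manage security groups in more than one region; get_driver only has to hand back a client bound to the requested region's endpoint. A hypothetical sketch of that dispatch (the per-region driver cache below is illustrative; the real driver wraps the network client for that region):

    class FakeSecGroupDriver(object):
        def __init__(self, region):
            self.region = region

        def create_security_group(self, name, description):
            return {'name': name, 'description': description,
                    'region': self.region}

    _DRIVERS = {}

    def get_driver(context, region_name):
        # One driver per region, created lazily on first use.
        if region_name not in _DRIVERS:
            _DRIVERS[region_name] = FakeSecGroupDriver(region_name)
        return _DRIVERS[region_name]

    def create(name, description, context, region_name):
        driver = get_driver(context, region_name)
        return driver.create_security_group(name=name, description=description)

    print(create('trove_sg_inst-1', 'Security Group for inst-1', {}, 'RegionTwo'))
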
diff --git a/trove/extensions/security_group/service.py b/trove/extensions/security_group/service.py
index 2622accf..5a8c8bb4 100644
--- a/trove/extensions/security_group/service.py
+++ b/trove/extensions/security_group/service.py
@@ -77,7 +77,7 @@ class SecurityGroupRuleController(wsgi.Controller):
"exist or does not belong to tenant %s") % tenant_id)
raise exception.Forbidden("Unauthorized")
- sec_group_rule.delete(context)
+ sec_group_rule.delete(context, CONF.os_region_name)
sec_group.save()
return wsgi.Result(None, 204)
@@ -106,7 +106,8 @@ class SecurityGroupRuleController(wsgi.Controller):
from_, to_ = utils.gen_ports(port_or_range)
rule = models.SecurityGroupRule.create_sec_group_rule(
sec_group, protocol, int(from_), int(to_),
- body['security_group_rule']['cidr'], context)
+ body['security_group_rule']['cidr'], context,
+ CONF.os_region_name)
rules.append(rule)
except (ValueError, AttributeError) as e:
raise exception.BadRequest(msg=str(e))
diff --git a/trove/flavor/service.py b/trove/flavor/service.py
index ea5b0d72..60e87935 100644
--- a/trove/flavor/service.py
+++ b/trove/flavor/service.py
@@ -17,6 +17,7 @@
import six
from trove.common import exception
+from trove.common import policy
from trove.common import wsgi
from trove.flavor import models
from trove.flavor import views
@@ -30,12 +31,16 @@ class FlavorController(wsgi.Controller):
context = req.environ[wsgi.CONTEXT_KEY]
self._validate_flavor_id(id)
flavor = models.Flavor(context=context, flavor_id=id)
+ # Flavors do not bind to a particular tenant.
+ # Only authorize the current tenant.
+ policy.authorize_on_tenant(context, 'flavor:show')
# Pass in the request to build accurate links.
return wsgi.Result(views.FlavorView(flavor, req).data(), 200)
def index(self, req, tenant_id):
"""Return all flavors."""
context = req.environ[wsgi.CONTEXT_KEY]
+ policy.authorize_on_tenant(context, 'flavor:index')
flavors = models.Flavors(context=context)
return wsgi.Result(views.FlavorsView(flavors, req).data(), 200)
diff --git a/trove/guestagent/api.py b/trove/guestagent/api.py
index 43e46f6b..180388a0 100644
--- a/trove/guestagent/api.py
+++ b/trove/guestagent/api.py
@@ -26,7 +26,6 @@ from trove.common import cfg
from trove.common import exception
from trove.common.i18n import _
from trove.common.notification import NotificationCastWrapper
-import trove.common.rpc.version as rpc_version
from trove import rpc
CONF = cfg.CONF
@@ -37,19 +36,43 @@ AGENT_SNAPSHOT_TIMEOUT = CONF.agent_replication_snapshot_timeout
class API(object):
- """API for interacting with the guest manager."""
+ """API for interacting with the guest manager.
+
+ API version history:
+ * 1.0 - Initial version.
+
+ When updating this API, also update API_LATEST_VERSION
+ """
+
+ # Bump the minor number of API_LATEST_VERSION each time
+ # a method signature is added or changed
+ API_LATEST_VERSION = '1.0'
+
+ # API_BASE_VERSION should only change on major version upgrade
+ API_BASE_VERSION = '1.0'
+
+ VERSION_ALIASES = {
+ 'icehouse': '1.0',
+ 'juno': '1.0',
+ 'kilo': '1.0',
+ 'liberty': '1.0',
+ 'mitaka': '1.0',
+ 'newton': '1.0',
+
+ 'latest': API_LATEST_VERSION
+ }
def __init__(self, context, id):
self.context = context
self.id = id
super(API, self).__init__()
+ version_cap = self.VERSION_ALIASES.get(
+ CONF.upgrade_levels.guestagent, CONF.upgrade_levels.guestagent)
target = messaging.Target(topic=self._get_routing_key(),
- version=rpc_version.RPC_API_VERSION)
+ version=version_cap)
- self.version_cap = rpc_version.VERSION_ALIASES.get(
- CONF.upgrade_levels.guestagent)
- self.client = self.get_client(target, self.version_cap)
+ self.client = self.get_client(target, version_cap)
def get_client(self, target, version_cap, serializer=None):
return rpc.get_client(target,
@@ -95,31 +118,44 @@ class API(object):
users.
"""
LOG.debug("Changing passwords for users on instance %s.", self.id)
- self._cast("change_passwords", self.version_cap, users=users)
+ version = self.API_BASE_VERSION
+
+ self._cast("change_passwords", version=version, users=users)
def update_attributes(self, username, hostname, user_attrs):
"""Update user attributes."""
LOG.debug("Changing user attributes on instance %s.", self.id)
- self._cast("update_attributes", self.version_cap, username=username,
+ version = self.API_BASE_VERSION
+
+ self._cast("update_attributes",
+ version=version, username=username,
hostname=hostname, user_attrs=user_attrs)
def create_user(self, users):
"""Make an asynchronous call to create a new database user"""
LOG.debug("Creating Users for instance %s.", self.id)
- self._cast("create_user", self.version_cap, users=users)
+ version = self.API_BASE_VERSION
+
+ self._cast("create_user", version=version, users=users)
def get_user(self, username, hostname):
"""Make an asynchronous call to get a single database user."""
LOG.debug("Getting a user %(username)s on instance %(id)s.",
{'username': username, 'id': self.id})
- return self._call("get_user", AGENT_LOW_TIMEOUT, self.version_cap,
+ version = self.API_BASE_VERSION
+
+ return self._call("get_user",
+ AGENT_LOW_TIMEOUT, version=version,
username=username, hostname=hostname)
def list_access(self, username, hostname):
"""Show all the databases to which a user has more than USAGE."""
LOG.debug("Showing user %(username)s grants on instance %(id)s.",
{'username': username, 'id': self.id})
- return self._call("list_access", AGENT_LOW_TIMEOUT, self.version_cap,
+ version = self.API_BASE_VERSION
+
+ return self._call("list_access",
+ AGENT_LOW_TIMEOUT, version=version,
username=username, hostname=hostname)
def grant_access(self, username, hostname, databases):
@@ -128,7 +164,10 @@ class API(object):
"%(username)s on instance %(id)s.", {'username': username,
'databases': databases,
'id': self.id})
- return self._call("grant_access", AGENT_LOW_TIMEOUT, self.version_cap,
+ version = self.API_BASE_VERSION
+
+ return self._call("grant_access",
+ AGENT_LOW_TIMEOUT, version=version,
username=username, hostname=hostname,
databases=databases)
@@ -138,14 +177,20 @@ class API(object):
"%(username)s on instance %(id)s.", {'username': username,
'database': database,
'id': self.id})
- return self._call("revoke_access", AGENT_LOW_TIMEOUT, self.version_cap,
+ version = self.API_BASE_VERSION
+
+ return self._call("revoke_access",
+ AGENT_LOW_TIMEOUT, version=version,
username=username, hostname=hostname,
database=database)
def list_users(self, limit=None, marker=None, include_marker=False):
"""Make an asynchronous call to list database users."""
LOG.debug("Listing Users for instance %s.", self.id)
- return self._call("list_users", AGENT_HIGH_TIMEOUT, self.version_cap,
+ version = self.API_BASE_VERSION
+
+ return self._call("list_users", AGENT_HIGH_TIMEOUT,
+ version=version,
limit=limit, marker=marker,
include_marker=include_marker)
@@ -153,20 +198,27 @@ class API(object):
"""Make an asynchronous call to delete an existing database user."""
LOG.debug("Deleting user %(user)s for instance %(instance_id)s." %
{'user': user, 'instance_id': self.id})
- self._cast("delete_user", self.version_cap, user=user)
+ version = self.API_BASE_VERSION
+
+ self._cast("delete_user", version=version, user=user)
def create_database(self, databases):
"""Make an asynchronous call to create a new database
within the specified container
"""
LOG.debug("Creating databases for instance %s.", self.id)
- self._cast("create_database", self.version_cap, databases=databases)
+ version = self.API_BASE_VERSION
+
+ self._cast("create_database", version=version,
+ databases=databases)
def list_databases(self, limit=None, marker=None, include_marker=False):
"""Make an asynchronous call to list databases."""
LOG.debug("Listing databases for instance %s.", self.id)
+ version = self.API_BASE_VERSION
+
return self._call("list_databases", AGENT_LOW_TIMEOUT,
- self.version_cap, limit=limit, marker=marker,
+ version=version, limit=limit, marker=marker,
include_marker=include_marker)
def delete_database(self, database):
@@ -176,53 +228,72 @@ class API(object):
LOG.debug("Deleting database %(database)s for "
"instance %(instance_id)s." % {'database': database,
'instance_id': self.id})
- self._cast("delete_database", self.version_cap, database=database)
+ version = self.API_BASE_VERSION
+
+ self._cast("delete_database", version=version, database=database)
def enable_root(self):
"""Make a synchronous call to enable the root user for
access from anywhere
"""
LOG.debug("Enable root user for instance %s.", self.id)
- return self._call("enable_root", AGENT_HIGH_TIMEOUT, self.version_cap)
+ version = self.API_BASE_VERSION
+
+ return self._call("enable_root", AGENT_HIGH_TIMEOUT,
+ version=version)
def enable_root_with_password(self, root_password=None):
"""Make a synchronous call to enable the root user for
access from anywhere
"""
LOG.debug("Enable root user for instance %s.", self.id)
+ version = self.API_BASE_VERSION
+
return self._call("enable_root_with_password", AGENT_HIGH_TIMEOUT,
- self.version_cap, root_password=root_password)
+ version=version, root_password=root_password)
def disable_root(self):
"""Make a synchronous call to disable the root user for
access from anywhere
"""
LOG.debug("Disable root user for instance %s.", self.id)
- return self._call("disable_root", AGENT_LOW_TIMEOUT, self.version_cap)
+ version = self.API_BASE_VERSION
+
+ return self._call("disable_root", AGENT_LOW_TIMEOUT,
+ version=version)
def is_root_enabled(self):
"""Make a synchronous call to check if root access is
available for the container
"""
LOG.debug("Check root access for instance %s.", self.id)
+ version = self.API_BASE_VERSION
+
return self._call("is_root_enabled", AGENT_LOW_TIMEOUT,
- self.version_cap)
+ version=version)
def get_hwinfo(self):
"""Make a synchronous call to get hardware info for the container"""
LOG.debug("Check hwinfo on instance %s.", self.id)
- return self._call("get_hwinfo", AGENT_LOW_TIMEOUT, self.version_cap)
+ version = self.API_BASE_VERSION
+
+ return self._call("get_hwinfo", AGENT_LOW_TIMEOUT,
+ version=version)
def get_diagnostics(self):
"""Make a synchronous call to get diagnostics for the container"""
LOG.debug("Check diagnostics on instance %s.", self.id)
+ version = self.API_BASE_VERSION
+
return self._call("get_diagnostics", AGENT_LOW_TIMEOUT,
- self.version_cap)
+ version=version)
def rpc_ping(self):
"""Make a synchronous RPC call to check if we can ping the instance."""
LOG.debug("Check RPC ping on instance %s.", self.id)
- return self._call("rpc_ping", AGENT_LOW_TIMEOUT, self.version_cap)
+ version = self.API_BASE_VERSION
+
+ return self._call("rpc_ping", AGENT_LOW_TIMEOUT, version=version)
def prepare(self, memory_mb, packages, databases, users,
device_path='/dev/vdb', mount_point='/mnt/volume',
@@ -234,6 +305,8 @@ class API(object):
"""
LOG.debug("Sending the call to prepare the Guest.")
+ version = self.API_BASE_VERSION
+
# Taskmanager is a publisher, guestagent is a consumer. Usually
# consumer creates a queue, but in this case we have to make sure
# "prepare" doesn't get lost if for some reason guest was delayed and
@@ -242,7 +315,7 @@ class API(object):
packages = packages.split()
self._cast(
- "prepare", self.version_cap, packages=packages,
+ "prepare", version=version, packages=packages,
databases=databases, memory_mb=memory_mb, users=users,
device_path=device_path, mount_point=mount_point,
backup_info=backup_info, config_contents=config_contents,
@@ -258,7 +331,7 @@ class API(object):
server = None
target = messaging.Target(topic=self._get_routing_key(),
server=self.id,
- version=rpc_version.RPC_API_VERSION)
+ version=self.API_BASE_VERSION)
try:
server = rpc.get_server(target, [])
server.start()
@@ -270,26 +343,35 @@ class API(object):
def pre_upgrade(self):
"""Prepare the guest for upgrade."""
LOG.debug("Sending the call to prepare the guest for upgrade.")
- return self._call("pre_upgrade", AGENT_HIGH_TIMEOUT, self.version_cap)
+ version = self.API_BASE_VERSION
+
+ return self._call("pre_upgrade", AGENT_HIGH_TIMEOUT,
+ version=version)
def post_upgrade(self, upgrade_info):
"""Recover the guest after upgrading the guest's image."""
LOG.debug("Recover the guest after upgrading the guest's image.")
- self._call("post_upgrade", AGENT_HIGH_TIMEOUT, self.version_cap,
+ version = self.API_BASE_VERSION
+
+ self._call("post_upgrade", AGENT_HIGH_TIMEOUT, version=version,
upgrade_info=upgrade_info)
def restart(self):
"""Restart the database server."""
LOG.debug("Sending the call to restart the database process "
"on the Guest.")
- self._call("restart", AGENT_HIGH_TIMEOUT, self.version_cap)
+ version = self.API_BASE_VERSION
+
+ self._call("restart", AGENT_HIGH_TIMEOUT, version=version)
def start_db_with_conf_changes(self, config_contents):
"""Start the database server."""
LOG.debug("Sending the call to start the database process on "
"the Guest with a timeout of %s." % AGENT_HIGH_TIMEOUT)
+ version = self.API_BASE_VERSION
+
self._call("start_db_with_conf_changes", AGENT_HIGH_TIMEOUT,
- self.version_cap, config_contents=config_contents)
+ version=version, config_contents=config_contents)
def reset_configuration(self, configuration):
"""Ignore running state of the database server; just change
@@ -297,20 +379,26 @@ class API(object):
"""
LOG.debug("Sending the call to change the database conf file on the "
"Guest with a timeout of %s." % AGENT_HIGH_TIMEOUT)
+ version = self.API_BASE_VERSION
+
self._call("reset_configuration", AGENT_HIGH_TIMEOUT,
- self.version_cap, configuration=configuration)
+ version=version, configuration=configuration)
def stop_db(self, do_not_start_on_reboot=False):
"""Stop the database server."""
LOG.debug("Sending the call to stop the database process "
"on the Guest.")
- self._call("stop_db", AGENT_HIGH_TIMEOUT, self.version_cap,
+ version = self.API_BASE_VERSION
+
+ self._call("stop_db", AGENT_HIGH_TIMEOUT, version=version,
do_not_start_on_reboot=do_not_start_on_reboot)
def upgrade(self, instance_version, location, metadata=None):
"""Make an asynchronous call to self upgrade the guest agent."""
LOG.debug("Sending an upgrade call to nova-guest.")
- self._cast("upgrade", self.version_cap,
+ version = self.API_BASE_VERSION
+
+ self._cast("upgrade", version=version,
instance_version=instance_version,
location=location,
metadata=metadata)
@@ -318,158 +406,214 @@ class API(object):
def get_volume_info(self):
"""Make a synchronous call to get volume info for the container."""
LOG.debug("Check Volume Info on instance %s.", self.id)
+ version = self.API_BASE_VERSION
+
return self._call("get_filesystem_stats", AGENT_LOW_TIMEOUT,
- self.version_cap, fs_path=None)
+ version=version, fs_path=None)
def update_guest(self):
"""Make a synchronous call to update the guest agent."""
LOG.debug("Updating guest agent on instance %s.", self.id)
- self._call("update_guest", AGENT_HIGH_TIMEOUT, self.version_cap)
+ version = self.API_BASE_VERSION
+
+ self._call("update_guest", AGENT_HIGH_TIMEOUT, version=version)
def create_backup(self, backup_info):
"""Make async call to create a full backup of this instance."""
LOG.debug("Create Backup %(backup_id)s "
"for instance %(instance_id)s." %
{'backup_id': backup_info['id'], 'instance_id': self.id})
- self._cast("create_backup", self.version_cap, backup_info=backup_info)
+ version = self.API_BASE_VERSION
+
+ self._cast("create_backup", version=version,
+ backup_info=backup_info)
def mount_volume(self, device_path=None, mount_point=None):
"""Mount the volume."""
LOG.debug("Mount volume %(mount)s on instance %(id)s." % {
'mount': mount_point, 'id': self.id})
- self._call("mount_volume", AGENT_LOW_TIMEOUT, self.version_cap,
+ version = self.API_BASE_VERSION
+
+ self._call("mount_volume", AGENT_LOW_TIMEOUT, version=version,
device_path=device_path, mount_point=mount_point)
def unmount_volume(self, device_path=None, mount_point=None):
"""Unmount the volume."""
LOG.debug("Unmount volume %(device)s on instance %(id)s." % {
'device': device_path, 'id': self.id})
- self._call("unmount_volume", AGENT_LOW_TIMEOUT, self.version_cap,
+ version = self.API_BASE_VERSION
+
+ self._call("unmount_volume", AGENT_LOW_TIMEOUT, version=version,
device_path=device_path, mount_point=mount_point)
def resize_fs(self, device_path=None, mount_point=None):
"""Resize the filesystem."""
LOG.debug("Resize device %(device)s on instance %(id)s." % {
'device': device_path, 'id': self.id})
- self._call("resize_fs", AGENT_HIGH_TIMEOUT, self.version_cap,
+ version = self.API_BASE_VERSION
+
+ self._call("resize_fs", AGENT_HIGH_TIMEOUT, version=version,
device_path=device_path, mount_point=mount_point)
def update_overrides(self, overrides, remove=False):
"""Update the overrides."""
LOG.debug("Updating overrides values %(overrides)s on instance "
"%(id)s.", {'overrides': overrides, 'id': self.id})
+ version = self.API_BASE_VERSION
+
self._call("update_overrides", AGENT_HIGH_TIMEOUT,
- self.version_cap, overrides=overrides, remove=remove)
+ version=version, overrides=overrides, remove=remove)
def apply_overrides(self, overrides):
LOG.debug("Applying overrides values %(overrides)s on instance "
"%(id)s.", {'overrides': overrides, 'id': self.id})
- self._call("apply_overrides", AGENT_HIGH_TIMEOUT, self.version_cap,
- overrides=overrides)
+ version = self.API_BASE_VERSION
+
+ self._call("apply_overrides", AGENT_HIGH_TIMEOUT,
+ version=version, overrides=overrides)
def backup_required_for_replication(self):
LOG.debug("Checking backup requirement for replication")
+ version = self.API_BASE_VERSION
+
return self._call("backup_required_for_replication",
AGENT_LOW_TIMEOUT,
- self.version_cap)
+ version=version)
def get_replication_snapshot(self, snapshot_info=None,
replica_source_config=None):
LOG.debug("Retrieving replication snapshot from instance %s.", self.id)
+ version = self.API_BASE_VERSION
+
return self._call("get_replication_snapshot", AGENT_SNAPSHOT_TIMEOUT,
- self.version_cap, snapshot_info=snapshot_info,
+ version=version, snapshot_info=snapshot_info,
replica_source_config=replica_source_config)
def attach_replication_slave(self, snapshot, replica_config=None):
LOG.debug("Configuring instance %s to replicate from %s.",
self.id, snapshot.get('master').get('id'))
- self._cast("attach_replication_slave", self.version_cap,
+ version = self.API_BASE_VERSION
+
+ self._cast("attach_replication_slave", version=version,
snapshot=snapshot, slave_config=replica_config)
def detach_replica(self, for_failover=False):
LOG.debug("Detaching replica %s from its replication source.", self.id)
+ version = self.API_BASE_VERSION
+
return self._call("detach_replica", AGENT_HIGH_TIMEOUT,
- self.version_cap, for_failover=for_failover)
+ version=version, for_failover=for_failover)
def get_replica_context(self):
LOG.debug("Getting replica context.")
+ version = self.API_BASE_VERSION
+
return self._call("get_replica_context",
- AGENT_HIGH_TIMEOUT, self.version_cap)
+ AGENT_HIGH_TIMEOUT, version=version)
def attach_replica(self, replica_info, slave_config):
LOG.debug("Attaching replica %s." % replica_info)
- self._call("attach_replica", AGENT_HIGH_TIMEOUT, self.version_cap,
+ version = self.API_BASE_VERSION
+
+ self._call("attach_replica", AGENT_HIGH_TIMEOUT, version=version,
replica_info=replica_info, slave_config=slave_config)
def make_read_only(self, read_only):
LOG.debug("Executing make_read_only(%s)" % read_only)
- self._call("make_read_only", AGENT_HIGH_TIMEOUT, self.version_cap,
+ version = self.API_BASE_VERSION
+
+ self._call("make_read_only", AGENT_HIGH_TIMEOUT, version=version,
read_only=read_only)
def enable_as_master(self, replica_source_config):
LOG.debug("Executing enable_as_master")
- self._call("enable_as_master", AGENT_HIGH_TIMEOUT, self.version_cap,
+ version = self.API_BASE_VERSION
+
+ self._call("enable_as_master", AGENT_HIGH_TIMEOUT,
+ version=version,
replica_source_config=replica_source_config)
# DEPRECATED: Maintain for API Compatibility
def get_txn_count(self):
LOG.debug("Executing get_txn_count.")
+ version = self.API_BASE_VERSION
+
return self._call("get_txn_count",
- AGENT_HIGH_TIMEOUT, self.version_cap)
+ AGENT_HIGH_TIMEOUT, version=version)
def get_last_txn(self):
LOG.debug("Executing get_last_txn.")
+ version = self.API_BASE_VERSION
+
return self._call("get_last_txn",
- AGENT_HIGH_TIMEOUT, self.version_cap)
+ AGENT_HIGH_TIMEOUT, version=version)
def get_latest_txn_id(self):
LOG.debug("Executing get_latest_txn_id.")
+ version = self.API_BASE_VERSION
+
return self._call("get_latest_txn_id",
- AGENT_HIGH_TIMEOUT, self.version_cap)
+ AGENT_HIGH_TIMEOUT, version=version)
def wait_for_txn(self, txn):
LOG.debug("Executing wait_for_txn.")
- self._call("wait_for_txn", AGENT_HIGH_TIMEOUT, self.version_cap,
+ version = self.API_BASE_VERSION
+
+ self._call("wait_for_txn", AGENT_HIGH_TIMEOUT, version=version,
txn=txn)
def cleanup_source_on_replica_detach(self, replica_info):
LOG.debug("Cleaning up master %s on detach of replica.", self.id)
+ version = self.API_BASE_VERSION
+
self._call("cleanup_source_on_replica_detach", AGENT_HIGH_TIMEOUT,
- self.version_cap, replica_info=replica_info)
+ version=version, replica_info=replica_info)
def demote_replication_master(self):
LOG.debug("Demoting instance %s to non-master.", self.id)
+ version = self.API_BASE_VERSION
+
self._call("demote_replication_master", AGENT_HIGH_TIMEOUT,
- self.version_cap)
+ version=version)
def guest_log_list(self):
LOG.debug("Retrieving guest log list for %s.", self.id)
+ version = self.API_BASE_VERSION
+
result = self._call("guest_log_list", AGENT_HIGH_TIMEOUT,
- self.version_cap)
+ version=version)
LOG.debug("guest_log_list returns %s", result)
return result
def guest_log_action(self, log_name, enable, disable, publish, discard):
LOG.debug("Processing guest log '%s' for %s.", log_name, self.id)
+ version = self.API_BASE_VERSION
+
return self._call("guest_log_action", AGENT_HIGH_TIMEOUT,
- self.version_cap, log_name=log_name,
+ version=version, log_name=log_name,
enable=enable, disable=disable,
publish=publish, discard=discard)
def module_list(self, include_contents):
LOG.debug("Querying modules on %s (contents: %s).",
self.id, include_contents)
+ version = self.API_BASE_VERSION
+
result = self._call("module_list", AGENT_HIGH_TIMEOUT,
- self.version_cap,
+ version=version,
include_contents=include_contents)
return result
def module_apply(self, modules):
LOG.debug("Applying modules to %s.", self.id)
+ version = self.API_BASE_VERSION
+
return self._call("module_apply", AGENT_HIGH_TIMEOUT,
- self.version_cap, modules=modules)
+ version=version, modules=modules)
def module_remove(self, module):
LOG.debug("Removing modules from %s.", self.id)
+ version = self.API_BASE_VERSION
+
return self._call("module_remove", AGENT_HIGH_TIMEOUT,
- self.version_cap, module=module)
+ version=version, module=module)
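
The guest API now documents its own version history and resolves CONF.upgrade_levels.guestagent through VERSION_ALIASES, falling back to the raw value when it is not a known alias, so operators can pin either a release name or an explicit version number. Each RPC then passes an explicit version instead of a cached self.version_cap. A small standalone sketch of the alias resolution:

    API_LATEST_VERSION = '1.0'
    API_BASE_VERSION = '1.0'
    VERSION_ALIASES = {
        'icehouse': '1.0', 'juno': '1.0', 'kilo': '1.0',
        'liberty': '1.0', 'mitaka': '1.0', 'newton': '1.0',
        'latest': API_LATEST_VERSION,
    }

    def resolve_version_cap(configured):
        # Known alias -> mapped version; anything else ('1.0', None, ...)
        # is used as-is, mirroring VERSION_ALIASES.get(value, value).
        return VERSION_ALIASES.get(configured, configured)

    print(resolve_version_cap('mitaka'))  # 1.0
    print(resolve_version_cap('latest'))  # 1.0
    print(resolve_version_cap(None))      # None -> no cap configured
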
diff --git a/trove/guestagent/common/operating_system.py b/trove/guestagent/common/operating_system.py
index aacb1391..282d8857 100644
--- a/trove/guestagent/common/operating_system.py
+++ b/trove/guestagent/common/operating_system.py
@@ -817,3 +817,23 @@ def _build_command_options(options):
"""
return ['-' + item[0] for item in options if item[1]]
+
+
+def get_device(path, as_root=False):
+ """Get the device that a given path exists on."""
+ stdout = _execute_shell_cmd('df', [], path, as_root=as_root)
+ return stdout.splitlines()[1].split()[0]
+
+
+def is_mount(path):
+ """Check if the given directory path is a mountpoint. Try the standard
+ ismount first. This fails if the path is not accessible though, so resort
+ to checking as the root user (which is slower).
+ """
+ if os.access(path, os.R_OK):
+ return os.path.ismount(path)
+ if not exists(path, is_directory=True, as_root=True):
+ return False
+ directory_dev = get_device(path, as_root=True)
+ parent_dev = get_device(os.path.join(path, '..'), as_root=True)
+ return directory_dev != parent_dev
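
is_mount falls back to comparing backing devices when the path is only readable as root: if a directory and its parent sit on different devices, the directory is a mount point. A standalone approximation using only the standard library and an unprivileged df call (unlike the Trove helper, which shells out as root):

    import os
    import subprocess

    def get_device(path):
        # 'df <path>' prints a header line, then the filesystem backing <path>.
        out = subprocess.check_output(['df', path]).decode()
        return out.splitlines()[1].split()[0]

    def is_mount(path):
        if os.access(path, os.R_OK):
            return os.path.ismount(path)
        if not os.path.isdir(path):
            return False
        # Different devices for the directory and its parent means the
        # directory is a mount point.
        return get_device(path) != get_device(os.path.join(path, '..'))

    print(is_mount('/'))     # True
    print(is_mount('/tmp'))  # depends on the host's layout
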
diff --git a/trove/guestagent/datastore/experimental/mariadb/service.py b/trove/guestagent/datastore/experimental/mariadb/service.py
index b1bb4f05..a851e023 100644
--- a/trove/guestagent/datastore/experimental/mariadb/service.py
+++ b/trove/guestagent/datastore/experimental/mariadb/service.py
@@ -37,7 +37,7 @@ class MariaDBApp(galera_service.GaleraApp):
def service_candidates(self):
service_candidates = super(MariaDBApp, self).service_candidates
return {
- operating_system.DEBIAN: service_candidates,
+ operating_system.DEBIAN: ["mariadb"] + service_candidates,
operating_system.REDHAT: ["mariadb"],
operating_system.SUSE: service_candidates
}[self.OS]
diff --git a/trove/guestagent/datastore/experimental/postgresql/manager.py b/trove/guestagent/datastore/experimental/postgresql/manager.py
index 84051312..b945131c 100644
--- a/trove/guestagent/datastore/experimental/postgresql/manager.py
+++ b/trove/guestagent/datastore/experimental/postgresql/manager.py
@@ -159,7 +159,8 @@ class Manager(manager.Manager):
self.app.stop_db()
if 'device' in upgrade_info:
self.mount_volume(context, mount_point=upgrade_info['mount_point'],
- device_path=upgrade_info['device'])
+ device_path=upgrade_info['device'],
+ write_to_fstab=True)
self.app.restore_files_post_upgrade(upgrade_info)
self.app.start_db()
diff --git a/trove/guestagent/datastore/experimental/postgresql/service.py b/trove/guestagent/datastore/experimental/postgresql/service.py
index a59d7110..1f7ce687 100644
--- a/trove/guestagent/datastore/experimental/postgresql/service.py
+++ b/trove/guestagent/datastore/experimental/postgresql/service.py
@@ -561,6 +561,8 @@ class PgSqlApp(object):
force=True, as_root=True)
operating_system.remove(upgrade_info['save_etc'], force=True,
as_root=True)
+ self.configuration_manager.refresh_cache()
+ self.status.set_ready()
class PgSqlAppStatus(service.BaseDbStatus):
diff --git a/trove/guestagent/datastore/mysql_common/manager.py b/trove/guestagent/datastore/mysql_common/manager.py
index b670dd0f..07be748e 100644
--- a/trove/guestagent/datastore/mysql_common/manager.py
+++ b/trove/guestagent/datastore/mysql_common/manager.py
@@ -290,6 +290,7 @@ class MySqlManager(manager.Manager):
operating_system.copy("%s/." % upgrade_info['home_save'],
os.path.expanduser('~'),
preserve=True, as_root=True)
+ self.configuration_manager.refresh_cache()
app.start_mysql()
def restart(self, context):
diff --git a/trove/guestagent/datastore/mysql_common/service.py b/trove/guestagent/datastore/mysql_common/service.py
index 0e4d8e23..b1714327 100644
--- a/trove/guestagent/datastore/mysql_common/service.py
+++ b/trove/guestagent/datastore/mysql_common/service.py
@@ -454,7 +454,7 @@ class BaseMySqlAdmin(object):
next_marker = None
LOG.debug("database_names = %r." % database_names)
for count, database in enumerate(database_names):
- if count >= limit:
+ if limit is not None and count >= limit:
break
LOG.debug("database = %s." % str(database))
mysql_db = models.MySQLSchema(name=database[0],
@@ -517,7 +517,7 @@ class BaseMySqlAdmin(object):
next_marker = None
LOG.debug("result = " + str(result))
for count, row in enumerate(result):
- if count >= limit:
+ if limit is not None and count >= limit:
break
LOG.debug("user = " + str(row))
mysql_user = models.MySQLUser(name=row['User'],
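
The added `limit is not None` guard matters because callers may list databases or users without any limit: on Python 2 an integer always compares greater than None, so `count >= limit` broke out of the loop immediately and returned nothing, while on Python 3 the same comparison raises TypeError. A tiny illustration of the corrected paging loop:

    def page(items, limit=None):
        result = []
        for count, item in enumerate(items):
            if limit is not None and count >= limit:
                break
            result.append(item)
        return result

    print(page(['db1', 'db2', 'db3'], limit=2))  # ['db1', 'db2']
    print(page(['db1', 'db2', 'db3']))           # all three, no TypeError
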
diff --git a/trove/guestagent/datastore/service.py b/trove/guestagent/datastore/service.py
index f5042f3a..a3af9af8 100644
--- a/trove/guestagent/datastore/service.py
+++ b/trove/guestagent/datastore/service.py
@@ -97,16 +97,19 @@ class BaseDbStatus(object):
"""Called before restarting DB server."""
self.restart_mode = True
+ def set_ready(self):
+ prepare_end_file = guestagent_utils.build_file_path(
+ self.GUESTAGENT_DIR, self.PREPARE_END_FILENAME)
+ operating_system.write_file(prepare_end_file, '')
+ self.__refresh_prepare_completed()
+
def end_install(self, error_occurred=False, post_processing=False):
"""Called after prepare has ended."""
# Set the "we're done" flag if there's no error and
# no post_processing is necessary
if not (error_occurred or post_processing):
- prepare_end_file = guestagent_utils.build_file_path(
- self.GUESTAGENT_DIR, self.PREPARE_END_FILENAME)
- operating_system.write_file(prepare_end_file, '')
- self.__refresh_prepare_completed()
+ self.set_ready()
final_status = None
if error_occurred:
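
Factoring the marker-file write out of end_install into set_ready lets other paths, such as the PostgreSQL post-upgrade hook above, flag the guest as prepared again without re-running the install logic. A rough sketch of the marker-file pattern, with a temp directory standing in for GUESTAGENT_DIR:

    import os
    import tempfile

    GUESTAGENT_DIR = tempfile.mkdtemp()
    PREPARE_END_FILENAME = 'prepare.end'

    def set_ready():
        # Touch the marker so status reporting knows prepare has completed.
        marker = os.path.join(GUESTAGENT_DIR, PREPARE_END_FILENAME)
        open(marker, 'w').close()

    def prepare_completed():
        return os.path.isfile(os.path.join(GUESTAGENT_DIR, PREPARE_END_FILENAME))

    set_ready()
    print(prepare_completed())  # True
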
diff --git a/trove/guestagent/guest_log.py b/trove/guestagent/guest_log.py
index 23be944a..26170c4b 100644
--- a/trove/guestagent/guest_log.py
+++ b/trove/guestagent/guest_log.py
@@ -209,8 +209,7 @@ class GuestLog(object):
'metafile': self._metafile_name()
}
else:
- raise exception.UnauthorizedRequest(_(
- "Not authorized to show log '%s'.") % self._name)
+ raise exception.LogAccessForbidden(action='show', log=self._name)
def _refresh_details(self):
@@ -310,16 +309,16 @@ class GuestLog(object):
self._file)
return self.show()
else:
- raise exception.UnauthorizedRequest(_(
- "Not authorized to publish log '%s'.") % self._name)
+ raise exception.LogAccessForbidden(
+ action='publish', log=self._name)
def discard_log(self):
if self.exposed:
self._delete_log_components()
return self.show()
else:
- raise exception.UnauthorizedRequest(_(
- "Not authorized to discard log '%s'.") % self._name)
+ raise exception.LogAccessForbidden(
+ action='discard', log=self._name)
def _delete_log_components(self):
container_name = self.get_container_name(force=True)
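
The three ad-hoc UnauthorizedRequest messages collapse into one LogAccessForbidden exception parameterized by the action and the log name. Assuming Trove's usual template-style exceptions, it likely looks roughly like the sketch below (the message template here is a guess, not the actual definition):

    class LogAccessForbidden(Exception):
        message = "You must be admin to %(action)s log '%(log)s'."

        def __init__(self, **kwargs):
            super(LogAccessForbidden, self).__init__(self.message % kwargs)

    try:
        raise LogAccessForbidden(action='publish', log='general')
    except LogAccessForbidden as exc:
        print(exc)  # You must be admin to publish log 'general'.
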
diff --git a/trove/guestagent/strategies/backup/experimental/db2_impl.py b/trove/guestagent/strategies/backup/experimental/db2_impl.py
index 855fd5c4..0b71b550 100644
--- a/trove/guestagent/strategies/backup/experimental/db2_impl.py
+++ b/trove/guestagent/strategies/backup/experimental/db2_impl.py
@@ -130,7 +130,7 @@ class DB2OnlineBackup(DB2Backup):
log_size = log_size + int(out[0])
log_size = log_size * 1024
except exception.ProcessExecutionError:
- LOG.exception(_("An error occured while trying to estimate log "
+ LOG.exception(_("An error occurred while trying to estimate log "
"size"))
LOG.debug("Estimated log size for all databases: " + str(log_size))
return log_size
diff --git a/trove/guestagent/volume.py b/trove/guestagent/volume.py
index 5427a2fe..f78965be 100644
--- a/trove/guestagent/volume.py
+++ b/trove/guestagent/volume.py
@@ -14,14 +14,14 @@
# under the License.
import os
+import shlex
from tempfile import NamedTemporaryFile
+import traceback
from oslo_log import log as logging
-import pexpect
from trove.common import cfg
-from trove.common.exception import GuestError
-from trove.common.exception import ProcessExecutionError
+from trove.common import exception
from trove.common.i18n import _
from trove.common import utils
from trove.guestagent.common import operating_system
@@ -32,6 +32,12 @@ LOG = logging.getLogger(__name__)
CONF = cfg.CONF
+def log_and_raise(message):
+ LOG.exception(message)
+ raise_msg = message + _("\nExc: %s") % traceback.format_exc()
+ raise exception.GuestError(original_message=raise_msg)
+
+
class VolumeDevice(object):
def __init__(self, device_path):
@@ -47,9 +53,14 @@ class VolumeDevice(object):
target_dir = TMP_MOUNT_POINT
if target_subdir:
target_dir = target_dir + "/" + target_subdir
- utils.execute("sudo", "rsync", "--safe-links", "--perms",
- "--recursive", "--owner", "--group", "--xattrs",
- "--sparse", source_dir, target_dir)
+ try:
+ utils.execute("rsync", "--safe-links", "--perms",
+ "--recursive", "--owner", "--group", "--xattrs",
+ "--sparse", source_dir, target_dir,
+ run_as_root=True, root_helper="sudo")
+ except exception.ProcessExecutionError:
+ msg = _("Could not migrate data.")
+ log_and_raise(msg)
self.unmount(TMP_MOUNT_POINT)
def _check_device_exists(self):
@@ -63,46 +74,48 @@ class VolumeDevice(object):
num_tries = CONF.num_tries
LOG.debug("Checking if %s exists." % self.device_path)
- utils.execute('sudo', 'blockdev', '--getsize64', self.device_path,
+ utils.execute("blockdev", "--getsize64", self.device_path,
+ run_as_root=True, root_helper="sudo",
attempts=num_tries)
- except ProcessExecutionError:
- LOG.exception(_("Error getting device status"))
- raise GuestError(original_message=_(
- "InvalidDevicePath(path=%s)") % self.device_path)
+ except exception.ProcessExecutionError:
+ msg = _("Device '%s' is not ready.") % self.device_path
+ log_and_raise(msg)
def _check_format(self):
- """Checks that an unmounted volume is formatted."""
- cmd = "sudo dumpe2fs %s" % self.device_path
- LOG.debug("Checking whether %s is formatted: %s." %
- (self.device_path, cmd))
-
- child = pexpect.spawn(cmd)
+ """Checks that a volume is formatted."""
+ LOG.debug("Checking whether '%s' is formatted." % self.device_path)
try:
- i = child.expect(['has_journal', 'Wrong magic number'])
- if i == 0:
- return
- volume_fstype = CONF.volume_fstype
- raise IOError(
- _('Device path at {0} did not seem to be {1}.').format(
- self.device_path, volume_fstype))
-
- except pexpect.EOF:
- raise IOError(_("Volume was not formatted."))
- child.expect(pexpect.EOF)
+ stdout, stderr = utils.execute(
+ "dumpe2fs", self.device_path,
+ run_as_root=True, root_helper="sudo")
+ if 'has_journal' not in stdout:
+ msg = _("Volume '%s' does not appear to be formatted.") % (
+ self.device_path)
+ raise exception.GuestError(original_message=msg)
+ except exception.ProcessExecutionError as pe:
+ if 'Wrong magic number' in pe.stderr:
+ volume_fstype = CONF.volume_fstype
+ msg = _("'Device '%(dev)s' did not seem to be '%(type)s'.") % (
+ {'dev': self.device_path, 'type': volume_fstype})
+ log_and_raise(msg)
+ msg = _("Volume '%s' was not formatted.") % self.device_path
+ log_and_raise(msg)
def _format(self):
"""Calls mkfs to format the device at device_path."""
volume_fstype = CONF.volume_fstype
- format_options = CONF.format_options
- cmd = "sudo mkfs -t %s %s %s" % (volume_fstype,
- format_options, self.device_path)
+ format_options = shlex.split(CONF.format_options)
+ format_options.append(self.device_path)
volume_format_timeout = CONF.volume_format_timeout
- LOG.debug("Formatting %s. Executing: %s." %
- (self.device_path, cmd))
- child = pexpect.spawn(cmd, timeout=volume_format_timeout)
- # child.expect("(y,n)")
- # child.sendline('y')
- child.expect(pexpect.EOF)
+ LOG.debug("Formatting '%s'." % self.device_path)
+ try:
+ utils.execute_with_timeout(
+ "mkfs", "--type", volume_fstype, *format_options,
+ run_as_root=True, root_helper="sudo",
+ timeout=volume_format_timeout)
+ except exception.ProcessExecutionError:
+ msg = _("Could not format '%s'.") % self.device_path
+ log_and_raise(msg)
def format(self):
"""Formats the device at device_path and checks the filesystem."""
@@ -119,62 +132,77 @@ class VolumeDevice(object):
if write_to_fstab:
mount_point.write_to_fstab()
+ def _wait_for_mount(self, mount_point, timeout=2):
+ """Wait for a fs to be mounted."""
+ def wait_for_mount():
+ return operating_system.is_mount(mount_point)
+
+ try:
+ utils.poll_until(wait_for_mount, sleep_time=1, time_out=timeout)
+ except exception.PollTimeOut:
+ return False
+
+ return True
+
def resize_fs(self, mount_point):
"""Resize the filesystem on the specified device."""
self._check_device_exists()
+ # Some OS's will mount a file system after it's attached if
+ # an entry is put in the fstab file (like Trove does).
+ # Thus it may be necessary to wait for the mount and then unmount
+ # the fs again (since the volume was just attached).
+ if self._wait_for_mount(mount_point, timeout=2):
+ LOG.debug("Unmounting '%s' before resizing." % mount_point)
+ self.unmount(mount_point)
try:
- # check if the device is mounted at mount_point before e2fsck
- if not os.path.ismount(mount_point):
- utils.execute("e2fsck", "-f", "-p", self.device_path,
- run_as_root=True, root_helper="sudo")
+ utils.execute("e2fsck", "-f", "-p", self.device_path,
+ run_as_root=True, root_helper="sudo")
utils.execute("resize2fs", self.device_path,
run_as_root=True, root_helper="sudo")
- except ProcessExecutionError:
- LOG.exception(_("Error resizing file system."))
- raise GuestError(original_message=_(
- "Error resizing the filesystem: %s") % self.device_path)
+ except exception.ProcessExecutionError:
+ msg = _("Error resizing the filesystem with device '%s'.") % (
+ self.device_path)
+ log_and_raise(msg)
def unmount(self, mount_point):
- if os.path.exists(mount_point):
- cmd = "sudo umount %s" % mount_point
- child = pexpect.spawn(cmd)
- child.expect(pexpect.EOF)
+ if operating_system.is_mount(mount_point):
+ try:
+ utils.execute("umount", mount_point,
+ run_as_root=True, root_helper='sudo')
+ except exception.ProcessExecutionError:
+ msg = _("Error unmounting '%s'.") % mount_point
+ log_and_raise(msg)
+ else:
+ LOG.debug("'%s' is not a mounted fs, cannot unmount", mount_point)
def unmount_device(self, device_path):
# unmount if device is already mounted
mount_points = self.mount_points(device_path)
for mnt in mount_points:
- LOG.info(_("Device %(device)s is already mounted in "
- "%(mount_point)s. Unmounting now.") %
+ LOG.info(_("Device '%(device)s' is mounted on "
+ "'%(mount_point)s'. Unmounting now.") %
{'device': device_path, 'mount_point': mnt})
self.unmount(mnt)
def mount_points(self, device_path):
"""Returns a list of mount points on the specified device."""
- try:
- cmd = "grep %s /etc/mtab | awk '{print $2}'" % device_path
- stdout, stderr = utils.execute(cmd, shell=True)
- return stdout.strip().split('\n')
-
- except ProcessExecutionError:
- LOG.exception(_("Error retrieving mount points"))
- raise GuestError(original_message=_(
- "Could not obtain a list of mount points for device: %s") %
- device_path)
-
- def set_readahead_size(self, readahead_size,
- execute_function=utils.execute):
+ stdout, stderr = utils.execute(
+ "grep '^%s ' /etc/mtab" % device_path,
+ shell=True, check_exit_code=[0, 1])
+ return [entry.strip().split()[1] for entry in stdout.splitlines()]
+
+ def set_readahead_size(self, readahead_size):
"""Set the readahead size of disk."""
self._check_device_exists()
try:
- execute_function("sudo", "blockdev", "--setra",
- readahead_size, self.device_path)
- except ProcessExecutionError:
- LOG.exception(_("Error setting readhead size to %(size)s "
- "for device %(device)s.") %
- {'size': readahead_size, 'device': self.device_path})
- raise GuestError(original_message=_(
- "Error setting readhead size: %s.") % self.device_path)
+ utils.execute("blockdev", "--setra",
+ readahead_size, self.device_path,
+ run_as_root=True, root_helper="sudo")
+ except exception.ProcessExecutionError:
+ msg = _("Error setting readahead size to %(size)s "
+ "for device %(device)s.") % {
+ 'size': readahead_size, 'device': self.device_path}
+ log_and_raise(msg)
class VolumeMountPoint(object):
@@ -186,17 +214,21 @@ class VolumeMountPoint(object):
self.mount_options = CONF.mount_options
def mount(self):
- if not os.path.exists(self.mount_point):
+ if not operating_system.exists(self.mount_point, is_directory=True,
+ as_root=True):
operating_system.create_directory(self.mount_point, as_root=True)
LOG.debug("Mounting volume. Device path:{0}, mount_point:{1}, "
"volume_type:{2}, mount options:{3}".format(
self.device_path, self.mount_point, self.volume_fstype,
self.mount_options))
- cmd = ("sudo mount -t %s -o %s %s %s" %
- (self.volume_fstype, self.mount_options, self.device_path,
- self.mount_point))
- child = pexpect.spawn(cmd)
- child.expect(pexpect.EOF)
+ try:
+ utils.execute("mount", "-t", self.volume_fstype,
+ "-o", self.mount_options,
+ self.device_path, self.mount_point,
+ run_as_root=True, root_helper="sudo")
+ except exception.ProcessExecutionError:
+ msg = _("Could not mount '%s'.") % self.mount_point
+ log_and_raise(msg)
def write_to_fstab(self):
fstab_line = ("%s\t%s\t%s\t%s\t0\t0" %
@@ -207,6 +239,11 @@ class VolumeMountPoint(object):
fstab_content = fstab.read()
with NamedTemporaryFile(mode='w', delete=False) as tempfstab:
tempfstab.write(fstab_content + fstab_line)
- utils.execute("sudo", "install", "-o", "root", "-g", "root", "-m",
- "644", tempfstab.name, "/etc/fstab")
+ try:
+ utils.execute("install", "-o", "root", "-g", "root",
+ "-m", "644", tempfstab.name, "/etc/fstab",
+ run_as_root=True, root_helper="sudo")
+ except exception.ProcessExecutionError:
+ msg = _("Could not add '%s' to fstab.") % self.mount_point
+ log_and_raise(msg)
os.remove(tempfstab.name)
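
Throughout volume.py, pexpect and literal "sudo" prefixes give way to utils.execute(..., run_as_root=True, root_helper="sudo") plus one log_and_raise helper that logs the failure and re-raises a GuestError with the traceback attached. A simplified standalone version of that calling pattern, using subprocess in place of Trove's utils.execute:

    import logging
    import subprocess
    import traceback

    LOG = logging.getLogger(__name__)

    class GuestError(Exception):
        pass

    def log_and_raise(message):
        # Log the active exception, then re-raise with the traceback attached.
        LOG.exception(message)
        raise GuestError(message + "\nExc: %s" % traceback.format_exc())

    def unmount(mount_point):
        try:
            # Stands in for utils.execute("umount", mount_point,
            # run_as_root=True, root_helper="sudo").
            subprocess.check_call(['sudo', 'umount', mount_point])
        except subprocess.CalledProcessError:
            log_and_raise("Error unmounting '%s'." % mount_point)
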
diff --git a/trove/instance/models.py b/trove/instance/models.py
index 8b3668e6..56f59799 100644
--- a/trove/instance/models.py
+++ b/trove/instance/models.py
@@ -27,6 +27,7 @@ from oslo_log import log as logging
from trove.backup.models import Backup
from trove.common import cfg
from trove.common import exception
+from trove.common.glance_remote import create_glance_client
from trove.common.i18n import _, _LE, _LI, _LW
import trove.common.instance as tr_instance
from trove.common.notification import StartNotification
@@ -36,6 +37,7 @@ from trove.common.remote import create_guest_client
from trove.common.remote import create_nova_client
from trove.common import server_group as srv_grp
from trove.common import template
+from trove.common.trove_remote import create_trove_client
from trove.common import utils
from trove.configuration.models import Configuration
from trove.datastore import models as datastore_models
@@ -62,7 +64,7 @@ def filter_ips(ips, white_list_regex, black_list_regex):
and not re.search(black_list_regex, ip)]
-def load_server(context, instance_id, server_id):
+def load_server(context, instance_id, server_id, region_name):
"""
Loads a server or raises an exception.
:param context: request context used to access nova
@@ -74,7 +76,7 @@ def load_server(context, instance_id, server_id):
:type server_id: unicode
:rtype: novaclient.v2.servers.Server
"""
- client = create_nova_client(context)
+ client = create_nova_client(context, region_name=region_name)
try:
server = client.servers.get(server_id)
except nova_exceptions.NotFound:
@@ -120,7 +122,7 @@ def load_simple_instance_server_status(context, db_info):
db_info.server_status = "BUILD"
db_info.addresses = {}
else:
- client = create_nova_client(context)
+ client = create_nova_client(context, db_info.region_id)
try:
server = client.servers.get(db_info.compute_instance_id)
db_info.server_status = server.status
@@ -427,6 +429,10 @@ class SimpleInstance(object):
def shard_id(self):
return self.db_info.shard_id
+ @property
+ def region_name(self):
+ return self.db_info.region_id
+
class DetailInstance(SimpleInstance):
"""A detailed view of an Instance.
@@ -511,7 +517,8 @@ def load_instance(cls, context, id, needs_server=False,
else:
try:
server = load_server(context, db_info.id,
- db_info.compute_instance_id)
+ db_info.compute_instance_id,
+ region_name=db_info.region_id)
# TODO(tim.simpson): Remove this hack when we have notifications!
db_info.server_status = server.status
db_info.addresses = server.addresses
@@ -547,7 +554,7 @@ def load_guest_info(instance, context, id):
instance.volume_used = volume_info['used']
instance.volume_total = volume_info['total']
except Exception as e:
- LOG.error(e)
+ LOG.exception(e)
return instance
@@ -646,8 +653,8 @@ class BaseInstance(SimpleInstance):
self.set_instance_fault_deleted()
# Delete associated security group
if CONF.trove_security_groups_support:
- SecurityGroup.delete_for_instance(self.db_info.id,
- self.context)
+ SecurityGroup.delete_for_instance(self.db_info.id, self.context,
+ self.db_info.region_id)
@property
def guest(self):
@@ -658,7 +665,8 @@ class BaseInstance(SimpleInstance):
@property
def nova_client(self):
if not self._nova_client:
- self._nova_client = create_nova_client(self.context)
+ self._nova_client = create_nova_client(
+ self.context, region_name=self.db_info.region_id)
return self._nova_client
def update_db(self, **values):
@@ -684,7 +692,8 @@ class BaseInstance(SimpleInstance):
@property
def volume_client(self):
if not self._volume_client:
- self._volume_client = create_cinder_client(self.context)
+ self._volume_client = create_cinder_client(
+ self.context, region_name=self.db_info.region_id)
return self._volume_client
def reset_task_status(self):
@@ -774,12 +783,60 @@ class Instance(BuiltInstance):
return False
@classmethod
+ def _validate_remote_datastore(cls, context, region_name, flavor,
+ datastore, datastore_version):
+ remote_nova_client = create_nova_client(context,
+ region_name=region_name)
+ try:
+ remote_flavor = remote_nova_client.flavors.get(flavor.id)
+ if (flavor.ram != remote_flavor.ram or
+ flavor.vcpus != remote_flavor.vcpus):
+ raise exception.TroveError(
+ "Flavors differ between regions"
+ " %(local)s and %(remote)s." %
+ {'local': CONF.os_region_name, 'remote': region_name})
+ except nova_exceptions.NotFound:
+ raise exception.TroveError(
+ "Flavors %(flavor)s not found in region %(remote)s."
+ % {'flavor': flavor.id, 'remote': region_name})
+
+ remote_trove_client = create_trove_client(
+ context, region_name=region_name)
+ try:
+ remote_ds_ver = remote_trove_client.datastore_versions.get(
+ datastore.name, datastore_version.name)
+ if datastore_version.name != remote_ds_ver.name:
+ raise exception.TroveError(
+ "Datastore versions differ between regions "
+ "%(local)s and %(remote)s." %
+ {'local': CONF.os_region_name, 'remote': region_name})
+ except exception.NotFound:
+ raise exception.TroveError(
+ "Datastore Version %(dsv)s not found in region %(remote)s."
+ % {'dsv': datastore_version.name, 'remote': region_name})
+
+ glance_client = create_glance_client(context)
+ local_image = glance_client.images.get(datastore_version.image)
+ remote_glance_client = create_glance_client(
+ context, region_name=region_name)
+ remote_image = remote_glance_client.images.get(
+ remote_ds_ver.image)
+ if local_image.checksum != remote_image.checksum:
+ raise exception.TroveError(
+ "Images for Datastore %(ds)s do not match"
+ "between regions %(local)s and %(remote)s." %
+ {'ds': datastore.name, 'local': CONF.os_region_name,
+ 'remote': region_name})
+
+ @classmethod
def create(cls, context, name, flavor_id, image_id, databases, users,
datastore, datastore_version, volume_size, backup_id,
availability_zone=None, nics=None,
configuration_id=None, slave_of_id=None, cluster_config=None,
replica_count=None, volume_type=None, modules=None,
- locality=None):
+ locality=None, region_name=None):
+
+ region_name = region_name or CONF.os_region_name
call_args = {
'name': name,
@@ -788,6 +845,7 @@ class Instance(BuiltInstance):
'datastore_version': datastore_version.name,
'image_id': image_id,
'availability_zone': availability_zone,
+ 'region_name': region_name,
}
# All nova flavors are permitted for a datastore-version unless one
@@ -812,6 +870,12 @@ class Instance(BuiltInstance):
except nova_exceptions.NotFound:
raise exception.FlavorNotFound(uuid=flavor_id)
+ # If a different region is specified for the instance, ensure
+ # that the flavor and image are the same in both regions
+ if region_name and region_name != CONF.os_region_name:
+ cls._validate_remote_datastore(context, region_name, flavor,
+ datastore, datastore_version)
+
deltas = {'instances': 1}
volume_support = datastore_cfg.volume_support
if volume_support:
@@ -945,10 +1009,12 @@ class Instance(BuiltInstance):
task_status=InstanceTasks.BUILDING,
configuration_id=configuration_id,
slave_of_id=slave_of_id, cluster_id=cluster_id,
- shard_id=shard_id, type=instance_type)
+ shard_id=shard_id, type=instance_type,
+ region_id=region_name)
LOG.debug("Tenant %(tenant)s created new Trove instance "
- "%(db)s.",
- {'tenant': context.tenant, 'db': db_info.id})
+ "%(db)s in region %(region)s.",
+ {'tenant': context.tenant, 'db': db_info.id,
+ 'region': region_name})
instance_id = db_info.id
cls.add_instance_modules(context, instance_id, modules)
@@ -1009,8 +1075,7 @@ class Instance(BuiltInstance):
context, instance_id, module.id, module.md5)
def get_flavor(self):
- client = create_nova_client(self.context)
- return client.flavors.get(self.flavor_id)
+ return self.nova_client.flavors.get(self.flavor_id)
def get_default_configuration_template(self):
flavor = self.get_flavor()
@@ -1036,13 +1101,12 @@ class Instance(BuiltInstance):
raise exception.BadRequest(_("The new flavor id must be different "
"than the current flavor id of '%s'.")
% self.flavor_id)
- client = create_nova_client(self.context)
try:
- new_flavor = client.flavors.get(new_flavor_id)
+ new_flavor = self.nova_client.flavors.get(new_flavor_id)
except nova_exceptions.NotFound:
raise exception.FlavorNotFound(uuid=new_flavor_id)
- old_flavor = client.flavors.get(self.flavor_id)
+ old_flavor = self.nova_client.flavors.get(self.flavor_id)
if self.volume_support:
if new_flavor.ephemeral != 0:
raise exception.LocalStorageNotSupported()
@@ -1322,8 +1386,8 @@ class Instances(object):
@staticmethod
def load(context, include_clustered, instance_ids=None):
- def load_simple_instance(context, db, status, **kwargs):
- return SimpleInstance(context, db, status)
+ def load_simple_instance(context, db_info, status, **kwargs):
+ return SimpleInstance(context, db_info, status)
if context is None:
raise TypeError("Argument context not defined.")
@@ -1359,9 +1423,18 @@ class Instances(object):
def load_all_by_cluster_id(context, cluster_id, load_servers=True):
db_instances = DBInstance.find_all(cluster_id=cluster_id,
deleted=False)
- return [load_any_instance(context, db_inst.id,
- load_server=load_servers)
- for db_inst in db_instances]
+ db_insts = []
+ for db_instance in db_instances:
+ try:
+ db_inst = load_any_instance(
+ context, db_instance.id, load_server=load_servers)
+ db_insts.append(db_inst)
+ except exception.NotFound:
+ # The instance may be gone if we're in the middle of a
+ # shrink operation, so just log and continue
+ LOG.debug("Instance %s is no longer available, skipping." %
+ db_instance.id)
+ return db_insts
@staticmethod
def _load_servers_status(load_instance, context, db_items, find_server):
@@ -1375,7 +1448,14 @@ class Instances(object):
db.addresses = {}
else:
try:
- server = find_server(db.id, db.compute_instance_id)
+ if (not db.region_id
+ or db.region_id == CONF.os_region_name):
+ server = find_server(db.id, db.compute_instance_id)
+ else:
+ nova_client = create_nova_client(
+ context, region_name=db.region_id)
+ server = nova_client.servers.get(
+ db.compute_instance_id)
db.server_status = server.status
db.addresses = server.addresses
except exception.ComputeInstanceNotFound:
@@ -1402,13 +1482,12 @@ class Instances(object):
class DBInstance(dbmodels.DatabaseModelBase):
- """Defines the task being executed plus the start time."""
_data_fields = ['name', 'created', 'compute_instance_id',
'task_id', 'task_description', 'task_start_time',
'volume_id', 'deleted', 'tenant_id',
'datastore_version_id', 'configuration_id', 'slave_of_id',
- 'cluster_id', 'shard_id', 'type']
+ 'cluster_id', 'shard_id', 'type', 'region_id']
def __init__(self, task_status, **kwargs):
"""
@@ -1455,7 +1534,7 @@ def persist_instance_fault(notification, event_qualifier):
save_instance_fault(instance_id, message, details)
-def save_instance_fault(instance_id, message, details):
+def save_instance_fault(instance_id, message, details, skip_delta=None):
if instance_id:
try:
# Make sure it's a valid id - sometimes the error is related
@@ -1465,8 +1544,19 @@ def save_instance_fault(instance_id, message, details):
det = utils.format_output(details)
try:
fault = DBInstanceFault.find_by(instance_id=instance_id)
- fault.set_info(msg, det)
- fault.save()
+ skip = False
+ # If we were passed in a skip_delta, only update the fault
+ # if the old one is at least skip_delta seconds in the past
+ if skip_delta:
+ skip_time = fault.updated + timedelta(seconds=skip_delta)
+ now = datetime.now()
+ skip = now < skip_time
+ if skip:
+ LOG.debug(
+ "Skipping fault message in favor of previous one")
+ else:
+ fault.set_info(msg, det)
+ fault.save()
except exception.ModelNotFoundError:
DBInstanceFault.create(
instance_id=instance_id,
diff --git a/trove/instance/service.py b/trove/instance/service.py
index 7c819da1..686e3e53 100644
--- a/trove/instance/service.py
+++ b/trove/instance/service.py
@@ -20,12 +20,14 @@ import webob.exc
from trove.backup.models import Backup as backup_model
from trove.backup import views as backup_views
import trove.common.apischema as apischema
+from trove.common import cfg
from trove.common import exception
from trove.common.i18n import _
from trove.common.i18n import _LI
from trove.common import notification
from trove.common.notification import StartNotification
from trove.common import pagination
+from trove.common import policy
from trove.common.remote import create_guest_client
from trove.common import utils
from trove.common import wsgi
@@ -37,6 +39,7 @@ from trove.module import models as module_models
from trove.module import views as module_views
+CONF = cfg.CONF
LOG = logging.getLogger(__name__)
@@ -46,6 +49,11 @@ class InstanceController(wsgi.Controller):
schemas = apischema.instance.copy()
@classmethod
+ def authorize_instance_action(cls, context, instance_rule_name, instance):
+ policy.authorize_on_target(context, 'instance:%s' % instance_rule_name,
+ {'tenant': instance.tenant_id})
+
+ @classmethod
def get_action_schema(cls, body, action_schema):
action_type = list(body.keys())[0]
action_schema = action_schema.get(action_type, {})
@@ -104,6 +112,7 @@ class InstanceController(wsgi.Controller):
def _action_restart(self, context, req, instance, body):
context.notification = notification.DBaaSInstanceRestart(context,
request=req)
+ self.authorize_instance_action(context, 'restart', instance)
with StartNotification(context, instance_id=instance.id):
instance.restart()
return wsgi.Result(None, 202)
@@ -134,6 +143,8 @@ class InstanceController(wsgi.Controller):
def _action_resize_volume(self, context, req, instance, volume):
context.notification = notification.DBaaSInstanceResizeVolume(
context, request=req)
+ self.authorize_instance_action(context, 'resize_volume', instance)
+
with StartNotification(context, instance_id=instance.id,
new_size=volume['size']):
instance.resize_volume(volume['size'])
@@ -142,6 +153,8 @@ class InstanceController(wsgi.Controller):
def _action_resize_flavor(self, context, req, instance, flavorRef):
context.notification = notification.DBaaSInstanceResizeInstance(
context, request=req)
+ self.authorize_instance_action(context, 'resize_flavor', instance)
+
new_flavor_id = utils.get_id_from_href(flavorRef)
with StartNotification(context, instance_id=instance.id,
new_flavor_id=new_flavor_id):
@@ -152,6 +165,8 @@ class InstanceController(wsgi.Controller):
raise webob.exc.HTTPNotImplemented()
def _action_promote_to_replica_source(self, context, req, instance, body):
+ self.authorize_instance_action(
+ context, 'promote_to_replica_source', instance)
context.notification = notification.DBaaSInstanceEject(context,
request=req)
with StartNotification(context, instance_id=instance.id):
@@ -159,6 +174,8 @@ class InstanceController(wsgi.Controller):
return wsgi.Result(None, 202)
def _action_eject_replica_source(self, context, req, instance, body):
+ self.authorize_instance_action(
+ context, 'eject_replica_source', instance)
context.notification = notification.DBaaSInstancePromote(context,
request=req)
with StartNotification(context, instance_id=instance.id):
@@ -166,6 +183,11 @@ class InstanceController(wsgi.Controller):
return wsgi.Result(None, 202)
def _action_reset_status(self, context, req, instance, body):
+ if 'force_delete' in body['reset_status']:
+ self.authorize_instance_action(context, 'force_delete', instance)
+ else:
+ self.authorize_instance_action(
+ context, 'reset_status', instance)
context.notification = notification.DBaaSInstanceResetStatus(
context, request=req)
with StartNotification(context, instance_id=instance.id):
@@ -181,6 +203,7 @@ class InstanceController(wsgi.Controller):
LOG.info(_LI("Listing database instances for tenant '%s'"), tenant_id)
LOG.debug("req : '%s'\n\n", req)
context = req.environ[wsgi.CONTEXT_KEY]
+ policy.authorize_on_tenant(context, 'instance:index')
clustered_q = req.GET.get('include_clustered', '').lower()
include_clustered = clustered_q == 'true'
servers, marker = models.Instances.load(context, include_clustered)
@@ -195,6 +218,10 @@ class InstanceController(wsgi.Controller):
id)
LOG.debug("req : '%s'\n\n", req)
context = req.environ[wsgi.CONTEXT_KEY]
+
+ instance = models.Instance.load(context, id)
+ self.authorize_instance_action(context, 'backups', instance)
+
backups, marker = backup_model.list_for_instance(context, id)
view = backup_views.BackupViews(backups)
paged = pagination.SimplePaginatedDataView(req.url, 'backups', view,
@@ -211,6 +238,7 @@ class InstanceController(wsgi.Controller):
context = req.environ[wsgi.CONTEXT_KEY]
server = models.load_instance_with_info(models.DetailInstance,
context, id)
+ self.authorize_instance_action(context, 'show', server)
return wsgi.Result(views.InstanceDetailView(server,
req=req).data(), 200)
@@ -222,6 +250,7 @@ class InstanceController(wsgi.Controller):
LOG.debug("req : '%s'\n\n", req)
context = req.environ[wsgi.CONTEXT_KEY]
instance = models.load_any_instance(context, id)
+ self.authorize_instance_action(context, 'delete', instance)
context.notification = notification.DBaaSInstanceDelete(
context, request=req)
with StartNotification(context, instance_id=instance.id):
@@ -245,6 +274,7 @@ class InstanceController(wsgi.Controller):
LOG.debug("req : '%s'\n\n", strutils.mask_password(req))
LOG.debug("body : '%s'\n\n", strutils.mask_password(body))
context = req.environ[wsgi.CONTEXT_KEY]
+ policy.authorize_on_tenant(context, 'instance:create')
context.notification = notification.DBaaSInstanceCreate(context,
request=req)
datastore_args = body['instance'].get('datastore', {})
@@ -266,6 +296,25 @@ class InstanceController(wsgi.Controller):
except ValueError as ve:
raise exception.BadRequest(msg=ve)
+ modules = body['instance'].get('modules')
+
+ # The following operations have their own API calls.
+ # We need to make sure the same policies are enforced when
+ # creating an instance.
+ # i.e. if attaching a configuration group to an existing instance is not
+ # allowed, it should not be possible to create a new instance with the
+ # group attached either.
+ if configuration:
+ policy.authorize_on_tenant(context, 'instance:update')
+ if modules:
+ policy.authorize_on_tenant(context, 'instance:module_apply')
+ if users:
+ policy.authorize_on_tenant(
+ context, 'instance:extension:user:create')
+ if databases:
+ policy.authorize_on_tenant(
+ context, 'instance:extension:database:create')
+
if 'volume' in body['instance']:
volume_info = body['instance']['volume']
volume_size = int(volume_info['size'])
@@ -287,7 +336,6 @@ class InstanceController(wsgi.Controller):
# also check for older name
body['instance'].get('slave_of'))
replica_count = body['instance'].get('replica_count')
- modules = body['instance'].get('modules')
locality = body['instance'].get('locality')
if locality:
locality_domain = ['affinity', 'anti-affinity']
@@ -302,6 +350,7 @@ class InstanceController(wsgi.Controller):
'Cannot specify locality when adding replicas to existing '
'master.')
raise exception.BadRequest(msg=dupe_locality_msg)
+ region_name = body['instance'].get('region_name', CONF.os_region_name)
instance = models.Instance.create(context, name, flavor_id,
image_id, databases, users,
@@ -312,7 +361,8 @@ class InstanceController(wsgi.Controller):
replica_count=replica_count,
volume_type=volume_type,
modules=modules,
- locality=locality)
+ locality=locality,
+ region_name=region_name)
view = views.InstanceDetailView(instance, req=req)
return wsgi.Result(view.data(), 200)
@@ -367,6 +417,7 @@ class InstanceController(wsgi.Controller):
context = req.environ[wsgi.CONTEXT_KEY]
instance = models.Instance.load(context, id)
+ self.authorize_instance_action(context, 'update', instance)
# Make sure args contains a 'configuration_id' argument,
args = {}
@@ -384,6 +435,7 @@ class InstanceController(wsgi.Controller):
context = req.environ[wsgi.CONTEXT_KEY]
instance = models.Instance.load(context, id)
+ self.authorize_instance_action(context, 'edit', instance)
args = {}
args['detach_replica'] = ('replica_of' in body['instance'] or
@@ -407,6 +459,8 @@ class InstanceController(wsgi.Controller):
LOG.info(_LI("Getting default configuration for instance %s"), id)
context = req.environ[wsgi.CONTEXT_KEY]
instance = models.Instance.load(context, id)
+ self.authorize_instance_action(context, 'configuration', instance)
+
LOG.debug("Server: %s", instance)
config = instance.get_default_configuration_template()
LOG.debug("Default config for instance %(instance_id)s is %(config)s",
@@ -421,6 +475,7 @@ class InstanceController(wsgi.Controller):
instance = models.Instance.load(context, id)
if not instance:
raise exception.NotFound(uuid=id)
+ self.authorize_instance_action(context, 'guest_log_list', instance)
client = create_guest_client(context, id)
guest_log_list = client.guest_log_list()
return wsgi.Result({'logs': guest_log_list}, 200)
@@ -450,6 +505,7 @@ class InstanceController(wsgi.Controller):
instance = models.Instance.load(context, id)
if not instance:
raise exception.NotFound(uuid=id)
+ self.authorize_instance_action(context, 'module_list', instance)
from_guest = bool(req.GET.get('from_guest', '').lower())
include_contents = bool(req.GET.get('include_contents', '').lower())
if from_guest:
@@ -477,6 +533,7 @@ class InstanceController(wsgi.Controller):
instance = models.Instance.load(context, id)
if not instance:
raise exception.NotFound(uuid=id)
+ self.authorize_instance_action(context, 'module_apply', instance)
module_ids = [mod['id'] for mod in body.get('modules', [])]
modules = module_models.Modules.load_by_ids(context, module_ids)
module_list = []
@@ -497,12 +554,15 @@ class InstanceController(wsgi.Controller):
instance = models.Instance.load(context, id)
if not instance:
raise exception.NotFound(uuid=id)
+ self.authorize_instance_action(context, 'module_remove', instance)
module = module_models.Module.load(context, module_id)
module_info = module_views.DetailedModuleView(module).data()
client = create_guest_client(context, id)
client.module_remove(module_info)
- instance_module = module_models.InstanceModule.load(
+ instance_modules = module_models.InstanceModules.load_all(
context, instance_id=id, module_id=module_id)
- if instance_module:
+ for instance_module in instance_modules:
module_models.InstanceModule.delete(context, instance_module)
+ LOG.debug("Deleted IM record %s (instance %s, module %s)." %
+ (instance_module.id, id, module_id))
return wsgi.Result(None, 200)
diff --git a/trove/instance/views.py b/trove/instance/views.py
index cb383dc5..6721ec10 100644
--- a/trove/instance/views.py
+++ b/trove/instance/views.py
@@ -16,6 +16,7 @@
from oslo_log import log as logging
from trove.common.views import create_links
+from trove.common import wsgi
from trove.instance import models
LOG = logging.getLogger(__name__)
@@ -27,6 +28,7 @@ class InstanceView(object):
def __init__(self, instance, req=None):
self.instance = instance
self.req = req
+ self.context = req.environ[wsgi.CONTEXT_KEY]
def data(self):
instance_dict = {
@@ -37,6 +39,7 @@ class InstanceView(object):
"flavor": self._build_flavor_info(),
"datastore": {"type": self.instance.datastore.name,
"version": self.instance.datastore_version.name},
+ "region": self.instance.region_name
}
if self.instance.volume_support:
instance_dict['volume'] = {'size': self.instance.volume_size}
@@ -121,6 +124,10 @@ class InstanceDetailView(InstanceView):
if self.instance.shard_id:
result['instance']['shard_id'] = self.instance.shard_id
+ if self.context.is_admin:
+ result['instance']['server_id'] = self.instance.server_id
+ result['instance']['volume_id'] = self.instance.volume_id
+
return result
def _build_fault_info(self):
diff --git a/trove/limits/service.py b/trove/limits/service.py
index 28d3ea66..5200b79f 100644
--- a/trove/limits/service.py
+++ b/trove/limits/service.py
@@ -13,6 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+from trove.common import policy
from trove.common import wsgi
from trove.limits import views
from trove.quota.quota import QUOTAS
@@ -27,6 +28,8 @@ class LimitsController(wsgi.Controller):
"""
Return all absolute and rate limit information.
"""
+ context = req.environ[wsgi.CONTEXT_KEY]
+ policy.authorize_on_tenant(context, 'limits:index')
quotas = QUOTAS.get_all_quotas_by_tenant(tenant_id)
abs_limits = {k: v['hard_limit'] for k, v in quotas.items()}
rate_limits = req.environ.get("trove.limits", [])
diff --git a/trove/module/models.py b/trove/module/models.py
index 7c7af87a..19cfb0f3 100644
--- a/trove/module/models.py
+++ b/trove/module/models.py
@@ -21,6 +21,8 @@ import hashlib
import six
from sqlalchemy.sql.expression import or_
+from oslo_log import log as logging
+
from trove.common import cfg
from trove.common import crypto_utils
from trove.common import exception
@@ -29,8 +31,6 @@ from trove.common import utils
from trove.datastore import models as datastore_models
from trove.db import models
-from oslo_log import log as logging
-
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
@@ -319,14 +319,8 @@ class InstanceModules(object):
@staticmethod
def load(context, instance_id=None, module_id=None, md5=None):
- selection = {'deleted': False}
- if instance_id:
- selection['instance_id'] = instance_id
- if module_id:
- selection['module_id'] = module_id
- if md5:
- selection['md5'] = md5
- db_info = DBInstanceModule.find_all(**selection)
+ db_info = InstanceModules.load_all(
+ context, instance_id=instance_id, module_id=module_id, md5=md5)
if db_info.count() == 0:
LOG.debug("No instance module records found")
@@ -337,6 +331,17 @@ class InstanceModules(object):
next_marker = data_view.next_page_marker
return data_view.collection, next_marker
+ @staticmethod
+ def load_all(context, instance_id=None, module_id=None, md5=None):
+ query_opts = {'deleted': False}
+ if instance_id:
+ query_opts['instance_id'] = instance_id
+ if module_id:
+ query_opts['module_id'] = module_id
+ if md5:
+ query_opts['md5'] = md5
+ return DBInstanceModule.find_all(**query_opts)
+
class InstanceModule(object):
@@ -347,10 +352,33 @@ class InstanceModule(object):
@staticmethod
def create(context, instance_id, module_id, md5):
- instance_module = DBInstanceModule.create(
- instance_id=instance_id,
- module_id=module_id,
- md5=md5)
+ instance_module = None
+ # First mark any 'old' records as deleted and/or update the
+ # current one.
+ old_ims = InstanceModules.load_all(
+ context, instance_id=instance_id, module_id=module_id)
+ for old_im in old_ims:
+ if old_im.md5 == md5 and not instance_module:
+ instance_module = old_im
+ InstanceModule.update(context, instance_module)
+ else:
+ if old_im.md5 == md5 and instance_module:
+ LOG.debug("Found dupe IM record %s; marking as deleted "
+ "(instance %s, module %s)." %
+ (old_im.id, instance_id, module_id))
+ else:
+ LOG.debug("Deleting IM record %s (instance %s, "
+ "module %s)." %
+ (old_im.id, instance_id, module_id))
+ InstanceModule.delete(context, old_im)
+
+ # If we don't have an instance module, it means we need to create
+ # a new one.
+ if not instance_module:
+ instance_module = DBInstanceModule.create(
+ instance_id=instance_id,
+ module_id=module_id,
+ md5=md5)
return instance_module
@staticmethod
diff --git a/trove/module/service.py b/trove/module/service.py
index 555fee5d..c6b08e1c 100644
--- a/trove/module/service.py
+++ b/trove/module/service.py
@@ -22,6 +22,7 @@ import trove.common.apischema as apischema
from trove.common import exception
from trove.common.i18n import _
from trove.common import pagination
+from trove.common import policy
from trove.common import wsgi
from trove.datastore import models as datastore_models
from trove.instance import models as instance_models
@@ -37,8 +38,20 @@ class ModuleController(wsgi.Controller):
schemas = apischema.module
+ @classmethod
+ def authorize_module_action(cls, context, module_rule_name, module):
+ """If a modules in not owned by any particular tenant just check
+ the current tenant is allowed to perform the action.
+ """
+ if module.tenant_id is not None:
+ policy.authorize_on_target(context, 'module:%s' % module_rule_name,
+ {'tenant': module.tenant_id})
+ else:
+ policy.authorize_on_tenant(context, 'module:%s' % module_rule_name)
+
def index(self, req, tenant_id):
context = req.environ[wsgi.CONTEXT_KEY]
+ policy.authorize_on_tenant(context, 'module:index')
datastore = req.GET.get('datastore', '')
if datastore and datastore.lower() != models.Modules.MATCH_ALL_NAME:
ds, ds_ver = datastore_models.get_datastore_version(
@@ -53,6 +66,7 @@ class ModuleController(wsgi.Controller):
context = req.environ[wsgi.CONTEXT_KEY]
module = models.Module.load(context, id)
+ self.authorize_module_action(context, 'show', module)
module.instance_count = len(models.InstanceModules.load(
context, module_id=module.id, md5=module.md5))
@@ -65,6 +79,7 @@ class ModuleController(wsgi.Controller):
LOG.info(_("Creating module '%s'") % name)
context = req.environ[wsgi.CONTEXT_KEY]
+ policy.authorize_on_tenant(context, 'module:create')
module_type = body['module']['module_type']
contents = body['module']['contents']
@@ -89,6 +104,7 @@ class ModuleController(wsgi.Controller):
context = req.environ[wsgi.CONTEXT_KEY]
module = models.Module.load(context, id)
+ self.authorize_module_action(context, 'delete', module)
models.Module.delete(context, module)
return wsgi.Result(None, 200)
@@ -97,6 +113,7 @@ class ModuleController(wsgi.Controller):
context = req.environ[wsgi.CONTEXT_KEY]
module = models.Module.load(context, id)
+ self.authorize_module_action(context, 'update', module)
original_module = copy.deepcopy(module)
if 'name' in body['module']:
module.name = body['module']['name']
@@ -146,6 +163,10 @@ class ModuleController(wsgi.Controller):
LOG.info(_("Getting instances for module %s") % id)
context = req.environ[wsgi.CONTEXT_KEY]
+
+ module = models.Module.load(context, id)
+ self.authorize_module_action(context, 'instances', module)
+
instance_modules, marker = models.InstanceModules.load(
context, module_id=id)
if instance_modules:
diff --git a/trove/network/neutron.py b/trove/network/neutron.py
index 6f8e966d..c8c60d9c 100644
--- a/trove/network/neutron.py
+++ b/trove/network/neutron.py
@@ -41,9 +41,9 @@ class NovaNetworkStruct(object):
class NeutronDriver(base.NetworkDriver):
- def __init__(self, context):
+ def __init__(self, context, region_name):
try:
- self.client = remote.create_neutron_client(context)
+ self.client = remote.create_neutron_client(context, region_name)
except neutron_exceptions.NeutronClientException as e:
raise exception.TroveError(str(e))
diff --git a/trove/network/nova.py b/trove/network/nova.py
index 5f0623c5..a66a8be4 100644
--- a/trove/network/nova.py
+++ b/trove/network/nova.py
@@ -27,10 +27,10 @@ LOG = logging.getLogger(__name__)
class NovaNetwork(base.NetworkDriver):
- def __init__(self, context):
+ def __init__(self, context, region_name):
try:
self.client = remote.create_nova_client(
- context)
+ context, region_name)
except nova_exceptions.ClientException as e:
raise exception.TroveError(str(e))
diff --git a/trove/taskmanager/api.py b/trove/taskmanager/api.py
index 881574a2..1c1b01aa 100644
--- a/trove/taskmanager/api.py
+++ b/trove/taskmanager/api.py
@@ -24,7 +24,6 @@ import oslo_messaging as messaging
from trove.common import cfg
from trove.common import exception
from trove.common.notification import NotificationCastWrapper
-import trove.common.rpc.version as rpc_version
from trove.common.strategies.cluster import strategy
from trove.guestagent import models as agent_models
from trove import rpc
@@ -34,18 +33,42 @@ LOG = logging.getLogger(__name__)
class API(object):
- """API for interacting with the task manager."""
+ """API for interacting with the task manager.
+
+ API version history:
+ * 1.0 - Initial version.
+
+ When updating this API, also update API_LATEST_VERSION
+ """
+
+ # API_LATEST_VERSION should have its minor number bumped each time
+ # a method signature is added or changed
+ API_LATEST_VERSION = '1.0'
+
+ # API_BASE_VERSION should only change on major version upgrade
+ API_BASE_VERSION = '1.0'
+
+ VERSION_ALIASES = {
+ 'icehouse': '1.0',
+ 'juno': '1.0',
+ 'kilo': '1.0',
+ 'liberty': '1.0',
+ 'mitaka': '1.0',
+ 'newton': '1.0',
+
+ 'latest': API_LATEST_VERSION
+ }
def __init__(self, context):
self.context = context
super(API, self).__init__()
+ version_cap = self.VERSION_ALIASES.get(
+ CONF.upgrade_levels.taskmanager, CONF.upgrade_levels.taskmanager)
target = messaging.Target(topic=CONF.taskmanager_queue,
- version=rpc_version.RPC_API_VERSION)
+ version=version_cap)
- self.version_cap = rpc_version.VERSION_ALIASES.get(
- CONF.upgrade_levels.taskmanager)
- self.client = self.get_client(target, self.version_cap)
+ self.client = self.get_client(target, version_cap)
def _cast(self, method_name, version, **kwargs):
LOG.debug("Casting %s" % method_name)
@@ -79,72 +102,83 @@ class API(object):
def resize_volume(self, new_size, instance_id):
LOG.debug("Making async call to resize volume for instance: %s"
% instance_id)
+ version = self.API_BASE_VERSION
- self._cast("resize_volume", self.version_cap,
+ self._cast("resize_volume", version=version,
new_size=new_size,
instance_id=instance_id)
def resize_flavor(self, instance_id, old_flavor, new_flavor):
LOG.debug("Making async call to resize flavor for instance: %s" %
instance_id)
+ version = self.API_BASE_VERSION
- self._cast("resize_flavor", self.version_cap,
+ self._cast("resize_flavor", version=version,
instance_id=instance_id,
old_flavor=self._transform_obj(old_flavor),
new_flavor=self._transform_obj(new_flavor))
def reboot(self, instance_id):
LOG.debug("Making async call to reboot instance: %s" % instance_id)
+ version = self.API_BASE_VERSION
- self._cast("reboot", self.version_cap, instance_id=instance_id)
+ self._cast("reboot", version=version, instance_id=instance_id)
def restart(self, instance_id):
LOG.debug("Making async call to restart instance: %s" % instance_id)
+ version = self.API_BASE_VERSION
- self._cast("restart", self.version_cap, instance_id=instance_id)
+ self._cast("restart", version=version, instance_id=instance_id)
def detach_replica(self, instance_id):
LOG.debug("Making async call to detach replica: %s" % instance_id)
+ version = self.API_BASE_VERSION
- self._cast("detach_replica", self.version_cap,
+ self._cast("detach_replica", version=version,
instance_id=instance_id)
def promote_to_replica_source(self, instance_id):
LOG.debug("Making async call to promote replica to source: %s" %
instance_id)
- self._cast("promote_to_replica_source", self.version_cap,
+ version = self.API_BASE_VERSION
+ self._cast("promote_to_replica_source", version=version,
instance_id=instance_id)
def eject_replica_source(self, instance_id):
LOG.debug("Making async call to eject replica source: %s" %
instance_id)
- self._cast("eject_replica_source", self.version_cap,
+ version = self.API_BASE_VERSION
+ self._cast("eject_replica_source", version=version,
instance_id=instance_id)
def migrate(self, instance_id, host):
LOG.debug("Making async call to migrate instance: %s" % instance_id)
+ version = self.API_BASE_VERSION
- self._cast("migrate", self.version_cap,
+ self._cast("migrate", version=version,
instance_id=instance_id, host=host)
def delete_instance(self, instance_id):
LOG.debug("Making async call to delete instance: %s" % instance_id)
+ version = self.API_BASE_VERSION
- self._cast("delete_instance", self.version_cap,
+ self._cast("delete_instance", version=version,
instance_id=instance_id)
def create_backup(self, backup_info, instance_id):
LOG.debug("Making async call to create a backup for instance: %s" %
instance_id)
+ version = self.API_BASE_VERSION
- self._cast("create_backup", self.version_cap,
+ self._cast("create_backup", version=version,
backup_info=backup_info,
instance_id=instance_id)
def delete_backup(self, backup_id):
LOG.debug("Making async call to delete backup: %s" % backup_id)
+ version = self.API_BASE_VERSION
- self._cast("delete_backup", self.version_cap, backup_id=backup_id)
+ self._cast("delete_backup", version=version, backup_id=backup_id)
def create_instance(self, instance_id, name, flavor,
image_id, databases, users, datastore_manager,
@@ -155,7 +189,8 @@ class API(object):
modules=None, locality=None):
LOG.debug("Making async call to create instance %s " % instance_id)
- self._cast("create_instance", self.version_cap,
+ version = self.API_BASE_VERSION
+ self._cast("create_instance", version=version,
instance_id=instance_id, name=name,
flavor=self._transform_obj(flavor),
image_id=image_id,
@@ -176,33 +211,38 @@ class API(object):
def create_cluster(self, cluster_id):
LOG.debug("Making async call to create cluster %s " % cluster_id)
+ version = self.API_BASE_VERSION
- self._cast("create_cluster", self.version_cap, cluster_id=cluster_id)
+ self._cast("create_cluster", version=version, cluster_id=cluster_id)
def grow_cluster(self, cluster_id, new_instance_ids):
LOG.debug("Making async call to grow cluster %s " % cluster_id)
+ version = self.API_BASE_VERSION
- cctxt = self.client.prepare(version=self.version_cap)
+ cctxt = self.client.prepare(version=version)
cctxt.cast(self.context, "grow_cluster",
cluster_id=cluster_id, new_instance_ids=new_instance_ids)
def shrink_cluster(self, cluster_id, instance_ids):
LOG.debug("Making async call to shrink cluster %s " % cluster_id)
+ version = self.API_BASE_VERSION
- cctxt = self.client.prepare(version=self.version_cap)
+ cctxt = self.client.prepare(version=version)
cctxt.cast(self.context, "shrink_cluster",
cluster_id=cluster_id, instance_ids=instance_ids)
def delete_cluster(self, cluster_id):
LOG.debug("Making async call to delete cluster %s " % cluster_id)
+ version = self.API_BASE_VERSION
- self._cast("delete_cluster", self.version_cap, cluster_id=cluster_id)
+ self._cast("delete_cluster", version=version, cluster_id=cluster_id)
def upgrade(self, instance_id, datastore_version_id):
LOG.debug("Making async call to upgrade guest to datastore "
"version %s " % datastore_version_id)
+ version = self.API_BASE_VERSION
- cctxt = self.client.prepare(version=self.version_cap)
+ cctxt = self.client.prepare(version=version)
cctxt.cast(self.context, "upgrade", instance_id=instance_id,
datastore_version_id=datastore_version_id)
diff --git a/trove/taskmanager/manager.py b/trove/taskmanager/manager.py
index a70872b7..4e2555d2 100644
--- a/trove/taskmanager/manager.py
+++ b/trove/taskmanager/manager.py
@@ -14,7 +14,6 @@
# under the License.
from oslo_log import log as logging
-import oslo_messaging as messaging
from oslo_service import periodic_task
from oslo_utils import importutils
@@ -27,7 +26,6 @@ from trove.common.exception import TroveError
from trove.common.i18n import _
from trove.common.notification import DBaaSQuotas, EndNotification
from trove.common import remote
-import trove.common.rpc.version as rpc_version
from trove.common import server_group as srv_grp
from trove.common.strategies.cluster import strategy
from trove.datastore.models import DatastoreVersion
@@ -43,8 +41,6 @@ CONF = cfg.CONF
class Manager(periodic_task.PeriodicTasks):
- target = messaging.Target(version=rpc_version.RPC_API_VERSION)
-
def __init__(self):
super(Manager, self).__init__(CONF)
self.admin_context = TroveContext(
@@ -428,13 +424,14 @@ class Manager(periodic_task.PeriodicTasks):
mgmtmodels.publish_exist_events(self.exists_transformer,
self.admin_context)
- @periodic_task.periodic_task(spacing=CONF.quota_notification_interval)
- def publish_quota_notifications(self, context):
- nova_client = remote.create_nova_client(self.admin_context)
- for tenant in nova_client.tenants.list():
- for quota in QUOTAS.get_all_quotas_by_tenant(tenant.id):
- usage = QUOTAS.get_quota_usage(quota)
- DBaaSQuotas(self.admin_context, quota, usage).notify()
+ if CONF.quota_notification_interval:
+ @periodic_task.periodic_task(spacing=CONF.quota_notification_interval)
+ def publish_quota_notifications(self, context):
+ nova_client = remote.create_nova_client(self.admin_context)
+ for tenant in nova_client.tenants.list():
+ for quota in QUOTAS.get_all_quotas_by_tenant(tenant.id):
+ usage = QUOTAS.get_quota_usage(quota)
+ DBaaSQuotas(self.admin_context, quota, usage).notify()
def __getattr__(self, name):
"""
diff --git a/trove/taskmanager/models.py b/trove/taskmanager/models.py
index ed62b90f..48199618 100755
--- a/trove/taskmanager/models.py
+++ b/trove/taskmanager/models.py
@@ -364,7 +364,8 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
finally:
if error_message:
inst_models.save_instance_fault(
- self.id, error_message, error_details)
+ self.id, error_message, error_details,
+ skip_delta=USAGE_SLEEP_TIME + 1)
def create_instance(self, flavor, image_id, databases, users,
datastore_manager, packages, volume_size,
@@ -565,7 +566,7 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
LOG.error(msg_create)
# Make sure we log any unexpected errors from the create
if not isinstance(e_create, TroveError):
- LOG.error(e_create)
+ LOG.exception(e_create)
msg_delete = (
_("An error occurred while deleting a bad "
"replication snapshot from instance %(source)s.") %
@@ -633,10 +634,12 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
server_status = server.status
if server_status in [InstanceStatus.ERROR,
InstanceStatus.FAILED]:
- server_message = ''
- if server.fault:
- server_message = "\nServer error: %s" % (
- server.fault.get('message', 'Unknown'))
+ server_fault_message = 'No fault found'
+ try:
+ server_fault_message = server.fault.get('message', 'Unknown')
+ except AttributeError:
+ pass
+ server_message = "\nServer error: %s" % server_fault_message
raise TroveError(_("Server not active, status: %(status)s"
"%(srv_msg)s") %
{'status': server_status,
@@ -866,7 +869,7 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
def _create_volume(self, volume_size, volume_type, datastore_manager):
LOG.debug("Begin _create_volume for id: %s" % self.id)
- volume_client = create_cinder_client(self.context)
+ volume_client = create_cinder_client(self.context, self.region_name)
volume_desc = ("datastore volume for %s" % self.id)
volume_ref = volume_client.volumes.create(
volume_size, name="datastore-%s" % self.id,
@@ -1009,7 +1012,7 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
def _create_secgroup(self, datastore_manager):
security_group = SecurityGroup.create_for_instance(
- self.id, self.context)
+ self.id, self.context, self.region_name)
tcp_ports = CONF.get(datastore_manager).tcp_ports
udp_ports = CONF.get(datastore_manager).udp_ports
icmp = CONF.get(datastore_manager).icmp
@@ -1037,7 +1040,7 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
if protocol == 'icmp':
SecurityGroupRule.create_sec_group_rule(
s_group, 'icmp', None, None,
- cidr, self.context)
+ cidr, self.context, self.region_name)
else:
for port_or_range in set(ports):
try:
@@ -1045,7 +1048,7 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
from_, to_ = utils.gen_ports(port_or_range)
SecurityGroupRule.create_sec_group_rule(
s_group, protocol, int(from_), int(to_),
- cidr, self.context)
+ cidr, self.context, self.region_name)
except (ValueError, TroveError):
set_error_and_raise([from_, to_])
@@ -1143,7 +1146,8 @@ class BuiltInstanceTasks(BuiltInstance, NotifyMixin, ConfigurationMixin):
# If volume has been resized it must be manually removed in cinder
try:
if self.volume_id:
- volume_client = create_cinder_client(self.context)
+ volume_client = create_cinder_client(self.context,
+ self.region_name)
volume = volume_client.volumes.get(self.volume_id)
if volume.status == "available":
LOG.info(_("Deleting volume %(v)s for instance: %(i)s.")
diff --git a/trove/tests/api/backups.py b/trove/tests/api/backups.py
index 36897fce..ddb7dc14 100644
--- a/trove/tests/api/backups.py
+++ b/trove/tests/api/backups.py
@@ -26,7 +26,6 @@ from trove.common import exception
from trove.common.utils import generate_uuid
from trove.common.utils import poll_until
from trove import tests
-from trove.tests.api.instances import assert_unprocessable
from trove.tests.api.instances import instance_info
from trove.tests.api.instances import TIMEOUT_INSTANCE_CREATE
from trove.tests.api.instances import TIMEOUT_INSTANCE_DELETE
@@ -57,30 +56,6 @@ backup_count_for_instance_prior_to_create = 0
class CreateBackups(object):
@test
- def test_backup_create_instance_invalid(self):
- """Test create backup with unknown instance."""
- invalid_inst_id = 'invalid-inst-id'
- try:
- instance_info.dbaas.backups.create(BACKUP_NAME, invalid_inst_id,
- BACKUP_DESC)
- except exceptions.BadRequest as e:
- resp, body = instance_info.dbaas.client.last_response
- assert_equal(resp.status, 400)
- assert_equal(e.message,
- "Validation error: "
- "backup['instance'] u'%s' does not match "
- "'^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-"
- "([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-"
- "([0-9a-fA-F]){12}$'" %
- invalid_inst_id)
-
- @test
- def test_backup_create_instance_not_found(self):
- """Test create backup with unknown instance."""
- assert_raises(exceptions.NotFound, instance_info.dbaas.backups.create,
- BACKUP_NAME, generate_uuid(), BACKUP_DESC)
-
- @test
def test_backup_create_instance(self):
"""Test create backup for a given instance."""
# Necessary to test that the count increases.
@@ -89,6 +64,9 @@ class CreateBackups(object):
global backup_count_for_instance_prior_to_create
backup_count_for_instance_prior_to_create = len(
instance_info.dbaas.instances.backups(instance_info.id))
+ datastore_version = instance_info.dbaas.datastore_versions.get(
+ instance_info.dbaas_datastore,
+ instance_info.dbaas_datastore_version)
result = instance_info.dbaas.backups.create(BACKUP_NAME,
instance_info.id,
@@ -101,10 +79,6 @@ class CreateBackups(object):
assert_equal('NEW', result.status)
instance = instance_info.dbaas.instances.get(instance_info.id)
- datastore_version = instance_info.dbaas.datastore_versions.get(
- instance_info.dbaas_datastore,
- instance_info.dbaas_datastore_version)
-
assert_equal('BACKUP', instance.status)
assert_equal(instance_info.dbaas_datastore,
result.datastore['type'])
@@ -113,37 +87,6 @@ class CreateBackups(object):
assert_equal(datastore_version.id, result.datastore['version_id'])
-@test(runs_after=[CreateBackups],
- groups=[GROUP, tests.INSTANCES])
-class AfterBackupCreation(object):
-
- @test
- def test_restore_instance_from_not_completed_backup(self):
- assert_raises(exceptions.Conflict,
- RestoreUsingBackup._restore, backup_info.id)
- assert_equal(409, instance_info.dbaas.last_http_code)
-
- @test
- def test_instance_action_right_after_backup_create(self):
- """Test any instance action while backup is running."""
- assert_unprocessable(instance_info.dbaas.instances.resize_instance,
- instance_info.id, 1)
-
- @test
- def test_backup_create_another_backup_running(self):
- """Test create backup when another backup is running."""
- assert_unprocessable(instance_info.dbaas.backups.create,
- 'backup_test2', instance_info.id,
- 'test description2')
-
- @test
- def test_backup_delete_still_running(self):
- """Test delete backup when it is running."""
- result = instance_info.dbaas.backups.list()
- backup = result[0]
- assert_unprocessable(instance_info.dbaas.backups.delete, backup.id)
-
-
class BackupRestoreMixin(object):
def verify_backup(self, backup_id):
@@ -198,7 +141,7 @@ class BackupRestoreMixin(object):
time_out=TIMEOUT_INSTANCE_CREATE)
-@test(runs_after=[AfterBackupCreation],
+@test(runs_after=[CreateBackups],
groups=[GROUP, tests.INSTANCES])
class WaitForBackupCreateToFinish(BackupRestoreMixin):
"""
@@ -340,6 +283,7 @@ class RestoreUsingBackup(object):
instance_info.volume,
datastore=instance_info.dbaas_datastore,
datastore_version=instance_info.dbaas_datastore_version,
+ nics=instance_info.nics,
restorePoint=restorePoint)
assert_equal(200, instance_info.dbaas.last_http_code)
assert_equal("BUILD", result.status)
@@ -532,6 +476,7 @@ class FakeTestHugeBackupOnSmallInstance(BackupRestoreMixin):
datastore=instance_info.dbaas_datastore,
datastore_version=(instance_info.
dbaas_datastore_version),
+ nics=instance_info.nics,
restorePoint={"backupRef": self.new_backup.id})
assert_equal(403, instance_info.dbaas.last_http_code)
@@ -549,6 +494,7 @@ class FakeTestHugeBackupOnSmallInstance(BackupRestoreMixin):
datastore=instance_info.dbaas_datastore,
datastore_version=(instance_info.
dbaas_datastore_version),
+ nics=instance_info.nics,
restorePoint={"backupRef": self.new_backup.id})
assert_equal(403, instance_info.dbaas.last_http_code)
diff --git a/trove/tests/api/configurations.py b/trove/tests/api/configurations.py
index 0382fe47..40a5023e 100644
--- a/trove/tests/api/configurations.py
+++ b/trove/tests/api/configurations.py
@@ -16,6 +16,7 @@
from datetime import datetime
import json
+import netaddr
from time import sleep
import uuid
@@ -24,6 +25,7 @@ from proboscis.asserts import assert_equal
from proboscis.asserts import assert_not_equal
from proboscis.asserts import assert_raises
from proboscis.asserts import assert_true
+from proboscis.asserts import fail
from proboscis import before_class
from proboscis.decorators import time_out
from proboscis import SkipTest
@@ -84,7 +86,11 @@ def _execute_query(host, user_name, password, query):
def _get_address(instance_id):
result = instance_info.dbaas_admin.mgmt.instances.show(instance_id)
- return result.ip[0]
+ try:
+ return next(str(ip) for ip in result.ip
+ if netaddr.valid_ipv4(ip))
+ except StopIteration:
+ fail("No IPV4 ip found")
def _test_configuration_is_applied_to_instance(instance, configuration_id):
diff --git a/trove/tests/api/flavors.py b/trove/tests/api/flavors.py
index f1886848..c5f34234 100644
--- a/trove/tests/api/flavors.py
+++ b/trove/tests/api/flavors.py
@@ -25,6 +25,7 @@ from proboscis import test
from trove.common.utils import poll_until
from trove import tests
from trove.tests.api.instances import TIMEOUT_INSTANCE_CREATE
+from trove.tests.config import CONFIG
from trove.tests.util.check import AttrCheck
from trove.tests.util import create_dbaas_client
from trove.tests.util import create_nova_client
@@ -215,6 +216,10 @@ class DatastoreFlavorAssociation(object):
self.name2 = "test_instance2"
self.volume = {'size': 2}
self.instance_id = None
+ self.nics = None
+ shared_network = CONFIG.get('shared_network', None)
+ if shared_network:
+ self.nics = [{'net-id': shared_network}]
@test
@time_out(TIMEOUT_INSTANCE_CREATE)
@@ -222,7 +227,8 @@ class DatastoreFlavorAssociation(object):
# all the nova flavors are associated with the default datastore
result = self.rd_client.instances.create(
name=self.name1, flavor_id='1', volume=self.volume,
- datastore=self.datastore.id)
+ datastore=self.datastore.id,
+ nics=self.nics)
self.instance_id = result.id
assert_equal(200, self.rd_client.last_http_code)
@@ -255,4 +261,5 @@ class DatastoreFlavorAssociation(object):
assert_raises(exceptions.BadRequest,
self.rd_client.instances.create, self.name2,
flavor_not_associated, self.volume,
- datastore=self.datastore.id)
+ datastore=self.datastore.id,
+ nics=self.nics)
diff --git a/trove/tests/api/instances.py b/trove/tests/api/instances.py
index 6cacbb91..5dab9d35 100644
--- a/trove/tests/api/instances.py
+++ b/trove/tests/api/instances.py
@@ -13,6 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+import netaddr
import os
import re
import time
@@ -86,7 +87,10 @@ class InstanceTestInfo(object):
self.id = None # The ID of the instance in the database.
self.local_id = None
self.address = None
- self.nics = None # The dict of type/id for nics used on the intance.
+ self.nics = None # The dict of type/id for nics used on the instance.
+ shared_network = CONFIG.get('shared_network', None)
+ if shared_network:
+ self.nics = [{'net-id': shared_network}]
self.initial_result = None # The initial result from the create call.
self.user_ip = None # The IP address of the instance, given to user.
self.infra_ip = None # The infrastructure network IP address.
@@ -126,9 +130,14 @@ class InstanceTestInfo(object):
def get_address(self):
result = self.dbaas_admin.mgmt.instances.show(self.id)
if not hasattr(result, 'hostname'):
- return result.ip[0]
+ try:
+ return next(str(ip) for ip in result.ip
+ if netaddr.valid_ipv4(ip))
+ except StopIteration:
+ fail("No IPV4 ip found")
else:
- return result.server['addresses']
+ return [str(ip) for ip in result.server['addresses']
+ if netaddr.valid_ipv4(ip)]
def get_local_id(self):
mgmt_instance = self.dbaas_admin.management.show(self.id)
@@ -257,7 +266,8 @@ class CreateInstanceQuotaTest(unittest.TestCase):
dbaas.instances.create,
self.test_info.name,
self.test_info.dbaas_flavor_href,
- self.test_info.volume)
+ self.test_info.volume,
+ nics=instance_info.nics)
def test_update_quota_invalid_resource_should_fail(self):
quota_dict = {'invalid_resource': 100}
@@ -297,7 +307,8 @@ class CreateInstanceQuotaTest(unittest.TestCase):
dbaas.instances.create,
self.test_info.name,
self.test_info.dbaas_flavor_href,
- self.test_info.volume)
+ self.test_info.volume,
+ nics=instance_info.nics)
assert_equal(413, dbaas.last_http_code)
@@ -316,7 +327,8 @@ class CreateInstanceQuotaTest(unittest.TestCase):
dbaas.instances.create,
self.test_info.name,
self.test_info.dbaas_flavor_href,
- self.test_info.volume)
+ self.test_info.volume,
+ nics=instance_info.nics)
assert_equal(413, dbaas.last_http_code)
@@ -358,7 +370,8 @@ class CreateInstanceFail(object):
result = dbaas.instances.create(instance_name,
instance_info.dbaas_flavor_href,
volume, databases,
- availability_zone="BAD_ZONE")
+ availability_zone="BAD_ZONE",
+ nics=instance_info.nics)
poll_until(self.instance_in_error(result.id))
instance = dbaas.instances.get(result.id)
@@ -395,7 +408,8 @@ class CreateInstanceFail(object):
volume = None
assert_raises(exceptions.BadRequest, dbaas.instances.create,
instance_name, '',
- volume, databases)
+ volume, databases,
+ nics=instance_info.nics)
assert_equal(400, dbaas.last_http_code)
@test(enabled=VOLUME_SUPPORT)
@@ -405,7 +419,8 @@ class CreateInstanceFail(object):
volume = {}
assert_raises(exceptions.BadRequest, dbaas.instances.create,
instance_name, instance_info.dbaas_flavor_href,
- volume, databases)
+ volume, databases,
+ nics=instance_info.nics)
assert_equal(400, dbaas.last_http_code)
@test(enabled=VOLUME_SUPPORT)
@@ -415,7 +430,8 @@ class CreateInstanceFail(object):
volume = {'size': None}
assert_raises(exceptions.BadRequest, dbaas.instances.create,
instance_name, instance_info.dbaas_flavor_href,
- volume, databases)
+ volume, databases,
+ nics=instance_info.nics)
assert_equal(400, dbaas.last_http_code)
@test(enabled=not VOLUME_SUPPORT)
@@ -425,7 +441,8 @@ class CreateInstanceFail(object):
volume = {'size': 2}
assert_raises(exceptions.HTTPNotImplemented, dbaas.instances.create,
instance_name, instance_info.dbaas_flavor_href,
- volume, databases)
+ volume, databases,
+ nics=instance_info.nics)
assert_equal(501, dbaas.last_http_code)
def test_create_failure_with_volume_size_and_disabled_for_datastore(self):
@@ -436,7 +453,8 @@ class CreateInstanceFail(object):
volume = {'size': 2}
assert_raises(exceptions.HTTPNotImplemented, dbaas.instances.create,
instance_name, instance_info.dbaas_flavor_href,
- volume, databases, datastore=datastore)
+ volume, databases, datastore=datastore,
+ nics=instance_info.nics)
assert_equal(501, dbaas.last_http_code)
@test(enabled=EPHEMERAL_SUPPORT)
@@ -446,7 +464,8 @@ class CreateInstanceFail(object):
flavor_name = CONFIG.values.get('instance_flavor_name', 'm1.tiny')
flavors = dbaas.find_flavors_by_name(flavor_name)
assert_raises(exceptions.BadRequest, dbaas.instances.create,
- instance_name, flavors[0].id, None, databases)
+ instance_name, flavors[0].id, None, databases,
+ nics=instance_info.nics)
assert_equal(400, dbaas.last_http_code)
@test
@@ -459,7 +478,8 @@ class CreateInstanceFail(object):
databases = []
assert_raises(exceptions.BadRequest, dbaas.instances.create,
instance_name, instance_info.dbaas_flavor_href,
- volume, databases)
+ volume, databases,
+ nics=instance_info.nics)
assert_equal(400, dbaas.last_http_code)
@test
@@ -472,7 +492,8 @@ class CreateInstanceFail(object):
databases = []
assert_raises(exceptions.BadRequest, dbaas.instances.create,
instance_name, instance_info.dbaas_flavor_href,
- volume, databases)
+ volume, databases,
+ nics=instance_info.nics)
assert_equal(400, dbaas.last_http_code)
@test
@@ -511,7 +532,8 @@ class CreateInstanceFail(object):
assert_raises(exceptions.NotFound,
dbaas.instances.create, instance_name,
instance_info.dbaas_flavor_href,
- volume, databases, users)
+ volume, databases, users,
+ nics=instance_info.nics)
except exceptions.BadRequest as e:
assert_equal(e.message,
"Please specify datastore. No default datastore "
@@ -534,7 +556,8 @@ class CreateInstanceFail(object):
dbaas.instances.create, instance_name,
instance_info.dbaas_flavor_href,
volume, databases, users,
- datastore=datastore)
+ datastore=datastore,
+ nics=instance_info.nics)
except exceptions.BadRequest as e:
assert_equal(e.message,
"Default version for datastore '%s' not found." %
@@ -555,7 +578,8 @@ class CreateInstanceFail(object):
dbaas.instances.create, instance_name,
instance_info.dbaas_flavor_href,
volume, databases, users,
- datastore=datastore)
+ datastore=datastore,
+ nics=instance_info.nics)
except exceptions.BadRequest as e:
assert_equal(e.message,
"Datastore '%s' cannot be found." %
@@ -578,7 +602,8 @@ class CreateInstanceFail(object):
instance_info.dbaas_flavor_href,
volume, databases, users,
datastore=datastore,
- datastore_version=datastore_version)
+ datastore_version=datastore_version,
+ nics=instance_info.nics)
except exceptions.BadRequest as e:
assert_equal(e.message,
"Datastore version '%s' cannot be found." %
@@ -601,7 +626,8 @@ class CreateInstanceFail(object):
instance_info.dbaas_flavor_href,
volume, databases, users,
datastore=datastore,
- datastore_version=datastore_version)
+ datastore_version=datastore_version,
+ nics=instance_info.nics)
except exceptions.BadRequest as e:
assert_equal(e.message,
"Datastore version '%s' is not active." %
@@ -661,10 +687,6 @@ class CreateInstance(object):
else:
instance_info.volume = None
- shared_network = CONFIG.get('shared_network', None)
- if shared_network:
- instance_info.nics = [{'net-id': shared_network}]
-
if create_new_instance():
instance_info.initial_result = dbaas.instances.create(
instance_info.name,
@@ -696,7 +718,8 @@ class CreateInstance(object):
# Check these attrs only are returned in create response
allowed_attrs = ['created', 'flavor', 'addresses', 'id', 'links',
- 'name', 'status', 'updated', 'datastore', 'fault']
+ 'name', 'status', 'updated', 'datastore', 'fault',
+ 'region']
if ROOT_ON_CREATE:
allowed_attrs.append('password')
if VOLUME_SUPPORT:
@@ -753,7 +776,8 @@ class CreateInstanceFlavors(object):
else:
volume = None
self.result = dbaas.instances.create(instance_name, flavor_id, volume,
- databases)
+ databases,
+ nics=instance_info.nics)
poll_until(self._result_is_active)
self._delete_async(self.result.id)
@@ -792,7 +816,8 @@ class CreateInstanceWithNeutron(unittest.TestCase):
self.result = self.dbaas_client.instances.create(
self.instance_name,
instance_info.dbaas_flavor_href,
- volume, databases)
+ volume, databases,
+ nics=instance_info.nics)
self.instance_id = self.result.id
def verify_instance_is_active():
@@ -1138,7 +1163,8 @@ class TestInstanceListing(object):
@test
def test_index_list(self):
allowed_attrs = ['id', 'links', 'name', 'status', 'flavor',
- 'datastore', 'ip', 'hostname', 'replica_of']
+ 'datastore', 'ip', 'hostname', 'replica_of',
+ 'region']
if VOLUME_SUPPORT:
allowed_attrs.append('volume')
instances = dbaas.instances.list()
@@ -1159,7 +1185,7 @@ class TestInstanceListing(object):
def test_get_instance(self):
allowed_attrs = ['created', 'databases', 'flavor', 'hostname', 'id',
'links', 'name', 'status', 'updated', 'ip',
- 'datastore', 'fault']
+ 'datastore', 'fault', 'region']
if VOLUME_SUPPORT:
allowed_attrs.append('volume')
else:
@@ -1247,7 +1273,7 @@ class TestInstanceListing(object):
'flavor', 'guest_status', 'host', 'hostname', 'id',
'name', 'root_enabled_at', 'root_enabled_by',
'server_state_description', 'status', 'datastore',
- 'updated', 'users', 'volume', 'fault']
+ 'updated', 'users', 'volume', 'fault', 'region']
with CheckInstance(result._info) as check:
check.contains_allowed_attrs(
result._info, allowed_attrs,
@@ -1642,7 +1668,8 @@ class BadInstanceStatusBug(object):
result = self.client.instances.create('testbox',
instance_info.dbaas_flavor_href,
- size)
+ size,
+ nics=instance_info.nics)
id = result.id
self.instances.append(id)
diff --git a/trove/tests/api/instances_delete.py b/trove/tests/api/instances_delete.py
index 63e000de..bdb6dd83 100644
--- a/trove/tests/api/instances_delete.py
+++ b/trove/tests/api/instances_delete.py
@@ -50,7 +50,8 @@ class TestBase(object):
volume = {'size': size}
result = self.dbaas.instances.create(name,
instance_info.dbaas_flavor_href,
- volume, [], [])
+ volume, [], [],
+ nics=instance_info.nics)
return result.id
def wait_for_instance_status(self, instance_id, status="ACTIVE",
diff --git a/trove/tests/api/instances_mysql_down.py b/trove/tests/api/instances_mysql_down.py
index e61337f2..18e08fde 100644
--- a/trove/tests/api/instances_mysql_down.py
+++ b/trove/tests/api/instances_mysql_down.py
@@ -28,6 +28,7 @@ from troveclient.compat import exceptions
from trove.common.utils import poll_until
from trove.tests.api.instances import EPHEMERAL_SUPPORT
from trove.tests.api.instances import VOLUME_SUPPORT
+from trove.tests.config import CONFIG
from trove.tests.util import create_client
from trove.tests.util import test_config
@@ -69,8 +70,13 @@ class TestBase(object):
volume = None
if VOLUME_SUPPORT:
volume = {'size': 1}
+ nics = None
+ shared_network = CONFIG.get('shared_network', None)
+ if shared_network:
+ nics = [{'net-id': shared_network}]
initial = self.client.instances.create(self.name, self.flavor_id,
- volume, [], [])
+ volume, [], [],
+ nics=nics)
self.id = initial.id
self._wait_for_active()
diff --git a/trove/tests/api/mgmt/instances.py b/trove/tests/api/mgmt/instances.py
index f8fbf840..7961d461 100644
--- a/trove/tests/api/mgmt/instances.py
+++ b/trove/tests/api/mgmt/instances.py
@@ -232,7 +232,9 @@ class MgmtInstancesIndex(object):
'task_description',
'tenant_id',
'updated',
+ 'region'
]
+
if CONFIG.trove_volume_support:
expected_fields.append('volume')
@@ -254,6 +256,7 @@ class MgmtInstancesIndex(object):
Make sure that the deleted= filter works as expected, and no instances
are excluded.
"""
+
if not hasattr(self.client.management.index, 'deleted'):
raise SkipTest("instance index must have a deleted "
"label for this test")
diff --git a/trove/tests/api/mgmt/instances_actions.py b/trove/tests/api/mgmt/instances_actions.py
index 42c5230f..beebaf7b 100644
--- a/trove/tests/api/mgmt/instances_actions.py
+++ b/trove/tests/api/mgmt/instances_actions.py
@@ -18,6 +18,7 @@ from proboscis import after_class
from proboscis.asserts import assert_equal
from proboscis.asserts import assert_raises
from proboscis import before_class
+from proboscis import SkipTest
from proboscis import test
from trove.backup import models as backup_models
@@ -30,6 +31,7 @@ from trove.extensions.mgmt.instances.service import MgmtInstanceController
from trove.instance import models as imodels
from trove.instance.models import DBInstance
from trove.instance.tasks import InstanceTasks
+from trove.tests.config import CONFIG
from trove.tests.util import create_dbaas_client
from trove.tests.util import test_config
from trove.tests.util.users import Requirements
@@ -79,6 +81,7 @@ class MgmtInstanceBase(object):
@test(groups=[GROUP])
class RestartTaskStatusTests(MgmtInstanceBase):
+
@before_class
def setUp(self):
super(RestartTaskStatusTests, self).setUp()
@@ -137,6 +140,9 @@ class RestartTaskStatusTests(MgmtInstanceBase):
@test
def mgmt_reset_task_status_clears_backups(self):
+ if CONFIG.fake_mode:
+ raise SkipTest("Test requires an instance.")
+
self.reset_task_status()
self._reload_db_info()
assert_equal(self.db_info.task_status, InstanceTasks.NONE)
@@ -201,5 +207,6 @@ class RestartTaskStatusTests(MgmtInstanceBase):
found_backup.delete()
admin = test_config.users.find_user(Requirements(is_admin=True))
admin_dbaas = create_dbaas_client(admin)
- result = admin_dbaas.instances.backups(self.db_info.id)
- assert_equal(0, len(result))
+ if not CONFIG.fake_mode:
+ result = admin_dbaas.instances.backups(self.db_info.id)
+ assert_equal(0, len(result))
diff --git a/trove/tests/api/replication.py b/trove/tests/api/replication.py
index 6596ba61..fd9d5d34 100644
--- a/trove/tests/api/replication.py
+++ b/trove/tests/api/replication.py
@@ -92,9 +92,9 @@ def create_slave():
instance_info.name + "_slave",
instance_info.dbaas_flavor_href,
instance_info.volume,
- nics=instance_info.nics,
datastore=instance_info.dbaas_datastore,
datastore_version=instance_info.dbaas_datastore_version,
+ nics=instance_info.nics,
replica_of=instance_info.id)
assert_equal(200, instance_info.dbaas.last_http_code)
assert_equal("BUILD", result.status)
@@ -132,6 +132,7 @@ class CreateReplicationSlave(object):
instance_info.volume,
datastore=instance_info.dbaas_datastore,
datastore_version=instance_info.dbaas_datastore_version,
+ nics=instance_info.nics,
replica_of="Missing replica source")
assert_equal(404, instance_info.dbaas.last_http_code)
diff --git a/trove/tests/fakes/guestagent.py b/trove/tests/fakes/guestagent.py
index 79e0a02d..baf0bee9 100644
--- a/trove/tests/fakes/guestagent.py
+++ b/trove/tests/fakes/guestagent.py
@@ -361,6 +361,9 @@ class FakeGuest(object):
def backup_required_for_replication(self):
return True
+ def post_processing_required_for_replication(self):
+ return False
+
def module_list(self, context, include_contents=False):
return []
diff --git a/trove/tests/fakes/nova.py b/trove/tests/fakes/nova.py
index c4064315..ffa7a1df 100644
--- a/trove/tests/fakes/nova.py
+++ b/trove/tests/fakes/nova.py
@@ -870,13 +870,13 @@ def get_client_data(context):
return CLIENT_DATA[context]
-def fake_create_nova_client(context):
+def fake_create_nova_client(context, region_name=None):
return get_client_data(context)['nova']
-def fake_create_nova_volume_client(context):
+def fake_create_nova_volume_client(context, region_name=None):
return get_client_data(context)['volume']
-def fake_create_cinder_client(context):
+def fake_create_cinder_client(context, region_name=None):
return get_client_data(context)['volume']
diff --git a/trove/tests/int_tests.py b/trove/tests/int_tests.py
index 519db3ec..2141a9c1 100644
--- a/trove/tests/int_tests.py
+++ b/trove/tests/int_tests.py
@@ -34,7 +34,7 @@ from trove.tests.api import users
from trove.tests.api import versions
from trove.tests.scenario import groups
from trove.tests.scenario.groups import backup_group
-from trove.tests.scenario.groups import cluster_actions_group
+from trove.tests.scenario.groups import cluster_group
from trove.tests.scenario.groups import configuration_group
from trove.tests.scenario.groups import database_actions_group
from trove.tests.scenario.groups import guest_log_group
@@ -69,8 +69,20 @@ def build_group(*groups):
return out
-def register(datastores, *test_groups):
- proboscis.register(groups=build_group(datastores),
+def register(group_names, *test_groups, **kwargs):
+ if kwargs:
+ register(group_names, kwargs.values())
+ for suffix, grp_set in kwargs.items():
+ # Recursively call without the kwargs
+ register([name + '_' + suffix for name in group_names], *grp_set)
+ return
+
+ # Do the actual registration here
+ proboscis.register(groups=build_group(group_names),
+ depends_on_groups=build_group(*test_groups))
+ # Now register the same groups with '-' instead of '_'
+ proboscis.register(groups=build_group(
+ [name.replace('_', '-') for name in group_names]),
depends_on_groups=build_group(*test_groups))
black_box_groups = [
@@ -136,24 +148,44 @@ base_groups = [
]
# Cluster-based groups
-cluster_actions_groups = list(base_groups)
-cluster_actions_groups.extend([cluster_actions_group.GROUP,
- negative_cluster_actions_group.GROUP])
+cluster_create_groups = list(base_groups)
+cluster_create_groups.extend([groups.CLUSTER_DELETE_WAIT])
+
+cluster_actions_groups = list(cluster_create_groups)
+cluster_actions_groups.extend([groups.CLUSTER_ACTIONS_SHRINK_WAIT])
+
+cluster_negative_actions_groups = list(negative_cluster_actions_group.GROUP)
+
+cluster_root_groups = list(cluster_create_groups)
+cluster_root_groups.extend([groups.CLUSTER_ACTIONS_ROOT_ENABLE])
+
+cluster_root_actions_groups = list(cluster_actions_groups)
+cluster_root_actions_groups.extend([groups.CLUSTER_ACTIONS_ROOT_ACTIONS])
+
+cluster_upgrade_groups = list(cluster_create_groups)
+cluster_upgrade_groups.extend([groups.CLUSTER_UPGRADE_WAIT])
+
+cluster_groups = list(cluster_actions_groups)
+cluster_groups.extend([cluster_group.GROUP])
# Single-instance based groups
instance_create_groups = list(base_groups)
-instance_create_groups.extend([instance_create_group.GROUP,
- instance_delete_group.GROUP])
+instance_create_groups.extend([groups.INST_CREATE,
+ groups.INST_DELETE_WAIT])
instance_error_create_groups = list(base_groups)
instance_error_create_groups.extend([instance_error_create_group.GROUP])
-instance_upgrade_groups = list(instance_create_groups)
-instance_upgrade_groups.extend([instance_upgrade_group.GROUP])
-
instance_force_delete_groups = list(base_groups)
instance_force_delete_groups.extend([instance_force_delete_group.GROUP])
+instance_init_groups = list(base_groups)
+instance_init_groups.extend([instance_create_group.GROUP,
+ instance_delete_group.GROUP])
+
+instance_upgrade_groups = list(instance_create_groups)
+instance_upgrade_groups.extend([instance_upgrade_group.GROUP])
+
backup_groups = list(instance_create_groups)
backup_groups.extend([groups.BACKUP,
groups.BACKUP_INST])
@@ -161,6 +193,9 @@ backup_groups.extend([groups.BACKUP,
backup_incremental_groups = list(backup_groups)
backup_incremental_groups.extend([backup_group.GROUP])
+backup_negative_groups = list(backup_groups)
+backup_negative_groups.extend([groups.BACKUP_CREATE_NEGATIVE])
+
configuration_groups = list(instance_create_groups)
configuration_groups.extend([configuration_group.GROUP])
@@ -202,12 +237,19 @@ user_actions_groups.extend([user_actions_group.GROUP])
# groups common to all datastores
common_groups = list(instance_groups)
-common_groups.extend([guest_log_groups, module_groups])
+common_groups.extend([guest_log_groups, instance_init_groups, module_groups])
# Register: Component based groups
register(["backup"], backup_groups)
register(["backup_incremental"], backup_incremental_groups)
+register(["backup_negative"], backup_negative_groups)
register(["cluster"], cluster_actions_groups)
+register(["cluster_actions"], cluster_actions_groups)
+register(["cluster_create"], cluster_create_groups)
+register(["cluster_negative_actions"], cluster_negative_actions_groups)
+register(["cluster_root"], cluster_root_groups)
+register(["cluster_root_actions"], cluster_root_actions_groups)
+register(["cluster_upgrade"], cluster_upgrade_groups)
register(["common"], common_groups)
register(["configuration"], configuration_groups)
register(["configuration_create"], configuration_create_groups)
@@ -218,6 +260,7 @@ register(["instance_actions"], instance_actions_groups)
register(["instance_create"], instance_create_groups)
register(["instance_error"], instance_error_create_groups)
register(["instance_force_delete"], instance_force_delete_groups)
+register(["instance_init"], instance_init_groups)
register(["instance_upgrade"], instance_upgrade_groups)
register(["module"], module_groups)
register(["module_create"], module_create_groups)
@@ -230,97 +273,135 @@ register(["user"], user_actions_groups)
# These should contain all functionality currently supported by the datastore.
# Keeping them in alphabetical order may reduce the number of merge conflicts.
register(
- ["db2_supported"], common_groups,
- configuration_groups,
- database_actions_groups,
- user_actions_groups,
+ ["db2_supported"],
+ single=[common_groups,
+ configuration_groups,
+ database_actions_groups,
+ user_actions_groups, ],
+ multi=[]
+)
+
+register(
+ ["cassandra_supported"],
+ single=[common_groups,
+ backup_groups,
+ database_actions_groups,
+ configuration_groups,
+ user_actions_groups, ],
+ multi=[cluster_actions_groups,
+ cluster_negative_actions_groups,
+ cluster_root_actions_groups, ]
)
register(
- ["cassandra_supported"], common_groups,
- backup_groups,
- database_actions_groups,
- cluster_actions_groups,
- configuration_groups,
- user_actions_groups,
+ ["couchbase_supported"],
+ single=[common_groups,
+ backup_groups,
+ root_actions_groups, ],
+ multi=[]
)
register(
- ["couchbase_supported"], common_groups,
- backup_groups,
- root_actions_groups,
+ ["couchdb_supported"],
+ single=[common_groups,
+ backup_groups,
+ database_actions_groups,
+ root_actions_groups,
+ user_actions_groups, ],
+ multi=[]
)
register(
- ["couchdb_supported"], common_groups,
- backup_groups,
- database_actions_groups,
- root_actions_groups,
- user_actions_groups,
+ ["mariadb_supported"],
+ single=[common_groups,
+ backup_incremental_groups,
+ configuration_groups,
+ database_actions_groups,
+ root_actions_groups,
+ user_actions_groups, ],
+ multi=[replication_promote_groups, ]
+ # multi=[cluster_actions_groups,
+ # cluster_negative_actions_groups,
+ # cluster_root_actions_groups,
+ # replication_promote_groups, ]
)
register(
- ["postgresql_supported"], common_groups,
- backup_incremental_groups,
- database_actions_groups,
- configuration_groups,
- replication_groups,
- root_actions_groups,
- user_actions_groups,
+ ["mongodb_supported"],
+ single=[common_groups,
+ backup_groups,
+ configuration_groups,
+ database_actions_groups,
+ root_actions_groups,
+ user_actions_groups, ],
+ multi=[cluster_actions_groups, ]
)
register(
- ["mysql_supported", "percona_supported"], common_groups,
- backup_incremental_groups,
- configuration_groups,
- database_actions_groups,
- instance_upgrade_groups,
- replication_promote_groups,
- root_actions_groups,
- user_actions_groups,
+ ["mysql_supported"],
+ single=[common_groups,
+ backup_incremental_groups,
+ configuration_groups,
+ database_actions_groups,
+ instance_upgrade_groups,
+ root_actions_groups,
+ user_actions_groups, ],
+ multi=[replication_promote_groups, ]
)
register(
- ["mariadb_supported"], common_groups,
- backup_incremental_groups,
- cluster_actions_groups,
- configuration_groups,
- database_actions_groups,
- replication_promote_groups,
- root_actions_groups,
- user_actions_groups,
+ ["percona_supported"],
+ single=[common_groups,
+ backup_incremental_groups,
+ configuration_groups,
+ database_actions_groups,
+ instance_upgrade_groups,
+ root_actions_groups,
+ user_actions_groups, ],
+ multi=[replication_promote_groups, ]
)
register(
- ["mongodb_supported"], common_groups,
- backup_groups,
- cluster_actions_groups,
- configuration_groups,
- database_actions_groups,
- root_actions_groups,
- user_actions_groups,
+ ["postgresql_supported"],
+ single=[common_groups,
+ backup_incremental_groups,
+ database_actions_groups,
+ configuration_groups,
+ root_actions_groups,
+ user_actions_groups, ],
+ multi=[replication_groups, ]
)
register(
- ["pxc_supported"], common_groups,
- backup_incremental_groups,
- cluster_actions_groups,
- configuration_groups,
- database_actions_groups,
- root_actions_groups,
- user_actions_groups,
+ ["pxc_supported"],
+ single=[common_groups,
+ backup_incremental_groups,
+ configuration_groups,
+ database_actions_groups,
+ root_actions_groups,
+ user_actions_groups, ],
+ multi=[]
+ # multi=[cluster_actions_groups,
+ # cluster_negative_actions_groups,
+ # cluster_root_actions_groups, ]
)
register(
- ["redis_supported"], common_groups,
- backup_groups,
- cluster_actions_groups,
- replication_promote_groups,
+ ["redis_supported"],
+ single=[common_groups,
+ backup_groups,
+ backup_negative_groups, ],
+ multi=[cluster_actions_groups,
+ cluster_negative_actions_groups,
+ replication_promote_groups, ]
)
register(
- ["vertica_supported"], common_groups,
- cluster_actions_groups,
- configuration_groups,
- root_actions_groups,
+ ["vertica_supported"],
+ single=[common_groups,
+ configuration_groups,
+ root_actions_groups, ],
+ multi=[cluster_actions_groups,
+ cluster_negative_actions_groups,
+ cluster_root_actions_groups, ]
)
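
The reworked register() above drives all of the datastore registrations that follow it: positional suites register under the bare group name, while keyword suites ('single', 'multi') also register suffixed groups. Below is a minimal standalone sketch of that behaviour; the group lists are illustrative stand-ins, proboscis is replaced by a simple recorder, and the local flattener stands in for the real build_group() helper.

# Standalone sketch (hypothetical group lists, proboscis replaced by a simple
# recorder) of what the reworked register() above ends up registering.

registered = []


def build_group(*groups):
    # Flatten arbitrarily nested iterables of group names; stand-in for the
    # real build_group() in int_tests.py.
    out = []

    def _flatten(group):
        if isinstance(group, str):
            out.append(group)
        else:
            for item in group:
                _flatten(item)

    for group in groups:
        _flatten(group)
    return out


def register(group_names, *test_groups, **kwargs):
    if kwargs:
        register(group_names, kwargs.values())
        for suffix, grp_set in kwargs.items():
            # Recurse with the suffix appended and without the kwargs.
            register([name + '_' + suffix for name in group_names], *grp_set)
        return
    registered.append((build_group(group_names),
                       build_group(*test_groups)))
    # Mirror with '-' in place of '_', as the patch does.
    registered.append((build_group([name.replace('_', '-')
                                    for name in group_names]),
                       build_group(*test_groups)))


# Stand-ins for common_groups, backup_groups, cluster_actions_groups, etc.
single_suites = [['scenario.backup_grp'], ['scenario.user_actions_grp']]
multi_suites = [['scenario.cluster_create_grp']]

register(['mysql_supported'], single=single_suites, multi=multi_suites)

# Yields 'mysql_supported' (depending on every suite) plus
# 'mysql_supported_single' and 'mysql_supported_multi', each also mirrored
# with '-' in place of '_'.
for names, deps in registered:
    print(names, '->', deps)
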
diff --git a/trove/tests/scenario/groups/__init__.py b/trove/tests/scenario/groups/__init__.py
index 42cb2755..75c326dd 100644
--- a/trove/tests/scenario/groups/__init__.py
+++ b/trove/tests/scenario/groups/__init__.py
@@ -21,6 +21,8 @@
# Backup Group
BACKUP = "scenario.backup_grp"
BACKUP_CREATE = "scenario.backup_create_grp"
+BACKUP_CREATE_NEGATIVE = "scenario.backup_create_negative_grp"
+BACKUP_CREATE_WAIT = "scenario.backup_create_wait_grp"
BACKUP_DELETE = "scenario.backup_delete_grp"
BACKUP_INST = "scenario.backup_inst_grp"
BACKUP_INST_CREATE = "scenario.backup_inst_create_grp"
@@ -48,6 +50,34 @@ CFGGRP_INST_DELETE = "scenario.cfggrp_inst_delete_grp"
CFGGRP_INST_DELETE_WAIT = "scenario.cfggrp_inst_delete_wait_grp"
+# Cluster Actions Group
+CLUSTER_ACTIONS = "scenario.cluster_actions_grp"
+CLUSTER_ACTIONS_ROOT_ENABLE = "scenario.cluster_actions_root_enable_grp"
+CLUSTER_ACTIONS_ROOT_ACTIONS = "scenario.cluster_actions_root_actions_grp"
+CLUSTER_ACTIONS_ROOT_GROW = "scenario.cluster_actions_root_grow_grp"
+CLUSTER_ACTIONS_ROOT_SHRINK = "scenario.cluster_actions_root_shrink_grp"
+CLUSTER_ACTIONS_GROW_SHRINK = "scenario.cluster_actions_grow_shrink_grp"
+CLUSTER_ACTIONS_GROW = "scenario.cluster_actions_grow_grp"
+CLUSTER_ACTIONS_GROW_WAIT = "scenario.cluster_actions_grow_wait_grp"
+CLUSTER_ACTIONS_SHRINK = "scenario.cluster_actions_shrink_grp"
+CLUSTER_ACTIONS_SHRINK_WAIT = "scenario.cluster_actions_shrink_wait_grp"
+
+
+# Cluster Create Group (in cluster_actions file)
+CLUSTER_CREATE = "scenario.cluster_create_grp"
+CLUSTER_CREATE_WAIT = "scenario.cluster_create_wait_grp"
+
+
+# Cluster Delete Group (in cluster_actions file)
+CLUSTER_DELETE = "scenario.cluster_delete_grp"
+CLUSTER_DELETE_WAIT = "scenario.cluster_delete_wait_grp"
+
+
+# Cluster Upgrade Group (in cluster_actions file)
+CLUSTER_UPGRADE = "scenario.cluster_upgrade_grp"
+CLUSTER_UPGRADE_WAIT = "scenario.cluster_upgrade_wait_grp"
+
+
# Database Actions Group
DB_ACTION_CREATE = "scenario.db_action_create_grp"
DB_ACTION_DELETE = "scenario.db_action_delete_grp"
@@ -71,6 +101,8 @@ INST_UPGRADE = "scenario.inst_upgrade_grp"
# Instance Create Group
INST_CREATE = "scenario.inst_create_grp"
INST_CREATE_WAIT = "scenario.inst_create_wait_grp"
+INST_INIT_CREATE = "scenario.inst_init_create_grp"
+INST_INIT_CREATE_WAIT = "scenario.inst_init_create_wait_grp"
INST_INIT_DELETE = "scenario.inst_init_delete_grp"
INST_INIT_DELETE_WAIT = "scenario.inst_init_delete_wait_grp"
diff --git a/trove/tests/scenario/groups/backup_group.py b/trove/tests/scenario/groups/backup_group.py
index edfc6e4e..16d93def 100644
--- a/trove/tests/scenario/groups/backup_group.py
+++ b/trove/tests/scenario/groups/backup_group.py
@@ -31,8 +31,8 @@ class BackupRunnerFactory(test_runners.RunnerFactory):
@test(depends_on_groups=[groups.INST_CREATE_WAIT],
groups=[GROUP, groups.BACKUP, groups.BACKUP_CREATE],
- runs_after_groups=[groups.MODULE_INST_CREATE_WAIT,
- groups.CFGGRP_INST_CREATE_WAIT])
+ runs_after_groups=[groups.MODULE_INST_DELETE,
+ groups.CFGGRP_INST_DELETE])
class BackupCreateGroup(TestGroup):
"""Test Backup Create functionality."""
@@ -60,47 +60,63 @@ class BackupCreateGroup(TestGroup):
"""Check that create backup is started successfully."""
self.test_runner.run_backup_create()
- @test(depends_on=[backup_create])
+
+@test(depends_on_groups=[groups.BACKUP_CREATE],
+ groups=[groups.BACKUP_CREATE_NEGATIVE])
+class BackupCreateNegativeGroup(TestGroup):
+ """Test Backup Create Negative functionality."""
+
+ def __init__(self):
+ super(BackupCreateNegativeGroup, self).__init__(
+ BackupRunnerFactory.instance())
+
+ @test
def backup_delete_while_backup_running(self):
"""Ensure delete backup fails while it is running."""
self.test_runner.run_backup_delete_while_backup_running()
- @test(depends_on=[backup_create],
- runs_after=[backup_delete_while_backup_running])
+ @test(runs_after=[backup_delete_while_backup_running])
def restore_instance_from_not_completed_backup(self):
"""Ensure a restore fails while the backup is running."""
self.test_runner.run_restore_instance_from_not_completed_backup()
- @test(depends_on=[backup_create],
- runs_after=[restore_instance_from_not_completed_backup])
+ @test(runs_after=[restore_instance_from_not_completed_backup])
def backup_create_another_backup_running(self):
"""Ensure create backup fails when another backup is running."""
self.test_runner.run_backup_create_another_backup_running()
- @test(depends_on=[backup_create],
- runs_after=[backup_create_another_backup_running])
+ @test(runs_after=[backup_create_another_backup_running])
def instance_action_right_after_backup_create(self):
"""Ensure any instance action fails while backup is running."""
self.test_runner.run_instance_action_right_after_backup_create()
- @test
+ @test(runs_after=[instance_action_right_after_backup_create])
def delete_unknown_backup(self):
"""Ensure deleting an unknown backup fails."""
self.test_runner.run_delete_unknown_backup()
- @test
+ @test(runs_after=[instance_action_right_after_backup_create])
def backup_create_instance_invalid(self):
"""Ensure create backup fails with invalid instance id."""
self.test_runner.run_backup_create_instance_invalid()
- @test
+ @test(runs_after=[instance_action_right_after_backup_create])
def backup_create_instance_not_found(self):
"""Ensure create backup fails with unknown instance id."""
self.test_runner.run_backup_create_instance_not_found()
- @test(depends_on=[backup_create],
- runs_after=[delete_unknown_backup, backup_create_instance_invalid,
- backup_create_instance_not_found])
+
+@test(depends_on_groups=[groups.BACKUP_CREATE],
+ groups=[GROUP, groups.BACKUP, groups.BACKUP_CREATE_WAIT],
+ runs_after_groups=[groups.BACKUP_CREATE_NEGATIVE])
+class BackupCreateWaitGroup(TestGroup):
+ """Wait for Backup Create to Complete."""
+
+ def __init__(self):
+ super(BackupCreateWaitGroup, self).__init__(
+ BackupRunnerFactory.instance())
+
+ @test
def backup_create_completed(self):
"""Check that the backup completes successfully."""
self.test_runner.run_backup_create_completed()
@@ -209,9 +225,7 @@ class BackupIncCreateGroup(TestGroup):
@test(depends_on_groups=[groups.BACKUP_CREATE],
- groups=[GROUP, groups.BACKUP_INST, groups.BACKUP_INST_CREATE],
- runs_after_groups=[groups.MODULE_INST_DELETE,
- groups.CFGGRP_INST_DELETE])
+ groups=[GROUP, groups.BACKUP_INST, groups.BACKUP_INST_CREATE])
class BackupInstCreateGroup(TestGroup):
"""Test Backup Instance Create functionality."""
diff --git a/trove/tests/scenario/groups/cluster_actions_group.py b/trove/tests/scenario/groups/cluster_actions_group.py
deleted file mode 100644
index d69a6d5a..00000000
--- a/trove/tests/scenario/groups/cluster_actions_group.py
+++ /dev/null
@@ -1,162 +0,0 @@
-# Copyright 2015 Tesora Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from proboscis import test
-
-from trove.tests.scenario import groups
-from trove.tests.scenario.groups.test_group import TestGroup
-from trove.tests.scenario.runners import test_runners
-
-
-GROUP = "scenario.cluster_actions_group"
-
-
-class ClusterActionsRunnerFactory(test_runners.RunnerFactory):
-
- _runner_ns = 'cluster_actions_runners'
- _runner_cls = 'ClusterActionsRunner'
-
-
-@test(groups=[GROUP],
- runs_after_groups=[groups.MODULE_INST_DELETE,
- groups.CFGGRP_INST_DELETE,
- groups.INST_ACTIONS_RESIZE_WAIT,
- groups.DB_ACTION_INST_DELETE,
- groups.USER_ACTION_DELETE,
- groups.USER_ACTION_INST_DELETE,
- groups.ROOT_ACTION_INST_DELETE,
- groups.REPL_INST_DELETE_WAIT,
- groups.INST_DELETE_WAIT])
-class ClusterActionsGroup(TestGroup):
-
- def __init__(self):
- super(ClusterActionsGroup, self).__init__(
- ClusterActionsRunnerFactory.instance())
-
- @test
- def cluster_create(self):
- """Create a cluster."""
- self.test_runner.run_cluster_create()
-
- @test(depends_on=[cluster_create])
- def cluster_list(self):
- """List the clusters."""
- self.test_runner.run_cluster_list()
-
- @test(depends_on=[cluster_create])
- def cluster_show(self):
- """Show a cluster."""
- self.test_runner.run_cluster_show()
-
- @test(depends_on=[cluster_create])
- def add_initial_cluster_data(self):
- """Add data to cluster."""
- self.test_runner.run_add_initial_cluster_data()
-
- @test(depends_on=[add_initial_cluster_data])
- def verify_initial_cluster_data(self):
- """Verify the initial data exists on cluster."""
- self.test_runner.run_verify_initial_cluster_data()
-
- @test(depends_on=[cluster_create])
- def cluster_root_enable(self):
- """Root Enable."""
- self.test_runner.run_cluster_root_enable()
-
- @test(depends_on=[cluster_root_enable])
- def verify_cluster_root_enable(self):
- """Verify Root Enable."""
- self.test_runner.run_verify_cluster_root_enable()
-
- @test(depends_on=[cluster_create],
- runs_after=[verify_initial_cluster_data, verify_cluster_root_enable,
- cluster_list, cluster_show])
- def cluster_grow(self):
- """Grow cluster."""
- self.test_runner.run_cluster_grow()
-
- @test(depends_on=[cluster_grow])
- def verify_cluster_root_enable_after_grow(self):
- """Verify Root Enabled after grow."""
- self.test_runner.run_verify_cluster_root_enable()
-
- @test(depends_on=[cluster_grow, add_initial_cluster_data])
- def verify_initial_cluster_data_after_grow(self):
- """Verify the initial data still exists after cluster grow."""
- self.test_runner.run_verify_initial_cluster_data()
-
- @test(depends_on=[cluster_grow],
- runs_after=[verify_initial_cluster_data_after_grow])
- def add_extra_cluster_data_after_grow(self):
- """Add more data to cluster."""
- self.test_runner.run_add_extra_cluster_data()
-
- @test(depends_on=[add_extra_cluster_data_after_grow])
- def verify_extra_cluster_data_after_grow(self):
- """Verify the data added after cluster grow."""
- self.test_runner.run_verify_extra_cluster_data()
-
- @test(depends_on=[add_extra_cluster_data_after_grow],
- runs_after=[verify_extra_cluster_data_after_grow])
- def remove_extra_cluster_data_after_grow(self):
- """Remove the data added after cluster grow."""
- self.test_runner.run_remove_extra_cluster_data()
-
- @test(depends_on=[cluster_create],
- runs_after=[remove_extra_cluster_data_after_grow,
- verify_cluster_root_enable_after_grow])
- def cluster_shrink(self):
- """Shrink cluster."""
- self.test_runner.run_cluster_shrink()
-
- @test(depends_on=[cluster_shrink])
- def verify_cluster_root_enable_after_shrink(self):
- """Verify Root Enable after shrink."""
- self.test_runner.run_verify_cluster_root_enable()
-
- @test(depends_on=[cluster_shrink, add_initial_cluster_data])
- def verify_initial_cluster_data_after_shrink(self):
- """Verify the initial data still exists after cluster shrink."""
- self.test_runner.run_verify_initial_cluster_data()
-
- @test(depends_on=[cluster_shrink],
- runs_after=[verify_initial_cluster_data_after_shrink])
- def add_extra_cluster_data_after_shrink(self):
- """Add more data to cluster."""
- self.test_runner.run_add_extra_cluster_data()
-
- @test(depends_on=[add_extra_cluster_data_after_shrink])
- def verify_extra_cluster_data_after_shrink(self):
- """Verify the data added after cluster shrink."""
- self.test_runner.run_verify_extra_cluster_data()
-
- @test(depends_on=[add_extra_cluster_data_after_shrink],
- runs_after=[verify_extra_cluster_data_after_shrink])
- def remove_extra_cluster_data_after_shrink(self):
- """Remove the data added after cluster shrink."""
- self.test_runner.run_remove_extra_cluster_data()
-
- @test(depends_on=[add_initial_cluster_data],
- runs_after=[remove_extra_cluster_data_after_shrink])
- def remove_initial_cluster_data(self):
- """Remove the initial data from cluster."""
- self.test_runner.run_remove_initial_cluster_data()
-
- @test(depends_on=[cluster_create],
- runs_after=[remove_initial_cluster_data,
- verify_cluster_root_enable_after_shrink])
- def cluster_delete(self):
- """Delete an existing cluster."""
- self.test_runner.run_cluster_delete()
diff --git a/trove/tests/scenario/groups/cluster_group.py b/trove/tests/scenario/groups/cluster_group.py
new file mode 100644
index 00000000..cadd8565
--- /dev/null
+++ b/trove/tests/scenario/groups/cluster_group.py
@@ -0,0 +1,341 @@
+# Copyright 2015 Tesora Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from proboscis import test
+
+from trove.tests.scenario import groups
+from trove.tests.scenario.groups.test_group import TestGroup
+from trove.tests.scenario.runners import test_runners
+
+
+GROUP = "scenario.cluster_group"
+
+
+class ClusterRunnerFactory(test_runners.RunnerFactory):
+
+ _runner_ns = 'cluster_runners'
+ _runner_cls = 'ClusterRunner'
+
+
+@test(groups=[GROUP, groups.CLUSTER_CREATE],
+ runs_after_groups=[groups.MODULE_DELETE,
+ groups.CFGGRP_INST_DELETE,
+ groups.INST_ACTIONS_RESIZE_WAIT,
+ groups.DB_ACTION_INST_DELETE,
+ groups.USER_ACTION_DELETE,
+ groups.USER_ACTION_INST_DELETE,
+ groups.ROOT_ACTION_INST_DELETE,
+ groups.REPL_INST_DELETE_WAIT,
+ groups.INST_DELETE])
+class ClusterCreateGroup(TestGroup):
+
+ def __init__(self):
+ super(ClusterCreateGroup, self).__init__(
+ ClusterRunnerFactory.instance())
+
+ @test
+ def cluster_create(self):
+ """Create a cluster."""
+ self.test_runner.run_cluster_create()
+
+
+@test(groups=[GROUP, groups.CLUSTER_CREATE_WAIT],
+ depends_on_groups=[groups.CLUSTER_CREATE],
+ runs_after_groups=[groups.MODULE_INST_DELETE_WAIT,
+ groups.CFGGRP_INST_DELETE_WAIT,
+ groups.DB_ACTION_INST_DELETE_WAIT,
+ groups.USER_ACTION_INST_DELETE_WAIT,
+ groups.ROOT_ACTION_INST_DELETE_WAIT,
+ groups.INST_DELETE_WAIT])
+class ClusterCreateWaitGroup(TestGroup):
+
+ def __init__(self):
+ super(ClusterCreateWaitGroup, self).__init__(
+ ClusterRunnerFactory.instance())
+
+ @test
+ def cluster_create_wait(self):
+ """Wait for cluster create to complete."""
+ self.test_runner.run_cluster_create_wait()
+
+ @test(depends_on=[cluster_create_wait])
+ def add_initial_cluster_data(self):
+ """Add data to cluster."""
+ self.test_runner.run_add_initial_cluster_data()
+
+ @test(depends_on=[add_initial_cluster_data])
+ def verify_initial_cluster_data(self):
+ """Verify the initial data exists on cluster."""
+ self.test_runner.run_verify_initial_cluster_data()
+
+ @test(depends_on=[cluster_create_wait])
+ def cluster_list(self):
+ """List the clusters."""
+ self.test_runner.run_cluster_list()
+
+ @test(depends_on=[cluster_create_wait])
+ def cluster_show(self):
+ """Show a cluster."""
+ self.test_runner.run_cluster_show()
+
+
+@test(groups=[GROUP, groups.CLUSTER_ACTIONS,
+ groups.CLUSTER_ACTIONS_ROOT_ENABLE],
+ depends_on_groups=[groups.CLUSTER_CREATE_WAIT])
+class ClusterRootEnableGroup(TestGroup):
+
+ def __init__(self):
+ super(ClusterRootEnableGroup, self).__init__(
+ ClusterRunnerFactory.instance())
+
+ @test
+ def cluster_root_enable(self):
+ """Root Enable."""
+ self.test_runner.run_cluster_root_enable()
+
+ @test(depends_on=[cluster_root_enable])
+ def verify_cluster_root_enable(self):
+ """Verify Root Enable."""
+ self.test_runner.run_verify_cluster_root_enable()
+
+
+@test(groups=[GROUP, groups.CLUSTER_ACTIONS,
+ groups.CLUSTER_ACTIONS_GROW_SHRINK,
+ groups.CLUSTER_ACTIONS_GROW],
+ depends_on_groups=[groups.CLUSTER_CREATE_WAIT],
+ runs_after_groups=[groups.CLUSTER_ACTIONS_ROOT_ENABLE])
+class ClusterGrowGroup(TestGroup):
+
+ def __init__(self):
+ super(ClusterGrowGroup, self).__init__(
+ ClusterRunnerFactory.instance())
+
+ @test
+ def cluster_grow(self):
+ """Grow cluster."""
+ self.test_runner.run_cluster_grow()
+
+
+@test(groups=[GROUP, groups.CLUSTER_ACTIONS,
+ groups.CLUSTER_ACTIONS_GROW_SHRINK,
+ groups.CLUSTER_ACTIONS_GROW_WAIT],
+ depends_on_groups=[groups.CLUSTER_ACTIONS_GROW])
+class ClusterGrowWaitGroup(TestGroup):
+ def __init__(self):
+ super(ClusterGrowWaitGroup, self).__init__(
+ ClusterRunnerFactory.instance())
+
+ @test
+ def cluster_grow_wait(self):
+ """Wait for cluster grow to complete."""
+ self.test_runner.run_cluster_grow_wait()
+
+ @test(depends_on=[cluster_grow_wait])
+ def verify_initial_cluster_data_after_grow(self):
+ """Verify the initial data still exists after cluster grow."""
+ self.test_runner.run_verify_initial_cluster_data()
+
+ @test(depends_on=[cluster_grow_wait],
+ runs_after=[verify_initial_cluster_data_after_grow])
+ def add_grow_cluster_data(self):
+ """Add more data to cluster after grow."""
+ self.test_runner.run_add_grow_cluster_data()
+
+ @test(depends_on=[add_grow_cluster_data])
+ def verify_grow_cluster_data(self):
+ """Verify the data added after cluster grow."""
+ self.test_runner.run_verify_grow_cluster_data()
+
+ @test(depends_on=[add_grow_cluster_data],
+ runs_after=[verify_grow_cluster_data])
+ def remove_grow_cluster_data(self):
+ """Remove the data added after cluster grow."""
+ self.test_runner.run_remove_grow_cluster_data()
+
+
+@test(groups=[GROUP, groups.CLUSTER_ACTIONS,
+ groups.CLUSTER_ACTIONS_ROOT_ACTIONS,
+ groups.CLUSTER_ACTIONS_ROOT_GROW],
+ depends_on_groups=[groups.CLUSTER_ACTIONS_GROW_WAIT])
+class ClusterRootEnableGrowGroup(TestGroup):
+
+ def __init__(self):
+ super(ClusterRootEnableGrowGroup, self).__init__(
+ ClusterRunnerFactory.instance())
+
+ @test
+ def verify_cluster_root_enable_after_grow(self):
+ """Verify Root Enabled after grow."""
+ self.test_runner.run_verify_cluster_root_enable()
+
+
+@test(groups=[GROUP, groups.CLUSTER_UPGRADE],
+ depends_on_groups=[groups.CLUSTER_CREATE_WAIT],
+ runs_after_groups=[groups.CLUSTER_ACTIONS_GROW_WAIT,
+ groups.CLUSTER_ACTIONS_ROOT_GROW])
+class ClusterUpgradeGroup(TestGroup):
+
+ def __init__(self):
+ super(ClusterUpgradeGroup, self).__init__(
+ ClusterRunnerFactory.instance())
+
+ @test
+ def cluster_upgrade(self):
+ """Upgrade cluster."""
+ self.test_runner.run_cluster_upgrade()
+
+
+@test(groups=[GROUP, groups.CLUSTER_UPGRADE_WAIT],
+ depends_on_groups=[groups.CLUSTER_UPGRADE])
+class ClusterUpgradeWaitGroup(TestGroup):
+ def __init__(self):
+ super(ClusterUpgradeWaitGroup, self).__init__(
+ ClusterRunnerFactory.instance())
+
+ @test
+ def cluster_upgrade_wait(self):
+ """Wait for cluster upgrade to complete."""
+ self.test_runner.run_cluster_upgrade_wait()
+
+ @test(depends_on=[cluster_upgrade_wait])
+ def verify_initial_cluster_data_after_upgrade(self):
+ """Verify the initial data still exists after cluster upgrade."""
+ self.test_runner.run_verify_initial_cluster_data()
+
+ @test(depends_on=[cluster_upgrade_wait],
+ runs_after=[verify_initial_cluster_data_after_upgrade])
+ def add_upgrade_cluster_data_after_upgrade(self):
+ """Add more data to cluster after upgrade."""
+ self.test_runner.run_add_upgrade_cluster_data()
+
+ @test(depends_on=[add_upgrade_cluster_data_after_upgrade])
+ def verify_upgrade_cluster_data_after_upgrade(self):
+ """Verify the data added after cluster upgrade."""
+ self.test_runner.run_verify_upgrade_cluster_data()
+
+ @test(depends_on=[add_upgrade_cluster_data_after_upgrade],
+ runs_after=[verify_upgrade_cluster_data_after_upgrade])
+ def remove_upgrade_cluster_data_after_upgrade(self):
+ """Remove the data added after cluster upgrade."""
+ self.test_runner.run_remove_upgrade_cluster_data()
+
+
+@test(groups=[GROUP, groups.CLUSTER_ACTIONS,
+ groups.CLUSTER_ACTIONS_GROW_SHRINK,
+ groups.CLUSTER_ACTIONS_SHRINK],
+ depends_on_groups=[groups.CLUSTER_ACTIONS_GROW_WAIT],
+ runs_after_groups=[groups.CLUSTER_UPGRADE_WAIT])
+class ClusterShrinkGroup(TestGroup):
+
+ def __init__(self):
+ super(ClusterShrinkGroup, self).__init__(
+ ClusterRunnerFactory.instance())
+
+ @test
+ def cluster_shrink(self):
+ """Shrink cluster."""
+ self.test_runner.run_cluster_shrink()
+
+
+@test(groups=[GROUP, groups.CLUSTER_ACTIONS,
+ groups.CLUSTER_ACTIONS_SHRINK_WAIT],
+ depends_on_groups=[groups.CLUSTER_ACTIONS_SHRINK])
+class ClusterShrinkWaitGroup(TestGroup):
+ def __init__(self):
+ super(ClusterShrinkWaitGroup, self).__init__(
+ ClusterRunnerFactory.instance())
+
+ @test
+ def cluster_shrink_wait(self):
+ """Wait for the cluster shrink to complete."""
+ self.test_runner.run_cluster_shrink_wait()
+
+ @test(depends_on=[cluster_shrink_wait])
+ def verify_initial_cluster_data_after_shrink(self):
+ """Verify the initial data still exists after cluster shrink."""
+ self.test_runner.run_verify_initial_cluster_data()
+
+ @test(runs_after=[verify_initial_cluster_data_after_shrink])
+ def add_shrink_cluster_data(self):
+ """Add more data to cluster after shrink."""
+ self.test_runner.run_add_shrink_cluster_data()
+
+ @test(depends_on=[add_shrink_cluster_data])
+ def verify_shrink_cluster_data(self):
+ """Verify the data added after cluster shrink."""
+ self.test_runner.run_verify_shrink_cluster_data()
+
+ @test(depends_on=[add_shrink_cluster_data],
+ runs_after=[verify_shrink_cluster_data])
+ def remove_shrink_cluster_data(self):
+ """Remove the data added after cluster shrink."""
+ self.test_runner.run_remove_shrink_cluster_data()
+
+
+@test(groups=[GROUP, groups.CLUSTER_ACTIONS,
+ groups.CLUSTER_ACTIONS_ROOT_ACTIONS,
+ groups.CLUSTER_ACTIONS_ROOT_SHRINK],
+ depends_on_groups=[groups.CLUSTER_ACTIONS_SHRINK_WAIT])
+class ClusterRootEnableShrinkGroup(TestGroup):
+
+ def __init__(self):
+ super(ClusterRootEnableShrinkGroup, self).__init__(
+ ClusterRunnerFactory.instance())
+
+ @test
+ def verify_cluster_root_enable_after_shrink(self):
+ """Verify Root Enable after shrink."""
+ self.test_runner.run_verify_cluster_root_enable()
+
+
+@test(groups=[GROUP, groups.CLUSTER_ACTIONS,
+ groups.CLUSTER_DELETE],
+ depends_on_groups=[groups.CLUSTER_CREATE_WAIT],
+ runs_after_groups=[groups.CLUSTER_ACTIONS_ROOT_ENABLE,
+ groups.CLUSTER_ACTIONS_ROOT_GROW,
+ groups.CLUSTER_ACTIONS_ROOT_SHRINK,
+ groups.CLUSTER_ACTIONS_GROW_WAIT,
+ groups.CLUSTER_ACTIONS_SHRINK_WAIT,
+ groups.CLUSTER_UPGRADE_WAIT])
+class ClusterDeleteGroup(TestGroup):
+
+ def __init__(self):
+ super(ClusterDeleteGroup, self).__init__(
+ ClusterRunnerFactory.instance())
+
+ @test
+ def remove_initial_cluster_data(self):
+ """Remove the initial data from cluster."""
+ self.test_runner.run_remove_initial_cluster_data()
+
+ @test(runs_after=[remove_initial_cluster_data])
+ def cluster_delete(self):
+ """Delete an existing cluster."""
+ self.test_runner.run_cluster_delete()
+
+
+@test(groups=[GROUP, groups.CLUSTER_ACTIONS,
+ groups.CLUSTER_DELETE_WAIT],
+ depends_on_groups=[groups.CLUSTER_DELETE])
+class ClusterDeleteWaitGroup(TestGroup):
+
+ def __init__(self):
+ super(ClusterDeleteWaitGroup, self).__init__(
+ ClusterRunnerFactory.instance())
+
+ @test
+ def cluster_delete_wait(self):
+ """Wait for the existing cluster to be gone."""
+ self.test_runner.run_cluster_delete_wait()
diff --git a/trove/tests/scenario/groups/configuration_group.py b/trove/tests/scenario/groups/configuration_group.py
index 82894538..4c366c35 100644
--- a/trove/tests/scenario/groups/configuration_group.py
+++ b/trove/tests/scenario/groups/configuration_group.py
@@ -212,14 +212,8 @@ class ConfigurationInstCreateGroup(TestGroup):
"""Ensure deleting attached non-dynamic group fails."""
self.test_runner.run_delete_attached_non_dynamic_group()
- @test(runs_after=[list_dynamic_inst_conf_groups_after,
- list_non_dynamic_inst_conf_groups_after])
- def create_instance_with_conf(self):
- """Test create instance with conf group."""
- self.test_runner.run_create_instance_with_conf()
-
@test(depends_on=[attach_non_dynamic_group],
- runs_after=[create_instance_with_conf])
+ runs_after=[delete_attached_non_dynamic_group])
def update_non_dynamic_group(self):
"""Test update non-dynamic group."""
self.test_runner.run_update_non_dynamic_group()
@@ -230,6 +224,11 @@ class ConfigurationInstCreateGroup(TestGroup):
"""Test detach non-dynamic group."""
self.test_runner.run_detach_non_dynamic_group()
+ @test(runs_after=[detach_non_dynamic_group])
+ def create_instance_with_conf(self):
+ """Test create instance with conf group."""
+ self.test_runner.run_create_instance_with_conf()
+
@test(depends_on_groups=[groups.CFGGRP_INST_CREATE],
groups=[GROUP, groups.CFGGRP_INST,
diff --git a/trove/tests/scenario/groups/database_actions_group.py b/trove/tests/scenario/groups/database_actions_group.py
index ccd2fa61..b3b77c4a 100644
--- a/trove/tests/scenario/groups/database_actions_group.py
+++ b/trove/tests/scenario/groups/database_actions_group.py
@@ -136,17 +136,17 @@ class DatabaseActionsInstCreateWaitGroup(TestGroup):
@test
def wait_for_instances(self):
- """Waiting for all instances to become active."""
- self.instance_create_runner.run_wait_for_created_instances()
+ """Waiting for database instance to become active."""
+ self.instance_create_runner.run_wait_for_init_instance()
@test(depends_on=[wait_for_instances])
def add_initialized_instance_data(self):
- """Add data to the initialized instance."""
+ """Add data to the database instance."""
self.instance_create_runner.run_add_initialized_instance_data()
@test(runs_after=[add_initialized_instance_data])
def validate_initialized_instance(self):
- """Validate the initialized instance data and properties."""
+ """Validate the database instance data and properties."""
self.instance_create_runner.run_validate_initialized_instance()
@@ -162,7 +162,7 @@ class DatabaseActionsInstDeleteGroup(TestGroup):
@test
def delete_initialized_instance(self):
- """Delete the initialized instance."""
+ """Delete the database instance."""
self.instance_create_runner.run_initialized_instance_delete()
@@ -179,5 +179,5 @@ class DatabaseActionsInstDeleteWaitGroup(TestGroup):
@test
def wait_for_delete_initialized_instance(self):
- """Wait for the initialized instance to delete."""
+ """Wait for the database instance to delete."""
self.instance_create_runner.run_wait_for_init_delete()
diff --git a/trove/tests/scenario/groups/instance_actions_group.py b/trove/tests/scenario/groups/instance_actions_group.py
index 3730c24a..4fe5e5e9 100644
--- a/trove/tests/scenario/groups/instance_actions_group.py
+++ b/trove/tests/scenario/groups/instance_actions_group.py
@@ -41,15 +41,42 @@ class InstanceActionsGroup(TestGroup):
InstanceActionsRunnerFactory.instance())
@test
+ def add_test_data(self):
+ """Add test data."""
+ self.test_runner.run_add_test_data()
+
+ @test(depends_on=[add_test_data])
+ def verify_test_data(self):
+ """Verify test data."""
+ self.test_runner.run_verify_test_data()
+
+ @test(runs_after=[verify_test_data])
def instance_restart(self):
"""Restart an existing instance."""
self.test_runner.run_instance_restart()
- @test(depends_on=[instance_restart])
+ @test(depends_on=[verify_test_data, instance_restart])
+ def verify_test_data_after_restart(self):
+ """Verify test data after restart."""
+ self.test_runner.run_verify_test_data()
+
+ @test(depends_on=[instance_restart],
+ runs_after=[verify_test_data_after_restart])
def instance_resize_volume(self):
"""Resize attached volume."""
self.test_runner.run_instance_resize_volume()
+ @test(depends_on=[verify_test_data, instance_resize_volume])
+ def verify_test_data_after_volume_resize(self):
+ """Verify test data after volume resize."""
+ self.test_runner.run_verify_test_data()
+
+ @test(depends_on=[add_test_data],
+ runs_after=[verify_test_data_after_volume_resize])
+ def remove_test_data(self):
+ """Remove test data."""
+ self.test_runner.run_remove_test_data()
+
@test(depends_on_groups=[groups.INST_CREATE_WAIT],
groups=[GROUP, groups.INST_ACTIONS_RESIZE],
@@ -67,6 +94,16 @@ class InstanceActionsResizeGroup(TestGroup):
InstanceActionsRunnerFactory.instance())
@test
+ def add_test_data(self):
+ """Add test data."""
+ self.test_runner.run_add_test_data()
+
+ @test(depends_on=[add_test_data])
+ def verify_test_data(self):
+ """Verify test data."""
+ self.test_runner.run_verify_test_data()
+
+ @test(runs_after=[verify_test_data])
def instance_resize_flavor(self):
"""Resize instance flavor."""
self.test_runner.run_instance_resize_flavor()
@@ -88,3 +125,13 @@ class InstanceActionsResizeWaitGroup(TestGroup):
def wait_for_instance_resize_flavor(self):
"""Wait for resize instance flavor to complete."""
self.test_runner.run_wait_for_instance_resize_flavor()
+
+ @test(depends_on=[wait_for_instance_resize_flavor])
+ def verify_test_data_after_flavor_resize(self):
+ """Verify test data after flavor resize."""
+ self.test_runner.run_verify_test_data()
+
+ @test(runs_after=[verify_test_data_after_flavor_resize])
+ def remove_test_data(self):
+ """Remove test data."""
+ self.test_runner.run_remove_test_data()
diff --git a/trove/tests/scenario/groups/instance_create_group.py b/trove/tests/scenario/groups/instance_create_group.py
index 228be246..83a1f16b 100644
--- a/trove/tests/scenario/groups/instance_create_group.py
+++ b/trove/tests/scenario/groups/instance_create_group.py
@@ -45,7 +45,17 @@ class InstanceCreateGroup(TestGroup):
"""Create an empty instance."""
self.test_runner.run_empty_instance_create()
- @test(runs_after=[create_empty_instance])
+
+@test(depends_on_groups=[groups.INST_CREATE],
+ groups=[GROUP, groups.INST_INIT_CREATE])
+class InstanceInitCreateGroup(TestGroup):
+ """Test Instance Init Create functionality."""
+
+ def __init__(self):
+ super(InstanceInitCreateGroup, self).__init__(
+ InstanceCreateRunnerFactory.instance())
+
+ @test
def create_initial_configuration(self):
"""Create a configuration group for a new initialized instance."""
self.test_runner.run_initial_configuration_create()
@@ -59,7 +69,7 @@ class InstanceCreateGroup(TestGroup):
@test(depends_on_groups=[groups.INST_CREATE],
groups=[GROUP, groups.INST_CREATE_WAIT],
runs_after_groups=[groups.MODULE_CREATE, groups.CFGGRP_CREATE,
- groups.INST_ERROR_CREATE_WAIT])
+ groups.INST_ERROR_DELETE])
class InstanceCreateWaitGroup(TestGroup):
"""Test that Instance Create Completes."""
@@ -68,11 +78,27 @@ class InstanceCreateWaitGroup(TestGroup):
InstanceCreateRunnerFactory.instance())
@test
- def wait_for_instances(self):
- """Waiting for all instances to become active."""
- self.test_runner.run_wait_for_created_instances()
+ def wait_for_instance(self):
+ """Waiting for main instance to become active."""
+ self.test_runner.run_wait_for_instance()
+
+
+@test(depends_on_groups=[groups.INST_INIT_CREATE],
+ groups=[GROUP, groups.INST_INIT_CREATE_WAIT],
+ runs_after_groups=[groups.INST_CREATE_WAIT])
+class InstanceInitCreateWaitGroup(TestGroup):
+ """Test that Instance Init Create Completes."""
+
+ def __init__(self):
+ super(InstanceInitCreateWaitGroup, self).__init__(
+ InstanceCreateRunnerFactory.instance())
+
+ @test
+ def wait_for_init_instance(self):
+ """Waiting for init instance to become active."""
+ self.test_runner.run_wait_for_init_instance()
- @test(depends_on=[wait_for_instances])
+ @test(depends_on=[wait_for_init_instance])
def add_initialized_instance_data(self):
"""Add data to the initialized instance."""
self.test_runner.run_add_initialized_instance_data()
@@ -83,7 +109,7 @@ class InstanceCreateWaitGroup(TestGroup):
self.test_runner.run_validate_initialized_instance()
-@test(depends_on_groups=[groups.INST_CREATE_WAIT],
+@test(depends_on_groups=[groups.INST_INIT_CREATE_WAIT],
groups=[GROUP, groups.INST_INIT_DELETE])
class InstanceInitDeleteGroup(TestGroup):
"""Test Initialized Instance Delete functionality."""
diff --git a/trove/tests/scenario/groups/instance_upgrade_group.py b/trove/tests/scenario/groups/instance_upgrade_group.py
index c0d00ba4..04def6d5 100644
--- a/trove/tests/scenario/groups/instance_upgrade_group.py
+++ b/trove/tests/scenario/groups/instance_upgrade_group.py
@@ -65,6 +65,16 @@ class InstanceUpgradeGroup(TestGroup):
self.user_actions_runner.run_users_create()
@test(runs_after=[create_users])
+ def add_test_data(self):
+ """Add test data."""
+ self.test_runner.run_add_test_data()
+
+ @test(depends_on=[add_test_data])
+ def verify_test_data(self):
+ """Verify test data."""
+ self.test_runner.run_verify_test_data()
+
+ @test(runs_after=[verify_test_data])
def instance_upgrade(self):
"""Upgrade an existing instance."""
self.test_runner.run_instance_upgrade()
@@ -80,6 +90,17 @@ class InstanceUpgradeGroup(TestGroup):
"""List the created users."""
self.user_actions_runner.run_users_list()
+ @test(depends_on=[verify_test_data, instance_upgrade])
+ def verify_test_data_after_upgrade(self):
+ """Verify test data after upgrade."""
+ self.test_runner.run_verify_test_data()
+
+ @test(depends_on=[add_test_data],
+ runs_after=[verify_test_data_after_upgrade])
+ def remove_test_data(self):
+ """Remove test data."""
+ self.test_runner.run_remove_test_data()
+
@test(depends_on=[create_users],
runs_after=[list_users])
def delete_user(self):
diff --git a/trove/tests/scenario/groups/user_actions_group.py b/trove/tests/scenario/groups/user_actions_group.py
index db13e72c..b3ae309d 100644
--- a/trove/tests/scenario/groups/user_actions_group.py
+++ b/trove/tests/scenario/groups/user_actions_group.py
@@ -229,12 +229,12 @@ class UserActionsInstCreateWaitGroup(TestGroup):
@test
def wait_for_instances(self):
- """Waiting for all instances to become active."""
- self.instance_create_runner.run_wait_for_created_instances()
+ """Waiting for user instance to become active."""
+ self.instance_create_runner.run_wait_for_init_instance()
@test(depends_on=[wait_for_instances])
def validate_initialized_instance(self):
- """Validate the initialized instance data and properties."""
+ """Validate the user instance data and properties."""
self.instance_create_runner.run_validate_initialized_instance()
@@ -250,7 +250,7 @@ class UserActionsInstDeleteGroup(TestGroup):
@test
def delete_initialized_instance(self):
- """Delete the initialized instance."""
+ """Delete the user instance."""
self.instance_create_runner.run_initialized_instance_delete()
@@ -268,5 +268,5 @@ class UserActionsInstDeleteWaitGroup(TestGroup):
@test
def wait_for_delete_initialized_instance(self):
- """Wait for the initialized instance to delete."""
+ """Wait for the user instance to delete."""
self.instance_create_runner.run_wait_for_init_delete()
diff --git a/trove/tests/scenario/helpers/test_helper.py b/trove/tests/scenario/helpers/test_helper.py
index 0deeb9ca..9dbfb899 100644
--- a/trove/tests/scenario/helpers/test_helper.py
+++ b/trove/tests/scenario/helpers/test_helper.py
@@ -44,11 +44,13 @@ class DataType(Enum):
tiny2 = 4
# a third tiny dataset (also for replication propagation)
tiny3 = 5
+ # a fourth tiny dataset (for cluster propagation)
+ tiny4 = 6
# small amount of data (this can be added to each instance
# after creation, for example).
- small = 6
+ small = 7
# large data, enough to make creating a backup take 20s or more.
- large = 7
+ large = 8
class TestHelper(object):
@@ -130,6 +132,9 @@ class TestHelper(object):
DataType.tiny3.name: {
self.DATA_START: 3000,
self.DATA_SIZE: 100},
+ DataType.tiny4.name: {
+ self.DATA_START: 4000,
+ self.DATA_SIZE: 100},
DataType.small.name: {
self.DATA_START: 10000,
self.DATA_SIZE: 1000},
@@ -481,9 +486,24 @@ class TestHelper(object):
"""
return False
- ##############
+ ################
# Module related
- ##############
+ ################
def get_valid_module_type(self):
"""Return a valid module type."""
return "Ping"
+
+ #################
+ # Cluster related
+ #################
+ def get_cluster_types(self):
+ """Returns a list of cluster type lists to use when creating instances.
+ The list should be the same size as the number of cluster instances
+ that will be created. If not specified, no types are sent to
+ cluster-create. Cluster grow uses the first type in the list for the
+ first instance, and doesn't use anything for the second instance
+ (i.e. doesn't pass in anything for 'type').
+ An example for this method would be:
+ return [['data', 'other_type'], ['third_type']]
+ """
+ return None
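
The get_cluster_types() hook added above is intended to be overridden by datastore-specific helpers; a hypothetical override (the subclass name and type strings are illustrative, not from the patch) could look like this:

# Hypothetical helper override; only the hook itself comes from the patch.
from trove.tests.scenario.helpers.test_helper import TestHelper


class ExampleClusterHelper(TestHelper):

    def get_cluster_types(self):
        # One entry per cluster instance created; cluster grow only uses
        # the first entry, as the docstring above describes.
        return [['data', 'other_type'], ['third_type']]
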
diff --git a/trove/tests/scenario/runners/__init__.py b/trove/tests/scenario/runners/__init__.py
index 9d0d6a95..ecddc075 100644
--- a/trove/tests/scenario/runners/__init__.py
+++ b/trove/tests/scenario/runners/__init__.py
@@ -1,2 +1,3 @@
BUG_EJECT_VALID_MASTER = 1622014
BUG_WRONG_API_VALIDATION = 1498573
+BUG_STOP_DB_IN_CLUSTER = 1645096
diff --git a/trove/tests/scenario/runners/backup_runners.py b/trove/tests/scenario/runners/backup_runners.py
index 1c2f26fd..5e660aab 100644
--- a/trove/tests/scenario/runners/backup_runners.py
+++ b/trove/tests/scenario/runners/backup_runners.py
@@ -53,17 +53,19 @@ class BackupRunner(TestRunner):
self, expected_exception=exceptions.BadRequest,
expected_http_code=400):
invalid_inst_id = 'invalid-inst-id'
+ client = self.auth_client
self.assert_raises(
expected_exception, expected_http_code,
- self.auth_client.backups.create,
+ client, client.backups.create,
self.BACKUP_NAME, invalid_inst_id, self.BACKUP_DESC)
def run_backup_create_instance_not_found(
self, expected_exception=exceptions.NotFound,
expected_http_code=404):
+ client = self.auth_client
self.assert_raises(
expected_exception, expected_http_code,
- self.auth_client.backups.create,
+ client, client.backups.create,
self.BACKUP_NAME, generate_uuid(), self.BACKUP_DESC)
def run_add_data_for_backup(self):
@@ -109,11 +111,15 @@ class BackupRunner(TestRunner):
def assert_backup_create(self, name, desc, instance_id, parent_id=None,
incremental=False):
+ client = self.auth_client
+ datastore_version = client.datastore_versions.get(
+ self.instance_info.dbaas_datastore,
+ self.instance_info.dbaas_datastore_version)
if incremental:
- result = self.auth_client.backups.create(
+ result = client.backups.create(
name, instance_id, desc, incremental=incremental)
else:
- result = self.auth_client.backups.create(
+ result = client.backups.create(
name, instance_id, desc, parent_id=parent_id)
self.assert_equal(name, result.name,
'Unexpected backup name')
@@ -127,11 +133,7 @@ class BackupRunner(TestRunner):
self.assert_equal(parent_id, result.parent_id,
'Unexpected status for backup')
- instance = self.auth_client.instances.get(instance_id)
- datastore_version = self.auth_client.datastore_versions.get(
- self.instance_info.dbaas_datastore,
- self.instance_info.dbaas_datastore_version)
-
+ instance = client.instances.get(instance_id)
self.assert_equal('BACKUP', instance.status,
'Unexpected instance status')
self.assert_equal(self.instance_info.dbaas_datastore,
@@ -147,32 +149,37 @@ class BackupRunner(TestRunner):
def run_restore_instance_from_not_completed_backup(
self, expected_exception=exceptions.Conflict,
expected_http_code=409):
+ client = self.auth_client
self.assert_raises(
expected_exception, expected_http_code,
- self._restore_from_backup, self.backup_info.id)
+ None, self._restore_from_backup, client, self.backup_info.id)
+ self.assert_client_code(client, expected_http_code)
def run_instance_action_right_after_backup_create(
self, expected_exception=exceptions.UnprocessableEntity,
expected_http_code=422):
+ client = self.auth_client
self.assert_raises(expected_exception, expected_http_code,
- self.auth_client.instances.resize_instance,
+ client, client.instances.resize_instance,
self.instance_info.id, 1)
def run_backup_create_another_backup_running(
self, expected_exception=exceptions.UnprocessableEntity,
expected_http_code=422):
+ client = self.auth_client
self.assert_raises(expected_exception, expected_http_code,
- self.auth_client.backups.create,
+ client, client.backups.create,
'backup_test2', self.instance_info.id,
'test description2')
def run_backup_delete_while_backup_running(
self, expected_exception=exceptions.UnprocessableEntity,
expected_http_code=422):
- result = self.auth_client.backups.list()
+ client = self.auth_client
+ result = client.backups.list()
backup = result[0]
self.assert_raises(expected_exception, expected_http_code,
- self.auth_client.backups.delete, backup.id)
+ client, client.backups.delete, backup.id)
def run_backup_create_completed(self):
self._verify_backup(self.backup_info.id)
@@ -226,9 +233,10 @@ class BackupRunner(TestRunner):
def run_backup_list_filter_datastore_not_found(
self, expected_exception=exceptions.NotFound,
expected_http_code=404):
+ client = self.auth_client
self.assert_raises(
expected_exception, expected_http_code,
- self.auth_client.backups.list,
+ client, client.backups.list,
datastore='NOT_FOUND')
def run_backup_list_for_instance(self):
@@ -255,12 +263,10 @@ class BackupRunner(TestRunner):
def run_backup_get_unauthorized_user(
self, expected_exception=exceptions.NotFound,
expected_http_code=404):
+ client = self.unauth_client
self.assert_raises(
- expected_exception, None,
- self.unauth_client.backups.get, self.backup_info.id)
- # we're using a different client, so we'll check the return code
- # on it explicitly, instead of depending on 'assert_raises'
- self.assert_client_code(expected_http_code, client=self.unauth_client)
+ expected_exception, expected_http_code,
+ client, client.backups.get, self.backup_info.id)
def run_add_data_for_inc_backup_1(self):
self.backup_host = self.get_instance_host()
@@ -302,15 +308,16 @@ class BackupRunner(TestRunner):
def assert_restore_from_backup(self, backup_ref, suffix='',
expected_http_code=200):
- result = self._restore_from_backup(backup_ref, suffix=suffix)
- self.assert_client_code(expected_http_code, client=self.auth_client)
+ client = self.auth_client
+ result = self._restore_from_backup(client, backup_ref, suffix=suffix)
+ self.assert_client_code(client, expected_http_code)
self.assert_equal('BUILD', result.status,
'Unexpected instance status')
return result.id
- def _restore_from_backup(self, backup_ref, suffix=''):
+ def _restore_from_backup(self, client, backup_ref, suffix=''):
restore_point = {'backupRef': backup_ref}
- result = self.auth_client.instances.create(
+ result = client.instances.create(
self.instance_info.name + '_restore' + suffix,
self.instance_info.dbaas_flavor_href,
self.instance_info.volume,
@@ -372,8 +379,9 @@ class BackupRunner(TestRunner):
def assert_delete_restored_instance(
self, instance_id, expected_http_code):
- self.auth_client.instances.delete(instance_id)
- self.assert_client_code(expected_http_code, client=self.auth_client)
+ client = self.auth_client
+ client.instances.delete(instance_id)
+ self.assert_client_code(client, expected_http_code)
def run_delete_restored_inc_1_instance(self, expected_http_code=202):
self.assert_delete_restored_instance(
@@ -398,20 +406,19 @@ class BackupRunner(TestRunner):
def run_delete_unknown_backup(
self, expected_exception=exceptions.NotFound,
expected_http_code=404):
+ client = self.auth_client
self.assert_raises(
expected_exception, expected_http_code,
- self.auth_client.backups.delete,
+ client, client.backups.delete,
'unknown_backup')
def run_delete_backup_unauthorized_user(
self, expected_exception=exceptions.NotFound,
expected_http_code=404):
+ client = self.unauth_client
self.assert_raises(
- expected_exception, None,
- self.unauth_client.backups.delete, self.backup_info.id)
- # we're using a different client, so we'll check the return code
- # on it explicitly, instead of depending on 'assert_raises'
- self.assert_client_code(expected_http_code, client=self.unauth_client)
+ expected_exception, expected_http_code,
+ client, client.backups.delete, self.backup_info.id)
def run_delete_inc_2_backup(self, expected_http_code=202):
self.assert_delete_backup(
@@ -420,14 +427,15 @@ class BackupRunner(TestRunner):
def assert_delete_backup(
self, backup_id, expected_http_code):
- self.auth_client.backups.delete(backup_id)
- self.assert_client_code(expected_http_code, client=self.auth_client)
- self._wait_until_backup_is_gone(backup_id)
+ client = self.auth_client
+ client.backups.delete(backup_id)
+ self.assert_client_code(client, expected_http_code)
+ self._wait_until_backup_is_gone(client, backup_id)
- def _wait_until_backup_is_gone(self, backup_id):
+ def _wait_until_backup_is_gone(self, client, backup_id):
def _backup_is_gone():
try:
- self.auth_client.backups.get(backup_id)
+ client.backups.get(backup_id)
return False
except exceptions.NotFound:
return True
@@ -443,9 +451,10 @@ class BackupRunner(TestRunner):
expected_http_code=404):
if self.backup_inc_1_info is None:
raise SkipTest("Incremental Backup not created")
+ client = self.auth_client
self.assert_raises(
expected_exception, expected_http_code,
- self.auth_client.backups.get,
+ client, client.backups.get,
self.backup_inc_1_info.id)
self.backup_inc_1_info = None
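The backup runner hunks above all apply one convention: the client is captured in a local variable, assert_raises now receives both the expected HTTP code and the client issuing the failing call, and the trailing explicit assert_client_code(..., client=...) checks disappear. Below is a minimal sketch of helpers matching that calling convention; the names and signatures are inferred from the call sites in this diff, not taken from the real TestRunner class.

class _RunnerSketch(object):
    """Illustrative stand-ins for the helpers whose call sites change here."""

    def assert_client_code(self, client, expected_http_code):
        # The hypothetical client is assumed to record the HTTP status of
        # its last request.
        actual = client.last_http_code
        if actual != expected_http_code:
            raise AssertionError("expected HTTP %s, got %s"
                                 % (expected_http_code, actual))

    def assert_raises(self, expected_exception, expected_http_code,
                      client, client_cmd, *args, **kwargs):
        # Passing the client explicitly lets the HTTP code be verified on
        # the same client that made the failing call, even when it is not
        # the default auth_client.
        try:
            client_cmd(*args, **kwargs)
        except expected_exception:
            if expected_http_code is not None:
                self.assert_client_code(client, expected_http_code)
        else:
            raise AssertionError(
                "%s was not raised" % expected_exception.__name__)

With helpers of this shape, the unauthorized-user tests above no longer need a separate status check on the non-default client.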
diff --git a/trove/tests/scenario/runners/cluster_actions_runners.py b/trove/tests/scenario/runners/cluster_runners.py
index ad863870..9e4fc177 100644
--- a/trove/tests/scenario/runners/cluster_actions_runners.py
+++ b/trove/tests/scenario/runners/cluster_runners.py
@@ -22,12 +22,14 @@ import time as timer
from trove.common import exception
from trove.common.utils import poll_until
from trove.tests.scenario.helpers.test_helper import DataType
+from trove.tests.scenario import runners
+from trove.tests.scenario.runners.test_runners import SkipKnownBug
from trove.tests.scenario.runners.test_runners import TestRunner
from trove.tests.util.check import TypeCheck
from troveclient.compat import exceptions
-class ClusterActionsRunner(TestRunner):
+class ClusterRunner(TestRunner):
USE_CLUSTER_ID_FLAG = 'TESTS_USE_CLUSTER_ID'
DO_NOT_DELETE_CLUSTER_FLAG = 'TESTS_DO_NOT_DELETE_CLUSTER'
@@ -35,7 +37,7 @@ class ClusterActionsRunner(TestRunner):
EXTRA_INSTANCE_NAME = "named_instance"
def __init__(self):
- super(ClusterActionsRunner, self).__init__()
+ super(ClusterRunner, self).__init__()
self.cluster_name = 'test_cluster'
self.cluster_id = 0
@@ -44,6 +46,9 @@ class ClusterActionsRunner(TestRunner):
self.srv_grp_id = None
self.current_root_creds = None
self.locality = 'affinity'
+ self.initial_instance_count = None
+ self.cluster_instances = None
+ self.cluster_removed_instances = None
@property
def is_using_existing_cluster(self):
@@ -58,7 +63,6 @@ class ClusterActionsRunner(TestRunner):
return 2
def run_cluster_create(self, num_nodes=None, expected_task_name='BUILDING',
- expected_instance_states=['BUILD', 'ACTIVE'],
expected_http_code=200):
self.cluster_count_before_create = len(
self.auth_client.clusters.list())
@@ -67,59 +71,70 @@ class ClusterActionsRunner(TestRunner):
instance_flavor = self.get_instance_flavor()
- instances_def = [
+ instance_defs = [
self.build_flavor(
flavor_id=self.get_flavor_href(instance_flavor),
- volume_size=self.instance_info.volume['size'])] * num_nodes
+ volume_size=self.instance_info.volume['size'])
+ for count in range(0, num_nodes)]
+ types = self.test_helper.get_cluster_types()
+ for index, instance_def in enumerate(instance_defs):
+ instance_def['nics'] = self.instance_info.nics
+ if types and index < len(types):
+ instance_def['type'] = types[index]
self.cluster_id = self.assert_cluster_create(
- self.cluster_name, instances_def, self.locality,
- expected_task_name, expected_instance_states, expected_http_code)
+ self.cluster_name, instance_defs, self.locality,
+ expected_task_name, expected_http_code)
def assert_cluster_create(
self, cluster_name, instances_def, locality, expected_task_name,
- expected_instance_states, expected_http_code):
+ expected_http_code):
+
self.report.log("Testing cluster create: %s" % cluster_name)
+ client = self.auth_client
cluster = self.get_existing_cluster()
if cluster:
self.report.log("Using an existing cluster: %s" % cluster.id)
- cluster_instances = self._get_cluster_instances(cluster.id)
- self.assert_all_instance_states(
- cluster_instances, expected_instance_states[-1:])
else:
- cluster = self.auth_client.clusters.create(
+ cluster = client.clusters.create(
cluster_name, self.instance_info.dbaas_datastore,
self.instance_info.dbaas_datastore_version,
instances=instances_def, locality=locality)
+ self.assert_client_code(client, expected_http_code)
self._assert_cluster_values(cluster, expected_task_name)
- # Don't give an expected task here or it will do a 'get' on
- # the cluster. We tested the cluster values above.
- self._assert_cluster_action(cluster.id, None,
- expected_http_code)
- cluster_instances = self._get_cluster_instances(cluster.id)
- self.assert_all_instance_states(
- cluster_instances, expected_instance_states)
- # Create the helper user/database on the first node.
- # The cluster should handle the replication itself.
+ return cluster.id
+
+ def run_cluster_create_wait(self,
+ expected_instance_states=['BUILD', 'ACTIVE']):
+
+ self.assert_cluster_create_wait(
+ self.cluster_id, expected_instance_states=expected_instance_states)
+
+ def assert_cluster_create_wait(
+ self, cluster_id, expected_instance_states):
+ client = self.auth_client
+ cluster_instances = self._get_cluster_instances(client, cluster_id)
+ self.assert_all_instance_states(
+ cluster_instances, expected_instance_states)
+ # Create the helper user/database on the first node.
+ # The cluster should handle the replication itself.
+ if not self.get_existing_cluster():
self.create_test_helper_on_instance(cluster_instances[0])
- # make sure the server_group was created
- self.cluster_inst_ids = [inst.id for inst in cluster_instances]
- for id in self.cluster_inst_ids:
- srv_grp_id = self.assert_server_group_exists(id)
- if self.srv_grp_id and self.srv_grp_id != srv_grp_id:
- self.fail("Found multiple server groups for cluster")
- self.srv_grp_id = srv_grp_id
-
- cluster_id = cluster.id
# Although all instances have already acquired the expected state,
# we still need to poll for the final cluster task, because
# it may take up to the periodic task interval until the task name
# gets updated in the Trove database.
- self._assert_cluster_states(cluster_id, ['NONE'])
+ self._assert_cluster_states(client, cluster_id, ['NONE'])
- return cluster_id
+ # make sure the server_group was created
+ self.cluster_inst_ids = [inst.id for inst in cluster_instances]
+ for id in self.cluster_inst_ids:
+ srv_grp_id = self.assert_server_group_exists(id)
+ if self.srv_grp_id and self.srv_grp_id != srv_grp_id:
+ self.fail("Found multiple server groups for cluster")
+ self.srv_grp_id = srv_grp_id
def get_existing_cluster(self):
if self.is_using_existing_cluster:
@@ -132,10 +147,10 @@ class ClusterActionsRunner(TestRunner):
self.cluster_count_before_create + 1,
expected_http_code)
- def assert_cluster_list(self, expected_count,
- expected_http_code):
- count = len(self.auth_client.clusters.list())
- self.assert_client_code(expected_http_code, client=self.auth_client)
+ def assert_cluster_list(self, expected_count, expected_http_code):
+ client = self.auth_client
+ count = len(client.clusters.list())
+ self.assert_client_code(client, expected_http_code)
self.assert_equal(expected_count, count, "Unexpected cluster count")
def run_cluster_show(self, expected_http_code=200,
@@ -145,19 +160,23 @@ class ClusterActionsRunner(TestRunner):
def assert_cluster_show(self, cluster_id, expected_task_name,
expected_http_code):
- self._assert_cluster_response(cluster_id, expected_task_name)
+ self._assert_cluster_response(self.auth_client,
+ cluster_id, expected_task_name)
def run_cluster_root_enable(self, expected_task_name=None,
expected_http_code=200):
root_credentials = self.test_helper.get_helper_credentials_root()
- self.current_root_creds = self.auth_client.root.create_cluster_root(
+ if not root_credentials or not root_credentials.get('name'):
+ raise SkipTest("No root credentials provided.")
+ client = self.auth_client
+ self.current_root_creds = client.root.create_cluster_root(
self.cluster_id, root_credentials['password'])
+ self.assert_client_code(client, expected_http_code)
+        self._assert_cluster_response(client, self.cluster_id,
+                                      expected_task_name)
self.assert_equal(root_credentials['name'],
self.current_root_creds[0])
self.assert_equal(root_credentials['password'],
self.current_root_creds[1])
- self._assert_cluster_action(self.cluster_id, expected_task_name,
- expected_http_code)
def run_verify_cluster_root_enable(self):
if not self.current_root_creds:
@@ -168,46 +187,37 @@ class ClusterActionsRunner(TestRunner):
instance['id'])
self.assert_true(root_enabled_test.rootEnabled)
- for ip in cluster.ip:
- self.report.log("Pinging cluster as superuser via node: %s" % ip)
+ for ipv4 in self.extract_ipv4s(cluster.ip):
+ self.report.log("Pinging cluster as superuser via node: %s" % ipv4)
ping_response = self.test_helper.ping(
- ip,
+ ipv4,
username=self.current_root_creds[0],
- password=self.current_root_creds[1]
- )
+ password=self.current_root_creds[1])
self.assert_true(ping_response)
def run_add_initial_cluster_data(self, data_type=DataType.tiny):
self.assert_add_cluster_data(data_type, self.cluster_id)
- def run_add_extra_cluster_data(self, data_type=DataType.tiny2):
- self.assert_add_cluster_data(data_type, self.cluster_id)
-
def assert_add_cluster_data(self, data_type, cluster_id):
cluster = self.auth_client.clusters.get(cluster_id)
- self.test_helper.add_data(data_type, cluster.ip[0])
+ self.test_helper.add_data(data_type, self.extract_ipv4s(cluster.ip)[0])
def run_verify_initial_cluster_data(self, data_type=DataType.tiny):
self.assert_verify_cluster_data(data_type, self.cluster_id)
- def run_verify_extra_cluster_data(self, data_type=DataType.tiny2):
- self.assert_verify_cluster_data(data_type, self.cluster_id)
-
def assert_verify_cluster_data(self, data_type, cluster_id):
cluster = self.auth_client.clusters.get(cluster_id)
- for ip in cluster.ip:
- self.report.log("Verifying cluster data via node: %s" % ip)
- self.test_helper.verify_data(data_type, ip)
+ for ipv4 in self.extract_ipv4s(cluster.ip):
+ self.report.log("Verifying cluster data via node: %s" % ipv4)
+ self.test_helper.verify_data(data_type, ipv4)
def run_remove_initial_cluster_data(self, data_type=DataType.tiny):
self.assert_remove_cluster_data(data_type, self.cluster_id)
- def run_remove_extra_cluster_data(self, data_type=DataType.tiny2):
- self.assert_remove_cluster_data(data_type, self.cluster_id)
-
def assert_remove_cluster_data(self, data_type, cluster_id):
cluster = self.auth_client.clusters.get(cluster_id)
- self.test_helper.remove_data(data_type, cluster.ip[0])
+ self.test_helper.remove_data(
+ data_type, self.extract_ipv4s(cluster.ip)[0])
def run_cluster_grow(self, expected_task_name='GROWING_CLUSTER',
expected_http_code=202):
@@ -219,6 +229,10 @@ class ClusterActionsRunner(TestRunner):
self._build_instance_def(flavor_href,
self.instance_info.volume['size'],
self.EXTRA_INSTANCE_NAME)]
+ types = self.test_helper.get_cluster_types()
+ if types and types[0]:
+ added_instance_defs[0]['type'] = types[0]
+
self.assert_cluster_grow(
self.cluster_id, added_instance_defs, expected_task_name,
expected_http_code)
@@ -228,115 +242,200 @@ class ClusterActionsRunner(TestRunner):
flavor_id=flavor_id, volume_size=volume_size)
if name:
instance_def.update({'name': name})
+ instance_def.update({'nics': self.instance_info.nics})
return instance_def
def assert_cluster_grow(self, cluster_id, added_instance_defs,
expected_task_name, expected_http_code):
- cluster = self.auth_client.clusters.get(cluster_id)
+ client = self.auth_client
+ cluster = client.clusters.get(cluster_id)
initial_instance_count = len(cluster.instances)
- cluster = self.auth_client.clusters.grow(cluster_id,
- added_instance_defs)
- self._assert_cluster_action(cluster_id, expected_task_name,
- expected_http_code)
+ cluster = client.clusters.grow(cluster_id, added_instance_defs)
+ self.assert_client_code(client, expected_http_code)
+ self._assert_cluster_response(client, cluster_id, expected_task_name)
self.assert_equal(len(added_instance_defs),
len(cluster.instances) - initial_instance_count,
"Unexpected number of added nodes.")
- cluster_instances = self._get_cluster_instances(cluster_id)
+ def run_cluster_grow_wait(self):
+ self.assert_cluster_grow_wait(self.cluster_id)
+
+ def assert_cluster_grow_wait(self, cluster_id):
+ client = self.auth_client
+ cluster_instances = self._get_cluster_instances(client, cluster_id)
self.assert_all_instance_states(cluster_instances, ['ACTIVE'])
- self._assert_cluster_states(cluster_id, ['NONE'])
- self._assert_cluster_response(cluster_id, 'NONE')
+ self._assert_cluster_states(client, cluster_id, ['NONE'])
+ self._assert_cluster_response(client, cluster_id, 'NONE')
+
+ def run_add_grow_cluster_data(self, data_type=DataType.tiny2):
+ self.assert_add_cluster_data(data_type, self.cluster_id)
+
+ def run_verify_grow_cluster_data(self, data_type=DataType.tiny2):
+ self.assert_verify_cluster_data(data_type, self.cluster_id)
+
+ def run_remove_grow_cluster_data(self, data_type=DataType.tiny2):
+ self.assert_remove_cluster_data(data_type, self.cluster_id)
- def run_cluster_shrink(
- self, expected_task_name=None, expected_http_code=202):
- self.assert_cluster_shrink(self.cluster_id, [self.EXTRA_INSTANCE_NAME],
+ def run_cluster_upgrade(self, expected_task_name='UPGRADING_CLUSTER',
+ expected_http_code=202):
+ self.assert_cluster_upgrade(self.cluster_id,
+ expected_task_name, expected_http_code)
+
+ def assert_cluster_upgrade(self, cluster_id,
+ expected_task_name, expected_http_code):
+ client = self.auth_client
+ cluster = client.clusters.get(cluster_id)
+ self.initial_instance_count = len(cluster.instances)
+
+ client.clusters.upgrade(
+ cluster_id, self.instance_info.dbaas_datastore_version)
+ self.assert_client_code(client, expected_http_code)
+ self._assert_cluster_response(client, cluster_id, expected_task_name)
+
+ def run_cluster_upgrade_wait(self):
+ self.assert_cluster_upgrade_wait(
+ self.cluster_id, expected_last_instance_state='ACTIVE')
+
+ def assert_cluster_upgrade_wait(self, cluster_id,
+ expected_last_instance_state):
+ client = self.auth_client
+ self._assert_cluster_states(client, cluster_id, ['NONE'])
+ cluster_instances = self._get_cluster_instances(client, cluster_id)
+ self.assert_equal(
+ self.initial_instance_count,
+ len(cluster_instances),
+ "Unexpected number of instances after upgrade.")
+ self.assert_all_instance_states(cluster_instances,
+ [expected_last_instance_state])
+ self._assert_cluster_response(client, cluster_id, 'NONE')
+
+ def run_add_upgrade_cluster_data(self, data_type=DataType.tiny3):
+ self.assert_add_cluster_data(data_type, self.cluster_id)
+
+ def run_verify_upgrade_cluster_data(self, data_type=DataType.tiny3):
+ self.assert_verify_cluster_data(data_type, self.cluster_id)
+
+ def run_remove_upgrade_cluster_data(self, data_type=DataType.tiny3):
+ self.assert_remove_cluster_data(data_type, self.cluster_id)
+
+ def run_cluster_shrink(self, expected_task_name='SHRINKING_CLUSTER',
+ expected_http_code=202):
+ self.assert_cluster_shrink(self.auth_client,
+ self.cluster_id, [self.EXTRA_INSTANCE_NAME],
expected_task_name, expected_http_code)
- def assert_cluster_shrink(self, cluster_id, removed_instance_names,
+ def assert_cluster_shrink(self, client, cluster_id, removed_instance_names,
expected_task_name, expected_http_code):
- cluster = self.auth_client.clusters.get(cluster_id)
- initial_instance_count = len(cluster.instances)
+ cluster = client.clusters.get(cluster_id)
+ self.initial_instance_count = len(cluster.instances)
- removed_instances = self._find_cluster_instances_by_name(
- cluster, removed_instance_names)
+ self.cluster_removed_instances = (
+ self._find_cluster_instances_by_name(
+ cluster, removed_instance_names))
- cluster = self.auth_client.clusters.shrink(
- cluster_id, [{'id': instance['id']}
- for instance in removed_instances])
+ client.clusters.shrink(
+ cluster_id, [{'id': instance.id}
+ for instance in self.cluster_removed_instances])
- self._assert_cluster_action(cluster_id, expected_task_name,
- expected_http_code)
+ self.assert_client_code(client, expected_http_code)
+ self._assert_cluster_response(client, cluster_id, expected_task_name)
- self._assert_cluster_states(cluster_id, ['NONE'])
- cluster = self.auth_client.clusters.get(cluster_id)
+ def _find_cluster_instances_by_name(self, cluster, instance_names):
+ return [self.auth_client.instances.get(instance['id'])
+ for instance in cluster.instances
+ if instance['name'] in instance_names]
+
+ def run_cluster_shrink_wait(self):
+ self.assert_cluster_shrink_wait(
+ self.cluster_id, expected_last_instance_state='SHUTDOWN')
+
+ def assert_cluster_shrink_wait(self, cluster_id,
+ expected_last_instance_state):
+ client = self.auth_client
+ self._assert_cluster_states(client, cluster_id, ['NONE'])
+ cluster = client.clusters.get(cluster_id)
self.assert_equal(
- len(removed_instance_names),
- initial_instance_count - len(cluster.instances),
+ len(self.cluster_removed_instances),
+ self.initial_instance_count - len(cluster.instances),
"Unexpected number of removed nodes.")
- cluster_instances = self._get_cluster_instances(cluster_id)
+ cluster_instances = self._get_cluster_instances(client, cluster_id)
self.assert_all_instance_states(cluster_instances, ['ACTIVE'])
+ self.assert_all_gone(self.cluster_removed_instances,
+ expected_last_instance_state)
+ self._assert_cluster_response(client, cluster_id, 'NONE')
- self._assert_cluster_response(cluster_id, 'NONE')
+ def run_add_shrink_cluster_data(self, data_type=DataType.tiny4):
+ self.assert_add_cluster_data(data_type, self.cluster_id)
- def _find_cluster_instances_by_name(self, cluster, instance_names):
- return [instance for instance in cluster.instances
- if instance['name'] in instance_names]
+ def run_verify_shrink_cluster_data(self, data_type=DataType.tiny4):
+ self.assert_verify_cluster_data(data_type, self.cluster_id)
+
+ def run_remove_shrink_cluster_data(self, data_type=DataType.tiny4):
+ self.assert_remove_cluster_data(data_type, self.cluster_id)
def run_cluster_delete(
- self, expected_task_name='DELETING',
- expected_last_instance_state='SHUTDOWN', expected_http_code=202):
+ self, expected_task_name='DELETING', expected_http_code=202):
if self.has_do_not_delete_cluster:
self.report.log("TESTS_DO_NOT_DELETE_CLUSTER=True was "
"specified, skipping delete...")
raise SkipTest("TESTS_DO_NOT_DELETE_CLUSTER was specified.")
self.assert_cluster_delete(
- self.cluster_id, expected_task_name, expected_last_instance_state,
- expected_http_code)
+ self.cluster_id, expected_http_code)
- def assert_cluster_delete(
- self, cluster_id, expected_task_name, expected_last_instance_state,
- expected_http_code):
+ def assert_cluster_delete(self, cluster_id, expected_http_code):
self.report.log("Testing cluster delete: %s" % cluster_id)
- cluster_instances = self._get_cluster_instances(cluster_id)
+ client = self.auth_client
+ self.cluster_instances = self._get_cluster_instances(client,
+ cluster_id)
+
+ client.clusters.delete(cluster_id)
+ self.assert_client_code(client, expected_http_code)
+
+ def _get_cluster_instances(self, client, cluster_id):
+ cluster = client.clusters.get(cluster_id)
+ return [client.instances.get(instance['id'])
+ for instance in cluster.instances]
- self.auth_client.clusters.delete(cluster_id)
+ def run_cluster_delete_wait(
+ self, expected_task_name='DELETING',
+ expected_last_instance_state='SHUTDOWN'):
+ if self.has_do_not_delete_cluster:
+ self.report.log("TESTS_DO_NOT_DELETE_CLUSTER=True was "
+ "specified, skipping delete wait...")
+ raise SkipTest("TESTS_DO_NOT_DELETE_CLUSTER was specified.")
+
+ self.assert_cluster_delete_wait(
+ self.cluster_id, expected_task_name, expected_last_instance_state)
+
+ def assert_cluster_delete_wait(
+ self, cluster_id, expected_task_name,
+ expected_last_instance_state):
+ client = self.auth_client
# Since the server_group is removed right at the beginning of the
# cluster delete process we can't check for locality anymore.
- self._assert_cluster_action(cluster_id, expected_task_name,
- expected_http_code, check_locality=False)
+ self._assert_cluster_response(client, cluster_id, expected_task_name,
+ check_locality=False)
- self.assert_all_gone(cluster_instances, expected_last_instance_state)
- self._assert_cluster_gone(cluster_id)
+ self.assert_all_gone(self.cluster_instances,
+ expected_last_instance_state)
+ self._assert_cluster_gone(client, cluster_id)
# make sure the server group is gone too
self.assert_server_group_gone(self.srv_grp_id)
- def _get_cluster_instances(self, cluster_id):
- cluster = self.auth_client.clusters.get(cluster_id)
- return [self.auth_client.instances.get(instance['id'])
- for instance in cluster.instances]
-
- def _assert_cluster_action(
- self, cluster_id, expected_task_name, expected_http_code,
- check_locality=True):
- if expected_http_code is not None:
- self.assert_client_code(expected_http_code,
- client=self.auth_client)
- if expected_task_name:
- self._assert_cluster_response(cluster_id, expected_task_name,
- check_locality=check_locality)
-
- def _assert_cluster_states(self, cluster_id, expected_states,
+ def _assert_cluster_states(self, client, cluster_id, expected_states,
fast_fail_status=None):
for status in expected_states:
start_time = timer.time()
try:
- poll_until(lambda: self._has_task(
- cluster_id, status, fast_fail_status=fast_fail_status),
+ poll_until(
+ lambda: self._has_task(
+ client, cluster_id, status,
+ fast_fail_status=fast_fail_status),
sleep_time=self.def_sleep_time,
time_out=self.def_timeout)
self.report.log("Cluster has gone '%s' in %s." %
@@ -349,8 +448,8 @@ class ClusterActionsRunner(TestRunner):
return True
- def _has_task(self, cluster_id, task, fast_fail_status=None):
- cluster = self.auth_client.clusters.get(cluster_id)
+ def _has_task(self, client, cluster_id, task, fast_fail_status=None):
+ cluster = client.clusters.get(cluster_id)
task_name = cluster.task['name']
self.report.log("Waiting for cluster '%s' to become '%s': %s"
% (cluster_id, task, task_name))
@@ -359,10 +458,9 @@ class ClusterActionsRunner(TestRunner):
% (cluster_id, task))
return task_name == task
- def _assert_cluster_response(self, cluster_id, expected_task_name,
- expected_http_code=200, check_locality=True):
- cluster = self.auth_client.clusters.get(cluster_id)
- self.assert_client_code(expected_http_code, client=self.auth_client)
+ def _assert_cluster_response(self, client, cluster_id, expected_task_name,
+ check_locality=True):
+ cluster = client.clusters.get(cluster_id)
self._assert_cluster_values(cluster, expected_task_name,
check_locality=check_locality)
@@ -389,59 +487,63 @@ class ClusterActionsRunner(TestRunner):
self.assert_equal(self.locality, cluster.locality,
"Unexpected cluster locality")
- def _assert_cluster_gone(self, cluster_id):
+ def _assert_cluster_gone(self, client, cluster_id):
t0 = timer.time()
try:
# This will poll until the cluster goes away.
- self._assert_cluster_states(cluster_id, ['NONE'])
+ self._assert_cluster_states(client, cluster_id, ['NONE'])
self.fail(
"Cluster '%s' still existed after %s seconds."
% (cluster_id, self._time_since(t0)))
except exceptions.NotFound:
- self.assert_client_code(404, client=self.auth_client)
+ self.assert_client_code(client, 404)
-class CassandraClusterActionsRunner(ClusterActionsRunner):
+class CassandraClusterRunner(ClusterRunner):
def run_cluster_root_enable(self):
raise SkipTest("Operation is currently not supported.")
-class MariadbClusterActionsRunner(ClusterActionsRunner):
+class MariadbClusterRunner(ClusterRunner):
@property
def min_cluster_node_count(self):
return self.get_datastore_config_property('min_cluster_member_count')
- def run_cluster_root_enable(self):
- raise SkipTest("Operation is currently not supported.")
-
-class PxcClusterActionsRunner(ClusterActionsRunner):
+class MongodbClusterRunner(ClusterRunner):
@property
def min_cluster_node_count(self):
- return self.get_datastore_config_property('min_cluster_member_count')
+ return 3
+
+ def run_cluster_delete(self, expected_task_name='NONE',
+ expected_http_code=202):
+ raise SkipKnownBug(runners.BUG_STOP_DB_IN_CLUSTER)
-class VerticaClusterActionsRunner(ClusterActionsRunner):
+class PxcClusterRunner(ClusterRunner):
@property
def min_cluster_node_count(self):
- return self.get_datastore_config_property('cluster_member_count')
+ return self.get_datastore_config_property('min_cluster_member_count')
-class RedisClusterActionsRunner(ClusterActionsRunner):
+class RedisClusterRunner(ClusterRunner):
- def run_cluster_root_enable(self):
- raise SkipTest("Operation is currently not supported.")
+ # Since Redis runs all the shrink code in the API server, the call
+ # will not return until the task name has been set back to 'NONE' so
+ # we can't check it.
+ def run_cluster_shrink(self, expected_task_name='NONE',
+ expected_http_code=202):
+ return super(RedisClusterRunner, self).run_cluster_shrink(
+ expected_task_name=expected_task_name,
+ expected_http_code=expected_http_code)
-class MongodbClusterActionsRunner(ClusterActionsRunner):
-
- def run_cluster_root_enable(self):
- raise SkipTest("Operation is currently not supported.")
+class VerticaClusterRunner(ClusterRunner):
@property
def min_cluster_node_count(self):
- return 3
+ return self.get_datastore_config_property('cluster_member_count')
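A second theme in the cluster runner changes above is splitting each long-running action into an immediate run_<action> call and a separate run_<action>_wait step that polls the cluster task until it returns to 'NONE'. The sketch below shows the waiting half of that pattern with a plain polling loop; the real runner uses trove.common.utils.poll_until and its own timeouts, so the helper name and defaults here are assumptions.

import time


def wait_for_cluster_task(client, cluster_id, expected_task='NONE',
                          sleep_time=5, time_out=600):
    # Poll the cluster until its task name matches the expected value, or
    # give up after time_out seconds. The client is assumed to expose
    # clusters.get(cluster_id) returning an object with a task dict, as in
    # the hunks above.
    start = time.time()
    while time.time() - start < time_out:
        cluster = client.clusters.get(cluster_id)
        if cluster.task['name'] == expected_task:
            return
        time.sleep(sleep_time)
    raise AssertionError("cluster %s did not reach task '%s' in %ss"
                         % (cluster_id, expected_task, time_out))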
diff --git a/trove/tests/scenario/runners/configuration_runners.py b/trove/tests/scenario/runners/configuration_runners.py
index e237211d..aeed459f 100644
--- a/trove/tests/scenario/runners/configuration_runners.py
+++ b/trove/tests/scenario/runners/configuration_runners.py
@@ -49,9 +49,10 @@ class ConfigurationRunner(TestRunner):
def assert_action_on_conf_group_failure(
self, group_values, expected_exception, expected_http_code):
json_def = json.dumps(group_values)
+ client = self.auth_client
self.assert_raises(
expected_exception, expected_http_code,
- self.auth_client.configurations.create,
+ client, client.configurations.create,
'conf_group',
json_def,
'Group with Bad or Invalid entries',
@@ -128,13 +129,14 @@ class ConfigurationRunner(TestRunner):
def assert_create_group(self, name, description, values,
expected_http_code):
json_def = json.dumps(values)
- result = self.auth_client.configurations.create(
+ client = self.auth_client
+ result = client.configurations.create(
name,
json_def,
description,
datastore=self.instance_info.dbaas_datastore,
datastore_version=self.instance_info.dbaas_datastore_version)
- self.assert_client_code(expected_http_code, client=self.auth_client)
+ self.assert_client_code(client, expected_http_code)
with TypeCheck('Configuration', result) as configuration:
configuration.has_field('name', basestring)
@@ -269,12 +271,10 @@ class ConfigurationRunner(TestRunner):
def assert_conf_get_unauthorized_user(
self, config_id, expected_exception=exceptions.NotFound,
expected_http_code=404):
+ client = self.unauth_client
self.assert_raises(
- expected_exception, None,
- self.unauth_client.configurations.get, config_id)
- # we're using a different client, so we'll check the return code
- # on it explicitly, instead of depending on 'assert_raises'
- self.assert_client_code(expected_http_code, client=self.unauth_client)
+ expected_exception, expected_http_code,
+ client, client.configurations.get, config_id)
def run_non_dynamic_conf_get_unauthorized_user(
self, expected_exception=exceptions.NotFound,
@@ -372,9 +372,10 @@ class ConfigurationRunner(TestRunner):
def assert_update_group(
self, instance_id, group_id, values,
expected_states, expected_http_code, restart_inst=False):
- self.auth_client.configurations.update(group_id, values)
- self.assert_instance_action(
- instance_id, expected_states, expected_http_code)
+ client = self.auth_client
+ client.configurations.update(group_id, values)
+ self.assert_client_code(client, expected_http_code)
+ self.assert_instance_action(instance_id, expected_states)
if restart_inst:
self._restart_instance(instance_id)
@@ -445,9 +446,10 @@ class ConfigurationRunner(TestRunner):
def assert_instance_modify(
self, instance_id, group_id, expected_states, expected_http_code,
restart_inst=False):
- self.auth_client.instances.modify(instance_id, configuration=group_id)
- self.assert_instance_action(
- instance_id, expected_states, expected_http_code)
+ client = self.auth_client
+ client.instances.modify(instance_id, configuration=group_id)
+ self.assert_client_code(client, expected_http_code)
+ self.assert_instance_action(instance_id, expected_states)
# Verify the group has been attached.
instance = self.get_instance(instance_id)
@@ -470,9 +472,10 @@ class ConfigurationRunner(TestRunner):
def assert_instance_modify_failure(
self, instance_id, group_id, expected_exception,
expected_http_code):
+ client = self.auth_client
self.assert_raises(
expected_exception, expected_http_code,
- self.auth_client.instances.modify,
+ client, client.instances.modify,
instance_id, configuration=group_id)
def run_delete_dynamic_group(self, expected_http_code=202):
@@ -481,8 +484,9 @@ class ConfigurationRunner(TestRunner):
expected_http_code)
def assert_group_delete(self, group_id, expected_http_code):
- self.auth_client.configurations.delete(group_id)
- self.assert_client_code(expected_http_code, client=self.auth_client)
+ client = self.auth_client
+ client.configurations.delete(group_id)
+ self.assert_client_code(client, expected_http_code)
def run_delete_non_dynamic_group(self, expected_http_code=202):
if self.non_dynamic_group_id:
@@ -491,16 +495,18 @@ class ConfigurationRunner(TestRunner):
def assert_group_delete_failure(self, group_id, expected_exception,
expected_http_code):
+ client = self.auth_client
self.assert_raises(
expected_exception, expected_http_code,
- self.auth_client.configurations.delete, group_id)
+ client, client.configurations.delete, group_id)
def _restart_instance(
self, instance_id, expected_states=['REBOOT', 'ACTIVE'],
expected_http_code=202):
- self.auth_client.instances.restart(instance_id)
- self.assert_instance_action(instance_id, expected_states,
- expected_http_code)
+ client = self.auth_client
+ client.instances.restart(instance_id)
+ self.assert_client_code(client, expected_http_code)
+ self.assert_instance_action(instance_id, expected_states)
def run_create_instance_with_conf(self):
self.config_id_for_inst = (
@@ -514,16 +520,18 @@ class ConfigurationRunner(TestRunner):
def assert_create_instance_with_conf(self, config_id):
# test that a new instance will apply the configuration on create
- result = self.auth_client.instances.create(
+ client = self.auth_client
+ result = client.instances.create(
self.instance_info.name + "_config",
self.instance_info.dbaas_flavor_href,
self.instance_info.volume,
[], [],
datastore=self.instance_info.dbaas_datastore,
datastore_version=self.instance_info.dbaas_datastore_version,
+ nics=self.instance_info.nics,
availability_zone="nova",
configuration=config_id)
- self.assert_client_code(200, client=self.auth_client)
+ self.assert_client_code(client, 200)
self.assert_equal("BUILD", result.status, 'Unexpected inst status')
return result.id
@@ -553,8 +561,9 @@ class ConfigurationRunner(TestRunner):
raise SkipTest("No instance created with a configuration group.")
def assert_delete_conf_instance(self, instance_id, expected_http_code):
- self.auth_client.instances.delete(instance_id)
- self.assert_client_code(expected_http_code, client=self.auth_client)
+ client = self.auth_client
+ client.instances.delete(instance_id)
+ self.assert_client_code(client, expected_http_code)
def run_wait_for_delete_conf_instance(
self, expected_last_state=['SHUTDOWN']):
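The configuration runner hunks above also decouple the HTTP status check from the state polling: the call is issued, assert_client_code verifies the return code on that client right away, and only then does assert_instance_action wait for the state transitions (it no longer takes an HTTP code). A small sketch of that ordering, using hypothetical runner and client stand-ins:

def restart_and_wait(runner, instance_id,
                     expected_states=('REBOOT', 'ACTIVE'),
                     expected_http_code=202):
    # Issue the call, check the HTTP code immediately, then wait for the
    # instance to pass through the expected states.
    client = runner.auth_client
    client.instances.restart(instance_id)
    runner.assert_client_code(client, expected_http_code)
    runner.assert_instance_action(instance_id, list(expected_states))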
diff --git a/trove/tests/scenario/runners/database_actions_runners.py b/trove/tests/scenario/runners/database_actions_runners.py
index 52829ee9..467df39d 100644
--- a/trove/tests/scenario/runners/database_actions_runners.py
+++ b/trove/tests/scenario/runners/database_actions_runners.py
@@ -52,9 +52,11 @@ class DatabaseActionsRunner(TestRunner):
def assert_databases_create(self, instance_id, serial_databases_def,
expected_http_code):
- self.auth_client.databases.create(instance_id, serial_databases_def)
- self.assert_client_code(expected_http_code, client=self.auth_client)
- self.wait_for_database_create(instance_id, serial_databases_def)
+ client = self.auth_client
+ client.databases.create(instance_id, serial_databases_def)
+ self.assert_client_code(client, expected_http_code)
+ self.wait_for_database_create(client,
+ instance_id, serial_databases_def)
return serial_databases_def
def run_databases_list(self, expected_http_code=200):
@@ -63,8 +65,9 @@ class DatabaseActionsRunner(TestRunner):
def assert_databases_list(self, instance_id, expected_database_defs,
expected_http_code, limit=2):
- full_list = self.auth_client.databases.list(instance_id)
- self.assert_client_code(expected_http_code, client=self.auth_client)
+ client = self.auth_client
+ full_list = client.databases.list(instance_id)
+ self.assert_client_code(client, expected_http_code)
listed_databases = {database.name: database for database in full_list}
self.assert_is_none(full_list.next,
"Unexpected pagination in the list.")
@@ -85,8 +88,8 @@ class DatabaseActionsRunner(TestRunner):
"output.")
# Test list pagination.
- list_page = self.auth_client.databases.list(instance_id, limit=limit)
- self.assert_client_code(expected_http_code, client=self.auth_client)
+ list_page = client.databases.list(instance_id, limit=limit)
+ self.assert_client_code(client, expected_http_code)
self.assert_true(len(list_page) <= limit)
if len(full_list) > limit:
@@ -102,10 +105,9 @@ class DatabaseActionsRunner(TestRunner):
self.assert_equal(expected_marker, marker,
"Pagination marker should be the last element "
"in the page.")
- list_page = self.auth_client.databases.list(
+ list_page = client.databases.list(
instance_id, marker=marker)
- self.assert_client_code(expected_http_code,
- client=self.auth_client)
+ self.assert_client_code(client, expected_http_code)
self.assert_pagination_match(
list_page, full_list, limit, len(full_list))
@@ -132,10 +134,11 @@ class DatabaseActionsRunner(TestRunner):
def assert_databases_create_failure(
self, instance_id, serial_databases_def,
expected_exception, expected_http_code):
+ client = self.auth_client
self.assert_raises(
expected_exception,
expected_http_code,
- self.auth_client.databases.create,
+ client, client.databases.create,
instance_id,
serial_databases_def)
@@ -163,16 +166,18 @@ class DatabaseActionsRunner(TestRunner):
instance_id,
database_name,
expected_http_code):
- self.auth_client.databases.delete(instance_id, database_name)
- self.assert_client_code(expected_http_code, client=self.auth_client)
- self._wait_for_database_delete(instance_id, database_name)
+ client = self.auth_client
+ client.databases.delete(instance_id, database_name)
+ self.assert_client_code(client, expected_http_code)
+ self._wait_for_database_delete(client, instance_id, database_name)
- def _wait_for_database_delete(self, instance_id, deleted_database_name):
+ def _wait_for_database_delete(self, client,
+ instance_id, deleted_database_name):
self.report.log("Waiting for deleted database to disappear from the "
"listing: %s" % deleted_database_name)
def _db_is_gone():
- all_dbs = self.get_db_names(instance_id)
+ all_dbs = self.get_db_names(client, instance_id)
return deleted_database_name not in all_dbs
try:
@@ -182,12 +187,12 @@ class DatabaseActionsRunner(TestRunner):
self.fail("Database still listed after the poll timeout: %ds" %
self.GUEST_CAST_WAIT_TIMEOUT_SEC)
- def run_nonexisting_database_delete(self, expected_http_code=202):
- # Deleting a non-existing database is expected to succeed as if the
- # database was deleted.
- self.assert_database_delete(
+ def run_nonexisting_database_delete(
+ self, expected_exception=exceptions.NotFound,
+ expected_http_code=404):
+ self.assert_database_delete_failure(
self.instance_info.id, self.non_existing_db_def['name'],
- expected_http_code)
+ expected_exception, expected_http_code)
def run_system_database_delete(
self, expected_exception=exceptions.BadRequest,
@@ -205,8 +210,9 @@ class DatabaseActionsRunner(TestRunner):
def assert_database_delete_failure(
self, instance_id, database_name,
expected_exception, expected_http_code):
+ client = self.auth_client
self.assert_raises(expected_exception, expected_http_code,
- self.auth_client.databases.delete,
+ client, client.databases.delete,
instance_id, database_name)
def get_system_databases(self):
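Beyond the client handling, the database runner above now treats deleting a nonexistent database as a 404 failure and keeps the wait-until-gone polling, parameterized by the client. A standalone sketch of that polling check, assuming a callable that lists current database names (the runner's own get_db_names helper plays that role above):

import time


def wait_until_database_gone(list_db_names, instance_id, db_name,
                             timeout=30, interval=2):
    # Re-list the databases until the deleted name disappears, or fail
    # after the timeout. list_db_names is a hypothetical callable standing
    # in for the runner's get_db_names(client, instance_id).
    deadline = time.time() + timeout
    while time.time() < deadline:
        if db_name not in list_db_names(instance_id):
            return
        time.sleep(interval)
    raise AssertionError("Database '%s' still listed after %ss"
                         % (db_name, timeout))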
diff --git a/trove/tests/scenario/runners/guest_log_runners.py b/trove/tests/scenario/runners/guest_log_runners.py
index 93f7e9c1..4e905b85 100644
--- a/trove/tests/scenario/runners/guest_log_runners.py
+++ b/trove/tests/scenario/runners/guest_log_runners.py
@@ -101,7 +101,7 @@ class GuestLogRunner(TestRunner):
self.report.log("Executing log_show for log '%s'" % log_name)
log_details = client.instances.log_show(
self.instance_info.id, log_name)
- self.assert_client_code(expected_http_code, client=client)
+ self.assert_client_code(client, expected_http_code)
self.assert_log_details(
log_details, log_name,
expected_type=expected_type,
@@ -187,7 +187,7 @@ class GuestLogRunner(TestRunner):
self.report.log("Executing log_enable for log '%s'" % log_name)
log_details = client.instances.log_enable(
self.instance_info.id, log_name)
- self.assert_client_code(expected_http_code, client=client)
+ self.assert_client_code(client, expected_http_code)
self.assert_log_details(
log_details, log_name,
expected_type=expected_type,
@@ -204,7 +204,7 @@ class GuestLogRunner(TestRunner):
(log_name, discard))
log_details = client.instances.log_disable(
self.instance_info.id, log_name, discard=discard)
- self.assert_client_code(expected_http_code, client=client)
+ self.assert_client_code(client, expected_http_code)
self.assert_log_details(
log_details, log_name,
expected_type=expected_type,
@@ -222,7 +222,7 @@ class GuestLogRunner(TestRunner):
(log_name, disable, discard))
log_details = client.instances.log_publish(
self.instance_info.id, log_name, disable=disable, discard=discard)
- self.assert_client_code(expected_http_code, client=client)
+ self.assert_client_code(client, expected_http_code)
self.assert_log_details(
log_details, log_name,
expected_type=expected_type,
@@ -238,7 +238,7 @@ class GuestLogRunner(TestRunner):
self.report.log("Executing log_discard for log '%s'" % log_name)
log_details = client.instances.log_discard(
self.instance_info.id, log_name)
- self.assert_client_code(expected_http_code, client=client)
+ self.assert_client_code(client, expected_http_code)
self.assert_log_details(
log_details, log_name,
expected_type=expected_type,
@@ -303,10 +303,10 @@ class GuestLogRunner(TestRunner):
expected_exception, expected_http_code,
log_name):
self.assert_raises(expected_exception, None,
- client.instances.log_enable,
+ client, client.instances.log_enable,
self.instance_info.id, log_name)
# we may not be using the main client, so check explicitly here
- self.assert_client_code(expected_http_code, client=client)
+ self.assert_client_code(client, expected_http_code)
def run_test_log_disable_sys(self,
expected_exception=exceptions.BadRequest,
@@ -320,11 +320,11 @@ class GuestLogRunner(TestRunner):
expected_exception, expected_http_code,
log_name, discard=None):
self.assert_raises(expected_exception, None,
- client.instances.log_disable,
+ client, client.instances.log_disable,
self.instance_info.id, log_name,
discard=discard)
# we may not be using the main client, so check explicitly here
- self.assert_client_code(expected_http_code, client=client)
+ self.assert_client_code(client, expected_http_code)
def run_test_log_show_unauth_user(self,
expected_exception=exceptions.NotFound,
@@ -338,19 +338,18 @@ class GuestLogRunner(TestRunner):
expected_exception, expected_http_code,
log_name):
self.assert_raises(expected_exception, None,
- client.instances.log_show,
+ client, client.instances.log_show,
self.instance_info.id, log_name)
# we may not be using the main client, so check explicitly here
- self.assert_client_code(expected_http_code, client=client)
+ self.assert_client_code(client, expected_http_code)
def run_test_log_list_unauth_user(self,
expected_exception=exceptions.NotFound,
expected_http_code=404):
- self.assert_raises(expected_exception, None,
- self.unauth_client.instances.log_list,
+ client = self.unauth_client
+ self.assert_raises(expected_exception, expected_http_code,
+ client, client.instances.log_list,
self.instance_info.id)
- # we're not using the main client, so check explicitly here
- self.assert_client_code(expected_http_code, client=self.unauth_client)
def run_test_log_generator_unauth_user(self):
self.assert_log_generator_unauth_user(
@@ -406,11 +405,11 @@ class GuestLogRunner(TestRunner):
log_name,
disable=None, discard=None):
self.assert_raises(expected_exception, None,
- client.instances.log_publish,
+ client, client.instances.log_publish,
self.instance_info.id, log_name,
disable=disable, discard=discard)
# we may not be using the main client, so check explicitly here
- self.assert_client_code(expected_http_code, client=client)
+ self.assert_client_code(client, expected_http_code)
def run_test_log_discard_unexposed_user(
self, expected_exception=exceptions.BadRequest,
@@ -423,11 +422,9 @@ class GuestLogRunner(TestRunner):
def assert_log_discard_fails(self, client,
expected_exception, expected_http_code,
log_name):
- self.assert_raises(expected_exception, None,
- client.instances.log_discard,
+ self.assert_raises(expected_exception, expected_http_code,
+ client, client.instances.log_discard,
self.instance_info.id, log_name)
- # we may not be using the main client, so check explicitly here
- self.assert_client_code(expected_http_code, client=client)
def run_test_log_enable_user(self):
expected_status = guest_log.LogStatus.Ready.name
@@ -470,23 +467,23 @@ class GuestLogRunner(TestRunner):
# we need to wait until the heartbeat flips the instance
# back into 'ACTIVE' before we issue the restart command
expected_states = ['RESTART_REQUIRED', 'ACTIVE']
- self.assert_instance_action(instance_id, expected_states, None)
- self.auth_client.instances.restart(instance_id)
- self.assert_client_code(expected_http_code,
- client=self.auth_client)
+ self.assert_instance_action(instance_id, expected_states)
+ client = self.auth_client
+ client.instances.restart(instance_id)
+ self.assert_client_code(client, expected_http_code)
def run_test_wait_for_restart(self, expected_states=['REBOOT', 'ACTIVE']):
if self.test_helper.log_enable_requires_restart():
- self.assert_instance_action(self.instance_info.id,
- expected_states, None)
+ self.assert_instance_action(self.instance_info.id, expected_states)
def run_test_log_publish_user(self):
for log_name in self._get_exposed_user_log_names():
self.assert_log_publish(
self.auth_client,
log_name,
- expected_status=guest_log.LogStatus.Published.name,
- expected_published=1, expected_pending=0)
+ expected_status=[guest_log.LogStatus.Published.name,
+ guest_log.LogStatus.Partial.name],
+ expected_published=1, expected_pending=None)
def run_test_add_data(self):
self.test_helper.add_data(DataType.micro, self.get_instance_host())
@@ -499,9 +496,10 @@ class GuestLogRunner(TestRunner):
self.assert_log_publish(
self.admin_client,
log_name,
- expected_status=guest_log.LogStatus.Published.name,
+ expected_status=[guest_log.LogStatus.Published.name,
+ guest_log.LogStatus.Partial.name],
expected_published=self._get_last_log_published(log_name),
- expected_pending=0)
+ expected_pending=None)
def run_test_log_generator_user(self):
for log_name in self._get_exposed_user_log_names():
@@ -559,9 +557,10 @@ class GuestLogRunner(TestRunner):
self.assert_log_publish(
self.auth_client,
log_name,
- expected_status=guest_log.LogStatus.Published.name,
+ expected_status=[guest_log.LogStatus.Published.name,
+ guest_log.LogStatus.Partial.name],
expected_published=self._get_last_log_published(log_name),
- expected_pending=0)
+ expected_pending=None)
# Now get the full contents of the log
self.assert_log_generator(self.auth_client, log_name, lines=100000)
log_lines = len(self._get_last_log_contents(log_name).splitlines())
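The guest log hunks above relax the publication checks: a publish may now report either 'Published' or 'Partial', so expected_status is passed as a list and expected_pending is no longer pinned to zero. A sketch of a status assertion that accepts a single status name or a list of acceptable names (the names mirror trove's guest_log statuses but are reproduced here only for illustration):

def assert_log_status(actual_status, expected_status):
    # Accept either a single status name or a list of acceptable names.
    if isinstance(expected_status, (list, tuple)):
        allowed = list(expected_status)
    else:
        allowed = [expected_status]
    if actual_status not in allowed:
        raise AssertionError("log status %r not in %r"
                             % (actual_status, allowed))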
diff --git a/trove/tests/scenario/runners/instance_actions_runners.py b/trove/tests/scenario/runners/instance_actions_runners.py
index 6b182046..2dd4a2f7 100644
--- a/trove/tests/scenario/runners/instance_actions_runners.py
+++ b/trove/tests/scenario/runners/instance_actions_runners.py
@@ -16,6 +16,7 @@
from proboscis import SkipTest
from trove.tests.config import CONFIG
+from trove.tests.scenario.helpers.test_helper import DataType
from trove.tests.scenario.runners.test_runners import TestRunner
@@ -36,6 +37,18 @@ class InstanceActionsRunner(TestRunner):
return self.get_flavor(flavor_name)
+ def run_add_test_data(self):
+ host = self.get_instance_host(self.instance_info.id)
+ self.test_helper.add_data(DataType.small, host)
+
+ def run_verify_test_data(self):
+ host = self.get_instance_host(self.instance_info.id)
+ self.test_helper.verify_data(DataType.small, host)
+
+ def run_remove_test_data(self):
+ host = self.get_instance_host(self.instance_info.id)
+ self.test_helper.remove_data(DataType.small, host)
+
def run_instance_restart(
self, expected_states=['REBOOT', 'ACTIVE'],
expected_http_code=202):
@@ -46,9 +59,10 @@ class InstanceActionsRunner(TestRunner):
expected_http_code):
self.report.log("Testing restart on instance: %s" % instance_id)
- self.auth_client.instances.restart(instance_id)
- self.assert_instance_action(instance_id, expected_states,
- expected_http_code)
+ client = self.auth_client
+ client.instances.restart(instance_id)
+ self.assert_client_code(client, expected_http_code)
+ self.assert_instance_action(instance_id, expected_states)
def run_instance_resize_volume(
self, resize_amount=1,
@@ -70,9 +84,10 @@ class InstanceActionsRunner(TestRunner):
old_volume_size = int(instance.volume['size'])
new_volume_size = old_volume_size + resize_amount
- self.auth_client.instances.resize_volume(instance_id, new_volume_size)
- self.assert_instance_action(instance_id, expected_states,
- expected_http_code)
+ client = self.auth_client
+ client.instances.resize_volume(instance_id, new_volume_size)
+ self.assert_client_code(client, expected_http_code)
+ self.assert_instance_action(instance_id, expected_states)
instance = self.get_instance(instance_id)
self.assert_equal(new_volume_size, instance.volume['size'],
@@ -86,9 +101,9 @@ class InstanceActionsRunner(TestRunner):
expected_http_code):
self.report.log("Testing resize to '%s' on instance: %s" %
(resize_flavor_id, instance_id))
- self.auth_client.instances.resize_instance(
- instance_id, resize_flavor_id)
- self.assert_client_code(expected_http_code, client=self.auth_client)
+ client = self.auth_client
+ client.instances.resize_instance(instance_id, resize_flavor_id)
+ self.assert_client_code(client, expected_http_code)
def run_wait_for_instance_resize_flavor(
self, expected_states=['RESIZE', 'ACTIVE']):
diff --git a/trove/tests/scenario/runners/instance_create_runners.py b/trove/tests/scenario/runners/instance_create_runners.py
index 5eb6a837..4825a02d 100644
--- a/trove/tests/scenario/runners/instance_create_runners.py
+++ b/trove/tests/scenario/runners/instance_create_runners.py
@@ -28,7 +28,7 @@ class InstanceCreateRunner(TestRunner):
def __init__(self):
super(InstanceCreateRunner, self).__init__()
- self.init_inst_id = None
+ self.init_inst_info = None
self.init_inst_dbs = None
self.init_inst_users = None
self.init_inst_host = None
@@ -58,8 +58,8 @@ class InstanceCreateRunner(TestRunner):
instance_info.dbaas_datastore_version)
self.instance_info.dbaas_flavor_href = instance_info.dbaas_flavor_href
self.instance_info.volume = instance_info.volume
- self.instance_info.srv_grp_id = self.assert_server_group_exists(
- self.instance_info.id)
+ self.instance_info.helper_user = instance_info.helper_user
+ self.instance_info.helper_database = instance_info.helper_database
def run_initial_configuration_create(self, expected_http_code=200):
dynamic_config = self.test_helper.get_dynamic_group()
@@ -67,14 +67,14 @@ class InstanceCreateRunner(TestRunner):
values = dynamic_config or non_dynamic_config
if values:
json_def = json.dumps(values)
- result = self.auth_client.configurations.create(
+ client = self.auth_client
+ result = client.configurations.create(
'initial_configuration_for_instance_create',
json_def,
"Configuration group used by instance create tests.",
datastore=self.instance_info.dbaas_datastore,
datastore_version=self.instance_info.dbaas_datastore_version)
- self.assert_client_code(expected_http_code,
- client=self.auth_client)
+ self.assert_client_code(client, expected_http_code)
self.config_group_id = result.id
else:
@@ -108,7 +108,7 @@ class InstanceCreateRunner(TestRunner):
expected_states, expected_http_code,
create_helper_user=create_helper_user)
- self.init_inst_id = info.id
+ self.init_inst_info = info
else:
# There is no need to run this test as it's effectively the same as
# the empty instance test.
@@ -131,6 +131,8 @@ class InstanceCreateRunner(TestRunner):
users = [{'name': item['name'], 'password': item['password']}
for item in user_definitions]
+ instance_info = InstanceTestInfo()
+
# Here we add helper user/database if any.
if create_helper_user:
helper_db_def, helper_user_def, root_def = self.build_helper_defs()
@@ -139,14 +141,15 @@ class InstanceCreateRunner(TestRunner):
"Appending a helper database '%s' to the instance "
"definition." % helper_db_def['name'])
databases.append(helper_db_def)
+ instance_info.helper_database = helper_db_def
if helper_user_def:
self.report.log(
"Appending a helper user '%s:%s' to the instance "
"definition."
% (helper_user_def['name'], helper_user_def['password']))
users.append(helper_user_def)
+ instance_info.helper_user = helper_user_def
- instance_info = InstanceTestInfo()
instance_info.name = name
instance_info.databases = databases
instance_info.users = users
@@ -157,10 +160,7 @@ class InstanceCreateRunner(TestRunner):
instance_info.volume = {'size': trove_volume_size}
else:
instance_info.volume = None
-
- shared_network = CONFIG.get('shared_network', None)
- if shared_network:
- instance_info.nics = [{'net-id': shared_network}]
+ instance_info.nics = self.instance_info.nics
self.report.log("Testing create instance: %s"
% {'name': name,
@@ -182,7 +182,8 @@ class InstanceCreateRunner(TestRunner):
instance_info.name = instance.name
else:
self.report.log("Creating a new instance.")
- instance = self.auth_client.instances.create(
+ client = self.auth_client
+ instance = client.instances.create(
instance_info.name,
instance_info.dbaas_flavor_href,
instance_info.volume,
@@ -194,8 +195,8 @@ class InstanceCreateRunner(TestRunner):
datastore=instance_info.dbaas_datastore,
datastore_version=instance_info.dbaas_datastore_version,
locality=locality)
- self.assert_instance_action(
- instance.id, expected_states[0:1], expected_http_code)
+ self.assert_client_code(client, expected_http_code)
+ self.assert_instance_action(instance.id, expected_states[0:1])
instance_info.id = instance.id
@@ -227,23 +228,46 @@ class InstanceCreateRunner(TestRunner):
return instance_info
- def run_wait_for_created_instances(
+ def run_wait_for_instance(
self, expected_states=['BUILD', 'ACTIVE']):
instances = [self.instance_info.id]
- if self.init_inst_id:
- instances.append(self.init_inst_id)
self.assert_all_instance_states(instances, expected_states)
+ self.instance_info.srv_grp_id = self.assert_server_group_exists(
+ self.instance_info.id)
+ self.wait_for_test_helpers(self.instance_info)
+
+ def run_wait_for_init_instance(
+ self, expected_states=['BUILD', 'ACTIVE']):
+ if self.init_inst_info:
+ instances = [self.init_inst_info.id]
+ self.assert_all_instance_states(instances, expected_states)
+ self.wait_for_test_helpers(self.init_inst_info)
+
+ def wait_for_test_helpers(self, inst_info):
+ self.report.log("Waiting for helper users and databases to be "
+ "created on instance: %s" % inst_info.id)
+ client = self.auth_client
+ if inst_info.helper_user:
+ self.wait_for_user_create(client, inst_info.id,
+ [inst_info.helper_user])
+ if inst_info.helper_database:
+ self.wait_for_database_create(client, inst_info.id,
+ [inst_info.helper_database])
+ self.report.log("Test helpers are ready.")
def run_add_initialized_instance_data(self):
- self.init_inst_data = DataType.small
- self.init_inst_host = self.get_instance_host(self.init_inst_id)
- self.test_helper.add_data(self.init_inst_data, self.init_inst_host)
+ if self.init_inst_info:
+ self.init_inst_data = DataType.small
+ self.init_inst_host = self.get_instance_host(
+ self.init_inst_info.id)
+ self.test_helper.add_data(self.init_inst_data, self.init_inst_host)
def run_validate_initialized_instance(self):
- if self.init_inst_id:
+ if self.init_inst_info:
self.assert_instance_properties(
- self.init_inst_id, self.init_inst_dbs, self.init_inst_users,
- self.init_inst_config_group_id, self.init_inst_data)
+ self.init_inst_info.id, self.init_inst_dbs,
+ self.init_inst_users, self.init_inst_config_group_id,
+ self.init_inst_data)
def assert_instance_properties(
self, instance_id, expected_dbs_definitions,
@@ -278,52 +302,42 @@ class InstanceCreateRunner(TestRunner):
"No configuration group expected")
def assert_database_list(self, instance_id, expected_databases):
- expected_names = self._get_names(expected_databases)
- full_list = self.auth_client.databases.list(instance_id)
- self.assert_is_none(full_list.next,
- "Unexpected pagination in the database list.")
- listed_names = [database.name for database in full_list]
- self.assert_is_sublist(expected_names, listed_names,
- "Mismatch in instance databases.")
+ self.wait_for_database_create(self.auth_client,
+ instance_id, expected_databases)
def _get_names(self, definitions):
return [item['name'] for item in definitions]
def assert_user_list(self, instance_id, expected_users):
- expected_names = self._get_names(expected_users)
- full_list = self.auth_client.users.list(instance_id)
- self.assert_is_none(full_list.next,
- "Unexpected pagination in the user list.")
- listed_names = [user.name for user in full_list]
- self.assert_is_sublist(expected_names, listed_names,
- "Mismatch in instance users.")
-
+ self.wait_for_user_create(self.auth_client,
+ instance_id, expected_users)
# Verify that user definitions include only created databases.
all_databases = self._get_names(
self.test_helper.get_valid_database_definitions())
for user in expected_users:
- self.assert_is_sublist(
- self._get_names(user['databases']), all_databases,
- "Definition of user '%s' specifies databases not included in "
- "the list of initial databases." % user['name'])
+ if 'databases' in user:
+ self.assert_is_sublist(
+ self._get_names(user['databases']), all_databases,
+ "Definition of user '%s' specifies databases not included "
+ "in the list of initial databases." % user['name'])
def run_initialized_instance_delete(self, expected_http_code=202):
- if self.init_inst_id:
- self.auth_client.instances.delete(self.init_inst_id)
- self.assert_client_code(expected_http_code,
- client=self.auth_client)
+ if self.init_inst_info:
+ client = self.auth_client
+ client.instances.delete(self.init_inst_info.id)
+ self.assert_client_code(client, expected_http_code)
else:
raise SkipTest("Cleanup is not required.")
def run_wait_for_init_delete(self, expected_states=['SHUTDOWN']):
delete_ids = []
- if self.init_inst_id:
- delete_ids.append(self.init_inst_id)
+ if self.init_inst_info:
+ delete_ids.append(self.init_inst_info.id)
if delete_ids:
self.assert_all_gone(delete_ids, expected_states[-1])
else:
raise SkipTest("Cleanup is not required.")
- self.init_inst_id = None
+ self.init_inst_info = None
self.init_inst_dbs = None
self.init_inst_users = None
self.init_inst_host = None
@@ -332,9 +346,9 @@ class InstanceCreateRunner(TestRunner):
def run_initial_configuration_delete(self, expected_http_code=202):
if self.config_group_id:
- self.auth_client.configurations.delete(self.config_group_id)
- self.assert_client_code(expected_http_code,
- client=self.auth_client)
+ client = self.auth_client
+ client.configurations.delete(self.config_group_id)
+ self.assert_client_code(client, expected_http_code)
else:
raise SkipTest("Cleanup is not required.")
self.config_group_id = None
diff --git a/trove/tests/scenario/runners/instance_delete_runners.py b/trove/tests/scenario/runners/instance_delete_runners.py
index f5d1bb3e..ae98f5bb 100644
--- a/trove/tests/scenario/runners/instance_delete_runners.py
+++ b/trove/tests/scenario/runners/instance_delete_runners.py
@@ -35,8 +35,9 @@ class InstanceDeleteRunner(TestRunner):
def assert_instance_delete(self, instance_id, expected_http_code):
self.report.log("Testing delete on instance: %s" % instance_id)
- self.auth_client.instances.delete(instance_id)
- self.assert_client_code(expected_http_code, client=self.auth_client)
+ client = self.auth_client
+ client.instances.delete(instance_id)
+ self.assert_client_code(client, expected_http_code)
def run_instance_delete_wait(self, expected_states=['SHUTDOWN']):
if self.has_do_not_delete_instance:
diff --git a/trove/tests/scenario/runners/instance_error_create_runners.py b/trove/tests/scenario/runners/instance_error_create_runners.py
index c0f3c4df..c83d1728 100644
--- a/trove/tests/scenario/runners/instance_error_create_runners.py
+++ b/trove/tests/scenario/runners/instance_error_create_runners.py
@@ -33,14 +33,15 @@ class InstanceErrorCreateRunner(TestRunner):
name = self.instance_info.name + '_error'
flavor = self.get_instance_flavor(fault_num=1)
- inst = self.auth_client.instances.create(
+ client = self.auth_client
+ inst = client.instances.create(
name,
self.get_flavor_href(flavor),
self.instance_info.volume,
nics=self.instance_info.nics,
datastore=self.instance_info.dbaas_datastore,
datastore_version=self.instance_info.dbaas_datastore_version)
- self.assert_client_code(expected_http_code, client=self.auth_client)
+ self.assert_client_code(client, expected_http_code)
self.error_inst_id = inst.id
def run_create_error2_instance(self, expected_http_code=200):
@@ -50,14 +51,15 @@ class InstanceErrorCreateRunner(TestRunner):
name = self.instance_info.name + '_error2'
flavor = self.get_instance_flavor(fault_num=2)
- inst = self.auth_client.instances.create(
+ client = self.auth_client
+ inst = client.instances.create(
name,
self.get_flavor_href(flavor),
self.instance_info.volume,
nics=self.instance_info.nics,
datastore=self.instance_info.dbaas_datastore,
datastore_version=self.instance_info.dbaas_datastore_version)
- self.assert_client_code(expected_http_code, client=self.auth_client)
+ self.assert_client_code(client, expected_http_code)
self.error2_inst_id = inst.id
def run_wait_for_error_instances(self, expected_states=['ERROR']):
@@ -100,14 +102,13 @@ class InstanceErrorCreateRunner(TestRunner):
(instance.fault['message'], err_msg))
def run_delete_error_instances(self, expected_http_code=202):
+ client = self.auth_client
if self.error_inst_id:
- self.auth_client.instances.delete(self.error_inst_id)
- self.assert_client_code(expected_http_code,
- client=self.auth_client)
+ client.instances.delete(self.error_inst_id)
+ self.assert_client_code(client, expected_http_code)
if self.error2_inst_id:
- self.auth_client.instances.delete(self.error2_inst_id)
- self.assert_client_code(expected_http_code,
- client=self.auth_client)
+ client.instances.delete(self.error2_inst_id)
+ self.assert_client_code(client, expected_http_code)
def run_wait_for_error_delete(self, expected_states=['SHUTDOWN']):
delete_ids = []
diff --git a/trove/tests/scenario/runners/instance_force_delete_runners.py b/trove/tests/scenario/runners/instance_force_delete_runners.py
index 03045a29..70ebc87c 100644
--- a/trove/tests/scenario/runners/instance_force_delete_runners.py
+++ b/trove/tests/scenario/runners/instance_force_delete_runners.py
@@ -33,21 +33,23 @@ class InstanceForceDeleteRunner(TestRunner):
name = self.instance_info.name + '_build'
flavor = self.get_instance_flavor()
- inst = self.auth_client.instances.create(
+ client = self.auth_client
+ inst = client.instances.create(
name,
self.get_flavor_href(flavor),
self.instance_info.volume,
nics=self.instance_info.nics,
datastore=self.instance_info.dbaas_datastore,
datastore_version=self.instance_info.dbaas_datastore_version)
- self.assert_instance_action([inst.id], expected_states,
- expected_http_code)
+ self.assert_client_code(client, expected_http_code)
+ self.assert_instance_action([inst.id], expected_states)
self.build_inst_id = inst.id
def run_delete_build_instance(self, expected_http_code=202):
if self.build_inst_id:
- self.auth_client.instances.force_delete(self.build_inst_id)
- self.assert_client_code(expected_http_code)
+ client = self.admin_client
+ client.instances.force_delete(self.build_inst_id)
+ self.assert_client_code(client, expected_http_code)
def run_wait_for_force_delete(self):
if self.build_inst_id:
diff --git a/trove/tests/scenario/runners/instance_upgrade_runners.py b/trove/tests/scenario/runners/instance_upgrade_runners.py
index 587024de..b9b61815 100644
--- a/trove/tests/scenario/runners/instance_upgrade_runners.py
+++ b/trove/tests/scenario/runners/instance_upgrade_runners.py
@@ -13,6 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+from trove.tests.scenario.helpers.test_helper import DataType
from trove.tests.scenario.runners.test_runners import TestRunner
@@ -21,6 +22,18 @@ class InstanceUpgradeRunner(TestRunner):
def __init__(self):
super(InstanceUpgradeRunner, self).__init__()
+ def run_add_test_data(self):
+ host = self.get_instance_host(self.instance_info.id)
+ self.test_helper.add_data(DataType.small, host)
+
+ def run_verify_test_data(self):
+ host = self.get_instance_host(self.instance_info.id)
+ self.test_helper.verify_data(DataType.small, host)
+
+ def run_remove_test_data(self):
+ host = self.get_instance_host(self.instance_info.id)
+ self.test_helper.remove_data(DataType.small, host)
+
def run_instance_upgrade(
self, expected_states=['UPGRADE', 'ACTIVE'],
expected_http_code=202):
@@ -28,6 +41,7 @@ class InstanceUpgradeRunner(TestRunner):
self.report.log("Testing upgrade on instance: %s" % instance_id)
target_version = self.instance_info.dbaas_datastore_version
- self.auth_client.instances.upgrade(instance_id, target_version)
- self.assert_instance_action(instance_id, expected_states,
- expected_http_code)
+ client = self.auth_client
+ client.instances.upgrade(instance_id, target_version)
+ self.assert_client_code(client, expected_http_code)
+ self.assert_instance_action(instance_id, expected_states)
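
Taken together, the new upgrade scenario amounts to a data-survival check around the upgrade call; a condensed sketch of that flow, in which the helper object and the get_host and wait_until_active callables are assumptions rather than Trove APIs:

def check_data_survives_upgrade(client, helper, instance_id, target_version,
                                get_host, wait_until_active):
    """Seed a small data set, upgrade the instance, then verify the data."""
    host = get_host(instance_id)        # assumed: resolves the instance's IPv4
    helper.add_data('small', host)      # seed a known, small data set
    client.instances.upgrade(instance_id, target_version)
    assert client.last_http_code == 202, "Unexpected client status code"
    wait_until_active(instance_id)      # assumed: polls through UPGRADE/ACTIVE
    helper.verify_data('small', host)   # the data must survive the upgrade
    helper.remove_data('small', host)   # clean up after the check
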
diff --git a/trove/tests/scenario/runners/module_runners.py b/trove/tests/scenario/runners/module_runners.py
index a14289e2..669a48a1 100644
--- a/trove/tests/scenario/runners/module_runners.py
+++ b/trove/tests/scenario/runners/module_runners.py
@@ -142,53 +142,59 @@ class ModuleRunner(TestRunner):
def run_module_create_bad_type(
self, expected_exception=exceptions.NotFound,
expected_http_code=404):
+ client = self.auth_client
self.assert_raises(
expected_exception, expected_http_code,
- self.auth_client.modules.create,
+ client, client.modules.create,
self.MODULE_NAME, 'invalid-type', self.MODULE_NEG_CONTENTS)
def run_module_create_non_admin_auto(
self, expected_exception=exceptions.Forbidden,
expected_http_code=403):
+ client = self.auth_client
self.assert_raises(
expected_exception, expected_http_code,
- self.auth_client.modules.create,
+ client, client.modules.create,
self.MODULE_NAME, self.module_type, self.MODULE_NEG_CONTENTS,
auto_apply=True)
def run_module_create_non_admin_all_tenant(
self, expected_exception=exceptions.Forbidden,
expected_http_code=403):
+ client = self.auth_client
self.assert_raises(
expected_exception, expected_http_code,
- self.auth_client.modules.create,
+ client, client.modules.create,
self.MODULE_NAME, self.module_type, self.MODULE_NEG_CONTENTS,
all_tenants=True)
def run_module_create_non_admin_hidden(
self, expected_exception=exceptions.Forbidden,
expected_http_code=403):
+ client = self.auth_client
self.assert_raises(
expected_exception, expected_http_code,
- self.auth_client.modules.create,
+ client, client.modules.create,
self.MODULE_NAME, self.module_type, self.MODULE_NEG_CONTENTS,
visible=False)
def run_module_create_bad_datastore(
self, expected_exception=exceptions.NotFound,
expected_http_code=404):
+ client = self.auth_client
self.assert_raises(
expected_exception, expected_http_code,
- self.auth_client.modules.create,
+ client, client.modules.create,
self.MODULE_NAME, self.module_type, self.MODULE_NEG_CONTENTS,
datastore='bad-datastore')
def run_module_create_bad_datastore_version(
self, expected_exception=exceptions.BadRequest,
expected_http_code=400):
+ client = self.auth_client
self.assert_raises(
expected_exception, expected_http_code,
- self.auth_client.modules.create,
+ client, client.modules.create,
self.MODULE_NAME, self.module_type, self.MODULE_NEG_CONTENTS,
datastore=self.instance_info.dbaas_datastore,
datastore_version='bad-datastore-version')
@@ -196,9 +202,10 @@ class ModuleRunner(TestRunner):
def run_module_create_missing_datastore(
self, expected_exception=exceptions.BadRequest,
expected_http_code=400):
+ client = self.auth_client
self.assert_raises(
expected_exception, expected_http_code,
- self.auth_client.modules.create,
+ client, client.modules.create,
self.MODULE_NAME, self.module_type, self.MODULE_NEG_CONTENTS,
datastore_version=self.instance_info.dbaas_datastore_version)
@@ -242,8 +249,9 @@ class ModuleRunner(TestRunner):
datastore=datastore, datastore_version=datastore_version,
auto_apply=auto_apply,
live_update=live_update, visible=visible)
- if (client == self.auth_client or
- (client == self.admin_client and visible)):
+ username = client.real_client.client.username
+ if (('alt' in username and 'admin' not in username) or
+ ('admin' in username and visible)):
self.module_create_count += 1
if datastore:
self.module_ds_create_count += 1
@@ -358,17 +366,19 @@ class ModuleRunner(TestRunner):
def run_module_create_dupe(
self, expected_exception=exceptions.BadRequest,
expected_http_code=400):
+ client = self.auth_client
self.assert_raises(
expected_exception, expected_http_code,
- self.auth_client.modules.create,
+ client, client.modules.create,
self.MODULE_NAME, self.module_type, self.MODULE_NEG_CONTENTS)
def run_module_update_missing_datastore(
self, expected_exception=exceptions.BadRequest,
expected_http_code=400):
+ client = self.auth_client
self.assert_raises(
expected_exception, expected_http_code,
- self.auth_client.modules.update,
+ client, client.modules.update,
self.update_test_module.id,
datastore_version=self.instance_info.dbaas_datastore_version)
@@ -414,12 +424,10 @@ class ModuleRunner(TestRunner):
def run_module_show_unauth_user(
self, expected_exception=exceptions.NotFound,
expected_http_code=404):
+ client = self.unauth_client
self.assert_raises(
- expected_exception, None,
- self.unauth_client.modules.get, self.main_test_module.id)
- # we're using a different client, so we'll check the return code
- # on it explicitly, instead of depending on 'assert_raises'
- self.assert_client_code(expected_http_code, client=self.unauth_client)
+ expected_exception, expected_http_code,
+ client, client.modules.get, self.main_test_module.id)
def run_module_list(self):
self.assert_module_list(
@@ -551,9 +559,10 @@ class ModuleRunner(TestRunner):
self, expected_exception=exceptions.NotFound,
expected_http_code=404):
module = self._find_invisible_module()
+ client = self.auth_client
self.assert_raises(
expected_exception, expected_http_code,
- self.auth_client.modules.get, module.id)
+ client, client.modules.get, module.id)
def run_module_list_admin(self):
self.assert_module_list(
@@ -632,82 +641,92 @@ class ModuleRunner(TestRunner):
def run_module_update_unauth(
self, expected_exception=exceptions.NotFound,
expected_http_code=404):
+ client = self.unauth_client
self.assert_raises(
expected_exception, expected_http_code,
- self.unauth_client.modules.update,
+ client, client.modules.update,
self.main_test_module.id, description='Upd')
def run_module_update_non_admin_auto(
self, expected_exception=exceptions.Forbidden,
expected_http_code=403):
+ client = self.auth_client
self.assert_raises(
expected_exception, expected_http_code,
- self.auth_client.modules.update,
+ client, client.modules.update,
self.main_test_module.id, visible=False)
def run_module_update_non_admin_auto_off(
self, expected_exception=exceptions.Forbidden,
expected_http_code=403):
module = self._find_auto_apply_module()
+ client = self.auth_client
self.assert_raises(
expected_exception, expected_http_code,
- self.auth_client.modules.update, module.id, auto_apply=False)
+ client, client.modules.update, module.id, auto_apply=False)
def run_module_update_non_admin_auto_any(
self, expected_exception=exceptions.Forbidden,
expected_http_code=403):
module = self._find_auto_apply_module()
+ client = self.auth_client
self.assert_raises(
expected_exception, expected_http_code,
- self.auth_client.modules.update, module.id, description='Upd')
+ client, client.modules.update, module.id, description='Upd')
def run_module_update_non_admin_all_tenant(
self, expected_exception=exceptions.Forbidden,
expected_http_code=403):
+ client = self.auth_client
self.assert_raises(
expected_exception, expected_http_code,
- self.auth_client.modules.update,
+ client, client.modules.update,
self.main_test_module.id, all_tenants=True)
def run_module_update_non_admin_all_tenant_off(
self, expected_exception=exceptions.Forbidden,
expected_http_code=403):
module = self._find_all_tenant_module()
+ client = self.auth_client
self.assert_raises(
expected_exception, expected_http_code,
- self.auth_client.modules.update, module.id, all_tenants=False)
+ client, client.modules.update, module.id, all_tenants=False)
def run_module_update_non_admin_all_tenant_any(
self, expected_exception=exceptions.Forbidden,
expected_http_code=403):
module = self._find_all_tenant_module()
+ client = self.auth_client
self.assert_raises(
expected_exception, expected_http_code,
- self.auth_client.modules.update, module.id, description='Upd')
+ client, client.modules.update, module.id, description='Upd')
def run_module_update_non_admin_invisible(
self, expected_exception=exceptions.Forbidden,
expected_http_code=403):
+ client = self.auth_client
self.assert_raises(
expected_exception, expected_http_code,
- self.auth_client.modules.update,
+ client, client.modules.update,
self.main_test_module.id, visible=False)
def run_module_update_non_admin_invisible_off(
self, expected_exception=exceptions.NotFound,
expected_http_code=404):
module = self._find_invisible_module()
+ client = self.auth_client
self.assert_raises(
expected_exception, expected_http_code,
- self.auth_client.modules.update, module.id, visible=True)
+ client, client.modules.update, module.id, visible=True)
def run_module_update_non_admin_invisible_any(
self, expected_exception=exceptions.NotFound,
expected_http_code=404):
module = self._find_invisible_module()
+ client = self.auth_client
self.assert_raises(
expected_exception, expected_http_code,
- self.auth_client.modules.update, module.id, description='Upd')
+ client, client.modules.update, module.id, description='Upd')
# ModuleInstanceGroup methods
def run_module_list_instance_empty(self):
@@ -718,7 +737,7 @@ class ModuleRunner(TestRunner):
def assert_module_list_instance(self, client, instance_id, expected_count,
expected_http_code=200):
module_list = client.instances.modules(instance_id)
- self.assert_client_code(expected_http_code, client=client)
+ self.assert_client_code(client, expected_http_code)
count = len(module_list)
self.assert_equal(expected_count, count,
"Wrong number of modules from list instance")
@@ -733,7 +752,7 @@ class ModuleRunner(TestRunner):
def assert_module_instances(self, client, module_id, expected_count,
expected_http_code=200):
instance_list = client.modules.instances(module_id)
- self.assert_client_code(expected_http_code, client=client)
+ self.assert_client_code(client, expected_http_code)
count = len(instance_list)
self.assert_equal(expected_count, count,
"Wrong number of instances applied from module")
@@ -751,7 +770,7 @@ class ModuleRunner(TestRunner):
def assert_module_query(self, client, instance_id, expected_count,
expected_http_code=200, expected_results=None):
modquery_list = client.instances.module_query(instance_id)
- self.assert_client_code(expected_http_code, client=client)
+ self.assert_client_code(client, expected_http_code)
count = len(modquery_list)
self.assert_equal(expected_count, count,
"Wrong number of modules from query")
@@ -774,7 +793,7 @@ class ModuleRunner(TestRunner):
expected_http_code=200):
module_apply_list = client.instances.module_apply(
instance_id, [module.id])
- self.assert_client_code(expected_http_code, client=client)
+ self.assert_client_code(client, expected_http_code)
admin_only = (not module.visible or module.auto_apply or
not module.tenant_id)
expected_status = expected_status or 'OK'
@@ -919,7 +938,8 @@ class ModuleRunner(TestRunner):
def assert_inst_mod_create(self, module_id, name_suffix,
expected_http_code):
- inst = self.auth_client.instances.create(
+ client = self.auth_client
+ inst = client.instances.create(
self.instance_info.name + name_suffix,
self.instance_info.dbaas_flavor_href,
self.instance_info.volume,
@@ -928,15 +948,16 @@ class ModuleRunner(TestRunner):
nics=self.instance_info.nics,
modules=[module_id],
)
- self.assert_client_code(expected_http_code, client=self.auth_client)
+ self.assert_client_code(client, expected_http_code)
return inst.id
def run_module_delete_applied(
self, expected_exception=exceptions.Forbidden,
expected_http_code=403):
+ client = self.auth_client
self.assert_raises(
expected_exception, expected_http_code,
- self.auth_client.modules.delete, self.main_test_module.id)
+ client, client.modules.delete, self.main_test_module.id)
def run_module_remove(self):
self.assert_module_remove(self.auth_client, self.instance_info.id,
@@ -945,10 +966,10 @@ class ModuleRunner(TestRunner):
def assert_module_remove(self, client, instance_id, module_id,
expected_http_code=200):
client.instances.module_remove(instance_id, module_id)
- self.assert_client_code(expected_http_code, client=client)
+ self.assert_client_code(client, expected_http_code)
def run_wait_for_inst_with_mods(self, expected_states=['BUILD', 'ACTIVE']):
- self.assert_instance_action(self.mod_inst_id, expected_states, None)
+ self.assert_instance_action(self.mod_inst_id, expected_states)
def run_module_query_after_inst_create(self):
auto_modules = self._find_all_auto_apply_modules(visible=True)
@@ -975,7 +996,7 @@ class ModuleRunner(TestRunner):
prefix = 'contents'
modretrieve_list = client.instances.module_retrieve(
instance_id, directory=temp_dir, prefix=prefix)
- self.assert_client_code(expected_http_code, client=client)
+ self.assert_client_code(client, expected_http_code)
count = len(modretrieve_list)
self.assert_equal(expected_count, count,
"Wrong number of modules from retrieve")
@@ -1027,16 +1048,18 @@ class ModuleRunner(TestRunner):
self, expected_exception=exceptions.Forbidden,
expected_http_code=403):
module = self._find_auto_apply_module()
+ client = self.auth_client
self.assert_raises(
expected_exception, expected_http_code,
- self.auth_client.modules.delete, module.id)
+ client, client.modules.delete, module.id)
def run_delete_inst_with_mods(self, expected_http_code=202):
self.assert_delete_instance(self.mod_inst_id, expected_http_code)
def assert_delete_instance(self, instance_id, expected_http_code):
- self.auth_client.instances.delete(instance_id)
- self.assert_client_code(expected_http_code, client=self.auth_client)
+ client = self.auth_client
+ client.instances.delete(instance_id)
+ self.assert_client_code(client, expected_http_code)
def run_wait_for_delete_inst_with_mods(
self, expected_last_state=['SHUTDOWN']):
@@ -1046,40 +1069,45 @@ class ModuleRunner(TestRunner):
def run_module_delete_non_existent(
self, expected_exception=exceptions.NotFound,
expected_http_code=404):
+ client = self.auth_client
self.assert_raises(
expected_exception, expected_http_code,
- self.auth_client.modules.delete, 'bad_id')
+ client, client.modules.delete, 'bad_id')
def run_module_delete_unauth_user(
self, expected_exception=exceptions.NotFound,
expected_http_code=404):
+ client = self.unauth_client
self.assert_raises(
expected_exception, expected_http_code,
- self.unauth_client.modules.delete, self.main_test_module.id)
+ client, client.modules.delete, self.main_test_module.id)
def run_module_delete_hidden_by_non_admin(
self, expected_exception=exceptions.NotFound,
expected_http_code=404):
module = self._find_invisible_module()
+ client = self.auth_client
self.assert_raises(
expected_exception, expected_http_code,
- self.auth_client.modules.delete, module.id)
+ client, client.modules.delete, module.id)
def run_module_delete_all_tenant_by_non_admin(
self, expected_exception=exceptions.Forbidden,
expected_http_code=403):
module = self._find_all_tenant_module()
+ client = self.auth_client
self.assert_raises(
expected_exception, expected_http_code,
- self.auth_client.modules.delete, module.id)
+ client, client.modules.delete, module.id)
def run_module_delete_auto_by_non_admin(
self, expected_exception=exceptions.Forbidden,
expected_http_code=403):
module = self._find_auto_apply_module()
+ client = self.auth_client
self.assert_raises(
expected_exception, expected_http_code,
- self.auth_client.modules.delete, module.id)
+ client, client.modules.delete, module.id)
def run_module_delete(self):
expected_count = len(self.auth_client.modules.list()) - 1
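
A minimal sketch (not the Trove implementation) of the assert_raises convention these module tests now follow, where the client is passed alongside the bound method so the response code can be checked on that same client:

def assert_raises_with_code(expected_exception, expected_http_code,
                            client, client_cmd, *args, **kwargs):
    """Expect client_cmd to raise, then check the code recorded on client."""
    try:
        client_cmd(*args, **kwargs)
    except expected_exception:
        pass
    else:
        raise AssertionError(
            "%s was not raised" % expected_exception.__name__)
    if client is not None and expected_http_code is not None:
        assert client.last_http_code == expected_http_code, (
            "Unexpected client status code: %s" % client.last_http_code)

# e.g. assert_raises_with_code(NotFound, 404, client,
#                              client.modules.get, 'bad-id')
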
diff --git a/trove/tests/scenario/runners/negative_cluster_actions_runners.py b/trove/tests/scenario/runners/negative_cluster_actions_runners.py
index c37d7673..53f9044e 100644
--- a/trove/tests/scenario/runners/negative_cluster_actions_runners.py
+++ b/trove/tests/scenario/runners/negative_cluster_actions_runners.py
@@ -60,8 +60,9 @@ class NegativeClusterActionsRunner(TestRunner):
def _assert_cluster_create_raises(self, cluster_name, instances_def,
expected_http_code):
+ client = self.auth_client
self.assert_raises(exceptions.BadRequest, expected_http_code,
- self.auth_client.clusters.create,
+ client, client.clusters.create,
cluster_name,
self.instance_info.dbaas_datastore,
self.instance_info.dbaas_datastore_version,
diff --git a/trove/tests/scenario/runners/replication_runners.py b/trove/tests/scenario/runners/replication_runners.py
index 2d402321..9f9bc76c 100644
--- a/trove/tests/scenario/runners/replication_runners.py
+++ b/trove/tests/scenario/runners/replication_runners.py
@@ -62,14 +62,16 @@ class ReplicationRunner(TestRunner):
self.test_helper.verify_data(data_type, host)
def run_create_non_affinity_master(self, expected_http_code=200):
- self.non_affinity_master_id = self.auth_client.instances.create(
+ client = self.auth_client
+ self.non_affinity_master_id = client.instances.create(
self.instance_info.name + '_non-affinity',
self.instance_info.dbaas_flavor_href,
self.instance_info.volume,
datastore=self.instance_info.dbaas_datastore,
datastore_version=self.instance_info.dbaas_datastore_version,
+ nics=self.instance_info.nics,
locality='anti-affinity').id
- self.assert_client_code(expected_http_code, client=self.auth_client)
+ self.assert_client_code(client, expected_http_code)
def run_create_single_replica(self, expected_http_code=200):
self.master_backup_count = len(
@@ -79,7 +81,8 @@ class ReplicationRunner(TestRunner):
def assert_replica_create(
self, master_id, replica_name, replica_count, expected_http_code):
- replica = self.auth_client.instances.create(
+ client = self.auth_client
+ replica = client.instances.create(
self.instance_info.name + '_' + replica_name,
self.instance_info.dbaas_flavor_href,
self.instance_info.volume, replica_of=master_id,
@@ -87,7 +90,7 @@ class ReplicationRunner(TestRunner):
datastore_version=self.instance_info.dbaas_datastore_version,
nics=self.instance_info.nics,
replica_count=replica_count)
- self.assert_client_code(expected_http_code, client=self.auth_client)
+ self.assert_client_code(client, expected_http_code)
return replica.id
def run_wait_for_single_replica(self, expected_states=['BUILD', 'ACTIVE']):
@@ -98,8 +101,9 @@ class ReplicationRunner(TestRunner):
self.replica_1_host = self.get_instance_host(self.replica_1_id)
def _assert_is_master(self, instance_id, replica_ids):
- instance = self.get_instance(instance_id, client=self.admin_client)
- self.assert_client_code(200, client=self.admin_client)
+ client = self.admin_client
+ instance = self.get_instance(instance_id, client=client)
+ self.assert_client_code(client, 200)
CheckInstance(instance._info).slaves()
self.assert_true(
set(replica_ids).issubset(self._get_replica_set(instance_id)))
@@ -110,8 +114,9 @@ class ReplicationRunner(TestRunner):
return set([replica['id'] for replica in instance._info['replicas']])
def _assert_is_replica(self, instance_id, master_id):
- instance = self.get_instance(instance_id, client=self.admin_client)
- self.assert_client_code(200, client=self.admin_client)
+ client = self.admin_client
+ instance = self.get_instance(instance_id, client=client)
+ self.assert_client_code(client, 200)
CheckInstance(instance._info).replica_of()
self.assert_equal(master_id, instance._info['replica_of']['id'],
'Unexpected replication master ID')
@@ -137,15 +142,17 @@ class ReplicationRunner(TestRunner):
self.non_affinity_master_id)
def run_create_non_affinity_replica(self, expected_http_code=200):
- self.non_affinity_repl_id = self.auth_client.instances.create(
+ client = self.auth_client
+ self.non_affinity_repl_id = client.instances.create(
self.instance_info.name + '_non-affinity-repl',
self.instance_info.dbaas_flavor_href,
self.instance_info.volume,
datastore=self.instance_info.dbaas_datastore,
datastore_version=self.instance_info.dbaas_datastore_version,
+ nics=self.instance_info.nics,
replica_of=self.non_affinity_master_id,
replica_count=1).id
- self.assert_client_code(expected_http_code, client=self.auth_client)
+ self.assert_client_code(client, expected_http_code)
def run_create_multiple_replicas(self, expected_http_code=200):
self.replica_2_id = self.assert_replica_create(
@@ -174,10 +181,10 @@ class ReplicationRunner(TestRunner):
def assert_delete_instances(self, instance_ids, expected_http_code):
instance_ids = (instance_ids if utils.is_collection(instance_ids)
else [instance_ids])
+ client = self.auth_client
for instance_id in instance_ids:
- self.auth_client.instances.delete(instance_id)
- self.assert_client_code(expected_http_code,
- client=self.auth_client)
+ client.instances.delete(instance_id)
+ self.assert_client_code(client, expected_http_code)
def run_wait_for_delete_non_affinity_repl(
self, expected_last_status=['SHUTDOWN']):
@@ -206,8 +213,7 @@ class ReplicationRunner(TestRunner):
def assert_verify_replica_data(self, master_id, data_type):
replica_ids = self._get_replica_set(master_id)
for replica_id in replica_ids:
- replica_instance = self.get_instance(replica_id)
- host = str(replica_instance._info['ip'][0])
+ host = self.get_instance_host(replica_id)
self.report.log("Checking data on host %s" % host)
self.assert_verify_replication_data(data_type, host)
@@ -219,32 +225,36 @@ class ReplicationRunner(TestRunner):
def run_promote_master(self, expected_exception=exceptions.BadRequest,
expected_http_code=400):
+ client = self.auth_client
self.assert_raises(
expected_exception, expected_http_code,
- self.auth_client.instances.promote_to_replica_source,
+ client, client.instances.promote_to_replica_source,
self.instance_info.id)
def run_eject_replica(self, expected_exception=exceptions.BadRequest,
expected_http_code=400):
+ client = self.auth_client
self.assert_raises(
expected_exception, expected_http_code,
- self.auth_client.instances.eject_replica_source,
+ client, client.instances.eject_replica_source,
self.replica_1_id)
def run_eject_valid_master(self, expected_exception=exceptions.BadRequest,
expected_http_code=400):
+ # client = self.auth_client
# self.assert_raises(
# expected_exception, expected_http_code,
- # self.auth_client.instances.eject_replica_source,
+ # client, client.instances.eject_replica_source,
# self.instance_info.id)
# Uncomment once BUG_EJECT_VALID_MASTER is fixed
raise SkipKnownBug(runners.BUG_EJECT_VALID_MASTER)
def run_delete_valid_master(self, expected_exception=exceptions.Forbidden,
expected_http_code=403):
+ client = self.auth_client
self.assert_raises(
expected_exception, expected_http_code,
- self.auth_client.instances.delete,
+ client, client.instances.delete,
self.instance_info.id)
def run_promote_to_replica_source(self,
@@ -271,9 +281,10 @@ class ReplicationRunner(TestRunner):
def assert_replica_promote(
self, new_master_id, expected_states, expected_http_code):
- self.auth_client.instances.promote_to_replica_source(new_master_id)
- self.assert_instance_action(new_master_id, expected_states,
- expected_http_code)
+ client = self.auth_client
+ client.instances.promote_to_replica_source(new_master_id)
+ self.assert_client_code(client, expected_http_code)
+ self.assert_instance_action(new_master_id, expected_states)
def run_verify_replica_data_new_master(self):
self.assert_verify_replication_data(
@@ -339,14 +350,15 @@ class ReplicationRunner(TestRunner):
def assert_detach_replica(
self, replica_id, expected_states, expected_http_code):
- self.auth_client.instances.edit(replica_id,
- detach_replica_source=True)
- self.assert_instance_action(
- replica_id, expected_states, expected_http_code)
+ client = self.auth_client
+ client.instances.edit(replica_id, detach_replica_source=True)
+ self.assert_client_code(client, expected_http_code)
+ self.assert_instance_action(replica_id, expected_states)
def _assert_is_not_replica(self, instance_id):
- instance = self.get_instance(instance_id, client=self.admin_client)
- self.assert_client_code(200, client=self.admin_client)
+ client = self.admin_client
+ instance = self.get_instance(instance_id, client=client)
+ self.assert_client_code(client, 200)
if 'replica_of' not in instance._info:
try:
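
For context, a small sketch of the master/replica topology checks above; the '_info' structure (a 'replicas' list on a master, a 'replica_of' entry on a replica) is assumed from the calls shown in the hunks:

def is_master_of(admin_client, instance_id, replica_ids):
    """True if every id in replica_ids appears under the master's 'replicas'."""
    info = admin_client.instances.get(instance_id)._info
    listed = {replica['id'] for replica in info.get('replicas', [])}
    return set(replica_ids) <= listed

def is_replica_of(admin_client, instance_id, master_id):
    """True if the instance reports master_id as its 'replica_of' source."""
    info = admin_client.instances.get(instance_id)._info
    return info.get('replica_of', {}).get('id') == master_id
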
diff --git a/trove/tests/scenario/runners/root_actions_runners.py b/trove/tests/scenario/runners/root_actions_runners.py
index 4da09a08..c95793e9 100644
--- a/trove/tests/scenario/runners/root_actions_runners.py
+++ b/trove/tests/scenario/runners/root_actions_runners.py
@@ -41,8 +41,9 @@ class RootActionsRunner(TestRunner):
def _assert_root_state(self, instance_id, expected_state,
expected_http_code, message):
# The call returns a nameless user object with 'rootEnabled' attribute.
- response = self.auth_client.root.is_root_enabled(instance_id)
- self.assert_instance_action(instance_id, None, expected_http_code)
+ client = self.auth_client
+ response = client.root.is_root_enabled(instance_id)
+ self.assert_client_code(client, expected_http_code)
actual_state = getattr(response, 'rootEnabled', None)
self.assert_equal(expected_state, actual_state, message)
@@ -54,8 +55,9 @@ class RootActionsRunner(TestRunner):
def assert_root_disable_failure(self, instance_id, expected_exception,
expected_http_code):
+ client = self.auth_client
self.assert_raises(expected_exception, expected_http_code,
- self.auth_client.root.delete, instance_id)
+ client, client.root.delete, instance_id)
def run_enable_root_no_password(self, expected_http_code=200):
root_credentials = self.test_helper.get_helper_credentials_root()
@@ -66,17 +68,18 @@ class RootActionsRunner(TestRunner):
def assert_root_create(self, instance_id, root_password,
expected_root_name, expected_http_code):
+ client = self.auth_client
if root_password is not None:
- root_creds = self.auth_client.root.create_instance_root(
+ root_creds = client.root.create_instance_root(
instance_id, root_password)
self.assert_equal(root_password, root_creds[1])
else:
- root_creds = self.auth_client.root.create(instance_id)
+ root_creds = client.root.create(instance_id)
+ self.assert_client_code(client, expected_http_code)
if expected_root_name is not None:
self.assert_equal(expected_root_name, root_creds[0])
- self.assert_instance_action(instance_id, None, expected_http_code)
self.assert_can_connect(instance_id, root_creds)
return root_creds
@@ -122,8 +125,9 @@ class RootActionsRunner(TestRunner):
self.assert_root_disable(self.instance_info.id, expected_http_code)
def assert_root_disable(self, instance_id, expected_http_code):
- self.auth_client.root.delete(instance_id)
- self.assert_instance_action(instance_id, None, expected_http_code)
+ client = self.auth_client
+ client.root.delete(instance_id)
+ self.assert_client_code(client, expected_http_code)
self.assert_cannot_connect(self.instance_info.id,
self.current_root_creds)
@@ -142,8 +146,9 @@ class RootActionsRunner(TestRunner):
def assert_root_delete_failure(self, instance_id, expected_exception,
expected_http_code):
root_user_name = self.current_root_creds[0]
+ client = self.auth_client
self.assert_raises(expected_exception, expected_http_code,
- self.auth_client.users.delete,
+ client, client.users.delete,
instance_id, root_user_name)
def run_check_root_enabled_after_restore(
@@ -223,3 +228,6 @@ class CouchbaseRootActionsRunner(RootActionsRunner):
def run_enable_root_with_password(self):
raise SkipTest("Operation is currently not supported.")
+
+ def run_delete_root(self):
+ raise SkipKnownBug(runners.BUG_WRONG_API_VALIDATION)
diff --git a/trove/tests/scenario/runners/test_runners.py b/trove/tests/scenario/runners/test_runners.py
index ebf1ecb4..018ed418 100644
--- a/trove/tests/scenario/runners/test_runners.py
+++ b/trove/tests/scenario/runners/test_runners.py
@@ -14,6 +14,8 @@
# under the License.
import datetime
+import inspect
+import netaddr
import os
import proboscis
import time as timer
@@ -173,6 +175,8 @@ class InstanceTestInfo(object):
self.user = None # The user instance who owns the instance.
self.users = None # The users created on the instance.
self.databases = None # The databases created on the instance.
+ self.helper_user = None # Test helper user if exists.
+ self.helper_database = None # Test helper database if exists.
class TestRunner(object):
@@ -229,6 +233,10 @@ class TestRunner(object):
else:
self.instance_info.volume_size = None
self.instance_info.volume = None
+ self.instance_info.nics = None
+ shared_network = CONFIG.get('shared_network', None)
+ if shared_network:
+ self.instance_info.nics = [{'net-id': shared_network}]
self._auth_client = None
self._unauth_client = None
@@ -314,9 +322,7 @@ class TestRunner(object):
@property
def auth_client(self):
- if not self._auth_client:
- self._auth_client = self._create_authorized_client()
- return self._auth_client
+ return self._create_authorized_client()
def _create_authorized_client(self):
"""Create a client from the normal 'authorized' user."""
@@ -324,9 +330,7 @@ class TestRunner(object):
@property
def unauth_client(self):
- if not self._unauth_client:
- self._unauth_client = self._create_unauthorized_client()
- return self._unauth_client
+ return self._create_unauthorized_client()
def _create_unauthorized_client(self):
"""Create a client from a different 'unauthorized' user
@@ -339,9 +343,7 @@ class TestRunner(object):
@property
def admin_client(self):
- if not self._admin_client:
- self._admin_client = self._create_admin_client()
- return self._admin_client
+ return self._create_admin_client()
def _create_admin_client(self):
"""Create a client from an admin user."""
@@ -351,9 +353,7 @@ class TestRunner(object):
@property
def swift_client(self):
- if not self._swift_client:
- self._swift_client = self._create_swift_client()
- return self._swift_client
+ return self._create_swift_client()
def _create_swift_client(self):
"""Create a swift client from the admin user details."""
@@ -370,9 +370,7 @@ class TestRunner(object):
@property
def nova_client(self):
- if not self._nova_client:
- self._nova_client = create_nova_client(self.instance_info.user)
- return self._nova_client
+ return create_nova_client(self.instance_info.user)
def get_client_tenant(self, client):
tenant_name = client.real_client.client.tenant
@@ -382,11 +380,26 @@ class TestRunner(object):
return tenant_name, tenant_id
def assert_raises(self, expected_exception, expected_http_code,
- client_cmd, *cmd_args, **cmd_kwargs):
+ client, client_cmd, *cmd_args, **cmd_kwargs):
+ if client:
+ # Make sure that the client_cmd comes from the same client that
+ # was passed in, otherwise asserting the client code may fail.
+ cmd_clz = client_cmd.im_self
+ cmd_clz_name = cmd_clz.__class__.__name__
+ client_attrs = [attr[0] for attr in inspect.getmembers(
+ client.real_client)
+ if '__' not in attr[0]]
+ match = [getattr(client, a) for a in client_attrs
+ if getattr(client, a).__class__.__name__ == cmd_clz_name]
+ self.assert_true(any(match),
+ "Could not find method class in client: %s" %
+ client_attrs)
+ self.assert_equal(
+ match[0], cmd_clz,
+ "Test error: client_cmd must be from client obj")
asserts.assert_raises(expected_exception, client_cmd,
*cmd_args, **cmd_kwargs)
-
- self.assert_client_code(expected_http_code)
+ self.assert_client_code(client, expected_http_code)
def get_datastore_config_property(self, name, datastore=None):
"""Get a Trove configuration property for a given datastore.
@@ -422,17 +435,14 @@ class TestRunner(object):
def has_do_not_delete_instance(self):
return self.has_env_flag(self.DO_NOT_DELETE_INSTANCE_FLAG)
- def assert_instance_action(
- self, instance_ids, expected_states, expected_http_code=None):
- self.assert_client_code(expected_http_code)
+ def assert_instance_action(self, instance_ids, expected_states):
if expected_states:
self.assert_all_instance_states(
instance_ids if utils.is_collection(instance_ids)
else [instance_ids], expected_states)
- def assert_client_code(self, expected_http_code, client=None):
- if expected_http_code is not None:
- client = client or self.auth_client
+ def assert_client_code(self, client, expected_http_code):
+ if client and expected_http_code is not None:
self.assert_equal(expected_http_code, client.last_http_code,
"Unexpected client status code")
@@ -483,16 +493,18 @@ class TestRunner(object):
fast_fail_status = ['ERROR', 'FAILED']
found = False
for status in expected_states:
- if require_all_states or found or self._has_status(
- instance_id, status, fast_fail_status=fast_fail_status):
+ found_current = self._has_status(
+ instance_id, status, fast_fail_status=fast_fail_status)
+ if require_all_states or found or found_current:
found = True
start_time = timer.time()
try:
- poll_until(lambda: self._has_status(
- instance_id, status,
- fast_fail_status=fast_fail_status),
- sleep_time=self.def_sleep_time,
- time_out=self.def_timeout)
+ if not found_current:
+ poll_until(lambda: self._has_status(
+ instance_id, status,
+ fast_fail_status=fast_fail_status),
+ sleep_time=self.def_sleep_time,
+ time_out=self.def_timeout)
self.report.log("Instance '%s' has gone '%s' in %s." %
(instance_id, status,
self._time_since(start_time)))
@@ -628,10 +640,16 @@ class TestRunner(object):
client = client or self.auth_client
return client.instances.get(instance_id)
+ def extract_ipv4s(self, ips):
+ ipv4s = [str(ip) for ip in ips if netaddr.valid_ipv4(ip)]
+ if not ipv4s:
+ self.fail("No IPV4 ip found")
+ return ipv4s
+
def get_instance_host(self, instance_id=None):
instance_id = instance_id or self.instance_info.id
instance = self.get_instance(instance_id)
- host = str(instance._info['ip'][0])
+ host = self.extract_ipv4s(instance._info['ip'])[0]
self.report.log("Found host %s for instance %s." % (host, instance_id))
return host
@@ -678,19 +696,20 @@ class TestRunner(object):
not be changed by individual test-cases.
"""
database_def, user_def, root_def = self.build_helper_defs()
+ client = self.auth_client
if database_def:
self.report.log(
"Creating a helper database '%s' on instance: %s"
% (database_def['name'], instance_id))
- self.auth_client.databases.create(instance_id, [database_def])
- self.wait_for_database_create(instance_id, [database_def])
+ client.databases.create(instance_id, [database_def])
+ self.wait_for_database_create(client, instance_id, [database_def])
if user_def:
self.report.log(
"Creating a helper user '%s:%s' on instance: %s"
% (user_def['name'], user_def['password'], instance_id))
- self.auth_client.users.create(instance_id, [user_def])
- self.wait_for_user_create(instance_id, [user_def])
+ client.users.create(instance_id, [user_def])
+ self.wait_for_user_create(client, instance_id, [user_def])
if root_def:
# Not enabling root on a single instance of the cluster here
@@ -723,14 +742,14 @@ class TestRunner(object):
_get_credentials(credentials),
_get_credentials(credentials_root))
- def wait_for_user_create(self, instance_id, expected_user_defs):
+ def wait_for_user_create(self, client, instance_id, expected_user_defs):
expected_user_names = {user_def['name']
for user_def in expected_user_defs}
self.report.log("Waiting for all created users to appear in the "
"listing: %s" % expected_user_names)
def _all_exist():
- all_users = self.get_user_names(instance_id)
+ all_users = self.get_user_names(client, instance_id)
return all(usr in all_users for usr in expected_user_names)
try:
@@ -740,18 +759,19 @@ class TestRunner(object):
self.fail("Some users were not created within the poll "
"timeout: %ds" % self.GUEST_CAST_WAIT_TIMEOUT_SEC)
- def get_user_names(self, instance_id):
- full_list = self.auth_client.users.list(instance_id)
+ def get_user_names(self, client, instance_id):
+ full_list = client.users.list(instance_id)
return {user.name: user for user in full_list}
- def wait_for_database_create(self, instance_id, expected_database_defs):
+ def wait_for_database_create(self, client,
+ instance_id, expected_database_defs):
expected_db_names = {db_def['name']
for db_def in expected_database_defs}
self.report.log("Waiting for all created databases to appear in the "
"listing: %s" % expected_db_names)
def _all_exist():
- all_dbs = self.get_db_names(instance_id)
+ all_dbs = self.get_db_names(client, instance_id)
return all(db in all_dbs for db in expected_db_names)
try:
@@ -761,8 +781,8 @@ class TestRunner(object):
self.fail("Some databases were not created within the poll "
"timeout: %ds" % self.GUEST_CAST_WAIT_TIMEOUT_SEC)
- def get_db_names(self, instance_id):
- full_list = self.auth_client.databases.list(instance_id)
+ def get_db_names(self, client, instance_id):
+ full_list = client.databases.list(instance_id)
return {database.name: database for database in full_list}
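
The IPv4 filtering added to test_runners.py can be exercised on its own; a standalone sketch built around the same netaddr call:

import netaddr

def extract_ipv4s(ips):
    """Return only the IPv4 entries from a mixed list of instance addresses."""
    ipv4s = [str(ip) for ip in ips if netaddr.valid_ipv4(str(ip))]
    if not ipv4s:
        raise AssertionError("No IPV4 ip found")
    return ipv4s

# extract_ipv4s(['fd00:1::10', '10.1.0.5']) == ['10.1.0.5']
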
diff --git a/trove/tests/scenario/runners/user_actions_runners.py b/trove/tests/scenario/runners/user_actions_runners.py
index d822b1bb..048a5be7 100644
--- a/trove/tests/scenario/runners/user_actions_runners.py
+++ b/trove/tests/scenario/runners/user_actions_runners.py
@@ -43,7 +43,7 @@ class UserActionsRunner(TestRunner):
if self.user_defs:
# Try to use the first user with databases if any.
for user_def in self.user_defs:
- if user_def['databases']:
+ if 'databases' in user_def and user_def['databases']:
return user_def
return self.user_defs[0]
raise SkipTest("No valid user definitions provided.")
@@ -65,9 +65,10 @@ class UserActionsRunner(TestRunner):
def assert_users_create(self, instance_id, serial_users_def,
expected_http_code):
- self.auth_client.users.create(instance_id, serial_users_def)
- self.assert_client_code(expected_http_code, client=self.auth_client)
- self.wait_for_user_create(instance_id, serial_users_def)
+ client = self.auth_client
+ client.users.create(instance_id, serial_users_def)
+ self.assert_client_code(client, expected_http_code)
+ self.wait_for_user_create(client, instance_id, serial_users_def)
return serial_users_def
def run_user_show(self, expected_http_code=200):
@@ -80,9 +81,10 @@ class UserActionsRunner(TestRunner):
user_name = expected_user_def['name']
user_host = expected_user_def.get('host')
- queried_user = self.auth_client.users.get(
+ client = self.auth_client
+ queried_user = client.users.get(
instance_id, user_name, user_host)
- self.assert_client_code(expected_http_code, client=self.auth_client)
+ self.assert_client_code(client, expected_http_code)
self._assert_user_matches(queried_user, expected_user_def)
def _assert_user_matches(self, user, expected_user_def):
@@ -99,8 +101,9 @@ class UserActionsRunner(TestRunner):
def assert_users_list(self, instance_id, expected_user_defs,
expected_http_code, limit=2):
- full_list = self.auth_client.users.list(instance_id)
- self.assert_client_code(expected_http_code, client=self.auth_client)
+ client = self.auth_client
+ full_list = client.users.list(instance_id)
+ self.assert_client_code(client, expected_http_code)
listed_users = {user.name: user for user in full_list}
self.assert_is_none(full_list.next,
"Unexpected pagination in the list.")
@@ -120,8 +123,8 @@ class UserActionsRunner(TestRunner):
"System users should not be included in the 'user-list' output.")
# Test list pagination.
- list_page = self.auth_client.users.list(instance_id, limit=limit)
- self.assert_client_code(expected_http_code, client=self.auth_client)
+ list_page = client.users.list(instance_id, limit=limit)
+ self.assert_client_code(client, expected_http_code)
self.assert_true(len(list_page) <= limit)
if len(full_list) > limit:
@@ -137,9 +140,8 @@ class UserActionsRunner(TestRunner):
self.assert_equal(expected_marker, marker,
"Pagination marker should be the last element "
"in the page.")
- list_page = self.auth_client.users.list(instance_id, marker=marker)
- self.assert_client_code(expected_http_code,
- client=self.auth_client)
+ list_page = client.users.list(instance_id, marker=marker)
+ self.assert_client_code(client, expected_http_code)
self.assert_pagination_match(
list_page, full_list, limit, len(full_list))
@@ -154,9 +156,10 @@ class UserActionsRunner(TestRunner):
def assert_user_access_show(self, instance_id, user_def,
expected_http_code):
user_name, user_host = self._get_user_name_host_pair(user_def)
- user_dbs = self.auth_client.users.list_access(instance_id, user_name,
- hostname=user_host)
- self.assert_client_code(expected_http_code, client=self.auth_client)
+ client = self.auth_client
+ user_dbs = client.users.list_access(
+ instance_id, user_name, hostname=user_host)
+ self.assert_client_code(client, expected_http_code)
expected_dbs = {db_def['name'] for db_def in user_def['databases']}
listed_dbs = [db.name for db in user_dbs]
@@ -189,10 +192,11 @@ class UserActionsRunner(TestRunner):
def assert_user_access_revoke(self, instance_id, user_name, user_host,
database, expected_http_code):
- self.auth_client.users.revoke(
+ client = self.auth_client
+ client.users.revoke(
instance_id, user_name, database, hostname=user_host)
- self.assert_client_code(expected_http_code, client=self.auth_client)
- user_dbs = self.auth_client.users.list_access(
+ self.assert_client_code(client, expected_http_code)
+ user_dbs = client.users.list_access(
instance_id, user_name, hostname=user_host)
self.assert_false(any(db.name == database for db in user_dbs),
"Database should no longer be included in the user "
@@ -205,10 +209,11 @@ class UserActionsRunner(TestRunner):
def assert_user_access_grant(self, instance_id, user_name, user_host,
database, expected_http_code):
- self.auth_client.users.grant(
+ client = self.auth_client
+ client.users.grant(
instance_id, user_name, [database], hostname=user_host)
- self.assert_client_code(expected_http_code, client=self.auth_client)
- user_dbs = self.auth_client.users.list_access(
+ self.assert_client_code(client, expected_http_code)
+ user_dbs = client.users.list_access(
instance_id, user_name, hostname=user_host)
self.assert_true(any(db.name == database for db in user_dbs),
"Database should be included in the user "
@@ -278,9 +283,10 @@ class UserActionsRunner(TestRunner):
def assert_users_create_failure(
self, instance_id, serial_users_def,
expected_exception, expected_http_code):
+ client = self.auth_client
self.assert_raises(
expected_exception, expected_http_code,
- self.auth_client.users.create, instance_id, serial_users_def)
+ client, client.users.create, instance_id, serial_users_def)
def run_user_update_with_blank_name(
self, expected_exception=exceptions.BadRequest,
@@ -302,9 +308,10 @@ class UserActionsRunner(TestRunner):
expected_exception, expected_http_code):
user_name, user_host = self._get_user_name_host_pair(user_def)
+ client = self.auth_client
self.assert_raises(
expected_exception, expected_http_code,
- self.auth_client.users.update_attributes, instance_id,
+ client, client.users.update_attributes, instance_id,
user_name, update_attribites, user_host)
def _get_user_name_host_pair(self, user_def):
@@ -338,9 +345,10 @@ class UserActionsRunner(TestRunner):
update_attribites, expected_http_code):
user_name, user_host = self._get_user_name_host_pair(user_def)
- self.auth_client.users.update_attributes(
+ client = self.auth_client
+ client.users.update_attributes(
instance_id, user_name, update_attribites, user_host)
- self.assert_client_code(expected_http_code, client=self.auth_client)
+ self.assert_client_code(client, expected_http_code)
# Update the stored definitions with the new value.
expected_def = None
@@ -350,7 +358,7 @@ class UserActionsRunner(TestRunner):
user_def.update(update_attribites)
expected_def = user_def
- self.wait_for_user_create(instance_id, self.user_defs)
+ self.wait_for_user_create(client, instance_id, self.user_defs)
# Verify using 'user-show' and 'user-list'.
self.assert_user_show(instance_id, expected_def, 200)
@@ -388,16 +396,17 @@ class UserActionsRunner(TestRunner):
def assert_user_delete(self, instance_id, user_def, expected_http_code):
user_name, user_host = self._get_user_name_host_pair(user_def)
- self.auth_client.users.delete(instance_id, user_name, user_host)
- self.assert_client_code(expected_http_code, client=self.auth_client)
- self._wait_for_user_delete(instance_id, user_name)
+ client = self.auth_client
+ client.users.delete(instance_id, user_name, user_host)
+ self.assert_client_code(client, expected_http_code)
+ self._wait_for_user_delete(client, instance_id, user_name)
- def _wait_for_user_delete(self, instance_id, deleted_user_name):
+ def _wait_for_user_delete(self, client, instance_id, deleted_user_name):
self.report.log("Waiting for deleted user to disappear from the "
"listing: %s" % deleted_user_name)
def _db_is_gone():
- all_users = self.get_user_names(instance_id)
+ all_users = self.get_user_names(client, instance_id)
return deleted_user_name not in all_users
try:
@@ -419,9 +428,10 @@ class UserActionsRunner(TestRunner):
expected_exception, expected_http_code):
user_name, user_host = self._get_user_name_host_pair(user_def)
+ client = self.auth_client
self.assert_raises(
expected_exception, expected_http_code,
- self.auth_client.users.get, instance_id, user_name, user_host)
+ client, client.users.get, instance_id, user_name, user_host)
def run_system_user_show(
self, expected_exception=exceptions.BadRequest,
@@ -456,8 +466,9 @@ class UserActionsRunner(TestRunner):
expected_exception, expected_http_code):
user_name, user_host = self._get_user_name_host_pair(user_def)
+ client = self.auth_client
self.assert_raises(expected_exception, expected_http_code,
- self.auth_client.users.delete,
+ client, client.users.delete,
instance_id, user_name, user_host)
def run_system_user_delete(
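
A small sketch (definitions hypothetical) of the user-definition selection these tests now tolerate, where the 'databases' key is optional in a user definition:

def first_user_with_databases(user_defs):
    """Prefer a user definition that grants databases; fall back to the first."""
    for user_def in user_defs:
        if user_def.get('databases'):
            return user_def
    if user_defs:
        return user_defs[0]
    raise ValueError("No valid user definitions provided.")

# first_user_with_databases([{'name': 'u1'},
#                            {'name': 'u2', 'databases': [{'name': 'db1'}]}])
# -> {'name': 'u2', 'databases': [{'name': 'db1'}]}
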
diff --git a/trove/tests/unittests/api/common/test_limits.py b/trove/tests/unittests/api/common/test_limits.py
index 5757f5a4..e90840cd 100644
--- a/trove/tests/unittests/api/common/test_limits.py
+++ b/trove/tests/unittests/api/common/test_limits.py
@@ -45,6 +45,7 @@ class BaseLimitTestSuite(trove_testtools.TestCase):
def setUp(self):
super(BaseLimitTestSuite, self).setUp()
+ self.context = trove_testtools.TroveTestContext(self)
self.absolute_limits = {"max_instances": 55,
"max_volumes": 100,
"max_backups": 40}
@@ -60,7 +61,7 @@ class LimitsControllerTest(BaseLimitTestSuite):
limit_controller = LimitsController()
req = MagicMock()
- req.environ = {}
+ req.environ = {'trove.context': self.context}
view = limit_controller.index(req, "test_tenant_id")
expected = {'limits': [{'verb': 'ABSOLUTE'}]}
@@ -122,7 +123,7 @@ class LimitsControllerTest(BaseLimitTestSuite):
hard_limit=55)}
req = MagicMock()
- req.environ = {"trove.limits": limits}
+ req.environ = {"trove.limits": limits, 'trove.context': self.context}
with patch.object(QUOTAS, 'get_all_quotas_by_tenant',
return_value=abs_limits):
diff --git a/trove/tests/unittests/backup/test_backup_models.py b/trove/tests/unittests/backup/test_backup_models.py
index da034079..315a508e 100644
--- a/trove/tests/unittests/backup/test_backup_models.py
+++ b/trove/tests/unittests/backup/test_backup_models.py
@@ -216,7 +216,8 @@ class BackupCreateTest(trove_testtools.TestCase):
self.context, self.instance_id,
BACKUP_NAME, BACKUP_DESC)
- def test_create_backup_creation_error(self):
+ @patch('trove.backup.models.LOG')
+ def test_create_backup_creation_error(self, mock_logging):
instance = MagicMock()
instance.cluster_id = None
with patch.object(instance_models.BuiltInstance, 'load',
diff --git a/trove/tests/unittests/backup/test_backupagent.py b/trove/tests/unittests/backup/test_backupagent.py
index e0644653..8bbf70dc 100644
--- a/trove/tests/unittests/backup/test_backupagent.py
+++ b/trove/tests/unittests/backup/test_backupagent.py
@@ -356,7 +356,8 @@ class BackupAgentTest(trove_testtools.TestCase):
@patch.object(conductor_api.API, 'get_client', Mock(return_value=Mock()))
@patch.object(conductor_api.API, 'update_backup',
Mock(return_value=Mock()))
- def test_execute_bad_process_backup(self):
+ @patch('trove.guestagent.backup.backupagent.LOG')
+ def test_execute_bad_process_backup(self, mock_logging):
agent = backupagent.BackupAgent()
backup_info = {'id': '123',
'location': 'fake-location',
@@ -516,7 +517,8 @@ class BackupAgentTest(trove_testtools.TestCase):
ANY, ANY, metadata=expected_metadata)
@patch.object(conductor_api.API, 'get_client', Mock(return_value=Mock()))
- def test_backup_incremental_bad_metadata(self):
+ @patch('trove.guestagent.backup.backupagent.LOG')
+ def test_backup_incremental_bad_metadata(self, mock_logging):
with patch.object(backupagent, 'get_storage_strategy',
return_value=MockSwift):
diff --git a/trove/tests/unittests/cluster/test_cluster.py b/trove/tests/unittests/cluster/test_cluster.py
index 74ad14d3..8eee1ed8 100644
--- a/trove/tests/unittests/cluster/test_cluster.py
+++ b/trove/tests/unittests/cluster/test_cluster.py
@@ -221,3 +221,7 @@ class ClusterTest(trove_testtools.TestCase):
task_status=ClusterTasks.ADDING_SHARD)
mock_task_api.mongodb_add_shard_cluster.assert_called_with(
self.cluster.id, 'new-shard-id', 'rs2')
+
+ @patch('trove.cluster.models.LOG')
+ def test_upgrade_not_implemented(self, mock_logging):
+ self.assertRaises(exception.BadRequest, self.cluster.upgrade, "foo")
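
The LOG patching used in these unit tests is the standard mock pattern for keeping expected error paths quiet; a generic, self-contained sketch using a local stand-in logger rather than trove.cluster.models.LOG:

import logging
import unittest
from unittest import mock

LOG = logging.getLogger(__name__)  # stands in for a module-level LOG

def risky_operation():
    # stand-in for code under test that logs on its error path
    LOG.error("something went wrong")
    raise ValueError("boom")

class ExampleTest(unittest.TestCase):

    @mock.patch(__name__ + '.LOG')  # same idea as @patch('trove.cluster.models.LOG')
    def test_error_path_is_quiet(self, mock_log):
        self.assertRaises(ValueError, risky_operation)
        # test output stays clean, and the logging call can still be asserted on
        mock_log.error.assert_called_once()
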
diff --git a/trove/tests/unittests/cluster/test_cluster_controller.py b/trove/tests/unittests/cluster/test_cluster_controller.py
index 29541f23..107c8cd5 100644
--- a/trove/tests/unittests/cluster/test_cluster_controller.py
+++ b/trove/tests/unittests/cluster/test_cluster_controller.py
@@ -18,7 +18,6 @@ import jsonschema
from mock import MagicMock
from mock import Mock
from mock import patch
-from testtools import TestCase
from testtools.matchers import Is, Equals
from trove.cluster import models
from trove.cluster.models import Cluster, DBCluster
@@ -33,7 +32,8 @@ from trove.datastore import models as datastore_models
from trove.tests.unittests import trove_testtools
-class TestClusterController(TestCase):
+class TestClusterController(trove_testtools.TestCase):
+
def setUp(self):
super(TestClusterController, self).setUp()
self.controller = ClusterController()
@@ -177,6 +177,7 @@ class TestClusterController(TestCase):
'flavor_id': '1234',
'availability_zone': 'az',
'modules': None,
+ 'region_name': None,
'nics': [
{'net-id': 'e89aa5fd-6b0a-436d-a75c-1545d34d5331'}
]
@@ -247,7 +248,8 @@ class TestClusterController(TestCase):
cluster.delete.assert_called_with()
-class TestClusterControllerWithStrategy(TestCase):
+class TestClusterControllerWithStrategy(trove_testtools.TestCase):
+
def setUp(self):
super(TestClusterControllerWithStrategy, self).setUp()
self.controller = ClusterController()
diff --git a/trove/tests/unittests/cluster/test_cluster_pxc_controller.py b/trove/tests/unittests/cluster/test_cluster_pxc_controller.py
index 48ae3988..51176aa2 100644
--- a/trove/tests/unittests/cluster/test_cluster_pxc_controller.py
+++ b/trove/tests/unittests/cluster/test_cluster_pxc_controller.py
@@ -142,6 +142,7 @@ class TestClusterController(trove_testtools.TestCase):
'flavor_id': '1234',
'availability_zone': 'az',
'modules': None,
+ 'region_name': None,
'nics': [
{'net-id': 'e89aa5fd-6b0a-436d-a75c-1545d34d5331'}
]
diff --git a/trove/tests/unittests/cluster/test_cluster_redis_controller.py b/trove/tests/unittests/cluster/test_cluster_redis_controller.py
index 00666abb..6fe3cfc7 100644
--- a/trove/tests/unittests/cluster/test_cluster_redis_controller.py
+++ b/trove/tests/unittests/cluster/test_cluster_redis_controller.py
@@ -157,6 +157,7 @@ class TestClusterController(trove_testtools.TestCase):
"flavor_id": "1234",
"availability_zone": "az",
'modules': None,
+ 'region_name': None,
"nics": [
{"net-id": "e89aa5fd-6b0a-436d-a75c-1545d34d5331"}
]
@@ -167,6 +168,7 @@ class TestClusterController(trove_testtools.TestCase):
"flavor_id": "1234",
"availability_zone": "az",
'modules': None,
+ 'region_name': None,
"nics": [
{"net-id": "e89aa5fd-6b0a-436d-a75c-1545d34d5331"}
]
@@ -177,6 +179,7 @@ class TestClusterController(trove_testtools.TestCase):
"flavor_id": "1234",
"availability_zone": "az",
'modules': None,
+ 'region_name': None,
"nics": [
{"net-id": "e89aa5fd-6b0a-436d-a75c-1545d34d5331"}
]
diff --git a/trove/tests/unittests/cluster/test_cluster_vertica_controller.py b/trove/tests/unittests/cluster/test_cluster_vertica_controller.py
index e56a950f..720f5a2e 100644
--- a/trove/tests/unittests/cluster/test_cluster_vertica_controller.py
+++ b/trove/tests/unittests/cluster/test_cluster_vertica_controller.py
@@ -142,6 +142,7 @@ class TestClusterController(trove_testtools.TestCase):
'flavor_id': '1234',
'availability_zone': 'az',
'modules': None,
+ 'region_name': None,
'nics': [
{'net-id': 'e89aa5fd-6b0a-436d-a75c-1545d34d5331'}
]
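
The region_name hunks above all do the same thing: the controllers now forward a region_name keyword (defaulting to None), so every expected-arguments dictionary in these tests has to gain the key as well. A small, self-contained illustration of why, using mock's assert_called_with; the create_cluster helper and its arguments are made up for the example:

    from unittest import mock

    models = mock.Mock()


    def create_cluster(name, flavor_id, region_name=None):
        """Toy controller: forwards a newly added keyword to the model layer."""
        models.Cluster.create(name=name, flavor_id=flavor_id,
                              region_name=region_name)


    create_cluster('products', '1234')

    # assert_called_with compares the complete keyword set, so an expectation
    # written before the new argument existed now fails...
    try:
        models.Cluster.create.assert_called_with(name='products',
                                                 flavor_id='1234')
    except AssertionError:
        pass  # expected: 'region_name' is missing from the expectation

    # ...and has to mention the new key even when it is only the default.
    models.Cluster.create.assert_called_with(name='products', flavor_id='1234',
                                             region_name=None)
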
diff --git a/trove/tests/unittests/cluster/test_cluster_views.py b/trove/tests/unittests/cluster/test_cluster_views.py
index 45a2127c..1479bae2 100644
--- a/trove/tests/unittests/cluster/test_cluster_views.py
+++ b/trove/tests/unittests/cluster/test_cluster_views.py
@@ -127,6 +127,10 @@ class ClusterInstanceDetailViewTest(trove_testtools.TestCase):
self.instance.get_visible_ip_addresses = lambda: ["1.2.3.4"]
self.instance.slave_of_id = None
self.instance.slaves = None
+ self.context = trove_testtools.TroveTestContext(self)
+ self.req = Mock()
+ self.req.environ = Mock()
+ self.req.environ.__getitem__ = Mock(return_value=self.context)
def tearDown(self):
super(ClusterInstanceDetailViewTest, self).tearDown()
@@ -135,7 +139,7 @@ class ClusterInstanceDetailViewTest(trove_testtools.TestCase):
@patch.object(ClusterInstanceDetailView, '_build_flavor_links')
@patch.object(ClusterInstanceDetailView, '_build_configuration_info')
def test_data(self, *args):
- view = ClusterInstanceDetailView(self.instance, Mock())
+ view = ClusterInstanceDetailView(self.instance, self.req)
result = view.data()
self.assertEqual(self.instance.created, result['instance']['created'])
self.assertEqual(self.instance.updated, result['instance']['updated'])
@@ -150,7 +154,7 @@ class ClusterInstanceDetailViewTest(trove_testtools.TestCase):
@patch.object(ClusterInstanceDetailView, '_build_configuration_info')
def test_data_ip(self, *args):
self.instance.hostname = None
- view = ClusterInstanceDetailView(self.instance, Mock())
+ view = ClusterInstanceDetailView(self.instance, self.req)
result = view.data()
self.assertEqual(self.instance.created, result['instance']['created'])
self.assertEqual(self.instance.updated, result['instance']['updated'])
diff --git a/trove/tests/unittests/cluster/test_galera_cluster.py b/trove/tests/unittests/cluster/test_galera_cluster.py
index b2f1cdde..aea712d8 100644
--- a/trove/tests/unittests/cluster/test_galera_cluster.py
+++ b/trove/tests/unittests/cluster/test_galera_cluster.py
@@ -25,6 +25,8 @@ from trove.common import exception
from trove.common import remote
from trove.common.strategies.cluster.experimental.galera_common import (
api as galera_api)
+from trove.common.strategies.cluster.experimental.galera_common import (
+ taskmanager as galera_task)
from trove.instance import models as inst_models
from trove.quota.quota import QUOTAS
from trove.taskmanager import api as task_api
@@ -310,8 +312,6 @@ class ClusterTest(trove_testtools.TestCase):
self.cluster.delete()
mock_update_db.assert_called_with(task_status=ClusterTasks.DELETING)
- @patch.object(galera_api.GaleraCommonCluster,
- '_get_cluster_network_interfaces')
@patch.object(DBCluster, 'update')
@patch.object(galera_api, 'CONF')
@patch.object(inst_models.Instance, 'create')
@@ -319,9 +319,8 @@ class ClusterTest(trove_testtools.TestCase):
@patch.object(QUOTAS, 'check_quotas')
@patch.object(remote, 'create_nova_client')
def test_grow(self, mock_client, mock_check_quotas, mock_task_api,
- mock_inst_create, mock_conf, mock_update, mock_interfaces):
+ mock_inst_create, mock_conf, mock_update):
mock_client.return_value.flavors = Mock()
- mock_interfaces.return_value = [Mock()]
self.cluster.grow(self.instances)
mock_update.assert_called_with(
task_status=ClusterTasks.GROWING_CLUSTER)
@@ -329,7 +328,20 @@ class ClusterTest(trove_testtools.TestCase):
self.db_info.id,
[mock_inst_create.return_value.id] * 3)
self.assertEqual(3, mock_inst_create.call_count)
- self.assertEqual(1, mock_interfaces.call_count)
+
+ @patch.object(DBCluster, 'update')
+ @patch.object(galera_api, 'CONF')
+ @patch.object(inst_models.Instance, 'create')
+ @patch.object(QUOTAS, 'check_quotas')
+ @patch.object(remote, 'create_nova_client')
+ def test_grow_exception(self, mock_client, mock_check_quotas,
+ mock_inst_create, mock_conf, mock_update):
+ mock_client.return_value.flavors = Mock()
+ with patch.object(task_api, 'load') as mock_load:
+ mock_load.return_value.grow_cluster = Mock(
+ side_effect=exception.BadRequest)
+ self.assertRaises(exception.BadRequest, self.cluster.grow,
+ self.instances)
@patch.object(inst_models.DBInstance, 'find_all')
@patch.object(inst_models.Instance, 'load')
@@ -360,3 +372,21 @@ class ClusterTest(trove_testtools.TestCase):
self.db_info.id, [mock_load.return_value.id])
mock_init.assert_called_with(self.context, self.db_info,
self.datastore, self.datastore_version)
+
+ @patch.object(galera_task.GaleraCommonClusterTasks, 'shrink_cluster')
+ @patch.object(galera_api.GaleraCommonCluster, '__init__')
+ @patch.object(DBCluster, 'update')
+ @patch.object(inst_models.DBInstance, 'find_all')
+ @patch.object(inst_models.Instance, 'load')
+ @patch.object(Cluster, 'validate_cluster_available')
+ def test_shrink_exception(self, mock_validate, mock_load, mock_find_all,
+ mock_update, mock_init, mock_shrink):
+ mock_init.return_value = None
+ existing_instances = [Mock(), Mock()]
+ mock_find_all.return_value.all.return_value = existing_instances
+ instance = Mock()
+ with patch.object(task_api, 'load') as mock_load:
+ mock_load.return_value.shrink_cluster = Mock(
+ side_effect=exception.BadRequest)
+ self.assertRaises(exception.BadRequest, self.cluster.shrink,
+ [instance])
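
The new grow/shrink failure tests patch task_api.load and give the returned client a side_effect so the remote call raises. A minimal sketch of that "patched factory" pattern, with stand-ins for task_api and BadRequest:

    import types
    import unittest
    from unittest import mock


    class BadRequest(Exception):
        """Stand-in for trove.common.exception.BadRequest."""


    # Stand-in for the trove.taskmanager.api module; only load() matters here.
    task_api = types.SimpleNamespace(
        load=lambda context, manager=None: None)


    def grow(context, manager, instances):
        """Caller under test: delegates the grow to the task-manager client."""
        client = task_api.load(context, manager)
        client.grow_cluster(instances)


    class GrowExceptionTest(unittest.TestCase):

        def test_grow_exception(self):
            with mock.patch.object(task_api, 'load') as mock_load:
                # The client returned by the patched factory raises on grow.
                mock_load.return_value.grow_cluster = mock.Mock(
                    side_effect=BadRequest)
                self.assertRaises(BadRequest, grow, 'ctx', 'galera', ['inst-1'])


    if __name__ == '__main__':
        unittest.main()
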
diff --git a/trove/tests/unittests/cluster/test_models.py b/trove/tests/unittests/cluster/test_models.py
index a0f2c79b..e1ed045a 100644
--- a/trove/tests/unittests/cluster/test_models.py
+++ b/trove/tests/unittests/cluster/test_models.py
@@ -38,11 +38,15 @@ class TestModels(trove_testtools.TestCase):
mock_flv.ephemeral = 0
test_instances = [{'flavor_id': 1, 'volume_size': 10},
- {'flavor_id': 1, 'volume_size': 1.5},
- {'flavor_id': 2, 'volume_size': 3}]
+ {'flavor_id': 1, 'volume_size': 1.5,
+ 'region_name': 'home'},
+ {'flavor_id': 2, 'volume_size': 3,
+ 'region_name': 'work'}]
models.validate_instance_flavors(Mock(), test_instances,
True, True)
- create_nove_cli_mock.assert_called_once_with(ANY)
+ create_nove_cli_mock.assert_has_calls([call(ANY, None),
+ call(ANY, 'home'),
+ call(ANY, 'work')])
self.assertRaises(exception.LocalStorageNotSpecified,
models.validate_instance_flavors,
diff --git a/trove/tests/unittests/common/test_common_extensions.py b/trove/tests/unittests/common/test_common_extensions.py
index 07cc0d67..1c2e35d4 100644
--- a/trove/tests/unittests/common/test_common_extensions.py
+++ b/trove/tests/unittests/common/test_common_extensions.py
@@ -24,6 +24,7 @@ from trove.extensions.common import models
from trove.extensions.common.service import ClusterRootController
from trove.extensions.common.service import DefaultRootController
from trove.extensions.common.service import RootController
+from trove.instance import models as instance_models
from trove.instance.models import DBInstance
from trove.tests.unittests import trove_testtools
@@ -90,16 +91,20 @@ class TestRootController(trove_testtools.TestCase):
def setUp(self):
super(TestRootController, self).setUp()
+ self.context = trove_testtools.TroveTestContext(self)
self.controller = RootController()
+ @patch.object(instance_models.Instance, "load")
@patch.object(RootController, "load_root_controller")
@patch.object(RootController, "_get_datastore")
- def test_index(self, service_get_datastore, service_load_root_controller):
+ def test_index(self, service_get_datastore, service_load_root_controller,
+ service_load_instance):
req = Mock()
+ req.environ = {'trove.context': self.context}
tenant_id = Mock()
uuid = utils.generate_uuid()
ds_manager = Mock()
- is_cluster = Mock()
+ is_cluster = False
service_get_datastore.return_value = (ds_manager, is_cluster)
root_controller = Mock()
ret = Mock()
@@ -112,15 +117,18 @@ class TestRootController(trove_testtools.TestCase):
root_controller.root_index.assert_called_with(
req, tenant_id, uuid, is_cluster)
+ @patch.object(instance_models.Instance, "load")
@patch.object(RootController, "load_root_controller")
@patch.object(RootController, "_get_datastore")
- def test_create(self, service_get_datastore, service_load_root_controller):
+ def test_create(self, service_get_datastore, service_load_root_controller,
+ service_load_instance):
req = Mock()
+ req.environ = {'trove.context': self.context}
body = Mock()
tenant_id = Mock()
uuid = utils.generate_uuid()
ds_manager = Mock()
- is_cluster = Mock()
+ is_cluster = False
service_get_datastore.return_value = (ds_manager, is_cluster)
root_controller = Mock()
ret = Mock()
@@ -134,17 +142,20 @@ class TestRootController(trove_testtools.TestCase):
root_controller.root_create.assert_called_with(
req, body, tenant_id, uuid, is_cluster)
+ @patch.object(instance_models.Instance, "load")
@patch.object(RootController, "load_root_controller")
@patch.object(RootController, "_get_datastore")
def test_create_with_no_root_controller(self,
service_get_datastore,
- service_load_root_controller):
+ service_load_root_controller,
+ service_load_instance):
req = Mock()
+ req.environ = {'trove.context': self.context}
body = Mock()
tenant_id = Mock()
uuid = utils.generate_uuid()
ds_manager = Mock()
- is_cluster = Mock()
+ is_cluster = False
service_get_datastore.return_value = (ds_manager, is_cluster)
service_load_root_controller.return_value = None
@@ -160,6 +171,7 @@ class TestClusterRootController(trove_testtools.TestCase):
def setUp(self):
super(TestClusterRootController, self).setUp()
+ self.context = trove_testtools.TroveTestContext(self)
self.controller = ClusterRootController()
@patch.object(ClusterRootController, "cluster_root_index")
@@ -204,22 +216,18 @@ class TestClusterRootController(trove_testtools.TestCase):
@patch.object(models.ClusterRoot, "load")
def test_instance_root_index(self, mock_cluster_root_load):
- context = Mock()
req = Mock()
- req.environ = Mock()
- req.environ.__getitem__ = Mock(return_value=context)
+ req.environ = {'trove.context': self.context}
tenant_id = Mock()
instance_id = utils.generate_uuid()
self.controller.instance_root_index(req, tenant_id, instance_id)
- mock_cluster_root_load.assert_called_with(context, instance_id)
+ mock_cluster_root_load.assert_called_with(self.context, instance_id)
@patch.object(models.ClusterRoot, "load",
side_effect=exception.UnprocessableEntity())
def test_instance_root_index_exception(self, mock_cluster_root_load):
- context = Mock()
req = Mock()
- req.environ = Mock()
- req.environ.__getitem__ = Mock(return_value=context)
+ req.environ = {'trove.context': self.context}
tenant_id = Mock()
instance_id = utils.generate_uuid()
self.assertRaises(
@@ -227,7 +235,7 @@ class TestClusterRootController(trove_testtools.TestCase):
self.controller.instance_root_index,
req, tenant_id, instance_id
)
- mock_cluster_root_load.assert_called_with(context, instance_id)
+ mock_cluster_root_load.assert_called_with(self.context, instance_id)
@patch.object(ClusterRootController, "instance_root_index")
@patch.object(ClusterRootController, "_get_cluster_instance_id")
@@ -278,12 +286,10 @@ class TestClusterRootController(trove_testtools.TestCase):
@patch.object(models.ClusterRoot, "create")
def test_instance_root_create(self, mock_cluster_root_create):
user = Mock()
- context = Mock()
- context.user = Mock()
- context.user.__getitem__ = Mock(return_value=user)
+ self.context.user = Mock()
+ self.context.user.__getitem__ = Mock(return_value=user)
req = Mock()
- req.environ = Mock()
- req.environ.__getitem__ = Mock(return_value=context)
+ req.environ = {'trove.context': self.context}
password = Mock()
body = {'password': password}
instance_id = utils.generate_uuid()
@@ -291,17 +297,16 @@ class TestClusterRootController(trove_testtools.TestCase):
self.controller.instance_root_create(
req, body, instance_id, cluster_instances)
mock_cluster_root_create.assert_called_with(
- context, instance_id, context.user, password, cluster_instances)
+ self.context, instance_id, self.context.user, password,
+ cluster_instances)
@patch.object(models.ClusterRoot, "create")
def test_instance_root_create_no_body(self, mock_cluster_root_create):
user = Mock()
- context = Mock()
- context.user = Mock()
- context.user.__getitem__ = Mock(return_value=user)
+ self.context.user = Mock()
+ self.context.user.__getitem__ = Mock(return_value=user)
req = Mock()
- req.environ = Mock()
- req.environ.__getitem__ = Mock(return_value=context)
+ req.environ = {'trove.context': self.context}
password = None
body = None
instance_id = utils.generate_uuid()
@@ -309,4 +314,5 @@ class TestClusterRootController(trove_testtools.TestCase):
self.controller.instance_root_create(
req, body, instance_id, cluster_instances)
mock_cluster_root_create.assert_called_with(
- context, instance_id, context.user, password, cluster_instances)
+ self.context, instance_id, self.context.user, password,
+ cluster_instances)
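
Several hunks above replace a Mock request environ (with a hand-stubbed __getitem__) by a plain dict keyed on 'trove.context'. Both shapes satisfy the controller's req.environ['trove.context'] lookup; the dict just needs no stubbing and fails loudly on a wrong key. A short demonstration, with Context and the request objects as stand-ins:

    from unittest import mock


    class Context(object):
        """Trivial stand-in for a Trove request context."""


    context = Context()

    # Old style: a Mock request whose item access is stubbed explicitly.
    req_old = mock.Mock()
    req_old.environ = mock.Mock()
    req_old.environ.__getitem__ = mock.Mock(return_value=context)
    assert req_old.environ['trove.context'] is context
    assert req_old.environ['anything.else'] is context   # silently "works"

    # New style: a real dict.  The same lookup succeeds, nothing needs to be
    # stubbed, and a wrong key raises KeyError instead of hiding a bug.
    req_new = mock.Mock()
    req_new.environ = {'trove.context': context}
    assert req_new.environ['trove.context'] is context
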
diff --git a/trove/tests/unittests/common/test_dbmodels.py b/trove/tests/unittests/common/test_dbmodels.py
index 77bfdfeb..02c46c48 100644
--- a/trove/tests/unittests/common/test_dbmodels.py
+++ b/trove/tests/unittests/common/test_dbmodels.py
@@ -135,7 +135,7 @@ class DatastoreUserTest(trove_testtools.TestCase):
def _test_user_basic(self, user):
self.assertEqual(self.username, user.name)
- self.assertEqual(None, user.password)
+ self.assertIsNone(user.password)
self.assertEqual(self.host_wildcard, user.host)
self.assertEqual([], user.databases)
@@ -316,7 +316,7 @@ class DatastoreUserTest(trove_testtools.TestCase):
def test_validate_dict_defaults(self):
user = models.DatastoreUser(self.username)
user.verify_dict()
- self.assertEqual(None, user.password)
+ self.assertIsNone(user.password)
self.assertEqual(self.host_wildcard, user.host)
self.assertEqual([], user.databases)
diff --git a/trove/tests/unittests/common/test_policy.py b/trove/tests/unittests/common/test_policy.py
new file mode 100644
index 00000000..eebdeb13
--- /dev/null
+++ b/trove/tests/unittests/common/test_policy.py
@@ -0,0 +1,53 @@
+# Copyright 2016 Tesora Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from mock import MagicMock
+from mock import NonCallableMock
+from mock import patch
+
+from trove.common import exception as trove_exceptions
+from trove.common import policy as trove_policy
+from trove.tests.unittests import trove_testtools
+
+
+class TestPolicy(trove_testtools.TestCase):
+
+ def setUp(self):
+ super(TestPolicy, self).setUp()
+ self.context = trove_testtools.TroveTestContext(self)
+ self.mock_enforcer = MagicMock()
+ get_enforcer_patch = patch.object(trove_policy, 'get_enforcer',
+ return_value=self.mock_enforcer)
+ self.addCleanup(get_enforcer_patch.stop)
+ self.mock_get_enforcer = get_enforcer_patch.start()
+
+ def test_authorize_on_tenant(self):
+ test_rule = NonCallableMock()
+ trove_policy.authorize_on_tenant(self.context, test_rule)
+ self.mock_get_enforcer.assert_called_once_with()
+ self.mock_enforcer.authorize.assert_called_once_with(
+ test_rule, {'tenant': self.context.tenant}, self.context.to_dict(),
+ do_raise=True, exc=trove_exceptions.PolicyNotAuthorized,
+ action=test_rule)
+
+ def test_authorize_on_target(self):
+ test_rule = NonCallableMock()
+ test_target = NonCallableMock()
+ trove_policy.authorize_on_target(self.context, test_rule, test_target)
+ self.mock_get_enforcer.assert_called_once_with()
+ self.mock_enforcer.authorize.assert_called_once_with(
+ test_rule, test_target, self.context.to_dict(),
+ do_raise=True, exc=trove_exceptions.PolicyNotAuthorized,
+ action=test_rule)
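
The new test_policy.py pins down the enforcer contract: authorize_on_tenant targets {'tenant': ...}, authorize_on_target passes the caller's target, and both go through authorize() with do_raise=True and PolicyNotAuthorized. Below is a sketch of helpers consistent with those assertions, with stand-ins for the enforcer and the exception and a made-up rule name; the real module is trove/common/policy.py and may differ in detail:

    from unittest import mock


    class PolicyNotAuthorized(Exception):
        """Stand-in for trove.common.exception.PolicyNotAuthorized."""


    get_enforcer = mock.Mock()   # stand-in for trove_policy.get_enforcer


    def authorize_on_tenant(context, rule):
        return _authorize(context, rule, {'tenant': context.tenant})


    def authorize_on_target(context, rule, target):
        return _authorize(context, rule, target)


    def _authorize(context, rule, target):
        # With do_raise=True a failed check raises the given exception
        # instead of returning False.
        enforcer = get_enforcer()
        return enforcer.authorize(rule, target, context.to_dict(),
                                  do_raise=True, exc=PolicyNotAuthorized,
                                  action=rule)


    # Quick check against a mocked context, mirroring the unit test above.
    ctx = mock.Mock(tenant='tenant-1', to_dict=mock.Mock(return_value={}))
    authorize_on_tenant(ctx, 'instance:create')
    get_enforcer.return_value.authorize.assert_called_once_with(
        'instance:create', {'tenant': 'tenant-1'}, {},
        do_raise=True, exc=PolicyNotAuthorized, action='instance:create')
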
diff --git a/trove/tests/unittests/common/test_remote.py b/trove/tests/unittests/common/test_remote.py
index 02c813a5..7b0ffec6 100644
--- a/trove/tests/unittests/common/test_remote.py
+++ b/trove/tests/unittests/common/test_remote.py
@@ -24,6 +24,7 @@ from testtools import ExpectedException, matchers
from trove.common import cfg
from trove.common.context import TroveContext
from trove.common import exception
+from trove.common import glance_remote
from trove.common import remote
from trove.tests.fakes.swift import SwiftClientStub
from trove.tests.unittests import trove_testtools
@@ -574,6 +575,47 @@ class TestCreateSwiftClient(trove_testtools.TestCase):
client.url)
+class TestCreateGlanceClient(trove_testtools.TestCase):
+ def setUp(self):
+ super(TestCreateGlanceClient, self).setUp()
+ self.glance_public_url = 'http://publicURL/v2'
+ self.glancev3_public_url_region_two = 'http://publicURL-r2/v3'
+ self.service_catalog = [
+ {
+ 'endpoints': [
+ {
+ 'region': 'RegionOne',
+ 'publicURL': self.glance_public_url,
+ }
+ ],
+ 'type': 'image'
+ },
+ {
+ 'endpoints': [
+ {
+ 'region': 'RegionOne',
+ 'publicURL': 'http://publicURL-r1/v1',
+ },
+ {
+ 'region': 'RegionTwo',
+ 'publicURL': self.glancev3_public_url_region_two,
+ }
+ ],
+ 'type': 'imagev3'
+ }
+ ]
+
+ def test_create_with_no_conf_no_catalog(self):
+ self.assertRaises(exception.EmptyCatalog,
+ glance_remote.create_glance_client,
+ TroveContext())
+
+ def test_create(self):
+ client = glance_remote.create_glance_client(
+ TroveContext(service_catalog=self.service_catalog))
+ self.assertIsNotNone(client)
+
+
class TestEndpoints(trove_testtools.TestCase):
"""
Copied from glance/tests/unit/test_auth.py.
diff --git a/trove/tests/unittests/guestagent/test_volume.py b/trove/tests/unittests/guestagent/test_volume.py
index efbb96bb..e8f47a93 100644
--- a/trove/tests/unittests/guestagent/test_volume.py
+++ b/trove/tests/unittests/guestagent/test_volume.py
@@ -12,197 +12,191 @@
# License for the specific language governing permissions and limitations
# under the License.
-import os
+from mock import ANY, call, DEFAULT, patch, mock_open
-from mock import Mock, MagicMock, patch, mock_open
-import pexpect
-
-from trove.common.exception import GuestError, ProcessExecutionError
+from trove.common import exception
from trove.common import utils
+from trove.guestagent.common import operating_system
from trove.guestagent import volume
from trove.tests.unittests import trove_testtools
-def _setUp_fake_spawn(return_val=0):
- fake_spawn = pexpect.spawn('echo')
- fake_spawn.expect = Mock(return_value=return_val)
- pexpect.spawn = Mock(return_value=fake_spawn)
- return fake_spawn
-
-
class VolumeDeviceTest(trove_testtools.TestCase):
def setUp(self):
super(VolumeDeviceTest, self).setUp()
self.volumeDevice = volume.VolumeDevice('/dev/vdb')
+ self.exec_patcher = patch.object(
+ utils, 'execute', return_value=('has_journal', ''))
+ self.mock_exec = self.exec_patcher.start()
+ self.addCleanup(self.exec_patcher.stop)
+ self.ismount_patcher = patch.object(operating_system, 'is_mount')
+ self.mock_ismount = self.ismount_patcher.start()
+ self.addCleanup(self.ismount_patcher.stop)
+
def tearDown(self):
super(VolumeDeviceTest, self).tearDown()
- @patch.object(pexpect, 'spawn', Mock())
def test_migrate_data(self):
- origin_execute = utils.execute
- utils.execute = Mock()
- origin_os_path_exists = os.path.exists
- os.path.exists = Mock()
- fake_spawn = _setUp_fake_spawn()
-
- origin_unmount = self.volumeDevice.unmount
- self.volumeDevice.unmount = MagicMock()
- self.volumeDevice.migrate_data('/')
- self.assertEqual(1, fake_spawn.expect.call_count)
- self.assertEqual(1, utils.execute.call_count)
- self.assertEqual(1, self.volumeDevice.unmount.call_count)
- utils.execute = origin_execute
- self.volumeDevice.unmount = origin_unmount
- os.path.exists = origin_os_path_exists
+ with patch.multiple(self.volumeDevice,
+ mount=DEFAULT, unmount=DEFAULT) as mocks:
+ self.volumeDevice.migrate_data('/')
+ self.assertEqual(1, mocks['mount'].call_count)
+ self.assertEqual(1, mocks['unmount'].call_count)
+ self.assertEqual(1, self.mock_exec.call_count)
+ calls = [
+ call('rsync', '--safe-links', '--perms', '--recursive',
+ '--owner', '--group', '--xattrs',
+ '--sparse', '/', '/mnt/volume',
+ root_helper='sudo', run_as_root=True),
+ ]
+ self.mock_exec.assert_has_calls(calls)
def test__check_device_exists(self):
- origin_execute = utils.execute
- utils.execute = Mock()
self.volumeDevice._check_device_exists()
- self.assertEqual(1, utils.execute.call_count)
- utils.execute = origin_execute
+ self.assertEqual(1, self.mock_exec.call_count)
+ calls = [
+ call('blockdev', '--getsize64', '/dev/vdb', attempts=3,
+ root_helper='sudo', run_as_root=True)
+ ]
+ self.mock_exec.assert_has_calls(calls)
@patch('trove.guestagent.volume.LOG')
def test_fail__check_device_exists(self, mock_logging):
- with patch.object(utils, 'execute', side_effect=ProcessExecutionError):
- self.assertRaises(GuestError,
+ with patch.object(utils, 'execute',
+ side_effect=exception.ProcessExecutionError):
+ self.assertRaises(exception.GuestError,
self.volumeDevice._check_device_exists)
- @patch.object(pexpect, 'spawn', Mock())
def test__check_format(self):
- fake_spawn = _setUp_fake_spawn()
-
self.volumeDevice._check_format()
- self.assertEqual(1, fake_spawn.expect.call_count)
+ self.assertEqual(1, self.mock_exec.call_count)
+ calls = [
+ call('dumpe2fs', '/dev/vdb', root_helper='sudo', run_as_root=True)
+ ]
+ self.mock_exec.assert_has_calls(calls)
- @patch.object(pexpect, 'spawn', Mock())
- def test__check_format_2(self):
- fake_spawn = _setUp_fake_spawn(return_val=1)
-
- self.assertEqual(0, fake_spawn.expect.call_count)
- self.assertRaises(IOError, self.volumeDevice._check_format)
+ @patch('trove.guestagent.volume.LOG')
+ def test__check_format_2(self, mock_logging):
+ self.assertEqual(0, self.mock_exec.call_count)
+ proc_err = exception.ProcessExecutionError()
+ proc_err.stderr = 'Wrong magic number'
+ self.mock_exec.side_effect = proc_err
+ self.assertRaises(exception.GuestError,
+ self.volumeDevice._check_format)
- @patch.object(pexpect, 'spawn', Mock())
def test__format(self):
- fake_spawn = _setUp_fake_spawn()
-
self.volumeDevice._format()
-
- self.assertEqual(1, fake_spawn.expect.call_count)
- self.assertEqual(1, pexpect.spawn.call_count)
+ self.assertEqual(1, self.mock_exec.call_count)
+ calls = [
+ call('mkfs', '--type', 'ext3', '-m', '5', '/dev/vdb',
+ root_helper='sudo', run_as_root=True)
+ ]
+ self.mock_exec.assert_has_calls(calls)
def test_format(self):
- origin_check_device_exists = self.volumeDevice._check_device_exists
- origin_format = self.volumeDevice._format
- origin_check_format = self.volumeDevice._check_format
- self.volumeDevice._check_device_exists = MagicMock()
- self.volumeDevice._check_format = MagicMock()
- self.volumeDevice._format = MagicMock()
-
self.volumeDevice.format()
- self.assertEqual(1, self.volumeDevice._check_device_exists.call_count)
- self.assertEqual(1, self.volumeDevice._format.call_count)
- self.assertEqual(1, self.volumeDevice._check_format.call_count)
-
- self.volumeDevice._check_device_exists = origin_check_device_exists
- self.volumeDevice._format = origin_format
- self.volumeDevice._check_format = origin_check_format
+ self.assertEqual(3, self.mock_exec.call_count)
+ calls = [
+ call('blockdev', '--getsize64', '/dev/vdb', attempts=3,
+ root_helper='sudo', run_as_root=True),
+ call('mkfs', '--type', 'ext3', '-m', '5', '/dev/vdb',
+ root_helper='sudo', run_as_root=True),
+ call('dumpe2fs', '/dev/vdb', root_helper='sudo', run_as_root=True)
+ ]
+ self.mock_exec.assert_has_calls(calls)
def test_mount(self):
- origin_ = volume.VolumeMountPoint.mount
- volume.VolumeMountPoint.mount = Mock()
- origin_os_path_exists = os.path.exists
- os.path.exists = Mock()
- origin_write_to_fstab = volume.VolumeMountPoint.write_to_fstab
- volume.VolumeMountPoint.write_to_fstab = Mock()
-
- self.volumeDevice.mount(Mock)
- self.assertEqual(1, volume.VolumeMountPoint.mount.call_count)
- self.assertEqual(1, volume.VolumeMountPoint.write_to_fstab.call_count)
- volume.VolumeMountPoint.mount = origin_
- volume.VolumeMountPoint.write_to_fstab = origin_write_to_fstab
- os.path.exists = origin_os_path_exists
+ with patch.multiple(volume.VolumeMountPoint,
+ mount=DEFAULT, write_to_fstab=DEFAULT) as mocks:
+ self.volumeDevice.mount('/dev/vba')
+ self.assertEqual(1, mocks['mount'].call_count,
+ "Wrong number of calls to mount()")
+ self.assertEqual(1, mocks['write_to_fstab'].call_count,
+ "Wrong number of calls to write_to_fstab()")
+ self.mock_exec.assert_not_called()
def test_resize_fs(self):
- origin_check_device_exists = self.volumeDevice._check_device_exists
- origin_execute = utils.execute
- utils.execute = Mock()
- self.volumeDevice._check_device_exists = MagicMock()
- origin_os_path_exists = os.path.exists
- os.path.exists = Mock()
-
- self.volumeDevice.resize_fs('/mnt/volume')
-
- self.assertEqual(1, self.volumeDevice._check_device_exists.call_count)
- self.assertEqual(2, utils.execute.call_count)
- self.volumeDevice._check_device_exists = origin_check_device_exists
- os.path.exists = origin_os_path_exists
- utils.execute = origin_execute
-
- @patch.object(os.path, 'ismount', return_value=True)
- @patch.object(utils, 'execute', side_effect=ProcessExecutionError)
+ with patch.object(operating_system, 'is_mount', return_value=True):
+ mount_point = '/mnt/volume'
+ self.volumeDevice.resize_fs(mount_point)
+ self.assertEqual(4, self.mock_exec.call_count)
+ calls = [
+ call('blockdev', '--getsize64', '/dev/vdb', attempts=3,
+ root_helper='sudo', run_as_root=True),
+ call("umount", mount_point, run_as_root=True,
+ root_helper='sudo'),
+ call('e2fsck', '-f', '-p', '/dev/vdb', root_helper='sudo',
+ run_as_root=True),
+ call('resize2fs', '/dev/vdb', root_helper='sudo',
+ run_as_root=True)
+ ]
+ self.mock_exec.assert_has_calls(calls)
+
+ @patch.object(utils, 'execute',
+ side_effect=exception.ProcessExecutionError)
@patch('trove.guestagent.volume.LOG')
- def test_fail_resize_fs(self, mock_logging, mock_execute, mock_mount):
+ def test_fail_resize_fs(self, mock_logging, mock_execute):
with patch.object(self.volumeDevice, '_check_device_exists'):
- self.assertRaises(GuestError,
+ self.assertRaises(exception.GuestError,
self.volumeDevice.resize_fs, '/mnt/volume')
self.assertEqual(1,
self.volumeDevice._check_device_exists.call_count)
- self.assertEqual(1, mock_mount.call_count)
+ self.assertEqual(2, self.mock_ismount.call_count)
def test_unmount_positive(self):
self._test_unmount()
def test_unmount_negative(self):
- self._test_unmount(False)
-
- @patch.object(pexpect, 'spawn', Mock())
- def _test_unmount(self, positive=True):
- origin_ = os.path.exists
- os.path.exists = MagicMock(return_value=positive)
- fake_spawn = _setUp_fake_spawn()
-
- self.volumeDevice.unmount('/mnt/volume')
- COUNT = 1
- if not positive:
- COUNT = 0
- self.assertEqual(COUNT, fake_spawn.expect.call_count)
- os.path.exists = origin_
-
- @patch.object(utils, 'execute', return_value=('/var/lib/mysql', ''))
- def test_mount_points(self, mock_execute):
+ self._test_unmount(has_mount=False)
+
+ def _test_unmount(self, has_mount=True):
+ with patch.object(operating_system, 'is_mount',
+ return_value=has_mount):
+ self.volumeDevice.unmount('/mnt/volume')
+ if has_mount:
+ self.assertEqual(1, self.mock_exec.call_count)
+ else:
+ self.mock_exec.assert_not_called()
+
+ def test_mount_points(self):
+ self.mock_exec.return_value = (
+ ("/dev/vdb /var/lib/mysql xfs rw 0 0", ""))
mount_point = self.volumeDevice.mount_points('/dev/vdb')
self.assertEqual(['/var/lib/mysql'], mount_point)
-
- @patch.object(utils, 'execute', side_effect=ProcessExecutionError)
- @patch('trove.guestagent.volume.LOG')
- def test_fail_mount_points(self, mock_logging, mock_execute):
- self.assertRaises(GuestError, self.volumeDevice.mount_points,
- '/mnt/volume')
+ self.assertEqual(1, self.mock_exec.call_count)
+ calls = [
+ call("grep '^/dev/vdb ' /etc/mtab", check_exit_code=[0, 1],
+ shell=True)
+ ]
+ self.mock_exec.assert_has_calls(calls)
def test_set_readahead_size(self):
- origin_check_device_exists = self.volumeDevice._check_device_exists
- self.volumeDevice._check_device_exists = MagicMock()
- mock_execute = MagicMock(return_value=None)
readahead_size = 2048
- self.volumeDevice.set_readahead_size(readahead_size,
- execute_function=mock_execute)
- blockdev = mock_execute.call_args_list[0]
-
- blockdev.assert_called_with("sudo", "blockdev", "--setra",
- readahead_size, "/dev/vdb")
- self.volumeDevice._check_device_exists = origin_check_device_exists
+ self.volumeDevice.set_readahead_size(readahead_size)
+ self.assertEqual(2, self.mock_exec.call_count)
+ calls = [
+ call('blockdev', '--getsize64', '/dev/vdb', attempts=3,
+ root_helper='sudo', run_as_root=True),
+ call('blockdev', '--setra', readahead_size, '/dev/vdb',
+ root_helper='sudo', run_as_root=True),
+ ]
+ self.mock_exec.assert_has_calls(calls)
@patch('trove.guestagent.volume.LOG')
def test_fail_set_readahead_size(self, mock_logging):
- mock_execute = MagicMock(side_effect=ProcessExecutionError)
+ self.mock_exec.side_effect = exception.ProcessExecutionError
readahead_size = 2048
- with patch.object(self.volumeDevice, '_check_device_exists'):
- self.assertRaises(GuestError, self.volumeDevice.set_readahead_size,
- readahead_size, execute_function=mock_execute)
- self.volumeDevice._check_device_exists.assert_any_call()
+ self.assertRaises(exception.GuestError,
+ self.volumeDevice.set_readahead_size,
+ readahead_size)
+ self.assertEqual(1, self.mock_exec.call_count)
+ calls = [
+ call('blockdev', '--getsize64', '/dev/vdb', attempts=3,
+ root_helper='sudo', run_as_root=True),
+ ]
+ self.mock_exec.assert_has_calls(calls)
class VolumeMountPointTest(trove_testtools.TestCase):
@@ -211,32 +205,35 @@ class VolumeMountPointTest(trove_testtools.TestCase):
super(VolumeMountPointTest, self).setUp()
self.volumeMountPoint = volume.VolumeMountPoint('/mnt/device',
'/dev/vdb')
+ self.exec_patcher = patch.object(utils, 'execute',
+ return_value=('', ''))
+ self.mock_exec = self.exec_patcher.start()
+ self.addCleanup(self.exec_patcher.stop)
def tearDown(self):
super(VolumeMountPointTest, self).tearDown()
- @patch.object(pexpect, 'spawn', Mock())
def test_mount(self):
- origin_ = os.path.exists
- os.path.exists = MagicMock(return_value=False)
- fake_spawn = _setUp_fake_spawn()
-
- with patch.object(utils, 'execute_with_timeout',
- return_value=('0', '')):
+ with patch.object(operating_system, 'exists', return_value=False):
self.volumeMountPoint.mount()
-
- self.assertEqual(1, os.path.exists.call_count)
- self.assertEqual(1, utils.execute_with_timeout.call_count)
- self.assertEqual(1, fake_spawn.expect.call_count)
-
- os.path.exists = origin_
+ self.assertEqual(2, self.mock_exec.call_count)
+ calls = [
+ call('mkdir', '-p', '/dev/vdb', root_helper='sudo',
+ run_as_root=True),
+ call('mount', '-t', 'ext3', '-o', 'defaults,noatime',
+ '/mnt/device', '/dev/vdb', root_helper='sudo',
+ run_as_root=True)
+ ]
+ self.mock_exec.assert_has_calls(calls)
def test_write_to_fstab(self):
- origin_execute = utils.execute
- utils.execute = Mock()
- m = mock_open()
- with patch('%s.open' % volume.__name__, m, create=True):
+ mock_file = mock_open()
+ with patch('%s.open' % volume.__name__, mock_file, create=True):
self.volumeMountPoint.write_to_fstab()
-
- self.assertEqual(1, utils.execute.call_count)
- utils.execute = origin_execute
+ self.assertEqual(1, self.mock_exec.call_count)
+ calls = [
+ call('install', '-o', 'root', '-g', 'root', '-m', '644',
+ ANY, '/etc/fstab', root_helper='sudo',
+ run_as_root=True)
+ ]
+ self.mock_exec.assert_has_calls(calls)
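
The test_volume.py rewrite above drops the pexpect-based fakes and ad-hoc monkeypatching in favour of a single patch.object patcher on utils.execute, started in setUp and unwound via addCleanup, so every test can assert on the exact commands issued. A condensed sketch of that fixture pattern, with a stand-in utils module and a made-up format_device helper:

    import types
    import unittest
    from unittest import mock
    from unittest.mock import call


    # Stand-in for the trove.common.utils module used by the guest agent.
    utils = types.SimpleNamespace(execute=None)


    def format_device(device):
        """Caller under test: shells out through utils.execute."""
        utils.execute('mkfs', '--type', 'ext3', device,
                      run_as_root=True, root_helper='sudo')


    class FormatTest(unittest.TestCase):

        def setUp(self):
            super(FormatTest, self).setUp()
            # One patcher for the whole test case: started here, stopped by
            # addCleanup even if a later setUp step or the test itself fails.
            self.exec_patcher = mock.patch.object(
                utils, 'execute', return_value=('', ''))
            self.mock_exec = self.exec_patcher.start()
            self.addCleanup(self.exec_patcher.stop)

        def test_format(self):
            format_device('/dev/vdb')
            self.assertEqual(1, self.mock_exec.call_count)
            self.mock_exec.assert_has_calls([
                call('mkfs', '--type', 'ext3', '/dev/vdb',
                     run_as_root=True, root_helper='sudo'),
            ])


    if __name__ == '__main__':
        unittest.main()
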
diff --git a/trove/tests/unittests/instance/test_instance_models.py b/trove/tests/unittests/instance/test_instance_models.py
index 3dbde5a4..969a8a8c 100644
--- a/trove/tests/unittests/instance/test_instance_models.py
+++ b/trove/tests/unittests/instance/test_instance_models.py
@@ -292,7 +292,8 @@ class TestInstanceUpgrade(trove_testtools.TestCase):
@patch.object(task_api.API, 'get_client', Mock(return_value=Mock()))
@patch.object(task_api.API, 'upgrade')
- def test_upgrade(self, task_upgrade):
+ @patch('trove.tests.fakes.nova.LOG')
+ def test_upgrade(self, mock_logging, task_upgrade):
instance_model = DBInstance(
InstanceTasks.NONE,
id=str(uuid.uuid4()),
diff --git a/trove/tests/unittests/instance/test_instance_views.py b/trove/tests/unittests/instance/test_instance_views.py
index e8458c42..e41836d8 100644
--- a/trove/tests/unittests/instance/test_instance_views.py
+++ b/trove/tests/unittests/instance/test_instance_views.py
@@ -63,6 +63,8 @@ class InstanceDetailViewTest(trove_testtools.TestCase):
self.instance.slave_of_id = None
self.instance.slaves = []
self.instance.locality = 'affinity'
+ self.instance.server_id = 'server_abc'
+ self.instance.volume_id = 'volume_abc'
self.fault_message = 'Error'
self.fault_details = 'details'
self.fault_date = 'now'
@@ -70,6 +72,10 @@ class InstanceDetailViewTest(trove_testtools.TestCase):
self.instance.fault.message = self.fault_message
self.instance.fault.details = self.fault_details
self.instance.fault.updated = self.fault_date
+ self.context = trove_testtools.TroveTestContext(self)
+ self.req = Mock()
+ self.req.environ = Mock()
+ self.req.environ.__getitem__ = Mock(return_value=self.context)
def tearDown(self):
super(InstanceDetailViewTest, self).tearDown()
@@ -78,7 +84,7 @@ class InstanceDetailViewTest(trove_testtools.TestCase):
InstanceDetailView._build_configuration_info = self.build_config_method
def test_data_hostname(self):
- view = InstanceDetailView(self.instance, Mock())
+ view = InstanceDetailView(self.instance, self.req)
result = view.data()
self.assertEqual(self.instance.created, result['instance']['created'])
self.assertEqual(self.instance.updated, result['instance']['updated'])
@@ -90,7 +96,7 @@ class InstanceDetailViewTest(trove_testtools.TestCase):
def test_data_ip(self):
self.instance.hostname = None
- view = InstanceDetailView(self.instance, Mock())
+ view = InstanceDetailView(self.instance, self.req)
result = view.data()
self.assertEqual(self.instance.created, result['instance']['created'])
self.assertEqual(self.instance.updated, result['instance']['updated'])
@@ -101,13 +107,13 @@ class InstanceDetailViewTest(trove_testtools.TestCase):
def test_locality(self):
self.instance.hostname = None
- view = InstanceDetailView(self.instance, Mock())
+ view = InstanceDetailView(self.instance, self.req)
result = view.data()
self.assertEqual(self.instance.locality,
result['instance']['locality'])
def test_fault(self):
- view = InstanceDetailView(self.instance, Mock())
+ view = InstanceDetailView(self.instance, self.req)
result = view.data()
self.assertEqual(self.fault_message,
result['instance']['fault']['message'])
@@ -115,3 +121,17 @@ class InstanceDetailViewTest(trove_testtools.TestCase):
result['instance']['fault']['details'])
self.assertEqual(self.fault_date,
result['instance']['fault']['created'])
+
+ def test_admin_view(self):
+ self.context.is_admin = True
+ view = InstanceDetailView(self.instance, self.req)
+ result = view.data()
+ self.assertIn('server_id', result['instance'])
+ self.assertIn('volume_id', result['instance'])
+
+ def test_non_admin_view(self):
+ self.context.is_admin = False
+ view = InstanceDetailView(self.instance, self.req)
+ result = view.data()
+ self.assertNotIn('server_id', result['instance'])
+ self.assertNotIn('volume_id', result['instance'])
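
The added admin/non-admin view tests build a request whose environ returns a test context and then flip context.is_admin to check that server_id and volume_id only show up for admins. A self-contained sketch of the same check against a hypothetical DetailView (not the real InstanceDetailView):

    import unittest
    from unittest import mock


    class DetailView(object):
        """Hypothetical view: exposes internal IDs only to admin contexts."""

        def __init__(self, instance, req):
            self.instance = instance
            self.context = req.environ['trove.context']

        def data(self):
            result = {'instance': {}}
            if self.context.is_admin:
                result['instance']['server_id'] = self.instance.server_id
                result['instance']['volume_id'] = self.instance.volume_id
            return result


    class DetailViewTest(unittest.TestCase):

        def setUp(self):
            super(DetailViewTest, self).setUp()
            self.instance = mock.Mock(server_id='server_abc',
                                      volume_id='volume_abc')
            self.context = mock.Mock()
            self.req = mock.Mock()
            self.req.environ = mock.Mock()
            self.req.environ.__getitem__ = mock.Mock(return_value=self.context)

        def test_admin_view(self):
            self.context.is_admin = True
            result = DetailView(self.instance, self.req).data()
            self.assertIn('server_id', result['instance'])
            self.assertIn('volume_id', result['instance'])

        def test_non_admin_view(self):
            self.context.is_admin = False
            result = DetailView(self.instance, self.req).data()
            self.assertNotIn('server_id', result['instance'])
            self.assertNotIn('volume_id', result['instance'])


    if __name__ == '__main__':
        unittest.main()
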
diff --git a/trove/tests/unittests/network/test_neutron_driver.py b/trove/tests/unittests/network/test_neutron_driver.py
index bda649b3..102e319d 100644
--- a/trove/tests/unittests/network/test_neutron_driver.py
+++ b/trove/tests/unittests/network/test_neutron_driver.py
@@ -19,6 +19,7 @@ from mock import Mock, patch
from neutronclient.common import exceptions as neutron_exceptions
from neutronclient.v2_0 import client as NeutronClient
+from trove.common import cfg
from trove.common import exception
from trove.common.models import NetworkRemoteModelBase
from trove.common import remote
@@ -28,6 +29,9 @@ from trove.network.neutron import NeutronDriver as driver
from trove.tests.unittests import trove_testtools
+CONF = cfg.CONF
+
+
class NeutronDriverTest(trove_testtools.TestCase):
def setUp(self):
super(NeutronDriverTest, self).setUp()
@@ -50,26 +54,30 @@ class NeutronDriverTest(trove_testtools.TestCase):
def test_create_security_group(self):
driver.create_security_group = Mock()
RemoteSecurityGroup.create(name=Mock(), description=Mock(),
- context=self.context)
+ context=self.context,
+ region_name=CONF.os_region_name)
self.assertEqual(1, driver.create_security_group.call_count)
def test_add_security_group_rule(self):
driver.add_security_group_rule = Mock()
RemoteSecurityGroup.add_rule(sec_group_id=Mock(), protocol=Mock(),
from_port=Mock(), to_port=Mock(),
- cidr=Mock(), context=self.context)
+ cidr=Mock(), context=self.context,
+ region_name=CONF.os_region_name)
self.assertEqual(1, driver.add_security_group_rule.call_count)
def test_delete_security_group_rule(self):
driver.delete_security_group_rule = Mock()
RemoteSecurityGroup.delete_rule(sec_group_rule_id=Mock(),
- context=self.context)
+ context=self.context,
+ region_name=CONF.os_region_name)
self.assertEqual(1, driver.delete_security_group_rule.call_count)
def test_delete_security_group(self):
driver.delete_security_group = Mock()
RemoteSecurityGroup.delete(sec_group_id=Mock(),
- context=self.context)
+ context=self.context,
+ region_name=CONF.os_region_name)
self.assertEqual(1, driver.delete_security_group.call_count)
@@ -81,7 +89,7 @@ class NeutronDriverExceptionTest(trove_testtools.TestCase):
self.orig_NeutronClient = NeutronClient.Client
self.orig_get_endpoint = remote.get_endpoint
remote.get_endpoint = MagicMock(return_value="neutron_url")
- mock_driver = neutron.NeutronDriver(self.context)
+ mock_driver = neutron.NeutronDriver(self.context, "regionOne")
NetworkRemoteModelBase.get_driver = MagicMock(
return_value=mock_driver)
@@ -98,23 +106,27 @@ class NeutronDriverExceptionTest(trove_testtools.TestCase):
def test_create_sg_with_exception(self, mock_logging):
self.assertRaises(exception.SecurityGroupCreationError,
RemoteSecurityGroup.create,
- "sg_name", "sg_desc", self.context)
+ "sg_name", "sg_desc", self.context,
+ region_name=CONF.os_region_name)
@patch('trove.network.neutron.LOG')
def test_add_sg_rule_with_exception(self, mock_logging):
self.assertRaises(exception.SecurityGroupRuleCreationError,
RemoteSecurityGroup.add_rule,
"12234", "tcp", "22", "22",
- "0.0.0.0/8", self.context)
+ "0.0.0.0/8", self.context,
+ region_name=CONF.os_region_name)
@patch('trove.network.neutron.LOG')
def test_delete_sg_rule_with_exception(self, mock_logging):
self.assertRaises(exception.SecurityGroupRuleDeletionError,
RemoteSecurityGroup.delete_rule,
- "12234", self.context)
+ "12234", self.context,
+ region_name=CONF.os_region_name)
@patch('trove.network.neutron.LOG')
def test_delete_sg_with_exception(self, mock_logging):
self.assertRaises(exception.SecurityGroupDeletionError,
RemoteSecurityGroup.delete,
- "123445", self.context)
+ "123445", self.context,
+ region_name=CONF.os_region_name)
diff --git a/trove/tests/unittests/secgroups/test_security_group.py b/trove/tests/unittests/secgroups/test_security_group.py
index 459a303e..c39e9148 100644
--- a/trove/tests/unittests/secgroups/test_security_group.py
+++ b/trove/tests/unittests/secgroups/test_security_group.py
@@ -18,6 +18,7 @@ from mock import Mock
from mock import patch
from novaclient import exceptions as nova_exceptions
+from trove.common import cfg
from trove.common import exception
import trove.common.remote
from trove.extensions.security_group import models as sec_mod
@@ -26,6 +27,9 @@ from trove.tests.fakes import nova
from trove.tests.unittests import trove_testtools
+CONF = cfg.CONF
+
+
"""
Unit tests for testing the exceptions raised by Security Groups
"""
@@ -49,7 +53,7 @@ class Security_Group_Exceptions_Test(trove_testtools.TestCase):
self.FakeClient.security_group_rules.delete = fException
trove.common.remote.create_nova_client = (
- lambda c: self._return_mocked_nova_client(c))
+ lambda c, r: self._return_mocked_nova_client(c))
def tearDown(self):
super(Security_Group_Exceptions_Test, self).tearDown()
@@ -67,25 +71,29 @@ class Security_Group_Exceptions_Test(trove_testtools.TestCase):
sec_mod.RemoteSecurityGroup.create,
"TestName",
"TestDescription",
- self.context)
+ self.context,
+ region_name=CONF.os_region_name)
@patch('trove.network.nova.LOG')
def test_failed_to_delete_security_group(self, mock_logging):
self.assertRaises(exception.SecurityGroupDeletionError,
sec_mod.RemoteSecurityGroup.delete,
- 1, self.context)
+ 1, self.context,
+ region_name=CONF.os_region_name)
@patch('trove.network.nova.LOG')
def test_failed_to_create_security_group_rule(self, mock_logging):
self.assertRaises(exception.SecurityGroupRuleCreationError,
sec_mod.RemoteSecurityGroup.add_rule,
- 1, "tcp", 3306, 3306, "0.0.0.0/0", self.context)
+ 1, "tcp", 3306, 3306, "0.0.0.0/0", self.context,
+ region_name=CONF.os_region_name)
@patch('trove.network.nova.LOG')
def test_failed_to_delete_security_group_rule(self, mock_logging):
self.assertRaises(exception.SecurityGroupRuleDeletionError,
sec_mod.RemoteSecurityGroup.delete_rule,
- 1, self.context)
+ 1, self.context,
+ region_name=CONF.os_region_name)
class fake_RemoteSecGr(object):
@@ -93,7 +101,7 @@ class fake_RemoteSecGr(object):
self.id = uuid.uuid4()
return {'id': self.id}
- def delete(self, context):
+ def delete(self, context, region_name):
pass
@@ -135,7 +143,7 @@ class SecurityGroupDeleteTest(trove_testtools.TestCase):
sec_mod.SecurityGroupInstanceAssociation.find_by = self.fException
self.assertIsNone(
sec_mod.SecurityGroup.delete_for_instance(
- uuid.uuid4(), self.context))
+ uuid.uuid4(), self.context, CONF.os_region_name))
def test_get_security_group_from_assoc_with_db_exception(self):
@@ -156,7 +164,7 @@ class SecurityGroupDeleteTest(trove_testtools.TestCase):
return_value=new_fake_RemoteSecGrAssoc())
self.assertIsNone(
sec_mod.SecurityGroup.delete_for_instance(
- i_id, self.context))
+ i_id, self.context, CONF.os_region_name))
def test_delete_secgr_assoc_with_db_exception(self):
@@ -164,11 +172,11 @@ class SecurityGroupDeleteTest(trove_testtools.TestCase):
sec_mod.SecurityGroupInstanceAssociation.find_by = Mock(
return_value=fake_SecGr_Association())
sec_mod.SecurityGroupInstanceAssociation.delete = self.fException
- self.assertNotEqual(sec_mod.SecurityGroupInstanceAssociation.find_by(
- i_id, deleted=False).get_security_group(), None)
+ self.assertIsNotNone(sec_mod.SecurityGroupInstanceAssociation.find_by(
+ i_id, deleted=False).get_security_group())
self.assertTrue(hasattr(sec_mod.SecurityGroupInstanceAssociation.
find_by(i_id, deleted=False).
get_security_group(), 'delete'))
self.assertIsNone(
sec_mod.SecurityGroup.delete_for_instance(
- i_id, self.context))
+ i_id, self.context, CONF.os_region_name))
diff --git a/trove/tests/unittests/trove_testtools.py b/trove/tests/unittests/trove_testtools.py
index 346c743f..36413b1e 100644
--- a/trove/tests/unittests/trove_testtools.py
+++ b/trove/tests/unittests/trove_testtools.py
@@ -23,6 +23,7 @@ import testtools
from trove.common import cfg
from trove.common.context import TroveContext
from trove.common.notification import DBaaSAPINotification
+from trove.common import policy
from trove.tests import root_logger
@@ -101,6 +102,11 @@ class TestCase(testtools.TestCase):
# Default manager used by all unittsest unless explicitly overridden.
self.patch_datastore_manager('mysql')
+ policy_patcher = mock.patch.object(policy, 'get_enforcer',
+ return_value=mock.MagicMock())
+ self.addCleanup(policy_patcher.stop)
+ policy_patcher.start()
+
def tearDown(self):
# yes, this is gross and not thread aware.
# but the only way to make it thread aware would require that
diff --git a/trove/tests/util/mysql.py b/trove/tests/util/mysql.py
index 218e3910..2d914337 100644
--- a/trove/tests/util/mysql.py
+++ b/trove/tests/util/mysql.py
@@ -115,8 +115,7 @@ class SqlAlchemyConnection(object):
@staticmethod
def _init_engine(user, password, host):
return session.EngineFacade(
- "mysql+pymysql://%s:%s@%s:3306" % (user, password, host),
- pool_recycle=1800, echo=True
+ "mysql+pymysql://%s:%s@%s:3306" % (user, password, host)
).get_engine()
diff --git a/trove/tests/util/server_connection.py b/trove/tests/util/server_connection.py
index 8956c251..6eed3663 100644
--- a/trove/tests/util/server_connection.py
+++ b/trove/tests/util/server_connection.py
@@ -13,7 +13,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+import netaddr
import os
+from proboscis.asserts import fail
from trove import tests
from trove.tests import util
@@ -33,7 +35,12 @@ class ServerSSHConnection(object):
self.user = util.test_config.users.find_user(req_admin)
self.dbaas_admin = util.create_dbaas_client(self.user)
self.instance = self.dbaas_admin.management.show(self.instance_id)
- self.ip_address = self.instance.ip[0]
+ try:
+ self.ip_address = [str(ip) for ip in self.instance.ip
+ if netaddr.valid_ipv4(ip)][0]
+ except Exception:
+ fail("No IPV4 ip found")
+
TROVE_TEST_SSH_USER = os.environ.get('TROVE_TEST_SSH_USER')
if TROVE_TEST_SSH_USER and '@' not in self.ip_address:
self.ip_address = TROVE_TEST_SSH_USER + '@' + self.ip_address
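
The final hunk picks the first IPv4 entry out of the management API's address list with netaddr, failing the setup when none exists. A standalone version of that selection, with made-up addresses:

    import netaddr


    def first_ipv4(addresses):
        """Return the first IPv4 address from a mixed list, or None."""
        try:
            return [str(ip) for ip in addresses
                    if netaddr.valid_ipv4(str(ip))][0]
        except IndexError:
            return None


    # Example with a v6/v4 mix, as a management 'show' call may return.
    print(first_ipv4(['fd00::1', '10.0.0.5']))   # -> 10.0.0.5
    print(first_ipv4(['fd00::1']))               # -> None
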