-rw-r--r--  api-ref/source/flavors.inc | 2
-rwxr-xr-x  api-ref/source/parameters.yaml | 6
-rw-r--r--  api-ref/source/samples/db-list-cfg-defaults-response.json | 3
-rw-r--r--  api-ref/source/samples/db-mgmt-get-instance-details-response-json-http.txt | 2
-rw-r--r--  api-ref/source/samples/db-mgmt-get-instance-details-response.json | 2
-rw-r--r--  api-ref/source/samples/db-mgmt-instance-index-response-json-http.txt | 2
-rw-r--r--  api-ref/source/samples/db-mgmt-instance-index-response.json | 2
-rw-r--r--  api-ref/source/user-management.inc | 1
-rw-r--r--  apidocs/src/samples/db-get-default-instance-configuration-response-json.txt | 5
-rw-r--r--  devstack/plugin.sh | 26
-rw-r--r--  doc/source/dev/secure_oslo_messaging.rst | 655
-rw-r--r--  doc/source/index.rst | 1
-rw-r--r--  integration/scripts/conf/mysql.conf | 8
-rw-r--r--  integration/scripts/conf/percona.conf | 8
-rw-r--r--  integration/scripts/conf/pxc.conf | 8
-rwxr-xr-x  integration/scripts/files/elements/ubuntu-cassandra/install.d/10-cassandra | 12
-rwxr-xr-x  integration/scripts/files/elements/ubuntu-couchbase/install.d/10-couchbase | 4
-rwxr-xr-x  integration/scripts/files/elements/ubuntu-couchdb/install.d/10-couchdb | 6
-rwxr-xr-x  integration/scripts/files/elements/ubuntu-db2/install.d/10-db2 | 4
-rwxr-xr-x  integration/scripts/files/elements/ubuntu-guest/install.d/05-base-apps | 2
-rwxr-xr-x  integration/scripts/files/elements/ubuntu-guest/install.d/15-trove-dep | 2
-rwxr-xr-x  integration/scripts/files/elements/ubuntu-guest/pre-install.d/04-baseline-tools | 2
-rwxr-xr-x  integration/scripts/files/elements/ubuntu-mongodb/install.d/20-mongodb | 2
-rwxr-xr-x  integration/scripts/files/elements/ubuntu-mongodb/pre-install.d/10-mongodb-apt-key | 2
-rwxr-xr-x  integration/scripts/files/elements/ubuntu-percona/install.d/30-mysql | 2
-rwxr-xr-x  integration/scripts/files/elements/ubuntu-postgresql/install.d/30-postgresql | 10
-rwxr-xr-x  integration/scripts/files/elements/ubuntu-pxc/install.d/30-mysql | 2
-rwxr-xr-x  integration/scripts/files/elements/ubuntu-redis/install.d/30-redis | 2
-rwxr-xr-x  integration/scripts/files/elements/ubuntu-trusty-mariadb/install.d/30-mariadb | 6
-rwxr-xr-x  integration/scripts/files/elements/ubuntu-trusty-mysql/install.d/30-mysql | 2
-rwxr-xr-x  integration/scripts/files/elements/ubuntu-vertica/install.d/97-vertica | 10
-rwxr-xr-x  integration/scripts/files/elements/ubuntu-xenial-mysql/install.d/30-mysql | 2
-rwxr-xr-x  integration/scripts/trovestack | 51
-rw-r--r--  integration/tests/integration/int_tests.py | 1
-rw-r--r--  integration/tests/integration/tests/__init__.py | 2
-rw-r--r--  integration/tests/integration/tests/colorizer.py | 1
-rw-r--r--  releasenotes/notes/disply_module_bools_properly-571cca9a87f28339.yaml | 5
-rw-r--r--  releasenotes/notes/module-ordering-92b6445a8ac3a3bf.yaml | 9
-rw-r--r--  requirements.txt | 19
-rw-r--r--  run_tests.py | 3
-rw-r--r--  test-requirements.txt | 2
-rw-r--r--  tools/trove-pylint.config | 38
-rwxr-xr-x  tools/trove-pylint.py | 13
-rw-r--r--  tox.ini | 2
-rw-r--r--  trove/cmd/conductor.py | 6
-rw-r--r--  trove/cmd/fakemode.py | 2
-rw-r--r--  trove/cmd/guest.py | 10
-rw-r--r--  trove/cmd/taskmanager.py | 8
-rw-r--r--  trove/common/apischema.py | 14
-rw-r--r--  trove/common/cfg.py | 48
-rw-r--r--  trove/common/context.py | 4
-rw-r--r--  trove/common/crypto_utils.py | 8
-rw-r--r--  trove/common/db/models.py | 2
-rw-r--r--  trove/common/limits.py | 12
-rw-r--r--  trove/common/models.py | 3
-rw-r--r--  trove/common/remote.py | 11
-rw-r--r--  trove/common/rpc/conductor_guest_serializer.py | 60
-rw-r--r--  trove/common/rpc/conductor_host_serializer.py | 83
-rw-r--r--  trove/common/rpc/secure_serializer.py | 59
-rw-r--r--  trove/common/rpc/serializer.py | 86
-rw-r--r--  trove/common/rpc/service.py | 11
-rw-r--r--  trove/common/strategies/cluster/experimental/galera_common/taskmanager.py | 10
-rw-r--r--  trove/common/strategies/cluster/experimental/redis/taskmanager.py | 4
-rw-r--r--  trove/common/strategies/strategy.py | 3
-rw-r--r--  trove/common/xmlutils.py | 12
-rw-r--r--  trove/conductor/api.py | 6
-rw-r--r--  trove/configuration/models.py | 4
-rw-r--r--  trove/db/models.py | 7
-rw-r--r--  trove/db/sqlalchemy/migrate_repo/versions/040_module_priority.py | 48
-rw-r--r--  trove/db/sqlalchemy/migrate_repo/versions/041_instance_keys.py | 30
-rw-r--r--  trove/dns/designate/driver.py | 10
-rw-r--r--  trove/extensions/common/service.py | 6
-rw-r--r--  trove/extensions/mgmt/instances/models.py | 2
-rw-r--r--  trove/guestagent/api.py | 20
-rw-r--r--  trove/guestagent/backup/backupagent.py | 6
-rw-r--r--  trove/guestagent/datastore/experimental/redis/manager.py | 2
-rw-r--r--  trove/guestagent/datastore/experimental/vertica/service.py | 16
-rw-r--r--  trove/guestagent/datastore/experimental/vertica/system.py | 3
-rw-r--r--  trove/guestagent/datastore/manager.py | 38
-rw-r--r--  trove/guestagent/datastore/mysql_common/manager.py | 2
-rw-r--r--  trove/guestagent/datastore/mysql_common/service.py | 16
-rw-r--r--  trove/guestagent/datastore/service.py | 2
-rw-r--r--  trove/guestagent/dbaas.py | 2
-rw-r--r--  trove/guestagent/guest_log.py | 2
-rw-r--r--  trove/guestagent/module/module_manager.py | 21
-rw-r--r--  trove/guestagent/pkg.py | 38
-rw-r--r--  trove/guestagent/strategies/backup/mysql_impl.py | 10
-rw-r--r--  trove/guestagent/strategies/replication/experimental/postgresql_impl.py | 2
-rw-r--r--  trove/guestagent/strategies/restore/experimental/postgresql_impl.py | 2
-rw-r--r--  trove/guestagent/strategies/restore/mysql_impl.py | 2
-rw-r--r--  trove/guestagent/volume.py | 181
-rw-r--r--  trove/instance/models.py | 140
-rw-r--r--  trove/instance/service.py | 10
-rw-r--r--  trove/instance/views.py | 2
-rw-r--r--  trove/module/models.py | 106
-rw-r--r--  trove/module/service.py | 17
-rw-r--r--  trove/module/views.py | 32
-rw-r--r--  trove/network/neutron.py | 14
-rw-r--r--  trove/network/nova.py | 11
-rw-r--r--  trove/rpc.py | 85
-rw-r--r--  trove/taskmanager/api.py | 10
-rw-r--r--  trove/taskmanager/manager.py | 18
-rwxr-xr-x  trove/taskmanager/models.py | 43
-rw-r--r--  trove/templates/mariadb/config.template | 2
-rw-r--r--  trove/tests/api/backups.py | 59
-rw-r--r--  trove/tests/int_tests.py | 91
-rw-r--r--  trove/tests/scenario/groups/__init__.py | 30
-rw-r--r--  trove/tests/scenario/groups/backup_group.py | 42
-rw-r--r--  trove/tests/scenario/groups/cluster_actions_group.py | 162
-rw-r--r--  trove/tests/scenario/groups/cluster_group.py | 341
-rw-r--r--  trove/tests/scenario/groups/module_group.py | 75
-rw-r--r--  trove/tests/scenario/helpers/test_helper.py | 71
-rw-r--r--  trove/tests/scenario/runners/__init__.py | 2
-rw-r--r--  trove/tests/scenario/runners/backup_runners.py | 1
-rw-r--r--  trove/tests/scenario/runners/cluster_runners.py (renamed from trove/tests/scenario/runners/cluster_actions_runners.py) | 370
-rw-r--r--  trove/tests/scenario/runners/configuration_runners.py | 1
-rw-r--r--  trove/tests/scenario/runners/guest_log_runners.py | 117
-rw-r--r--  trove/tests/scenario/runners/instance_create_runners.py | 9
-rw-r--r--  trove/tests/scenario/runners/instance_force_delete_runners.py | 7
-rw-r--r--  trove/tests/scenario/runners/module_runners.py | 479
-rw-r--r--  trove/tests/scenario/runners/replication_runners.py | 3
-rw-r--r--  trove/tests/scenario/runners/test_runners.py | 118
-rw-r--r--  trove/tests/tempest/tests/api/database/base.py | 15
-rw-r--r--  trove/tests/unittests/backup/test_backup_models.py | 3
-rw-r--r--  trove/tests/unittests/backup/test_backupagent.py | 9
-rw-r--r--  trove/tests/unittests/common/test_conductor_serializer.py | 110
-rw-r--r--  trove/tests/unittests/common/test_remote.py | 4
-rw-r--r--  trove/tests/unittests/common/test_secure_serializer.py | 64
-rw-r--r--  trove/tests/unittests/common/test_serializer.py | 127
-rw-r--r--  trove/tests/unittests/conductor/test_conf.py | 3
-rw-r--r--  trove/tests/unittests/guestagent/test_api.py | 5
-rw-r--r--  trove/tests/unittests/guestagent/test_backups.py | 7
-rw-r--r--  trove/tests/unittests/guestagent/test_dbaas.py | 4
-rw-r--r--  trove/tests/unittests/guestagent/test_galera_cluster_api.py | 5
-rw-r--r--  trove/tests/unittests/guestagent/test_manager.py | 41
-rw-r--r--  trove/tests/unittests/guestagent/test_vertica_api.py | 6
-rw-r--r--  trove/tests/unittests/guestagent/test_volume.py | 300
-rw-r--r--  trove/tests/unittests/instance/test_instance_models.py | 120
-rw-r--r--  trove/tests/unittests/module/test_module_controller.py | 15
-rw-r--r--  trove/tests/unittests/module/test_module_models.py | 98
-rw-r--r--  trove/tests/unittests/module/test_module_views.py | 9
-rw-r--r--  trove/tests/unittests/taskmanager/test_models.py | 10
-rw-r--r--  trove/tests/unittests/upgrade/test_models.py | 7
-rw-r--r--  trove/tests/util/__init__.py | 7
144 files changed, 4054 insertions, 1266 deletions
diff --git a/api-ref/source/flavors.inc b/api-ref/source/flavors.inc
index 4127737a..0af0a99b 100644
--- a/api-ref/source/flavors.inc
+++ b/api-ref/source/flavors.inc
@@ -67,7 +67,7 @@ Lists information for all available flavors.
This operation lists information for all available flavors.
This resource is identical to the flavors found in the OpenStack
-Nova API, but without the disk property.
+Nova API.
Normal response codes: 200
diff --git a/api-ref/source/parameters.yaml b/api-ref/source/parameters.yaml
index 13be71a8..ef71d87b 100755
--- a/api-ref/source/parameters.yaml
+++ b/api-ref/source/parameters.yaml
@@ -48,6 +48,12 @@ parameter_name:
in: path
required: false
type: string
+user_name:
+ description: |
+ The name of the user.
+ in: path
+ required: false
+ type: string
# variables in body
characterSet:
description: |
diff --git a/api-ref/source/samples/db-list-cfg-defaults-response.json b/api-ref/source/samples/db-list-cfg-defaults-response.json
index 58422431..c506d3aa 100644
--- a/api-ref/source/samples/db-list-cfg-defaults-response.json
+++ b/api-ref/source/samples/db-list-cfg-defaults-response.json
@@ -22,7 +22,8 @@
"open_files_limit": 512,
"performance_schema": "ON",
"pid_file": "/var/run/mysqld/mysqld.pid",
- "port": 3306,
+ "socket": "/var/run/mysqld/mysqld.sock",
+ "port": "3306",
"query_cache_limit": "1M",
"query_cache_size": "8M",
"query_cache_type": 1,
diff --git a/api-ref/source/samples/db-mgmt-get-instance-details-response-json-http.txt b/api-ref/source/samples/db-mgmt-get-instance-details-response-json-http.txt
index feb89a88..6580c3b8 100644
--- a/api-ref/source/samples/db-mgmt-get-instance-details-response-json-http.txt
+++ b/api-ref/source/samples/db-mgmt-get-instance-details-response-json-http.txt
@@ -1,5 +1,5 @@
HTTP/1.1 200 OK
Content-Type: application/json
-Content-Length: 1676
+Content-Length: 1709
Date: Mon, 18 Mar 2013 19:09:17 GMT
diff --git a/api-ref/source/samples/db-mgmt-get-instance-details-response.json b/api-ref/source/samples/db-mgmt-get-instance-details-response.json
index 203159d6..ef8b7efc 100644
--- a/api-ref/source/samples/db-mgmt-get-instance-details-response.json
+++ b/api-ref/source/samples/db-mgmt-get-instance-details-response.json
@@ -7,6 +7,7 @@
},
"deleted": false,
"deleted_at": null,
+ "encrypted_rpc_messaging": true,
"flavor": {
"id": "3",
"links": [
@@ -80,3 +81,4 @@
"volume_id": "VOL_44b277eb-39be-4921-be31-3d61b43651d7"
}
}
+
diff --git a/api-ref/source/samples/db-mgmt-instance-index-response-json-http.txt b/api-ref/source/samples/db-mgmt-instance-index-response-json-http.txt
index 875f0f20..3994d592 100644
--- a/api-ref/source/samples/db-mgmt-instance-index-response-json-http.txt
+++ b/api-ref/source/samples/db-mgmt-instance-index-response-json-http.txt
@@ -1,5 +1,5 @@
HTTP/1.1 200 OK
Content-Type: application/json
-Content-Length: 1225
+Content-Length: 1258
Date: Mon, 18 Mar 2013 19:09:17 GMT
diff --git a/api-ref/source/samples/db-mgmt-instance-index-response.json b/api-ref/source/samples/db-mgmt-instance-index-response.json
index 5736bb17..6b26254a 100644
--- a/api-ref/source/samples/db-mgmt-instance-index-response.json
+++ b/api-ref/source/samples/db-mgmt-instance-index-response.json
@@ -8,6 +8,7 @@
},
"deleted": false,
"deleted_at": null,
+ "encrypted_rpc_messaging": true,
"flavor": {
"id": "3",
"links": [
@@ -58,3 +59,4 @@
}
]
}
+
diff --git a/api-ref/source/user-management.inc b/api-ref/source/user-management.inc
index 7dd77d95..750b7417 100644
--- a/api-ref/source/user-management.inc
+++ b/api-ref/source/user-management.inc
@@ -163,6 +163,7 @@ Request
- instanceId: instanceId
- accountId: accountId
+ - name: user_name
diff --git a/apidocs/src/samples/db-get-default-instance-configuration-response-json.txt b/apidocs/src/samples/db-get-default-instance-configuration-response-json.txt
new file mode 100644
index 00000000..d8d78fc3
--- /dev/null
+++ b/apidocs/src/samples/db-get-default-instance-configuration-response-json.txt
@@ -0,0 +1,5 @@
+HTTP/1.1 200 OK
+Content-Type: application/json
+Content-Length: 1151
+Date: Mon, 18 Mar 2013 19:09:17 GMT
+
diff --git a/devstack/plugin.sh b/devstack/plugin.sh
index e38a5834..42f6b827 100644
--- a/devstack/plugin.sh
+++ b/devstack/plugin.sh
@@ -100,48 +100,42 @@ function iniset_conditional {
}
-# tweak_nova() - update the nova hypervisor configuration if possible
-function tweak_nova {
+# configure_nova_kvm() - update the nova hypervisor configuration if possible
+function configure_nova_kvm {
+ cpu="unknown"
+
if [ -e /sys/module/kvm_*/parameters/nested ]; then
reconfigure_nova="F"
if [ -e /sys/module/kvm_intel/parameters/nested ]; then
+ cpu="Intel"
if [[ "$(cat /sys/module/kvm_*/parameters/nested)" == "Y" ]]; then
reconfigure_nova="Y"
- else
- echo_summary "Found Intel with no support for nested KVM."
fi
elif [ -e /sys/module/kvm_amd/parameters/nested ]; then
+ cpu="AMD"
if [[ "$(cat /sys/module/kvm_*/parameters/nested)" == "1" ]]; then
reconfigure_nova="Y"
- else
- echo_summary "Found AMD with no support for nested KVM."
fi
fi
if [ "${reconfigure_nova}" == "Y" ]; then
- echo_summary "Configuring Nova to use KVM."
-
NOVA_CONF_DIR=${NOVA_CONF_DIR:-/etc/nova}
NOVA_CONF=${NOVA_CONF:-${NOVA_CONF_DIR}/nova.conf}
iniset $NOVA_CONF libvirt cpu_mode "none"
iniset $NOVA_CONF libvirt virt_type "kvm"
- else
- virt_type=$(iniget $NOVA_CONF libvirt virt_type)
- echo_summary "Nested hypervisor not supported, using ${virt_type}."
fi
- else
- virt_type=$(iniget $NOVA_CONF libvirt virt_type)
- echo_summary "Unable to configure Nova to use KVM, using ${virt_type}."
- echo "Unable to configure Nova to use KVM, using ${virt_type}."
fi
+
+ virt_type=$(iniget $NOVA_CONF libvirt virt_type)
+ echo "configure_nova_kvm: using virt_type: ${virt_type} for cpu: ${cpu}."
}
# configure_trove() - Set config files, create data dirs, etc
function configure_trove {
setup_develop $TROVE_DIR
- tweak_nova
+ configure_nova_kvm
# Create the trove conf dir and cache dirs if they don't exist
sudo install -d -o $STACK_USER ${TROVE_CONF_DIR} ${TROVE_AUTH_CACHE_DIR}
diff --git a/doc/source/dev/secure_oslo_messaging.rst b/doc/source/dev/secure_oslo_messaging.rst
new file mode 100644
index 00000000..beabd339
--- /dev/null
+++ b/doc/source/dev/secure_oslo_messaging.rst
@@ -0,0 +1,655 @@
+.. _secure_rpc_messaging:
+
+======================
+ Secure RPC messaging
+======================
+
+Background
+----------
+
+Trove uses oslo_messaging.rpc for communication amongst the various
+control plane components and the guest agents. For secure operation of
+the system, these RPC calls can be fully encrypted. A control plane
+encryption key is used for communications between the API service and
+the taskmanager, and system generated per-instance keys are used for
+communication between the control plane and guest instances.
+
+This document provides some useful tips on how to use this mechanism.
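+
+As a mental model, think of every RPC payload as being run through a
+symmetric cipher before it is handed to oslo.messaging. Below is a
+minimal, hypothetical sketch of such helpers. It is illustrative only:
+the real code lives in trove/common/crypto_utils.py and
+trove/common/rpc/, and the AES-CBC scheme, helper names and encodings
+used here are assumptions::
+
+    import base64
+    import os
+
+    from cryptography.hazmat.primitives.ciphers import (
+        Cipher, algorithms, modes)
+
+    def encrypt(plaintext, key):
+        """Encrypt text with a 32-character key; return base64 text."""
+        iv = os.urandom(16)
+        enc = Cipher(algorithms.AES(key.encode()),
+                     modes.CBC(iv)).encryptor()
+        data = plaintext.encode()
+        pad = 16 - len(data) % 16          # PKCS7-style padding
+        data += bytes([pad]) * pad
+        return base64.b64encode(
+            iv + enc.update(data) + enc.finalize()).decode()
+
+    def decrypt(ciphertext, key):
+        """Inverse of encrypt(): split off the IV, decrypt, unpad."""
+        raw = base64.b64decode(ciphertext)
+        dec = Cipher(algorithms.AES(key.encode()),
+                     modes.CBC(raw[:16])).decryptor()
+        data = dec.update(raw[16:]) + dec.finalize()
+        return data[:-data[-1]].decode()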
+
+The default system behavior
+---------------------------
+
+By default, the system will attempt to encrypt all RPC
+communication. This behavior is controlled by the following
+configuration parameters:
+
+- enable_secure_rpc_messaging
+
+  A boolean that determines whether RPC messages will be secured by
+  encryption. The default value is True.
+
+- taskmanager_rpc_encr_key
+
+  The key used for encrypting messages sent to the taskmanager. A
+  default value is provided, and it is important that deployers
+  change it.
+
+- inst_rpc_key_encr_key
+
+  The key used for encrypting the per-instance keys when they are
+  stored in the trove infrastructure database (catalog). A default
+  value is provided, and it is important that deployers change it.
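+
+For illustration, these options might be registered with oslo.config
+roughly as in the sketch below; the exact defaults, help strings and
+option groups in trove/common/cfg.py may well differ::
+
+    from oslo_config import cfg
+
+    CONF = cfg.CONF
+    CONF.register_opts([
+        cfg.BoolOpt('enable_secure_rpc_messaging', default=True,
+                    help='Whether RPC messages should be encrypted.'),
+        cfg.StrOpt('taskmanager_rpc_encr_key', secret=True,
+                   help='Key for messages sent to the taskmanager; '
+                        'deployers should override the default.'),
+        cfg.StrOpt('inst_rpc_key_encr_key', secret=True,
+                   help='Key that encrypts per-instance keys at rest; '
+                        'deployers should override the default.'),
+    ])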
+
+
+Interoperability and Upgrade
+----------------------------
+
+Consider the system shown below, which runs a version of the code
+that predates the introduction of this oslo_messaging.rpc security.
+Observe, for example, that the instances table in the system catalog
+does not include the per-instance encrypted key column.
+
+mysql> describe instances;
++----------------------+--------------+------+-----+---------+-------+
+| Field | Type | Null | Key | Default | Extra |
++----------------------+--------------+------+-----+---------+-------+
+| id | varchar(36) | NO | PRI | NULL | |
+| created | datetime | YES | | NULL | |
+| updated | datetime | YES | | NULL | |
+| name | varchar(255) | YES | | NULL | |
+| hostname | varchar(255) | YES | | NULL | |
+| compute_instance_id | varchar(36) | YES | | NULL | |
+| task_id | int(11) | YES | | NULL | |
+| task_description | varchar(255) | YES | | NULL | |
+| task_start_time | datetime | YES | | NULL | |
+| volume_id | varchar(36) | YES | | NULL | |
+| flavor_id | varchar(255) | YES | | NULL | |
+| volume_size | int(11) | YES | | NULL | |
+| tenant_id | varchar(36) | YES | MUL | NULL | |
+| server_status | varchar(64) | YES | | NULL | |
+| deleted | tinyint(1) | YES | MUL | NULL | |
+| deleted_at | datetime | YES | | NULL | |
+| datastore_version_id | varchar(36) | NO | MUL | NULL | |
+| configuration_id | varchar(36) | YES | MUL | NULL | |
+| slave_of_id | varchar(36) | YES | MUL | NULL | |
+| cluster_id | varchar(36) | YES | MUL | NULL | |
+| shard_id | varchar(36) | YES | | NULL | |
+| type | varchar(64) | YES | | NULL | |
+| region_id | varchar(255) | YES | | NULL | |
++----------------------+--------------+------+-----+---------+-------+
+23 rows in set (0.00 sec)
+
+We launch an instance of MySQL using this version of the software.
+
+amrith@amrith-work:/opt/stack/trove/integration/scripts$ openstack network list
++--------------------------------------+-------------+--------------------------------------+
+| ID | Name | Subnets |
++--------------------------------------+-------------+--------------------------------------+
+[...]
+| 4bab02e7-87bb-4cc0-8c07-2f282c777c85 | public | e620c4f5-749c-4212-b1d1-4a6e2c0a3f16 |
+[...]
++--------------------------------------+-------------+--------------------------------------+
+
+amrith@amrith-work:/opt/stack/trove/integration/scripts$ trove create m2 25 --size 3 --nic net-id=4bab02e7-87bb-4cc0-8c07-2f282c777c85
++-------------------+--------------------------------------+
+| Property | Value |
++-------------------+--------------------------------------+
+| created | 2017-01-09T18:17:13 |
+| datastore | mysql |
+| datastore_version | 5.6 |
+| flavor | 25 |
+| id | bb0c9213-31f8-4427-8898-c644254b3642 |
+| name | m2 |
+| region | RegionOne |
+| server_id | None |
+| status | BUILD |
+| updated | 2017-01-09T18:17:13 |
+| volume | 3 |
+| volume_id | None |
++-------------------+--------------------------------------+
+
+amrith@amrith-work:/opt/stack/trove/integration/scripts$ nova list
++--------------------------------------+------+--------+------------+-------------+-------------------+
+| ID | Name | Status | Task State | Power State | Networks |
++--------------------------------------+------+--------+------------+-------------+-------------------+
+| a4769ce2-4e22-4134-b958-6db6c23cb221 | m2 | BUILD | spawning | NOSTATE | public=172.24.4.4 |
++--------------------------------------+------+--------+------------+-------------+-------------------+
+
+And on that machine, the configuration file looks like this:
+
+amrith@m2:~$ cat /etc/trove/conf.d/guest_info.conf
+[DEFAULT]
+guest_id=bb0c9213-31f8-4427-8898-c644254b3642
+datastore_manager=mysql
+tenant_id=56cca8484d3e48869126ada4f355c284
+
+The instance goes online:
+
+amrith@amrith-work:/opt/stack/trove/integration/scripts$ trove show m2
++-------------------+--------------------------------------+
+| Property | Value |
++-------------------+--------------------------------------+
+| created | 2017-01-09T18:17:13 |
+| datastore | mysql |
+| datastore_version | 5.6 |
+| flavor | 25 |
+| id | bb0c9213-31f8-4427-8898-c644254b3642 |
+| name | m2 |
+| region | RegionOne |
+| server_id | a4769ce2-4e22-4134-b958-6db6c23cb221 |
+| status | ACTIVE |
+| updated | 2017-01-09T18:17:17 |
+| volume | 3 |
+| volume_id | 16e57e3f-b462-4db2-968b-3c284aa2751c |
+| volume_used | 0.11 |
++-------------------+--------------------------------------+
+
+For testing later, we launch a few more instances.
+
+amrith@amrith-work:/opt/stack/trove/integration/scripts$ trove create m3 25 --size 3 --nic net-id=4bab02e7-87bb-4cc0-8c07-2f282c777c85
+amrith@amrith-work:/opt/stack/trove/integration/scripts$ trove create m4 25 --size 3 --nic net-id=4bab02e7-87bb-4cc0-8c07-2f282c777c85
+
+amrith@amrith-work:/opt/stack/trove/integration/scripts$ trove list
++--------------------------------------+------+-----------+-------------------+--------+-----------+------+-----------+
+| ID | Name | Datastore | Datastore Version | Status | Flavor ID | Size | Region |
++--------------------------------------+------+-----------+-------------------+--------+-----------+------+-----------+
+| 6d55ab3a-267f-4b95-8ada-33fc98fd1767 | m4 | mysql | 5.6 | ACTIVE | 25 | 3 | RegionOne |
+| 9ceebd62-e13d-43c5-953a-c0f24f08757e | m3 | mysql | 5.6 | ACTIVE | 25 | 3 | RegionOne |
+| bb0c9213-31f8-4427-8898-c644254b3642 | m2 | mysql | 5.6 | ACTIVE | 25 | 3 | RegionOne |
++--------------------------------------+------+-----------+-------------------+--------+-----------+------+-----------+
+
+In this condition, we take down the control plane and upgrade the
+software running on it. This will result in a catalog upgrade. Since
+this system is based on devstack, here's what that looks like.
+
+amrith@amrith-work:/opt/stack/trove$ git branch
+* master
+ review/amrith/bp/secure-oslo-messaging-messages
+amrith@amrith-work:/opt/stack/trove$ git checkout review/amrith/bp/secure-oslo-messaging-messages
+Switched to branch 'review/amrith/bp/secure-oslo-messaging-messages'
+Your branch is ahead of 'gerrit/master' by 1 commit.
+ (use "git push" to publish your local commits)
+amrith@amrith-work:/opt/stack/trove$ find . -name '*.pyc' -delete
+amrith@amrith-work:/opt/stack/trove$
+
+amrith@amrith-work:/opt/stack/trove$ trove-manage db_sync
+[...]
+2017-01-09 13:24:25.251 DEBUG migrate.versioning.repository [-] Config: OrderedDict([('db_settings', OrderedDict([('__name__', 'db_settings'), ('repository_id', 'Trove Migrations'), ('version_table', 'migrate_version'), ('required_dbs', "['mysql','postgres','sqlite']")]))]) from (pid=96180) __init__ /usr/local/lib/python2.7/dist-packages/migrate/versioning/repository.py:83
+2017-01-09 13:24:25.260 INFO migrate.versioning.api [-] 40 -> 41...
+2017-01-09 13:24:25.328 INFO migrate.versioning.api [-] done
+2017-01-09 13:24:25.329 DEBUG migrate.versioning.util [-] Disposing SQLAlchemy engine Engine(mysql+pymysql://root:***@127.0.0.1/trove?charset=utf8) from (pid=96180) with_engine /usr/local/lib/python2.7/dist-packages/migrate/versioning/util/__init__.py:163
+[...]
+
+We observe that the instances table now has the encrypted_key column:
+
+mysql> describe instances;
++----------------------+--------------+------+-----+---------+-------+
+| Field | Type | Null | Key | Default | Extra |
++----------------------+--------------+------+-----+---------+-------+
+| id | varchar(36) | NO | PRI | NULL | |
+| created | datetime | YES | | NULL | |
+| updated | datetime | YES | | NULL | |
+| name | varchar(255) | YES | | NULL | |
+| hostname | varchar(255) | YES | | NULL | |
+| compute_instance_id | varchar(36) | YES | | NULL | |
+| task_id | int(11) | YES | | NULL | |
+| task_description | varchar(255) | YES | | NULL | |
+| task_start_time | datetime | YES | | NULL | |
+| volume_id | varchar(36) | YES | | NULL | |
+| flavor_id | varchar(255) | YES | | NULL | |
+| volume_size | int(11) | YES | | NULL | |
+| tenant_id | varchar(36) | YES | MUL | NULL | |
+| server_status | varchar(64) | YES | | NULL | |
+| deleted | tinyint(1) | YES | MUL | NULL | |
+| deleted_at | datetime | YES | | NULL | |
+| datastore_version_id | varchar(36) | NO | MUL | NULL | |
+| configuration_id | varchar(36) | YES | MUL | NULL | |
+| slave_of_id | varchar(36) | YES | MUL | NULL | |
+| cluster_id | varchar(36) | YES | MUL | NULL | |
+| shard_id | varchar(36) | YES | | NULL | |
+| type | varchar(64) | YES | | NULL | |
+| region_id | varchar(255) | YES | | NULL | |
+| encrypted_key | varchar(255) | YES | | NULL | |
++----------------------+--------------+------+-----+---------+-------+
+
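+A migration adding such a column might, under the sqlalchemy-migrate
+conventions Trove uses, look roughly like the sketch below (the real
+change is 041_instance_keys.py in trove/db/sqlalchemy/migrate_repo;
+the details here are assumptions)::
+
+    from sqlalchemy import Column, MetaData, String, Table
+
+    meta = MetaData()
+
+    def upgrade(migrate_engine):
+        meta.bind = migrate_engine
+        instances = Table('instances', meta, autoload=True)
+        # Nullable on purpose: NULL marks pre-existing instances that
+        # have no per-instance key yet.
+        instances.create_column(Column('encrypted_key', String(255)))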
+
+mysql> select id, encrypted_key from instances;
++--------------------------------------+---------------+
+| id | encrypted_key |
++--------------------------------------+---------------+
+| 13a787f2-b699-4867-a727-b3f4d8040a12 | NULL |
++--------------------------------------+---------------+
+1 row in set (0.00 sec)
+
+amrith@amrith-work:/opt/stack/trove$ sudo python setup.py install -f
+[...]
+
+We can now relaunch the control plane software, but before we do
+that, we inspect the configuration parameters and disable secure RPC
+messaging by adding the following line to each configuration file.
+
+amrith@amrith-work:/etc/trove$ grep enable_secure_rpc_messaging *.conf
+trove-conductor.conf:enable_secure_rpc_messaging = False
+trove.conf:enable_secure_rpc_messaging = False
+trove-taskmanager.conf:enable_secure_rpc_messaging = False
+
+The first thing we observe is that heartbeat messages from the
+existing instance are still properly handled by the conductor and the
+instance remains active.
+
+2017-01-09 13:26:57.742 DEBUG oslo_messaging._drivers.amqpdriver [-] received message with unique_id: eafe22c08bae485e9346ce0fbdaa4d6c from (pid=96551) __call__ /usr/local/lib/python2.7/dist-packages/oslo_messaging/_drivers/amqpdriver.py:196
+2017-01-09 13:26:57.744 DEBUG trove.conductor.manager [-] Instance ID: bb0c9213-31f8-4427-8898-c644254b3642, Payload: {u'service_status': u'running'} from (pid=96551) heartbeat /opt/stack/trove/trove/conductor/manager.py:88
+2017-01-09 13:26:57.748 DEBUG trove.conductor.manager [-] Instance bb0c9213-31f8-4427-8898-c644254b3642 sent heartbeat at 1483986416.52 from (pid=96551) _message_too_old /opt/stack/trove/trove/conductor/manager.py:54
+2017-01-09 13:26:57.750 DEBUG trove.conductor.manager [-] [Instance bb0c9213-31f8-4427-8898-c644254b3642] Rec'd message is younger than last seen. Updating. from (pid=96551) _message_too_old /opt/stack/trove/trove/conductor/manager.py:76
+2017-01-09 13:27:01.197 DEBUG oslo_messaging._drivers.amqpdriver [-] received message with unique_id: df62b76523004338876bc7b08f8b7711 from (pid=96552) __call__ /usr/local/lib/python2.7/dist-packages/oslo_messaging/_drivers/amqpdriver.py:196
+2017-01-09 13:27:01.200 DEBUG trove.conductor.manager [-] Instance ID: 9ceebd62-e13d-43c5-953a-c0f24f08757e, Payload: {u'service_status': u'running'} from (pid=96552) heartbeat /opt/stack/trove/trove/conductor/manager.py:88
+2017-01-09 13:27:01.219 DEBUG oslo_db.sqlalchemy.engines [-] Parent process 96542 forked (96552) with an open database connection, which is being discarded and recreated. from (pid=96552) checkout /usr/local/lib/python2.7/dist-packages/oslo_db/sqlalchemy/engines.py:362
+2017-01-09 13:27:01.225 DEBUG trove.conductor.manager [-] Instance 9ceebd62-e13d-43c5-953a-c0f24f08757e sent heartbeat at 1483986419.99 from (pid=96552) _message_too_old /opt/stack/trove/trove/conductor/manager.py:54
+2017-01-09 13:27:01.231 DEBUG trove.conductor.manager [-] [Instance 9ceebd62-e13d-43c5-953a-c0f24f08757e] Rec'd message is younger than last seen. Updating. from (pid=96552) _message_too_old /opt/stack/trove/trove/conductor/manager.py:76
+
+amrith@amrith-work:/etc/trove$ trove list
++--------------------------------------+------+-----------+-------------------+--------+-----------+------+-----------+
+| ID | Name | Datastore | Datastore Version | Status | Flavor ID | Size | Region |
++--------------------------------------+------+-----------+-------------------+--------+-----------+------+-----------+
+| 6d55ab3a-267f-4b95-8ada-33fc98fd1767 | m4 | mysql | 5.6 | ACTIVE | 25 | 3 | RegionOne |
+| 9ceebd62-e13d-43c5-953a-c0f24f08757e | m3 | mysql | 5.6 | ACTIVE | 25 | 3 | RegionOne |
+| bb0c9213-31f8-4427-8898-c644254b3642 | m2 | mysql | 5.6 | ACTIVE | 25 | 3 | RegionOne |
++--------------------------------------+------+-----------+-------------------+--------+-----------+------+-----------+
+
+amrith@amrith-work:/etc/trove$ trove show m2
++-------------------+--------------------------------------+
+| Property | Value |
++-------------------+--------------------------------------+
+| created | 2017-01-09T18:17:13 |
+| datastore | mysql |
+| datastore_version | 5.6 |
+| flavor | 25 |
+| id | bb0c9213-31f8-4427-8898-c644254b3642 |
+| name | m2 |
+| region | RegionOne |
+| server_id | a4769ce2-4e22-4134-b958-6db6c23cb221 |
+| status | ACTIVE |
+| updated | 2017-01-09T18:17:17 |
+| volume | 3 |
+| volume_id | 16e57e3f-b462-4db2-968b-3c284aa2751c |
+| volume_used | 0.11 |
++-------------------+--------------------------------------+
+
+We now launch a new instance; recall that secure RPC messaging is disabled.
+
+amrith@amrith-work:/etc/trove$ trove create m10 25 --size 3 --nic net-id=4bab02e7-87bb-4cc0-8c07-2f282c777c85
++-------------------+--------------------------------------+
+| Property | Value |
++-------------------+--------------------------------------+
+| created | 2017-01-09T18:28:56 |
+| datastore | mysql |
+| datastore_version | 5.6 |
+| flavor | 25 |
+| id | 514ef051-0bf7-48a5-adcf-071d4a6625fb |
+| name | m10 |
+| region | RegionOne |
+| server_id | None |
+| status | BUILD |
+| updated | 2017-01-09T18:28:56 |
+| volume | 3 |
+| volume_id | None |
++-------------------+--------------------------------------+
+
+Observe that the task manager does not create an encryption key for the instance.
+
+2017-01-09 13:29:00.111 INFO trove.instance.models [-] Resetting task status to NONE on instance 514ef051-0bf7-48a5-adcf-071d4a6625fb.
+2017-01-09 13:29:00.115 DEBUG trove.db.models [-] Saving DBInstance: {u'region_id': u'RegionOne', u'cluster_id': None, u'shard_id': None, u'deleted_at': None, u'id': u'514ef051-0bf7-48a5-adcf-071d4a6625fb', u'datastore_version_id': u'4a881cb5-9e48-4cb2-a209-4283ed44eb01', 'errors': {}, u'hostname': None, u'server_status': None, u'task_description': u'No tasks for the instance.', u'volume_size': 3, u'type': None, u'updated': datetime.datetime(2017, 1, 9, 18, 29, 0, 114971), '_sa_instance_state': <sqlalchemy.orm.state.InstanceState object at 0x7f460dbca410>, u'encrypted_key': None, u'deleted': 0, u'configuration_id': None, u'volume_id': u'cee2e17b-80fa-48e5-a488-da8b7809373a', u'slave_of_id': None, u'task_start_time': None, u'name': u'm10', u'task_id': 1, u'created': datetime.datetime(2017, 1, 9, 18, 28, 56), u'tenant_id': u'56cca8484d3e48869126ada4f355c284', u'compute_instance_id': u'2452263e-3d33-48ec-8f24-2851fe74db28', u'flavor_id': u'25'} from (pid=96635) save /opt/stack/trove/trove/db/models.py:64
+
+
+The configuration file for this instance is:
+
+amrith@m10:~$ cat /etc/trove/conf.d/guest_info.conf
+[DEFAULT]
+guest_id=514ef051-0bf7-48a5-adcf-071d4a6625fb
+datastore_manager=mysql
+tenant_id=56cca8484d3e48869126ada4f355c284
+
+We can now shut down the control plane again and enable the secure
+RPC capability. Observe that we have simply commented out the lines
+(below).
+
+trove-conductor.conf:# enable_secure_rpc_messaging = False
+trove.conf:# enable_secure_rpc_messaging = False
+trove-taskmanager.conf:# enable_secure_rpc_messaging = False
+
+And create another database instance:
+
+amrith@amrith-work:/etc/trove$ trove create m20 25 --size 3 --nic net-id=4bab02e7-87bb-4cc0-8c07-2f282c777c85
++-------------------+--------------------------------------+
+| Property | Value |
++-------------------+--------------------------------------+
+| created | 2017-01-09T18:31:48 |
+| datastore | mysql |
+| datastore_version | 5.6 |
+| flavor | 25 |
+| id | 792fa220-2a40-4831-85af-cfb0ded8033c |
+| name | m20 |
+| region | RegionOne |
+| server_id | None |
+| status | BUILD |
+| updated | 2017-01-09T18:31:48 |
+| volume | 3 |
+| volume_id | None |
++-------------------+--------------------------------------+
+
+Observe that a unique per-instance encryption key was created for this instance.
+
+2017-01-09 13:31:52.474 DEBUG trove.db.models [-] Saving DBInstance: {u'region_id': u'RegionOne', u'cluster_id': None, u'shard_id': None, u'deleted_at': None, u'id': u'792fa220-2a40-4831-85af-cfb0ded8033c', u'datastore_version_id': u'4a881cb5-9e48-4cb2-a209-4283ed44eb01', 'errors': {}, u'hostname': None, u'server_status': None, u'task_description': u'No tasks for the instance.', u'volume_size': 3, u'type': None, u'updated': datetime.datetime(2017, 1, 9, 18, 31, 52, 473552), '_sa_instance_state': <sqlalchemy.orm.state.InstanceState object at 0x7fdb14d44550>, u'encrypted_key': u'fVpHrkUIjVsXe7Fj7Lm4u2xnJUsWX2rMC9GL0AppILJINBZxLvkowY8FOa+asKS+8pWb4iNyukQQ4AQoLEUHUQ==', u'deleted': 0, u'configuration_id': None, u'volume_id': u'4cd563dc-fe08-477b-828f-120facf4351b', u'slave_of_id': None, u'task_start_time': None, u'name': u'm20', u'task_id': 1, u'created': datetime.datetime(2017, 1, 9, 18, 31, 49), u'tenant_id': u'56cca8484d3e48869126ada4f355c284', u'compute_instance_id': u'1e62a192-83d3-43fd-b32e-b5ee2fa4e24b', u'flavor_id': u'25'} from (pid=97562) save /opt/stack/trove/trove/db/models.py:64
+
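+A plausible sketch of what happens here, reusing the hypothetical
+encrypt() helper and CONF options from the earlier sketches (the real
+logic is in trove/taskmanager/models.py and
+trove/common/crypto_utils.py; the key length, alphabet and helper
+names are assumptions)::
+
+    import os
+    import string
+
+    def generate_instance_key(length=32):
+        """Random printable key handed to the guest via guest_info.conf."""
+        chars = string.ascii_letters + string.digits
+        return ''.join(chars[b % len(chars)] for b in os.urandom(length))
+
+    def assign_key(db_instance):
+        """Attach a fresh key; db_instance is a hypothetical DBInstance."""
+        instance_key = generate_instance_key()
+        # Store only the wrapped copy in the catalog's encrypted_key column.
+        db_instance.encrypted_key = encrypt(instance_key,
+                                            CONF.inst_rpc_key_encr_key)
+        return instance_key    # plaintext copy goes into guest_info.conf
+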
+And the configuration file on that instance includes an encryption key.
+
+amrith@m20:~$ cat /etc/trove/conf.d/guest_info.conf
+[DEFAULT]
+guest_id=792fa220-2a40-4831-85af-cfb0ded8033c
+datastore_manager=mysql
+tenant_id=56cca8484d3e48869126ada4f355c284
+instance_rpc_encr_key=eRz43LwE6eaxIbBlA2pNukzPjSdcQkVi
+
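+On the guest side, the agent can pick this value up through
+oslo.config; a minimal sketch, with the option name taken from the
+file above and everything else assumed::
+
+    from oslo_config import cfg
+
+    CONF = cfg.CONF
+    CONF.register_opts([cfg.StrOpt('instance_rpc_encr_key', secret=True)])
+    CONF(['--config-file', '/etc/trove/conf.d/guest_info.conf'])
+
+    key = CONF.instance_rpc_encr_key   # None on pre-encryption instances
+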
+amrith@amrith-work:/etc/trove$ trove list
++--------------------------------------+------+-----------+-------------------+--------+-----------+------+-----------+
+| ID | Name | Datastore | Datastore Version | Status | Flavor ID | Size | Region |
++--------------------------------------+------+-----------+-------------------+--------+-----------+------+-----------+
+| 514ef051-0bf7-48a5-adcf-071d4a6625fb | m10 | mysql | 5.6 | ACTIVE | 25 | 3 | RegionOne |
+| 6d55ab3a-267f-4b95-8ada-33fc98fd1767 | m4 | mysql | 5.6 | ACTIVE | 25 | 3 | RegionOne |
+| 792fa220-2a40-4831-85af-cfb0ded8033c | m20 | mysql | 5.6 | ACTIVE | 25 | 3 | RegionOne |
+| 9ceebd62-e13d-43c5-953a-c0f24f08757e | m3 | mysql | 5.6 | ACTIVE | 25 | 3 | RegionOne |
+| bb0c9213-31f8-4427-8898-c644254b3642 | m2 | mysql | 5.6 | ACTIVE | 25 | 3 | RegionOne |
++--------------------------------------+------+-----------+-------------------+--------+-----------+------+-----------+
+
+At this point, communication between the API service and the task
+manager, and between the control plane and instance m20, is
+encrypted; communication between the control plane and all other
+instances is not.
+
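+This mixed fleet works because the serializer can fall back to
+plaintext whenever no key is known for the peer. Below is a
+hypothetical sketch of that decision, loosely modeled on the idea
+behind the new trove/common/rpc/secure_serializer.py (the class shape
+and method names are assumptions, and encrypt()/decrypt() are the
+helpers sketched earlier)::
+
+    from oslo_serialization import jsonutils
+
+    class SecureSerializer(object):
+        def __init__(self, base, key=None):
+            self._base = base    # the underlying context serializer
+            self._key = key      # None => peer predates RPC encryption
+
+        def serialize_entity(self, ctxt, entity):
+            entity = self._base.serialize_entity(ctxt, entity)
+            if self._key is None:
+                return entity
+            return encrypt(jsonutils.dumps(entity), self._key)
+
+        def deserialize_entity(self, ctxt, entity):
+            if self._key is not None:
+                entity = jsonutils.loads(decrypt(entity, self._key))
+            return self._base.deserialize_entity(ctxt, entity)
+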
+In this condition we can attempt some operations on the various
+instances, starting with a legacy instance created on software that
+predates the secure RPC mechanism.
+
+amrith@amrith-work:/etc/trove$ trove database-list m2
++------+
+| Name |
++------+
++------+
+amrith@amrith-work:/etc/trove$ trove database-create m2 foo2
+amrith@amrith-work:/etc/trove$ trove database-list m2
++------+
+| Name |
++------+
+| foo2 |
++------+
+
+Next, the same operations against instance m10, which was created
+with the current software but without RPC encryption.
+
+amrith@amrith-work:/etc/trove$ trove database-list m10
++------+
+| Name |
++------+
++------+
+amrith@amrith-work:/etc/trove$ trove database-create m10 foo10
+amrith@amrith-work:/etc/trove$ trove database-list m10
++-------+
+| Name |
++-------+
+| foo10 |
++-------+
+amrith@amrith-work:/etc/trove$
+
+And finally, with an instance that uses encrypted RPC communication.
+
+amrith@amrith-work:/etc/trove$ trove database-list m20
++------+
+| Name |
++------+
++------+
+amrith@amrith-work:/etc/trove$ trove database-create m20 foo20
+amrith@amrith-work:/etc/trove$ trove database-list m20
++-------+
+| Name |
++-------+
+| foo20 |
++-------+
+
+Finally, we can upgrade an instance that has no encryption so that it
+uses RPC encryption.
+
+amrith@amrith-work:/etc/trove$ trove datastore-list
++--------------------------------------+------------------+
+| ID | Name |
++--------------------------------------+------------------+
+| 8e052edb-5f14-4aec-9149-0a80a30cf5e4 | mysql |
++--------------------------------------+------------------+
+amrith@amrith-work:/etc/trove$ trove datastore-version-list mysql
++--------------------------------------+------------------+
+| ID | Name |
++--------------------------------------+------------------+
+| 4a881cb5-9e48-4cb2-a209-4283ed44eb01 | 5.6 |
++--------------------------------------+------------------+
+
+Let's look at instance m2.
+
+mysql> select id, name, encrypted_key from instances where id = 'bb0c9213-31f8-4427-8898-c644254b3642';
++--------------------------------------+------+---------------+
+| id | name | encrypted_key |
++--------------------------------------+------+---------------+
+| bb0c9213-31f8-4427-8898-c644254b3642 | m2 | NULL |
++--------------------------------------+------+---------------+
+1 row in set (0.00 sec)
+
+amrith@amrith-work:/etc/trove$ trove upgrade m2 4a881cb5-9e48-4cb2-a209-4283ed44eb01
+
+amrith@amrith-work:/etc/trove$ trove list
++--------------------------------------+------+-----------+-------------------+---------+-----------+------+-----------+
+| ID | Name | Datastore | Datastore Version | Status | Flavor ID | Size | Region |
++--------------------------------------+------+-----------+-------------------+---------+-----------+------+-----------+
+| 514ef051-0bf7-48a5-adcf-071d4a6625fb | m10 | mysql | 5.6 | ACTIVE | 25 | 3 | RegionOne |
+| 6d55ab3a-267f-4b95-8ada-33fc98fd1767 | m4 | mysql | 5.6 | ACTIVE | 25 | 3 | RegionOne |
+| 792fa220-2a40-4831-85af-cfb0ded8033c | m20 | mysql | 5.6 | ACTIVE | 25 | 3 | RegionOne |
+| 9ceebd62-e13d-43c5-953a-c0f24f08757e | m3 | mysql | 5.6 | ACTIVE | 25 | 3 | RegionOne |
+| bb0c9213-31f8-4427-8898-c644254b3642 | m2 | mysql | 5.6 | UPGRADE | 25 | 3 | RegionOne |
++--------------------------------------+------+-----------+-------------------+---------+-----------+------+-----------+
+
+amrith@amrith-work:/etc/trove$ nova list
++--------------------------------------+------+---------+------------+-------------+--------------------+
+| ID | Name | Status | Task State | Power State | Networks |
++--------------------------------------+------+---------+------------+-------------+--------------------+
+[...]
+| a4769ce2-4e22-4134-b958-6db6c23cb221 | m2 | REBUILD | rebuilding | Running | public=172.24.4.4 |
+[...]
++--------------------------------------+------+---------+------------+-------------+--------------------+
+
+
+2017-01-09 13:47:24.337 DEBUG trove.db.models [-] Saving DBInstance: {u'region_id': u'RegionOne', u'cluster_id': None, u'shard_id': None, u'deleted_at': None, u'id': u'bb0c9213-31f8-4427-8898-c644254b3642', u'datastore_version_id': u'4a881cb5-9e48-4cb2-a209-4283ed44eb01', 'errors': {}, u'hostname': None, u'server_status': None, u'task_description': u'Upgrading the instance.', u'volume_size': 3, u'type': None, u'updated': datetime.datetime(2017, 1, 9, 18, 47, 24, 337400), '_sa_instance_state': <sqlalchemy.orm.state.InstanceState object at 0x7fdb14d44150>, u'encrypted_key': u'gMrlHkEVxKgEFMTabzZr2TLJ6r5+wgfJfhohs7K/BzutWxs1wXfBswyV5Bgw4qeD212msmgSdOUCFov5otgzyg==', u'deleted': 0, u'configuration_id': None, u'volume_id': u'16e57e3f-b462-4db2-968b-3c284aa2751c', u'slave_of_id': None, u'task_start_time': None, u'name': u'm2', u'task_id': 89, u'created': datetime.datetime(2017, 1, 9, 18, 17, 13), u'tenant_id': u'56cca8484d3e48869126ada4f355c284', u'compute_instance_id': u'a4769ce2-4e22-4134-b958-6db6c23cb221', u'flavor_id': u'25'} from (pid=97562) save /opt/stack/trove/trove/db/models.py:64
+2017-01-09 13:47:24.347 DEBUG trove.taskmanager.models [-] Generated unique RPC encryption key for instance = bb0c9213-31f8-4427-8898-c644254b3642, key = gMrlHkEVxKgEFMTabzZr2TLJ6r5+wgfJfhohs7K/BzutWxs1wXfBswyV5Bgw4qeD212msmgSdOUCFov5otgzyg== from (pid=97562) upgrade /opt/stack/trove/trove/taskmanager/models.py:1440
+2017-01-09 13:47:24.350 DEBUG trove.taskmanager.models [-] Rebuilding instance m2(bb0c9213-31f8-4427-8898-c644254b3642) with image ea05cba7-2f70-4745-abea-136d7bcc16c7. from (pid=97562) upgrade /opt/stack/trove/trove/taskmanager/models.py:1445
+
+The instance now has an encryption key in its configuration:
+
+amrith@m2:~$ cat /etc/trove/conf.d/guest_info.conf
+[DEFAULT]
+guest_id=bb0c9213-31f8-4427-8898-c644254b3642
+datastore_manager=mysql
+tenant_id=56cca8484d3e48869126ada4f355c284
+instance_rpc_encr_key=pN2hHEl171ngyD0mPvyV1xKJF2im01Gv
+
+amrith@amrith-work:/etc/trove$ trove list
++--------------------------------------+------+-----------+-------------------+--------+-----------+------+-----------+
+| ID | Name | Datastore | Datastore Version | Status | Flavor ID | Size | Region |
++--------------------------------------+------+-----------+-------------------+--------+-----------+------+-----------+
+[...]
+| bb0c9213-31f8-4427-8898-c644254b3642 | m2 | mysql | 5.6 | ACTIVE | 25 | 3 | RegionOne |
+[...]
++--------------------------------------+------+-----------+-------------------+--------+-----------+------+-----------+
+
+amrith@amrith-work:/etc/trove$ trove show m2
++-------------------+--------------------------------------+
+| Property | Value |
++-------------------+--------------------------------------+
+| created | 2017-01-09T18:17:13 |
+| datastore | mysql |
+| datastore_version | 5.6 |
+| flavor | 25 |
+| id | bb0c9213-31f8-4427-8898-c644254b3642 |
+| name | m2 |
+| region | RegionOne |
+| server_id | a4769ce2-4e22-4134-b958-6db6c23cb221 |
+| status | ACTIVE |
+| updated | 2017-01-09T18:50:07 |
+| volume | 3 |
+| volume_id | 16e57e3f-b462-4db2-968b-3c284aa2751c |
+| volume_used | 0.13 |
++-------------------+--------------------------------------+
+
+amrith@amrith-work:/etc/trove$ trove database-list m2
++------+
+| Name |
++------+
+| foo2 |
++------+
+
+We can similarly upgrade m4.
+
+2017-01-09 13:51:43.078 DEBUG trove.instance.models [-] Instance 6d55ab3a-267f-4b95-8ada-33fc98fd1767 service status is running. from (pid=97562) load_instance /opt/stack/trove/trove/instance/models.py:534
+2017-01-09 13:51:43.083 DEBUG trove.taskmanager.models [-] Upgrading instance m4(6d55ab3a-267f-4b95-8ada-33fc98fd1767) to new datastore version 5.6(4a881cb5-9e48-4cb2-a209-4283ed44eb01) from (pid=97562) upgrade /opt/stack/trove/trove/taskmanager/models.py:1410
+2017-01-09 13:51:43.087 DEBUG trove.guestagent.api [-] Sending the call to prepare the guest for upgrade. from (pid=97562) pre_upgrade /opt/stack/trove/trove/guestagent/api.py:351
+2017-01-09 13:51:43.087 DEBUG trove.guestagent.api [-] Calling pre_upgrade with timeout 600 from (pid=97562) _call /opt/stack/trove/trove/guestagent/api.py:86
+2017-01-09 13:51:43.088 DEBUG oslo_messaging._drivers.amqpdriver [-] CALL msg_id: 41dbb7fff3dc4f8fa69d8b5f219809e0 exchange 'trove' topic 'guestagent.6d55ab3a-267f-4b95-8ada-33fc98fd1767' from (pid=97562) _send /usr/local/lib/python2.7/dist-packages/oslo_messaging/_drivers/amqpdriver.py:442
+2017-01-09 13:51:45.452 DEBUG oslo_messaging._drivers.amqpdriver [-] received reply msg_id: 41dbb7fff3dc4f8fa69d8b5f219809e0 from (pid=97562) __call__ /usr/local/lib/python2.7/dist-packages/oslo_messaging/_drivers/amqpdriver.py:299
+2017-01-09 13:51:45.452 DEBUG trove.guestagent.api [-] Result is {u'mount_point': u'/var/lib/mysql', u'save_etc_dir': u'/var/lib/mysql/etc', u'home_save': u'/var/lib/mysql/trove_user', u'save_dir': u'/var/lib/mysql/etc_mysql'}. from (pid=97562) _call /opt/stack/trove/trove/guestagent/api.py:91
+2017-01-09 13:51:45.544 DEBUG trove.db.models [-] Saving DBInstance: {u'region_id': u'RegionOne', u'cluster_id': None, u'shard_id': None, u'deleted_at': None, u'id': u'6d55ab3a-267f-4b95-8ada-33fc98fd1767', u'datastore_version_id': u'4a881cb5-9e48-4cb2-a209-4283ed44eb01', 'errors': {}, u'hostname': None, u'server_status': None, u'task_description': u'Upgrading the instance.', u'volume_size': 3, u'type': None, u'updated': datetime.datetime(2017, 1, 9, 18, 51, 45, 544496), '_sa_instance_state': <sqlalchemy.orm.state.InstanceState object at 0x7fdb14972c10>, u'encrypted_key': u'0gBkJl5Aqb4kFIPeJDMTNIymEUuUUB8NBksecTiYyQl+Ibrfi7ME8Bi58q2n61AxbG2coOqp97ETjHRyN7mYTg==', u'deleted': 0, u'configuration_id': None, u'volume_id': u'b7dc17b5-d0a8-47bb-aef4-ef9432c269e9', u'slave_of_id': None, u'task_start_time': None, u'name': u'm4', u'task_id': 89, u'created': datetime.datetime(2017, 1, 9, 18, 20, 58), u'tenant_id': u'56cca8484d3e48869126ada4f355c284', u'compute_instance_id': u'f43bba63-3be6-4993-b2d0-4ddfb7818d27', u'flavor_id': u'25'} from (pid=97562) save /opt/stack/trove/trove/db/models.py:64
+2017-01-09 13:51:45.557 DEBUG trove.taskmanager.models [-] Generated unique RPC encryption key for instance = 6d55ab3a-267f-4b95-8ada-33fc98fd1767, key = 0gBkJl5Aqb4kFIPeJDMTNIymEUuUUB8NBksecTiYyQl+Ibrfi7ME8Bi58q2n61AxbG2coOqp97ETjHRyN7mYTg== from (pid=97562) upgrade /opt/stack/trove/trove/taskmanager/models.py:1440
+2017-01-09 13:51:45.560 DEBUG trove.taskmanager.models [-] Rebuilding instance m4(6d55ab3a-267f-4b95-8ada-33fc98fd1767) with image ea05cba7-2f70-4745-abea-136d7bcc16c7. from (pid=97562) upgrade /opt/stack/trove/trove/taskmanager/models.py:1445
+
+amrith@amrith-work:/etc/trove$ nova list
++--------------------------------------+------+---------+------------+-------------+--------------------+
+| ID | Name | Status | Task State | Power State | Networks |
++--------------------------------------+------+---------+------------+-------------+--------------------+
+[...]
+| f43bba63-3be6-4993-b2d0-4ddfb7818d27 | m4 | REBUILD | rebuilding | Running | public=172.24.4.11 |
+[...]
++--------------------------------------+------+---------+------------+-------------+--------------------+
+
+2017-01-09 13:53:26.581 DEBUG trove.guestagent.api [-] Recover the guest after upgrading the guest's image. from (pid=97562) post_upgrade /opt/stack/trove/trove/guestagent/api.py:359
+2017-01-09 13:53:26.581 DEBUG trove.guestagent.api [-] Recycling the client ... from (pid=97562) post_upgrade /opt/stack/trove/trove/guestagent/api.py:361
+2017-01-09 13:53:26.581 DEBUG trove.guestagent.api [-] Calling post_upgrade with timeout 600 from (pid=97562) _call /opt/stack/trove/trove/guestagent/api.py:86
+2017-01-09 13:53:26.583 DEBUG oslo_messaging._drivers.amqpdriver [-] CALL msg_id: 2e9ccc88715b4b98848a017e19b2938d exchange 'trove' topic 'guestagent.6d55ab3a-267f-4b95-8ada-33fc98fd1767' from (pid=97562) _send /usr/local/lib/python2.7/dist-packages/oslo_messaging/_drivers/amqpdriver.py:442
+
+mysql> select id, name, encrypted_key from instances where name in ('m2', 'm4', 'm10', 'm20');
++--------------------------------------+------+------------------------------------------------------------------------------------------+
+| id | name | encrypted_key |
++--------------------------------------+------+------------------------------------------------------------------------------------------+
+| 514ef051-0bf7-48a5-adcf-071d4a6625fb | m10 | NULL |
+| 6d55ab3a-267f-4b95-8ada-33fc98fd1767 | m4 | 0gBkJl5Aqb4kFIPeJDMTNIymEUuUUB8NBksecTiYyQl+Ibrfi7ME8Bi58q2n61AxbG2coOqp97ETjHRyN7mYTg== |
+| 792fa220-2a40-4831-85af-cfb0ded8033c | m20 | fVpHrkUIjVsXe7Fj7Lm4u2xnJUsWX2rMC9GL0AppILJINBZxLvkowY8FOa+asKS+8pWb4iNyukQQ4AQoLEUHUQ== |
+| bb0c9213-31f8-4427-8898-c644254b3642 | m2 | gMrlHkEVxKgEFMTabzZr2TLJ6r5+wgfJfhohs7K/BzutWxs1wXfBswyV5Bgw4qeD212msmgSdOUCFov5otgzyg== |
++--------------------------------------+------+------------------------------------------------------------------------------------------+
+
+amrith@amrith-work:/etc/trove$ trove list
++--------------------------------------+------+-----------+-------------------+--------+-----------+------+-----------+
+| ID | Name | Datastore | Datastore Version | Status | Flavor ID | Size | Region |
++--------------------------------------+------+-----------+-------------------+--------+-----------+------+-----------+
+| 514ef051-0bf7-48a5-adcf-071d4a6625fb | m10 | mysql | 5.6 | ACTIVE | 25 | 3 | RegionOne |
+| 6d55ab3a-267f-4b95-8ada-33fc98fd1767 | m4 | mysql | 5.6 | ACTIVE | 25 | 3 | RegionOne |
+| 792fa220-2a40-4831-85af-cfb0ded8033c | m20 | mysql | 5.6 | ACTIVE | 25 | 3 | RegionOne |
+| bb0c9213-31f8-4427-8898-c644254b3642 | m2 | mysql | 5.6 | ACTIVE | 25 | 3 | RegionOne |
++--------------------------------------+------+-----------+-------------------+--------+-----------+------+-----------+
+
+Inspecting which instances are using secure RPC communications
+--------------------------------------------------------------
+
+An additional field is returned in the trove show command output to
+indicate whether any given instance is using secure RPC communication
+or not.
+
+NOTE: This field is only returned if the user is an 'admin'. Non-admin
+users do not see the field.
+
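+A sketch of how the view layer might gate this field on the caller's
+role (the real change is in trove/instance/views.py; the class shape
+and attribute names below are hypothetical)::
+
+    class InstanceDetailView(object):     # hypothetical shape
+        def __init__(self, instance, context):
+            self.instance = instance
+            self.context = context
+
+        def data(self):
+            result = {'id': self.instance.id, 'name': self.instance.name}
+            if self.context.is_admin:
+                result['encrypted_rpc_messaging'] = (
+                    self.instance.encrypted_key is not None)
+            return result
+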
+amrith@amrith-work:/opt/stack/trove$ trove show m20
++-------------------------+--------------------------------------+
+| Property | Value |
++-------------------------+--------------------------------------+
+| created | 2017-01-09T18:31:49 |
+| datastore | mysql |
+| datastore_version | 5.6 |
+| encrypted_rpc_messaging | True |
+| flavor | 25 |
+| id | 792fa220-2a40-4831-85af-cfb0ded8033c |
+| name | m20 |
+| region | RegionOne |
+| server_id | 1e62a192-83d3-43fd-b32e-b5ee2fa4e24b |
+| status | ACTIVE |
+| updated | 2017-01-09T18:31:52 |
+| volume | 3 |
+| volume_id | 4cd563dc-fe08-477b-828f-120facf4351b |
+| volume_used | 0.11 |
++-------------------------+--------------------------------------+
+amrith@amrith-work:/opt/stack/trove$ trove show m10
++-------------------------+--------------------------------------+
+| Property | Value |
++-------------------------+--------------------------------------+
+| created | 2017-01-09T18:28:56 |
+| datastore | mysql |
+| datastore_version | 5.6 |
+| encrypted_rpc_messaging | False |
+| flavor | 25 |
+| id | 514ef051-0bf7-48a5-adcf-071d4a6625fb |
+| name | m10 |
+| region | RegionOne |
+| server_id | 2452263e-3d33-48ec-8f24-2851fe74db28 |
+| status | ACTIVE |
+| updated | 2017-01-09T18:29:00 |
+| volume | 3 |
+| volume_id | cee2e17b-80fa-48e5-a488-da8b7809373a |
+| volume_used | 0.11 |
++-------------------------+--------------------------------------+
+amrith@amrith-work:/opt/stack/trove$ trove show m2
++-------------------------+--------------------------------------+
+| Property | Value |
++-------------------------+--------------------------------------+
+| created | 2017-01-09T18:17:13 |
+| datastore | mysql |
+| datastore_version | 5.6 |
+| encrypted_rpc_messaging | True |
+| flavor | 25 |
+| id | bb0c9213-31f8-4427-8898-c644254b3642 |
+| name | m2 |
+| region | RegionOne |
+| server_id | a4769ce2-4e22-4134-b958-6db6c23cb221 |
+| status | ACTIVE |
+| updated | 2017-01-09T18:50:07 |
+| volume | 3 |
+| volume_id | 16e57e3f-b462-4db2-968b-3c284aa2751c |
+| volume_used | 0.13 |
++-------------------------+--------------------------------------+
+amrith@amrith-work:/opt/stack/trove$ trove show m4
++-------------------------+--------------------------------------+
+| Property | Value |
++-------------------------+--------------------------------------+
+| created | 2017-01-09T18:20:58 |
+| datastore | mysql |
+| datastore_version | 5.6 |
+| encrypted_rpc_messaging | True |
+| flavor | 25 |
+| id | 6d55ab3a-267f-4b95-8ada-33fc98fd1767 |
+| name | m4 |
+| region | RegionOne |
+| server_id | f43bba63-3be6-4993-b2d0-4ddfb7818d27 |
+| status | ACTIVE |
+| updated | 2017-01-09T18:54:30 |
+| volume | 3 |
+| volume_id | b7dc17b5-d0a8-47bb-aef4-ef9432c269e9 |
+| volume_used | 0.13 |
++-------------------------+--------------------------------------+
+amrith@amrith-work:/opt/stack/trove$
+
+In the API response, note that the additional key
+"encrypted_rpc_messaging" has been added (as below).
+
+NOTE: This field is only returned if the user is an 'admin'. Non-admin
+users do not see the field.
+
+RESP BODY: {"instance": {"status": "ACTIVE", "updated": "2017-01-09T18:29:00", "name": "m10", "links": [{"href": "https://192.168.126.130:8779/v1.0/56cca8484d3e48869126ada4f355c284/instances/514ef051-0bf7-48a5-adcf-071d4a6625fb", "rel": "self"}, {"href": "https://192.168.126.130:8779/instances/514ef051-0bf7-48a5-adcf-071d4a6625fb", "rel": "bookmark"}], "created": "2017-01-09T18:28:56", "region": "RegionOne", "server_id": "2452263e-3d33-48ec-8f24-2851fe74db28", "id": "514ef051-0bf7-48a5-adcf-071d4a6625fb", "volume": {"used": 0.11, "size": 3}, "volume_id": "cee2e17b-80fa-48e5-a488-da8b7809373a", "flavor": {"id": "25", "links": [{"href": "https://192.168.126.130:8779/v1.0/56cca8484d3e48869126ada4f355c284/flavors/25", "rel": "self"}, {"href": "https://192.168.126.130:8779/flavors/25", "rel": "bookmark"}]}, "datastore": {"version": "5.6", "type": "mysql"}, "encrypted_rpc_messaging": false}}
diff --git a/doc/source/index.rst b/doc/source/index.rst
index f803a376..ed1511d1 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -51,6 +51,7 @@ functionality, the following resources are provided.
dev/guest_cloud_init.rst
dev/notifier.rst
dev/trove_api_extensions.rst
+ dev/secure_oslo_messaging.rst
* Source Code Repositories
diff --git a/integration/scripts/conf/mysql.conf b/integration/scripts/conf/mysql.conf
index eac3c579..0e29d114 100644
--- a/integration/scripts/conf/mysql.conf
+++ b/integration/scripts/conf/mysql.conf
@@ -1,6 +1,6 @@
- "instance_flavor_name": "test.small-3",
- "instance_bigger_flavor_name": "test.small-3.resize",
- "instance_eph_flavor_name": "test.eph.small-3",
- "instance_bigger_eph_flavor_name": "test.eph.small-3.resize",
+ "instance_flavor_name": "test.small-4",
+ "instance_bigger_flavor_name": "test.small-4.resize",
+ "instance_eph_flavor_name": "test.eph.small-4",
+ "instance_bigger_eph_flavor_name": "test.eph.small-4.resize",
"trove_volume_support": true,
"trove_volume_size": 1,
diff --git a/integration/scripts/conf/percona.conf b/integration/scripts/conf/percona.conf
index eac3c579..0e29d114 100644
--- a/integration/scripts/conf/percona.conf
+++ b/integration/scripts/conf/percona.conf
@@ -1,6 +1,6 @@
- "instance_flavor_name": "test.small-3",
- "instance_bigger_flavor_name": "test.small-3.resize",
- "instance_eph_flavor_name": "test.eph.small-3",
- "instance_bigger_eph_flavor_name": "test.eph.small-3.resize",
+ "instance_flavor_name": "test.small-4",
+ "instance_bigger_flavor_name": "test.small-4.resize",
+ "instance_eph_flavor_name": "test.eph.small-4",
+ "instance_bigger_eph_flavor_name": "test.eph.small-4.resize",
"trove_volume_support": true,
"trove_volume_size": 1,
diff --git a/integration/scripts/conf/pxc.conf b/integration/scripts/conf/pxc.conf
index eac3c579..0e29d114 100644
--- a/integration/scripts/conf/pxc.conf
+++ b/integration/scripts/conf/pxc.conf
@@ -1,6 +1,6 @@
- "instance_flavor_name": "test.small-3",
- "instance_bigger_flavor_name": "test.small-3.resize",
- "instance_eph_flavor_name": "test.eph.small-3",
- "instance_bigger_eph_flavor_name": "test.eph.small-3.resize",
+ "instance_flavor_name": "test.small-4",
+ "instance_bigger_flavor_name": "test.small-4.resize",
+ "instance_eph_flavor_name": "test.eph.small-4",
+ "instance_bigger_eph_flavor_name": "test.eph.small-4.resize",
"trove_volume_support": true,
"trove_volume_size": 1,
diff --git a/integration/scripts/files/elements/ubuntu-cassandra/install.d/10-cassandra b/integration/scripts/files/elements/ubuntu-cassandra/install.d/10-cassandra
index aa9e24bf..54e34d68 100755
--- a/integration/scripts/files/elements/ubuntu-cassandra/install.d/10-cassandra
+++ b/integration/scripts/files/elements/ubuntu-cassandra/install.d/10-cassandra
@@ -4,16 +4,16 @@ set -ex
set -o xtrace
export DEBIAN_FRONTEND=noninteractive
-apt-get install -qy curl
+apt-get --allow-unauthenticated install -qy curl
echo "deb http://debian.datastax.com/community stable main" >> /etc/apt/sources.list.d/cassandra.sources.list
curl -L http://debian.datastax.com/debian/repo_key | apt-key add -
apt-get update
-apt-get install -qy openjdk-7-jdk expect python-dev
-apt-get install -qy libxml2-dev ntp mc
-apt-get install -qy libxslt1-dev python-pexpect
-apt-get install -qy python-migrate build-essential
+apt-get --allow-unauthenticated install -qy openjdk-7-jdk expect python-dev
+apt-get --allow-unauthenticated install -qy libxml2-dev ntp mc
+apt-get --allow-unauthenticated install -qy libxslt1-dev python-pexpect
+apt-get --allow-unauthenticated install -qy python-migrate build-essential
-apt-get install dsc21=2.1.* cassandra=2.1.* -qy
+apt-get --allow-unauthenticated install dsc21=2.1.* cassandra=2.1.* -qy
# The Python Driver 2.0 for Apache Cassandra.
pip2 install cassandra-driver
diff --git a/integration/scripts/files/elements/ubuntu-couchbase/install.d/10-couchbase b/integration/scripts/files/elements/ubuntu-couchbase/install.d/10-couchbase
index 0dc67d8b..1303fdfd 100755
--- a/integration/scripts/files/elements/ubuntu-couchbase/install.d/10-couchbase
+++ b/integration/scripts/files/elements/ubuntu-couchbase/install.d/10-couchbase
@@ -2,7 +2,7 @@ set -e
set -o xtrace
export DEBIAN_FRONTEND=noninteractive
-apt-get install -qy curl
-apt-get install -qy libssl0.9.8
+apt-get --allow-unauthenticated install -qy curl
+apt-get --allow-unauthenticated install -qy libssl0.9.8
curl -O http://packages.couchbase.com/releases/2.2.0/couchbase-server-community_2.2.0_x86_64.deb
INSTALL_DONT_START_SERVER=1 dpkg -i couchbase-server-community_2.2.0_x86_64.deb
diff --git a/integration/scripts/files/elements/ubuntu-couchdb/install.d/10-couchdb b/integration/scripts/files/elements/ubuntu-couchdb/install.d/10-couchdb
index 77871d3f..b53f7faa 100755
--- a/integration/scripts/files/elements/ubuntu-couchdb/install.d/10-couchdb
+++ b/integration/scripts/files/elements/ubuntu-couchdb/install.d/10-couchdb
@@ -6,14 +6,14 @@ set -o xtrace
export DEBIAN_FRONTEND=noninteractive
# install the ppa-finding tool for ubuntu 12.0.4 release
-apt-get install -y python-software-properties
+apt-get --allow-unauthenticated install -y python-software-properties
add-apt-repository -y ppa:couchdb/stable
# update cached list of packages
apt-get update -y
# remove any existing couchdb binaries
apt-get remove -yf couchdb couchdb-bin couchdb-common
# install couchdb
-apt-get install -yV couchdb
+apt-get --allow-unauthenticated install -yV couchdb
# install curl to provide a way to interact with CouchDB
# over HTTP REST API
-apt-get install -qy curl
+apt-get --allow-unauthenticated install -qy curl
diff --git a/integration/scripts/files/elements/ubuntu-db2/install.d/10-db2 b/integration/scripts/files/elements/ubuntu-db2/install.d/10-db2
index 63bc3bf7..4495e95a 100755
--- a/integration/scripts/files/elements/ubuntu-db2/install.d/10-db2
+++ b/integration/scripts/files/elements/ubuntu-db2/install.d/10-db2
@@ -19,8 +19,8 @@ echo "127.0.0.1 ${host_name}" >> /etc/hosts
tar -xvzf /tmp/in_target.d/db2.tar.gz
# installing dependencies
-apt-get install libaio1
-apt-get install libstdc++6
+apt-get --allow-unauthenticated install libaio1
+apt-get --allow-unauthenticated install libstdc++6
# start the installation process. Accepts the default installation directory '/opt/ibm/db2/V10.5'
${DB2_PKG_LOCATION}/expc/db2_install -b /opt/ibm/db2/V10.5 -f sysreq -l ${DB2_PKG_LOCATION}/db2_install.log
diff --git a/integration/scripts/files/elements/ubuntu-guest/install.d/05-base-apps b/integration/scripts/files/elements/ubuntu-guest/install.d/05-base-apps
index 5cd392b2..3a8cacfb 100755
--- a/integration/scripts/files/elements/ubuntu-guest/install.d/05-base-apps
+++ b/integration/scripts/files/elements/ubuntu-guest/install.d/05-base-apps
@@ -7,4 +7,4 @@ set -e
set -o xtrace
export DEBIAN_FRONTEND=noninteractive
-apt-get -y install ntp apparmor-utils
+apt-get --allow-unauthenticated -y install ntp apparmor-utils
diff --git a/integration/scripts/files/elements/ubuntu-guest/install.d/15-trove-dep b/integration/scripts/files/elements/ubuntu-guest/install.d/15-trove-dep
index 697d3610..c79ad877 100755
--- a/integration/scripts/files/elements/ubuntu-guest/install.d/15-trove-dep
+++ b/integration/scripts/files/elements/ubuntu-guest/install.d/15-trove-dep
@@ -7,7 +7,7 @@ set -e
set -o xtrace
export DEBIAN_FRONTEND=noninteractive
-apt-get -y install python-dev libxml2-dev libxslt1-dev python-setuptools \
+apt-get --allow-unauthenticated -y install python-dev libxml2-dev libxslt1-dev python-setuptools \
python-pip python-sqlalchemy python-lxml \
python-routes python-eventlet python-webob \
python-pastedeploy python-paste python-netaddr \
diff --git a/integration/scripts/files/elements/ubuntu-guest/pre-install.d/04-baseline-tools b/integration/scripts/files/elements/ubuntu-guest/pre-install.d/04-baseline-tools
index 1a8647f5..8360ddfc 100755
--- a/integration/scripts/files/elements/ubuntu-guest/pre-install.d/04-baseline-tools
+++ b/integration/scripts/files/elements/ubuntu-guest/pre-install.d/04-baseline-tools
@@ -4,4 +4,4 @@
set -e
set -o xtrace
-apt-get install -y language-pack-en python-software-properties
\ No newline at end of file
+apt-get --allow-unauthenticated install -y language-pack-en python-software-properties
\ No newline at end of file
diff --git a/integration/scripts/files/elements/ubuntu-mongodb/install.d/20-mongodb b/integration/scripts/files/elements/ubuntu-mongodb/install.d/20-mongodb
index 6a95d2f0..4ab5c349 100755
--- a/integration/scripts/files/elements/ubuntu-mongodb/install.d/20-mongodb
+++ b/integration/scripts/files/elements/ubuntu-mongodb/install.d/20-mongodb
@@ -5,4 +5,4 @@ set -o xtrace
export DEBIAN_FRONTEND=noninteractive
-apt-get -y install mongodb-org=3.2.6
+apt-get --allow-unauthenticated -y install mongodb-org=3.2.6
diff --git a/integration/scripts/files/elements/ubuntu-mongodb/pre-install.d/10-mongodb-apt-key b/integration/scripts/files/elements/ubuntu-mongodb/pre-install.d/10-mongodb-apt-key
index 1538d61c..1345f508 100755
--- a/integration/scripts/files/elements/ubuntu-mongodb/pre-install.d/10-mongodb-apt-key
+++ b/integration/scripts/files/elements/ubuntu-mongodb/pre-install.d/10-mongodb-apt-key
@@ -5,7 +5,7 @@ set -o xtrace
[ -n "${RELEASE}" ] || die "RELEASE must be set to either Precise or Quantal"
-apt-get -y install software-properties-common
+apt-get --allow-unauthenticated -y install software-properties-common
apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv EA312927
diff --git a/integration/scripts/files/elements/ubuntu-percona/install.d/30-mysql b/integration/scripts/files/elements/ubuntu-percona/install.d/30-mysql
index 5d5b4265..d5a8ac5b 100755
--- a/integration/scripts/files/elements/ubuntu-percona/install.d/30-mysql
+++ b/integration/scripts/files/elements/ubuntu-percona/install.d/30-mysql
@@ -14,4 +14,4 @@ export DEBIAN_FRONTEND=noninteractive
if [[ $BRANCH_OVERRIDE == "stable/kilo" || $BRANCH_OVERRIDE == "stable/liberty" ]]; then
PXB_VERSION_OVERRIDE="-22"
fi
-apt-get -y install percona-toolkit percona-server-common-5.6 percona-server-server-5.6 percona-server-test-5.6 percona-server-client-5.6 percona-xtrabackup${PXB_VERSION_OVERRIDE}
+apt-get --allow-unauthenticated -y install percona-toolkit percona-server-common-5.6 percona-server-server-5.6 percona-server-test-5.6 percona-server-client-5.6 percona-xtrabackup${PXB_VERSION_OVERRIDE}
diff --git a/integration/scripts/files/elements/ubuntu-postgresql/install.d/30-postgresql b/integration/scripts/files/elements/ubuntu-postgresql/install.d/30-postgresql
index f3a107a4..d5b5f505 100755
--- a/integration/scripts/files/elements/ubuntu-postgresql/install.d/30-postgresql
+++ b/integration/scripts/files/elements/ubuntu-postgresql/install.d/30-postgresql
@@ -31,7 +31,7 @@ exit \$?
_EOF_
-apt-get -y install postgresql-9.4 postgresql-contrib-9.4 postgresql-server-dev-9.4
+apt-get --allow-unauthenticated -y install postgresql-9.4 postgresql-contrib-9.4 postgresql-server-dev-9.4
###########################################
# Hack alert:
@@ -50,19 +50,19 @@ git clone https://github.com/vmware/pg_rewind.git --branch REL9_4_STABLE
dev_pkgs="libreadline-dev libkrb5-dev libssl-dev libpam-dev libxml2-dev libxslt-dev libedit-dev libselinux1-dev bison flex"
-apt-get install $dev_pkgs -y
+apt-get --allow-unauthenticated install $dev_pkgs -y
# Unfortunately, on ubuntu, was not able to get pg_rewind to build
# outside of the pgsql source tree. Configure and compile postgres
# but only call make install against the contrib/pg_rewind directory
# so that support library is accessible to the server
cd $tmpdir/postgres
-./configure
+./configure
make
cd contrib/pg_rewind
make install
-# Make the pg_rewind binary and the library used by the
+# Make the pg_rewind binary and the library used by the
# pg_rewind stored procedures accessible
ln -s /usr/local/pgsql/bin/pg_rewind /usr/bin/pg_rewind
ln -s /usr/local/pgsql/lib/pg_rewind_support.so /usr/lib/postgresql/9.4/lib/pg_rewind_support.so
@@ -75,5 +75,5 @@ apt-get remove -y $dev_pkgs
################################
# Install the native Python client.
-apt-get -y install libpq-dev
+apt-get --allow-unauthenticated -y install libpq-dev
pip2 install psycopg2
diff --git a/integration/scripts/files/elements/ubuntu-pxc/install.d/30-mysql b/integration/scripts/files/elements/ubuntu-pxc/install.d/30-mysql
index ae658957..d9f2f427 100755
--- a/integration/scripts/files/elements/ubuntu-pxc/install.d/30-mysql
+++ b/integration/scripts/files/elements/ubuntu-pxc/install.d/30-mysql
@@ -7,7 +7,7 @@ set -e
set -o xtrace
export DEBIAN_FRONTEND=noninteractive
-apt-get -y install percona-xtradb-cluster-server-5.6 percona-xtradb-cluster-client-5.6 percona-xtrabackup
+apt-get --allow-unauthenticated -y install percona-xtradb-cluster-server-5.6 percona-xtradb-cluster-client-5.6 percona-xtrabackup
# Don't auto start mysql (we'll start it up in guest)
update-rc.d mysql defaults
diff --git a/integration/scripts/files/elements/ubuntu-redis/install.d/30-redis b/integration/scripts/files/elements/ubuntu-redis/install.d/30-redis
index 6930f519..84b091f1 100755
--- a/integration/scripts/files/elements/ubuntu-redis/install.d/30-redis
+++ b/integration/scripts/files/elements/ubuntu-redis/install.d/30-redis
@@ -34,7 +34,7 @@ _EOF_
add-apt-repository -y ppa:chris-lea/redis-server
apt-get -y update
-apt-get install -y redis-server
+apt-get --allow-unauthenticated install -y redis-server
cat > "/etc/default/redis-server" << _EOF_
# Call ulimit -n with this argument prior to invoking Redis itself.
diff --git a/integration/scripts/files/elements/ubuntu-trusty-mariadb/install.d/30-mariadb b/integration/scripts/files/elements/ubuntu-trusty-mariadb/install.d/30-mariadb
index 065c2f98..eba83c70 100755
--- a/integration/scripts/files/elements/ubuntu-trusty-mariadb/install.d/30-mariadb
+++ b/integration/scripts/files/elements/ubuntu-trusty-mariadb/install.d/30-mariadb
@@ -10,7 +10,7 @@ export DEBIAN_FRONTEND=noninteractive
# NOTE(vkmc): Using MariaDB repositories is required
# https://mariadb.com/kb/en/mariadb/installing-mariadb-deb-files/
-apt-get -y install software-properties-common
+apt-get --allow-unauthenticated -y install software-properties-common
apt-key adv --recv-keys --keyserver hkp://keyserver.ubuntu.com:80 0xcbcb082a1bb943db
add-apt-repository 'deb http://ftp.osuosl.org/pub/mariadb/repo/10.1/ubuntu trusty main'
@@ -25,8 +25,8 @@ apt-get -y update
if [[ $BRANCH_OVERRIDE == "stable/kilo" || $BRANCH_OVERRIDE == "stable/liberty" ]]; then
PXB_VERSION_OVERRIDE="-22"
fi
-apt-get -y install socat percona-xtrabackup${PXB_VERSION_OVERRIDE}
-apt-get -y install libmariadbclient18 mariadb-server
+apt-get --allow-unauthenticated -y install socat percona-xtrabackup${PXB_VERSION_OVERRIDE}
+apt-get --allow-unauthenticated -y install libmariadbclient18 mariadb-server
cat >/etc/mysql/conf.d/no_perf_schema.cnf <<_EOF_
[mysqld]
diff --git a/integration/scripts/files/elements/ubuntu-trusty-mysql/install.d/30-mysql b/integration/scripts/files/elements/ubuntu-trusty-mysql/install.d/30-mysql
index d31292ec..887bda2f 100755
--- a/integration/scripts/files/elements/ubuntu-trusty-mysql/install.d/30-mysql
+++ b/integration/scripts/files/elements/ubuntu-trusty-mysql/install.d/30-mysql
@@ -15,7 +15,7 @@ export DEBIAN_FRONTEND=noninteractive
if [[ $BRANCH_OVERRIDE == "stable/kilo" || $BRANCH_OVERRIDE == "stable/liberty" ]]; then
PXB_VERSION_OVERRIDE="-22"
fi
-apt-get -y install libmysqlclient18 mysql-server-5.6 percona-xtrabackup${PXB_VERSION_OVERRIDE}
+apt-get --allow-unauthenticated -y install libmysqlclient18 mysql-server-5.6 percona-xtrabackup${PXB_VERSION_OVERRIDE}
cat >/etc/mysql/conf.d/no_perf_schema.cnf <<_EOF_
[mysqld]
diff --git a/integration/scripts/files/elements/ubuntu-vertica/install.d/97-vertica b/integration/scripts/files/elements/ubuntu-vertica/install.d/97-vertica
index 2d50bc6a..bd2b72ac 100755
--- a/integration/scripts/files/elements/ubuntu-vertica/install.d/97-vertica
+++ b/integration/scripts/files/elements/ubuntu-vertica/install.d/97-vertica
@@ -13,11 +13,11 @@ export DEBIAN_FRONTEND=noninteractive
dd if=/tmp/in_target.d/vertica.deb of=/vertica.deb
# Install base packages
-apt-get install -qy build-essential bc iptables
-apt-get install -qy curl sysstat pstack mcelog
-apt-get install -qy python-dev g++ unixODBC unixODBC-dev dialog
-apt-get install -qy dialog libbz2-dev libboost-all-dev libcurl4-gnutls-dev
-apt-get install -qy openjdk-7-jdk
+apt-get --allow-unauthenticated install -qy build-essential bc iptables
+apt-get --allow-unauthenticated install -qy curl sysstat pstack mcelog
+apt-get --allow-unauthenticated install -qy python-dev g++ unixODBC unixODBC-dev dialog
+apt-get --allow-unauthenticated install -qy dialog libbz2-dev libboost-all-dev libcurl4-gnutls-dev
+apt-get --allow-unauthenticated install -qy openjdk-7-jdk
# Install Vertica package
dpkg -i /vertica.deb
diff --git a/integration/scripts/files/elements/ubuntu-xenial-mysql/install.d/30-mysql b/integration/scripts/files/elements/ubuntu-xenial-mysql/install.d/30-mysql
index 9f0133f4..75ccdc66 100755
--- a/integration/scripts/files/elements/ubuntu-xenial-mysql/install.d/30-mysql
+++ b/integration/scripts/files/elements/ubuntu-xenial-mysql/install.d/30-mysql
@@ -11,7 +11,7 @@ export DEBIAN_FRONTEND=noninteractive
add-apt-repository 'deb http://archive.ubuntu.com/ubuntu trusty universe'
apt-get -y update
-apt-get -y install mysql-client-5.6 mysql-server-5.6 percona-xtrabackup${PXB_VERSION_OVERRIDE}
+apt-get --allow-unauthenticated -y install mysql-client-5.6 mysql-server-5.6 percona-xtrabackup${PXB_VERSION_OVERRIDE}
cat >/etc/mysql/conf.d/no_perf_schema.cnf <<_EOF_
[mysqld]
diff --git a/integration/scripts/trovestack b/integration/scripts/trovestack
index a25894f3..c9e43b80 100755
--- a/integration/scripts/trovestack
+++ b/integration/scripts/trovestack
@@ -672,7 +672,7 @@ function install_test_packages() {
sudo -H $HTTP_PROXY curl http://packages.couchbase.com/ubuntu/couchbase.key | sudo apt-key add -
echo "deb http://packages.couchbase.com/ubuntu trusty trusty/main" | sudo tee /etc/apt/sources.list.d/couchbase-csdk.list
sudo -H $HTTP_PROXY apt-get update
- sudo -H $HTTP_PROXY apt-get -y install libcouchbase-dev
+ sudo -H $HTTP_PROXY apt-get --allow-unauthenticated -y install libcouchbase-dev
sudo -H $HTTP_PROXY pip install --upgrade couchbase
fi
fi
@@ -1084,12 +1084,12 @@ function cmd_example_tests() {
function mysql_nova() {
echo mysql nova --execute "$@"
- mysql -u root -p$MYSQL_PASSWORD nova --execute "$@"
+ mysql -u root -p$MYSQL_PASSWORD nova --execute "$@" 2> /dev/null
}
function mysql_trove() {
echo mysql trove --execute "$@"
- mysql -u root -p$MYSQL_PASSWORD trove --execute "$@"
+ mysql -u root -p$MYSQL_PASSWORD trove --execute "$@" 2> /dev/null
}
function cmd_wipe_logs() {
@@ -1230,6 +1230,12 @@ function cmd_clean() {
exit 1
fi
source "${PATH_DEVSTACK_SRC}"/accrc/${project_name}/admin
+ local cloud_arg=$CLOUD_ADMIN_ARG
+ if [[ $project_name == *"alt"* ]]; then
+ cloud_arg="--os-cloud=devstack-alt-admin"
+ elif [[ $project_name == "demo" ]]; then
+ cloud_arg="--os-cloud=devstack"
+ fi
# delete any trove clusters
exec_cmd_on_output "trove cluster-list" "trove cluster-delete" 20
# delete any trove instances
@@ -1237,16 +1243,16 @@ function cmd_clean() {
# delete any backups
exec_cmd_on_output "trove backup-list" "trove backup-delete"
# clean up any remaining nova instances or cinder volumes
- exec_cmd_on_output "openstack $CLOUD_ADMIN_ARG server list" "openstack $CLOUD_ADMIN_ARG server delete" 5
- exec_cmd_on_output "openstack $CLOUD_ADMIN_ARG volume list" "openstack $CLOUD_ADMIN_ARG volume delete" 1
+ exec_cmd_on_output "openstack $cloud_arg server list" "openstack $cloud_arg server delete" 5
+ exec_cmd_on_output "openstack $cloud_arg volume list" "openstack $cloud_arg volume delete" 1
# delete any config groups since all instances should be gone now
exec_cmd_on_output "trove configuration-list" "trove configuration-delete"
# delete any modules too
exec_cmd_on_output "trove module-list" "trove module-delete"
# make sure that security groups are also gone, except the default
- exec_cmd_on_output "openstack $CLOUD_ADMIN_ARG security group list" "openstack $CLOUD_ADMIN_ARG security group delete" 0 "default"
+ exec_cmd_on_output "openstack $cloud_arg security group list" "openstack $cloud_arg security group delete" 0 "default"
# delete server groups
- exec_cmd_on_output "openstack $CLOUD_ADMIN_ARG server group list" "openstack $CLOUD_ADMIN_ARG server group delete"
+ exec_cmd_on_output "openstack $cloud_arg server group list" "openstack $cloud_arg server group delete"
}
function cmd_kick_start() {
@@ -1265,7 +1271,7 @@ function cmd_kick_start() {
}
function cmd_dsvm_gate_tests() {
- ACTUAL_HOSTNAME=$(hostname -I | sed 's/[0-9]*\.[0-9]*\.[0-9]*\.1\b//g' | sed 's/[0-9a-z][0-9a-z]*:.*:[0-9a-z][0-9a-z]*//g' | sed 's/ /\n/g' | sed '/^$/d' | sort -bu | head -1)
+ ACTUAL_HOSTNAME=$(hostname -I | sed 's/[0-9a-z][0-9a-z]*:.*:[0-9a-z][0-9a-z]*//g' | sed 's/[0-9]*\.[0-9]*\.[0-9]*\.1\b//g' | sed 's/ /\n/g' | sed '/^$/d' | sort -bu | head -1)
local DATASTORE_TYPE=${1:-'mysql'}
local TEST_GROUP=${2:-${DATASTORE_TYPE}}
@@ -1275,6 +1281,35 @@ function cmd_dsvm_gate_tests() {
local ESCAPED_PATH_TROVE=${6:-'\/opt\/stack\/new\/trove'}
exclaim "Running cmd_dsvm_gate_tests ..."
+
+ # Sometimes in the gate the ACTUAL_HOSTNAME is blank; this code logs each filtering pass to help debug it
+ if [[ -z "${CONTROLLER_IP}" ]]; then
+ echo "*** CONTROLLER_IP is blank, trying to determine actual hostname"
+ local hostname_part=$(hostname -I)
+ echo "Hostname pass 1: $hostname_part"
+ hostname_part=$(echo $hostname_part | sed 's/[0-9a-z][0-9a-z]*:.*:[0-9a-z][0-9a-z]*//g')
+ echo "Hostname pass 2: $hostname_part"
+ hostname_part_no_ip6=$hostname_part
+ hostname_part=$(echo $hostname_part | sed 's/[0-9]*\.[0-9]*\.[0-9]*\.1\b//g')
+ echo "Hostname pass 3: $hostname_part"
+ if [[ -z "${hostname_part}" ]]; then
+ # This seems to occur when the actual hostname ends with '.1'
+ # If this happens, take the first one that doesn't start with '192' or '172'
+ hostname_part=$(echo $hostname_part_no_ip6 | sed 's/1[79]2\.[0-9]*\.[0-9]*\.1\b//g')
+ echo "Hostname pass 3a: $hostname_part"
+ fi
+ hostname_part=$(echo $hostname_part | sed 's/ /\n/g')
+ echo "Hostname pass 4: $hostname_part"
+ hostname_part=$(echo $hostname_part | sed '/^$/d')
+ echo "Hostname pass 5: $hostname_part"
+ hostname_part=$(echo $hostname_part | sort -bu)
+ echo "Hostname pass 6: $hostname_part"
+ hostname_part=$(echo $hostname_part | head -1)
+ echo "Hostname pass 7: $hostname_part"
+ CONTROLLER_IP=$hostname_part
+ echo "*** CONTROLLER_IP was blank (CONTROLLER_IP now set to '$CONTROLLER_IP')"
+ fi
+
export REPORT_DIRECTORY=${REPORT_DIRECTORY:=$HOME/dsvm-report/}
export TROVE_REPORT_DIR=$HOME/dsvm-report/
TROVESTACK_DUMP_ENV=true
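The sed pipeline above reads as: strip IPv6 addresses, strip addresses ending in '.1', split to one address per line, drop blanks, sort unique, take the first. A minimal Python sketch of the same selection logic, for illustration only (the function name is hypothetical):

    import re

    def pick_controller_ip(hostname_output):
        # Mirror the shell pipeline: drop IPv6 addresses first, then
        # gateway-style addresses ending in '.1'.
        addrs = hostname_output.split()
        addrs = [a for a in addrs
                 if not re.search(r'[0-9a-z]+:.*:[0-9a-z]+', a)]
        no_ip6 = list(addrs)
        addrs = [a for a in addrs if not a.endswith('.1')]
        if not addrs:
            # Pass 3a fallback: the real address ends in '.1', so only
            # discard 172.x.x.1 / 192.x.x.1 candidates.
            addrs = [a for a in no_ip6
                     if not re.match(r'1[79]2\.\d+\.\d+\.1$', a)]
        return sorted(set(addrs))[0] if addrs else None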
diff --git a/integration/tests/integration/int_tests.py b/integration/tests/integration/int_tests.py
index f0930561..fb1dbf98 100644
--- a/integration/tests/integration/int_tests.py
+++ b/integration/tests/integration/int_tests.py
@@ -1,5 +1,4 @@
#!/usr/bin/env python
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# # Copyright (c) 2011 OpenStack, LLC.
# All Rights Reserved.
diff --git a/integration/tests/integration/tests/__init__.py b/integration/tests/integration/tests/__init__.py
index 65f633d2..bc4d5f46 100644
--- a/integration/tests/integration/tests/__init__.py
+++ b/integration/tests/integration/tests/__init__.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-#
# Copyright (c) 2011 OpenStack, LLC.
# All Rights Reserved.
#
diff --git a/integration/tests/integration/tests/colorizer.py b/integration/tests/integration/tests/colorizer.py
index 4dd797dc..31cdb201 100644
--- a/integration/tests/integration/tests/colorizer.py
+++ b/integration/tests/integration/tests/colorizer.py
@@ -1,5 +1,4 @@
#!/usr/bin/env python
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
diff --git a/releasenotes/notes/disply_module_bools_properly-571cca9a87f28339.yaml b/releasenotes/notes/disply_module_bools_properly-571cca9a87f28339.yaml
new file mode 100644
index 00000000..24adce0d
--- /dev/null
+++ b/releasenotes/notes/disply_module_bools_properly-571cca9a87f28339.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - |
+ Module list/show now returns boolean values as True/False instead of
+ 1/0. Bug 1656398
diff --git a/releasenotes/notes/module-ordering-92b6445a8ac3a3bf.yaml b/releasenotes/notes/module-ordering-92b6445a8ac3a3bf.yaml
new file mode 100644
index 00000000..0bae4290
--- /dev/null
+++ b/releasenotes/notes/module-ordering-92b6445a8ac3a3bf.yaml
@@ -0,0 +1,9 @@
+---
+features:
+ - Modules can now be applied in a consistent order,
+ based on the new 'priority_apply' and 'apply_order'
+ attributes when creating them.
+ Blueprint module-management-ordering
+upgrade:
+ - For module ordering to work, db_upgrade must be run
+ on the Trove database.
diff --git a/requirements.txt b/requirements.txt
index 96217542..f68e93ae 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -4,7 +4,7 @@
pbr>=1.8 # Apache-2.0
SQLAlchemy<1.1.0,>=1.0.10 # MIT
eventlet!=0.18.3,>=0.18.2 # MIT
-keystonemiddleware!=4.5.0,>=4.2.0 # Apache-2.0
+keystonemiddleware>=4.12.0 # Apache-2.0
Routes!=2.0,!=2.1,!=2.3.0,>=1.12.3;python_version=='2.7' # MIT
Routes!=2.0,!=2.3.0,>=1.12.3;python_version!='2.7' # MIT
WebOb>=1.6.0 # MIT
@@ -12,21 +12,20 @@ PasteDeploy>=1.5.0 # MIT
Paste # MIT
sqlalchemy-migrate>=0.9.6 # Apache-2.0
netaddr!=0.7.16,>=0.7.13 # BSD
-netifaces>=0.10.4 # MIT
httplib2>=0.7.5 # MIT
-lxml>=2.3 # BSD
+lxml!=3.7.0,>=2.3 # BSD
passlib>=1.7.0 # BSD
python-heatclient>=1.6.1 # Apache-2.0
-python-novaclient!=2.33.0,>=2.29.0 # Apache-2.0
+python-novaclient!=7.0.0,>=6.0.0 # Apache-2.0
python-cinderclient!=1.7.0,!=1.7.1,>=1.6.0 # Apache-2.0
-python-keystoneclient>=3.6.0 # Apache-2.0
-python-swiftclient>=2.2.0 # Apache-2.0
+python-keystoneclient>=3.8.0 # Apache-2.0
+python-swiftclient>=3.2.0 # Apache-2.0
python-designateclient>=1.5.0 # Apache-2.0
python-neutronclient>=5.1.0 # Apache-2.0
python-glanceclient>=2.5.0 # Apache-2.0
iso8601>=0.1.11 # MIT
jsonschema!=2.5.0,<3.0.0,>=2.0.0 # MIT
-Jinja2>=2.8 # BSD License (3 clause)
+Jinja2!=2.9.0,!=2.9.1,!=2.9.2,!=2.9.3,!=2.9.4,>=2.8 # BSD License (3 clause)
pexpect!=3.3,>=3.1 # ISC License
oslo.config!=3.18.0,>=3.14.0 # Apache-2.0
oslo.context>=2.9.0 # Apache-2.0
@@ -36,14 +35,14 @@ oslo.serialization>=1.10.0 # Apache-2.0
oslo.service>=1.10.0 # Apache-2.0
oslo.utils>=3.18.0 # Apache-2.0
oslo.concurrency>=3.8.0 # Apache-2.0
-PyMySQL!=0.7.7,>=0.7.6 # MIT License
+PyMySQL>=0.7.6 # MIT License
Babel>=2.3.4 # BSD
six>=1.9.0 # MIT
stevedore>=1.17.1 # Apache-2.0
-oslo.messaging>=5.2.0 # Apache-2.0
+oslo.messaging>=5.14.0 # Apache-2.0
osprofiler>=1.4.0 # Apache-2.0
oslo.log>=3.11.0 # Apache-2.0
-oslo.db!=4.13.1,!=4.13.2,>=4.11.0 # Apache-2.0
+oslo.db>=4.15.0 # Apache-2.0
enum34;python_version=='2.7' or python_version=='2.6' or python_version=='3.3' # BSD
xmltodict>=0.10.1 # MIT
pycrypto>=2.6 # Public Domain
diff --git a/run_tests.py b/run_tests.py
index 5f4c98ee..eb00e032 100644
--- a/run_tests.py
+++ b/run_tests.py
@@ -76,7 +76,8 @@ def initialize_trove(config_file):
rpc.init(CONF)
taskman_service = rpc_service.RpcService(
- None, topic=topic, rpc_api_version=rpc_version.RPC_API_VERSION,
+ CONF.taskmanager_rpc_encr_key, topic=topic,
+ rpc_api_version=rpc_version.RPC_API_VERSION,
manager='trove.taskmanager.manager.Manager')
taskman_service.start()
diff --git a/test-requirements.txt b/test-requirements.txt
index db7b528b..36e260d7 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -18,7 +18,7 @@ wsgi-intercept>=1.4.1 # MIT License
proboscis>=1.2.5.3 # Apache-2.0
python-troveclient>=2.2.0 # Apache-2.0
mock>=2.0 # BSD
-mox3>=0.7.0 # Apache-2.0
+mox3!=0.19.0,>=0.7.0 # Apache-2.0
testtools>=1.4.0 # MIT
testrepository>=0.0.18 # Apache-2.0/BSD
pymongo!=3.1,>=3.0.2 # Apache-2.0
diff --git a/tools/trove-pylint.config b/tools/trove-pylint.config
index ea041c5d..fad15049 100644
--- a/tools/trove-pylint.config
+++ b/tools/trove-pylint.config
@@ -718,6 +718,30 @@
"upgrade"
],
[
+ "trove/db/sqlalchemy/migrate_repo/versions/040_module_priority.py",
+ "E1101",
+ "Instance of 'Table' has no 'create_column' member",
+ "upgrade"
+ ],
+ [
+ "trove/db/sqlalchemy/migrate_repo/versions/040_module_priority.py",
+ "no-member",
+ "Instance of 'Table' has no 'create_column' member",
+ "upgrade"
+ ],
+ [
+ "trove/db/sqlalchemy/migrate_repo/versions/041_instance_keys.py",
+ "E1101",
+ "Instance of 'Table' has no 'create_column' member",
+ "upgrade"
+ ],
+ [
+ "trove/db/sqlalchemy/migrate_repo/versions/041_instance_keys.py",
+ "no-member",
+ "Instance of 'Table' has no 'create_column' member",
+ "upgrade"
+ ],
+ [
"trove/db/sqlalchemy/migration.py",
"E0611",
"No name 'exceptions' in module 'migrate.versioning'",
@@ -1097,11 +1121,23 @@
],
[
"trove/instance/models.py",
+ "E1101",
+ "Instance of 'DBInstance' has no 'encrypted_key' member",
+ "DBInstance.key"
+ ],
+ [
+ "trove/instance/models.py",
"no-member",
"Class 'InstanceStatus' has no 'LOGGING' member",
"SimpleInstance.status"
],
[
+ "trove/instance/models.py",
+ "no-member",
+ "Instance of 'DBInstance' has no 'encrypted_key' member",
+ "DBInstance.key"
+ ],
+ [
"trove/instance/service.py",
"E1101",
"Instance of 'BuiltInstance' has no 'get_default_configuration_template' member",
@@ -1487,4 +1523,4 @@
"--rcfile=./pylintrc",
"-E"
]
-}
+}
\ No newline at end of file
diff --git a/tools/trove-pylint.py b/tools/trove-pylint.py
index a31d9db9..785c4bf6 100755
--- a/tools/trove-pylint.py
+++ b/tools/trove-pylint.py
@@ -194,6 +194,13 @@ def usage():
print("\t rebuild: rebuild the list of exceptions to ignore.")
return 0
+class ParseableTextReporter(text.TextReporter):
+ name = 'parseable'
+ line_format = '{path}:{line}: [{msg_id}({symbol}), {obj}] {msg}'
+
+ # that's it folks
+
+
class LintRunner(object):
def __init__(self):
self.config = Config()
@@ -204,7 +211,7 @@ class LintRunner(object):
exceptions = set()
buffer = csio()
- reporter = text.ParseableTextReporter(output=buffer)
+ reporter = ParseableTextReporter(output=buffer)
options = list(self.config.get('options'))
options.append(filename)
lint.Run(options, reporter=reporter, exit=False)
@@ -226,8 +233,8 @@ class LintRunner(object):
func = tokens[4]
message = tokens[5]
- if not self.config.ignore(fn, code, codename, message):
- exceptions.add((fn, ln, code, codename, func, message))
+ if not self.config.ignore(fn, code, codename, message):
+ exceptions.add((fn, ln, code, codename, func, message))
return exceptions
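Newer pylint releases dropped text.ParseableTextReporter, so the tool now carries its own subclass with the same line_format. A hedged sketch of pulling the fields back out of one emitted line (this regex is an illustration, not the runner's actual tokenizer):

    import re

    # Example line:
    #   trove/foo.py:42: [E1101(no-member), Bar.baz] Instance of 'X' has no 'y' member
    LINE_RE = re.compile(
        r'^(?P<path>[^:]+):(?P<line>\d+): '
        r'\[(?P<msg_id>[^(]+)\((?P<symbol>[^)]+)\), (?P<obj>[^\]]*)\] (?P<msg>.*)$')

    def parse(line):
        match = LINE_RE.match(line)
        return match.groupdict() if match else None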
diff --git a/tox.ini b/tox.ini
index f80d24df..81e9aba4 100644
--- a/tox.ini
+++ b/tox.ini
@@ -75,7 +75,7 @@ show-source = True
ignore = F821,H301,H404,H405,H501
enable-extensions = H203,H106
builtins = _
-exclude=.venv,.tox,.git,dist,doc,*egg,tools,etc,build,*.po,*.pot,integration
+exclude=.venv,.tox,.git,dist,doc,*egg,tools,etc,build,*.po,*.pot,integration,releasenotes
filename=*.py,trove-*
[testenv:api-ref]
diff --git a/trove/cmd/conductor.py b/trove/cmd/conductor.py
index daff5df4..793ad6b6 100644
--- a/trove/cmd/conductor.py
+++ b/trove/cmd/conductor.py
@@ -22,6 +22,7 @@ from trove.conductor import api as conductor_api
@with_initialize
def main(conf):
from trove.common import notification
+ from trove.common.rpc import conductor_host_serializer as sz
from trove.common.rpc import service as rpc_service
from trove.instance import models as inst_models
@@ -29,8 +30,9 @@ def main(conf):
inst_models.persist_instance_fault)
topic = conf.conductor_queue
server = rpc_service.RpcService(
- manager=conf.conductor_manager, topic=topic,
- rpc_api_version=conductor_api.API.API_LATEST_VERSION)
+ key=None, manager=conf.conductor_manager, topic=topic,
+ rpc_api_version=conductor_api.API.API_LATEST_VERSION,
+ secure_serializer=sz.ConductorHostSerializer)
workers = conf.trove_conductor_workers or processutils.get_worker_count()
launcher = openstack_service.launch(conf, server, workers=workers)
launcher.wait()
diff --git a/trove/cmd/fakemode.py b/trove/cmd/fakemode.py
index 66e5b3cd..e66431fd 100644
--- a/trove/cmd/fakemode.py
+++ b/trove/cmd/fakemode.py
@@ -54,7 +54,7 @@ def start_fake_taskmanager(conf):
from trove.common.rpc import service as rpc_service
from trove.common.rpc import version as rpc_version
taskman_service = rpc_service.RpcService(
- topic=topic, rpc_api_version=rpc_version.RPC_API_VERSION,
+ key='', topic=topic, rpc_api_version=rpc_version.RPC_API_VERSION,
manager='trove.taskmanager.manager.Manager')
taskman_service.start()
diff --git a/trove/cmd/guest.py b/trove/cmd/guest.py
index ccb33563..19692d14 100644
--- a/trove/cmd/guest.py
+++ b/trove/cmd/guest.py
@@ -30,13 +30,15 @@ from trove.guestagent import api as guest_api
CONF = cfg.CONF
# The guest_id opt definition must match the one in common/cfg.py
CONF.register_opts([openstack_cfg.StrOpt('guest_id', default=None,
- help="ID of the Guest Instance.")])
+ help="ID of the Guest Instance."),
+ openstack_cfg.StrOpt('instance_rpc_encr_key',
+ help=('Key (OpenSSL aes_cbc) for '
+ 'instance RPC encryption.'))])
def main():
cfg.parse_args(sys.argv)
logging.setup(CONF, None)
-
debug_utils.setup()
from trove.guestagent import dbaas
@@ -51,6 +53,9 @@ def main():
"was not injected into the guest or not read by guestagent"))
raise RuntimeError(msg)
+ # BUG(1650518): Cleanup in the Pike release
+ # make it fatal if CONF.instance_rpc_encr_key is None
+
# rpc module must be loaded after decision about thread monkeypatching
# because if thread module is not monkeypatched we can't use eventlet
# executor from oslo_messaging library.
@@ -59,6 +64,7 @@ def main():
from trove.common.rpc import service as rpc_service
server = rpc_service.RpcService(
+ key=CONF.instance_rpc_encr_key,
topic="guestagent.%s" % CONF.guest_id,
manager=manager, host=CONF.guest_id,
rpc_api_version=guest_api.API.API_LATEST_VERSION)
diff --git a/trove/cmd/taskmanager.py b/trove/cmd/taskmanager.py
index aaef017c..549e14b2 100644
--- a/trove/cmd/taskmanager.py
+++ b/trove/cmd/taskmanager.py
@@ -29,8 +29,14 @@ def startup(conf, topic):
notification.DBaaSAPINotification.register_notify_callback(
inst_models.persist_instance_fault)
+
+ if conf.enable_secure_rpc_messaging:
+ key = conf.taskmanager_rpc_encr_key
+ else:
+ key = None
+
server = rpc_service.RpcService(
- manager=conf.taskmanager_manager, topic=topic,
+ key=key, manager=conf.taskmanager_manager, topic=topic,
rpc_api_version=task_api.API.API_LATEST_VERSION)
launcher = openstack_service.launch(conf, server)
launcher.wait()
diff --git a/trove/common/apischema.py b/trove/common/apischema.py
index 4f424107..d9bc8c50 100644
--- a/trove/common/apischema.py
+++ b/trove/common/apischema.py
@@ -567,10 +567,16 @@ guest_log = {
module_contents = {
"type": "string",
"minLength": 1,
- "maxLength": 16777215,
+ "maxLength": 4294967295,
"pattern": "^.*.+.*$"
}
+module_apply_order = {
+ "type": "integer",
+ "minimum": 0,
+ "maximum": 9,
+}
+
module = {
"create": {
"name": "module:create",
@@ -597,6 +603,9 @@ module = {
"all_tenants": boolean_string,
"visible": boolean_string,
"live_update": boolean_string,
+ "priority_apply": boolean_string,
+ "apply_order": module_apply_order,
+ "full_access": boolean_string,
}
}
}
@@ -629,6 +638,9 @@ module = {
"all_datastore_versions": boolean_string,
"visible": boolean_string,
"live_update": boolean_string,
+ "priority_apply": boolean_string,
+ "apply_order": module_apply_order,
+ "full_access": boolean_string,
}
}
}
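The new module_apply_order schema constrains the attribute to a single digit, 0 through 9. A quick validation sketch with the jsonschema library, standalone outside Trove:

    import jsonschema

    module_apply_order = {"type": "integer", "minimum": 0, "maximum": 9}

    jsonschema.validate(5, module_apply_order)       # accepted
    try:
        jsonschema.validate(12, module_apply_order)  # rejected: above maximum
    except jsonschema.ValidationError as exc:
        print(exc.message)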
diff --git a/trove/common/cfg.py b/trove/common/cfg.py
index 72d7030f..20051650 100644
--- a/trove/common/cfg.py
+++ b/trove/common/cfg.py
@@ -444,6 +444,16 @@ common_opts = [
help='Maximum size of a chunk saved in guest log container.'),
cfg.IntOpt('guest_log_expiry', default=2592000,
help='Expiry (in seconds) of objects in guest log container.'),
+ cfg.BoolOpt('enable_secure_rpc_messaging', default=True,
+ help='Should RPC messaging traffic be secured by encryption.'),
+ cfg.StrOpt('taskmanager_rpc_encr_key',
+ default='bzH6y0SGmjuoY0FNSTptrhgieGXNDX6PIhvz',
+ help='Key (OpenSSL aes_cbc) for taskmanager RPC encryption.'),
+ cfg.StrOpt('inst_rpc_key_encr_key',
+ default='emYjgHFqfXNB1NGehAFIUeoyw4V4XwWHEaKP',
+ help='Key (OpenSSL aes_cbc) to encrypt instance keys in DB.'),
+ cfg.StrOpt('instance_rpc_encr_key',
+ help='Key (OpenSSL aes_cbc) for instance RPC encryption.'),
]
@@ -1569,23 +1579,21 @@ def get_configuration_property(property_name):
def set_api_config_defaults():
"""This method updates all configuration default values."""
- # CORS Middleware Defaults
- # TODO(krotscheck): Update with https://review.openstack.org/#/c/285368/
- cfg.set_defaults(cors.CORS_OPTS,
- allow_headers=['X-Auth-Token',
- 'X-Identity-Status',
- 'X-Roles',
- 'X-Service-Catalog',
- 'X-User-Id',
- 'X-Tenant-Id',
- 'X-OpenStack-Request-ID'],
- expose_headers=['X-Auth-Token',
- 'X-Subject-Token',
- 'X-Service-Token',
- 'X-OpenStack-Request-ID'],
- allow_methods=['GET',
- 'PUT',
- 'POST',
- 'DELETE',
- 'PATCH']
- )
+ cors.set_defaults(
+ allow_headers=['X-Auth-Token',
+ 'X-Identity-Status',
+ 'X-Roles',
+ 'X-Service-Catalog',
+ 'X-User-Id',
+ 'X-Tenant-Id',
+ 'X-OpenStack-Request-ID'],
+ expose_headers=['X-Auth-Token',
+ 'X-Subject-Token',
+ 'X-Service-Token',
+ 'X-OpenStack-Request-ID'],
+ allow_methods=['GET',
+ 'PUT',
+ 'POST',
+ 'DELETE',
+ 'PATCH']
+ )
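Note that the two StrOpt defaults above are published in the source tree, so any real deployment presumably wants its own values. A hedged sketch of swapping them out programmatically (e.g. in a test fixture), using the standard oslo.config set_override call and the generate_random_key helper added later in this change:

    from trove.common import cfg
    from trove.common import crypto_utils

    CONF = cfg.CONF
    # Replace the well-known defaults with locally generated keys.
    CONF.set_override('taskmanager_rpc_encr_key',
                      crypto_utils.generate_random_key())
    CONF.set_override('inst_rpc_key_encr_key',
                      crypto_utils.generate_random_key())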
diff --git a/trove/common/context.py b/trove/common/context.py
index 254993d8..5098626a 100644
--- a/trove/common/context.py
+++ b/trove/common/context.py
@@ -39,6 +39,7 @@ class TroveContext(context.RequestContext):
self.marker = kwargs.pop('marker', None)
self.service_catalog = kwargs.pop('service_catalog', None)
self.user_identity = kwargs.pop('user_identity', None)
+ self.instance_id = kwargs.pop('instance_id', None)
# TODO(esp): not sure we need this
self.timeout = kwargs.pop('timeout', None)
@@ -64,6 +65,9 @@ class TroveContext(context.RequestContext):
@classmethod
def _remove_incompatible_context_args(cls, values):
+ LOG.debug("Running in unsafe mode and ignoring incompatible context.")
+ return values
+
context_keys = vars(cls()).keys()
for dict_key in values.keys():
if dict_key not in context_keys:
diff --git a/trove/common/crypto_utils.py b/trove/common/crypto_utils.py
index bd8e3fb0..9e3d5613 100644
--- a/trove/common/crypto_utils.py
+++ b/trove/common/crypto_utils.py
@@ -20,7 +20,9 @@ from Crypto.Cipher import AES
from Crypto import Random
import hashlib
from oslo_utils import encodeutils
+import random
import six
+import string
from trove.common import stream_codecs
@@ -68,3 +70,9 @@ def decrypt_data(data, key, iv_bit_count=IV_BIT_COUNT):
aes = AES.new(md5_key, AES.MODE_CBC, bytes(iv))
decrypted = aes.decrypt(bytes(data[iv_bit_count:]))
return unpad_after_decryption(decrypted)
+
+
+def generate_random_key(length=32, chars=None):
+ chars = chars if chars else (string.ascii_uppercase +
+ string.ascii_lowercase + string.digits)
+ return ''.join(random.choice(chars) for _ in range(length))
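A round-trip usage sketch for the helpers in this module, assuming encode_data/decode_data are the transport-encoding (base64-style) wrappers that the serializers below rely on; under Python 3 decrypt_data may hand back bytes rather than str:

    from trove.common import crypto_utils as cu

    key = cu.generate_random_key()             # 32 chars of [A-Za-z0-9]
    blob = cu.encode_data(cu.encrypt_data('payload', key))
    plain = cu.decrypt_data(cu.decode_data(blob), key)
    assert plain == 'payload'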
diff --git a/trove/common/db/models.py b/trove/common/db/models.py
index 232a7b40..0d0b76ff 100644
--- a/trove/common/db/models.py
+++ b/trove/common/db/models.py
@@ -99,7 +99,7 @@ class DatastoreSchema(DatastoreModelsBase):
self._character_set = None
# If both or neither are passed in this is a bug.
if bool(deserializing) == bool(name):
- raise RuntimeError("Bug in DatastoreSchema()")
+ raise RuntimeError(_("Bug in DatastoreSchema()"))
if not deserializing:
self.name = name
diff --git a/trove/common/limits.py b/trove/common/limits.py
index eb0ac023..662ab795 100644
--- a/trove/common/limits.py
+++ b/trove/common/limits.py
@@ -77,7 +77,7 @@ class Limit(object):
self.remaining = int(value)
if value <= 0:
- raise ValueError("Limit value must be > 0")
+ raise ValueError(_("Limit value must be > 0"))
self.last_request = None
self.next_request = None
@@ -299,15 +299,15 @@ class Limiter(object):
for group in limits.split(';'):
group = group.strip()
if group[:1] != '(' or group[-1:] != ')':
- raise ValueError("Limit rules must be surrounded by "
- "parentheses")
+ raise ValueError(_("Limit rules must be surrounded by "
+ "parentheses"))
group = group[1:-1]
# Extract the Limit arguments
args = [a.strip() for a in group.split(',')]
if len(args) != 5:
- raise ValueError("Limit rules must contain the following "
- "arguments: verb, uri, regex, value, unit")
+ raise ValueError(_("Limit rules must contain the following "
+ "arguments: verb, uri, regex, value, unit"))
# Pull out the arguments
verb, uri, regex, value, unit = args
@@ -321,7 +321,7 @@ class Limiter(object):
# Convert unit
unit = unit.upper()
if unit not in Limit.UNIT_MAP:
- raise ValueError("Invalid units specified")
+ raise ValueError(_("Invalid units specified"))
unit = Limit.UNIT_MAP[unit]
# Build a limit
diff --git a/trove/common/models.py b/trove/common/models.py
index 78e5e9a2..847a9dd0 100644
--- a/trove/common/models.py
+++ b/trove/common/models.py
@@ -18,6 +18,7 @@
from oslo_utils.importutils import import_class
from trove.common import cfg
+from trove.common.i18n import _
from trove.common import remote
CONF = cfg.CONF
@@ -91,7 +92,7 @@ class RemoteModelBase(ModelBase):
# if the object is a list, it will turn it into a list of hash's again
def data(self, **options):
if self._data_object is None:
- raise LookupError("data object is None")
+ raise LookupError(_("data object is None"))
if isinstance(self._data_object, list):
return [self._data_item(item) for item in self._data_object]
else:
diff --git a/trove/common/remote.py b/trove/common/remote.py
index 76b9335a..0818b6a7 100644
--- a/trove/common/remote.py
+++ b/trove/common/remote.py
@@ -98,9 +98,13 @@ def nova_client(context, region_name=None):
endpoint_region=region_name or CONF.os_region_name,
endpoint_type=CONF.nova_compute_endpoint_type)
- client = Client(CONF.nova_client_version, context.user, context.auth_token,
- bypass_url=url, tenant_id=context.tenant,
- auth_url=PROXY_AUTH_URL)
+ client = Client(CONF.nova_client_version,
+ username=context.user,
+ bypass_url=url,
+ tenant_id=context.tenant,
+ project_domain_name=context.project_domain_name,
+ auth_url=PROXY_AUTH_URL,
+ auth_token=context.auth_token)
client.client.auth_token = context.auth_token
client.client.management_url = url
return client
@@ -112,7 +116,6 @@ def create_admin_nova_client(context):
:return: a client for nova for the trove admin
"""
client = create_nova_client(context)
- client.client.auth_token = None
return client
diff --git a/trove/common/rpc/conductor_guest_serializer.py b/trove/common/rpc/conductor_guest_serializer.py
new file mode 100644
index 00000000..e3b8afa3
--- /dev/null
+++ b/trove/common/rpc/conductor_guest_serializer.py
@@ -0,0 +1,60 @@
+# Copyright 2016 Tesora, Inc.
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_config import cfg
+from oslo_serialization import jsonutils
+
+from trove.common import crypto_utils as crypto
+from trove.common.i18n import _
+from trove.common.rpc import serializer
+
+CONF = cfg.CONF
+
+
+# BUG(1650518): Cleanup in the Pike release
+class ConductorGuestSerializer(serializer.TroveSerializer):
+ def __init__(self, base, key):
+ self._key = key
+ super(ConductorGuestSerializer, self).__init__(base)
+
+ def _serialize_entity(self, ctxt, entity):
+ if self._key is None:
+ return entity
+
+ value = crypto.encode_data(
+ crypto.encrypt_data(
+ jsonutils.dumps(entity), self._key))
+
+ return jsonutils.dumps({'entity': value, 'csz-instance-id':
+ CONF.guest_id})
+
+ def _deserialize_entity(self, ctxt, entity):
+ msg = (_("_deserialize_entity not implemented in "
+ "ConductorGuestSerializer."))
+ raise Exception(msg)
+
+ def _serialize_context(self, ctxt):
+ if self._key is None:
+ return ctxt
+
+ cstr = jsonutils.dumps(ctxt)
+
+ return {'context':
+ crypto.encode_data(
+ crypto.encrypt_data(cstr, self._key)),
+ 'csz-instance-id': CONF.guest_id}
+
+ def _deserialize_context(self, ctxt):
+ msg = (_("_deserialize_context not implemented in "
+ "ConductorGuestSerializer."))
+ raise Exception(msg)
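On the wire, the guest side wraps both entity and context in an envelope that names the sending instance, so the conductor can locate the right key before decrypting. Roughly (values abbreviated; the id comes from CONF.guest_id):

    envelope = {
        'entity': '<base64(aes_cbc(json(entity), instance_key))>',
        'csz-instance-id': '6f3c9a2e-...',
    }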
diff --git a/trove/common/rpc/conductor_host_serializer.py b/trove/common/rpc/conductor_host_serializer.py
new file mode 100644
index 00000000..0e17efd2
--- /dev/null
+++ b/trove/common/rpc/conductor_host_serializer.py
@@ -0,0 +1,83 @@
+# Copyright 2016 Tesora, Inc.
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_config import cfg
+from oslo_serialization import jsonutils
+
+from trove.common import crypto_utils as cu
+from trove.common.rpc import serializer
+from trove.instance.models import get_instance_encryption_key
+
+CONF = cfg.CONF
+
+
+# BUG(1650518): Cleanup in the Pike release
+class ConductorHostSerializer(serializer.TroveSerializer):
+ def __init__(self, base, *_):
+ super(ConductorHostSerializer, self).__init__(base)
+
+ def _serialize_entity(self, ctxt, entity):
+ try:
+ if ctxt.instance_id is None:
+ return entity
+ except (ValueError, TypeError):
+ return entity
+
+ instance_key = get_instance_encryption_key(ctxt.instance_id)
+
+ estr = jsonutils.dumps(entity)
+ return cu.encode_data(cu.encrypt_data(estr, instance_key))
+
+ def _deserialize_entity(self, ctxt, entity):
+ try:
+ entity = jsonutils.loads(entity)
+ instance_id = entity['csz-instance-id']
+ except (ValueError, TypeError):
+ return entity
+
+ instance_key = get_instance_encryption_key(instance_id)
+
+ estr = cu.decrypt_data(cu.decode_data(entity['entity']),
+ instance_key)
+ entity = jsonutils.loads(estr)
+
+ return entity
+
+ def _serialize_context(self, ctxt):
+ try:
+ if ctxt.instance_id is None:
+ return ctxt
+ except (ValueError, TypeError):
+ return ctxt
+
+ instance_key = get_instance_encryption_key(ctxt.instance_id)
+
+ cstr = jsonutils.dumps(ctxt)
+ return {'context': cu.encode_data(cu.encrypt_data(cstr,
+ instance_key))}
+
+ def _deserialize_context(self, ctxt):
+ try:
+ instance_id = ctxt.get('csz-instance-id', None)
+
+ if instance_id is not None:
+ instance_key = get_instance_encryption_key(instance_id)
+
+ cstr = cu.decrypt_data(cu.decode_data(ctxt['context']),
+ instance_key)
+ ctxt = jsonutils.loads(cstr)
+ except (ValueError, TypeError):
+ return ctxt
+
+ ctxt['instance_id'] = instance_id
+ return ctxt
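The host side is the mirror image: it reads the instance id out of the envelope, fetches that instance's key from the database, and unwraps. A condensed sketch of that pairing, continuing the envelope example shown after the guest serializer above:

    from oslo_serialization import jsonutils

    from trove.common import crypto_utils as cu
    from trove.instance.models import get_instance_encryption_key

    # 'envelope' is the dict from the guest-side sketch. The instance id
    # travels in the clear so the key lookup can happen before anything
    # is decrypted.
    instance_key = get_instance_encryption_key(envelope['csz-instance-id'])
    entity = jsonutils.loads(
        cu.decrypt_data(cu.decode_data(envelope['entity']), instance_key))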
diff --git a/trove/common/rpc/secure_serializer.py b/trove/common/rpc/secure_serializer.py
new file mode 100644
index 00000000..3430b939
--- /dev/null
+++ b/trove/common/rpc/secure_serializer.py
@@ -0,0 +1,59 @@
+# Copyright 2016 Tesora, Inc.
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_serialization import jsonutils
+
+from trove.common import crypto_utils as cu
+from trove.common.rpc import serializer
+
+
+# BUG(1650518): Cleanup in the Pike release
+class SecureSerializer(serializer.TroveSerializer):
+ def __init__(self, base, key):
+ self._key = key
+ super(SecureSerializer, self).__init__(base)
+
+ def _serialize_entity(self, ctxt, entity):
+ if self._key is None:
+ return entity
+
+ estr = jsonutils.dumps(entity)
+ return cu.encode_data(cu.encrypt_data(estr, self._key))
+
+ def _deserialize_entity(self, ctxt, entity):
+ try:
+ if self._key is not None:
+ estr = cu.decrypt_data(cu.decode_data(entity), self._key)
+ entity = jsonutils.loads(estr)
+ except (ValueError, TypeError):
+ return entity
+
+ return entity
+
+ def _serialize_context(self, ctxt):
+ if self._key is None:
+ return ctxt
+
+ cstr = jsonutils.dumps(ctxt)
+ return {'context': cu.encode_data(cu.encrypt_data(cstr, self._key))}
+
+ def _deserialize_context(self, ctxt):
+ try:
+ if self._key is not None:
+ cstr = cu.decrypt_data(cu.decode_data(ctxt['context']),
+ self._key)
+ ctxt = jsonutils.loads(cstr)
+ except (ValueError, TypeError):
+ return ctxt
+
+ return ctxt
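SecureSerializer covers the symmetric case where both peers share one configured key (the taskmanager path). A hedged wiring sketch, chaining it over the request-context serializer defined in serializer.py (next file); with key=None every hook degrades to a pass-through, which is how unencrypted deployments keep working:

    from trove.common.rpc import secure_serializer as ssz
    from trove.common.rpc import serializer

    base = serializer.TroveRequestContextSerializer(None)
    sz = ssz.SecureSerializer(base, key='...32-char aes_cbc key...')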
diff --git a/trove/common/rpc/serializer.py b/trove/common/rpc/serializer.py
new file mode 100644
index 00000000..0073f293
--- /dev/null
+++ b/trove/common/rpc/serializer.py
@@ -0,0 +1,86 @@
+# Copyright 2016 Tesora, Inc.
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import oslo_messaging as messaging
+from osprofiler import profiler
+
+from trove.common.context import TroveContext
+
+
+class TroveSerializer(messaging.Serializer):
+ """The Trove serializer class that handles class inheritence and base
+ serializers.
+ """
+
+ def __init__(self, base):
+ self._base = base
+
+ def _serialize_entity(self, context, entity):
+ return entity
+
+ def serialize_entity(self, context, entity):
+ if self._base:
+ entity = self._base.serialize_entity(context, entity)
+
+ return self._serialize_entity(context, entity)
+
+ def _deserialize_entity(self, context, entity):
+ return entity
+
+ def deserialize_entity(self, context, entity):
+ entity = self._deserialize_entity(context, entity)
+
+ if self._base:
+ entity = self._base.deserialize_entity(context, entity)
+
+ return entity
+
+ def _serialize_context(self, context):
+ return context
+
+ def serialize_context(self, context):
+ if self._base:
+ context = self._base.serialize_context(context)
+
+ return self._serialize_context(context)
+
+ def _deserialize_context(self, context):
+ return context
+
+ def deserialize_context(self, context):
+ context = self._deserialize_context(context)
+
+ if self._base:
+ context = self._base.deserialize_context(context)
+
+ return context
+
+
+class TroveRequestContextSerializer(TroveSerializer):
+ def _serialize_context(self, context):
+ _context = context.to_dict()
+ prof = profiler.get()
+ if prof:
+ trace_info = {
+ "hmac_key": prof.hmac_key,
+ "base_id": prof.get_base_id(),
+ "parent_id": prof.get_id()
+ }
+ _context.update({"trace_info": trace_info})
+ return _context
+
+ def _deserialize_context(self, context):
+ trace_info = context.pop("trace_info", None)
+ if trace_info:
+ profiler.init(**trace_info)
+ return TroveContext.from_dict(context)
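The chaining order is the subtle part: serialize_entity runs the base first and the subclass last, while deserialize_entity runs the subclass first, so layers peel off in reverse. A toy demonstration:

    class A(TroveSerializer):
        def _serialize_entity(self, ctxt, entity):
            return '[A %s]' % entity

    class B(TroveSerializer):
        def _serialize_entity(self, ctxt, entity):
            return '[B %s]' % entity

    # B is the outermost layer: base (A) serializes first, then B wraps.
    print(B(A(None)).serialize_entity(None, 'x'))   # prints: [B [A x]]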
diff --git a/trove/common/rpc/service.py b/trove/common/rpc/service.py
index f5ff2af8..ed3924c0 100644
--- a/trove/common/rpc/service.py
+++ b/trove/common/rpc/service.py
@@ -29,6 +29,7 @@ from osprofiler import profiler
from trove.common import cfg
from trove.common.i18n import _
from trove.common import profile
+from trove.common.rpc import secure_serializer as ssz
from trove import rpc
@@ -38,9 +39,10 @@ LOG = logging.getLogger(__name__)
class RpcService(service.Service):
- def __init__(self, host=None, binary=None, topic=None, manager=None,
- rpc_api_version=None):
+ def __init__(self, key, host=None, binary=None, topic=None, manager=None,
+ rpc_api_version=None, secure_serializer=ssz.SecureSerializer):
super(RpcService, self).__init__()
+ self.key = key
self.host = host or CONF.host
self.binary = binary or os.path.basename(inspect.stack()[-1][1])
self.topic = topic or self.binary.rpartition('trove-')[2]
@@ -48,6 +50,7 @@ class RpcService(service.Service):
self.manager_impl = profiler.trace_cls("rpc")(_manager)
self.rpc_api_version = rpc_api_version or \
self.manager_impl.RPC_API_VERSION
+ self.secure_serializer = secure_serializer
profile.setup_profiler(self.binary, self.host)
def start(self):
@@ -60,7 +63,9 @@ class RpcService(service.Service):
self.manager_impl.target = target
endpoints = [self.manager_impl]
- self.rpcserver = rpc.get_server(target, endpoints)
+ self.rpcserver = rpc.get_server(
+ target, endpoints, key=self.key,
+ secure_serializer=self.secure_serializer)
self.rpcserver.start()
# TODO(hub-cap): Currently the context is none... do we _need_ it here?
diff --git a/trove/common/strategies/cluster/experimental/galera_common/taskmanager.py b/trove/common/strategies/cluster/experimental/galera_common/taskmanager.py
index 7374538d..7af17353 100644
--- a/trove/common/strategies/cluster/experimental/galera_common/taskmanager.py
+++ b/trove/common/strategies/cluster/experimental/galera_common/taskmanager.py
@@ -78,7 +78,8 @@ class GaleraCommonClusterTasks(task_models.ClusterTasks):
LOG.debug("Waiting for instances to get to cluster-ready status.")
# Wait for cluster members to get to cluster-ready status.
if not self._all_instances_ready(instance_ids, cluster_id):
- raise TroveError("Instances in cluster did not report ACTIVE")
+ raise TroveError(_("Instances in cluster did not report "
+ "ACTIVE"))
LOG.debug("All members ready, proceeding for cluster setup.")
instances = [Instance.load(context, instance_id) for instance_id
@@ -173,8 +174,8 @@ class GaleraCommonClusterTasks(task_models.ClusterTasks):
for db_inst in db_instances
if db_inst.id not in new_instance_ids]
if not existing_instances:
- raise TroveError("Unable to determine existing cluster "
- "member(s)")
+ raise TroveError(_("Unable to determine existing cluster "
+ "member(s)"))
# get list of ips of existing cluster members
existing_cluster_ips = [self.get_ip(instance) for instance in
@@ -187,7 +188,8 @@ class GaleraCommonClusterTasks(task_models.ClusterTasks):
# Wait for cluster members to get to cluster-ready status.
if not self._all_instances_ready(new_instance_ids, cluster_id):
- raise TroveError("Instances in cluster did not report ACTIVE")
+ raise TroveError(_("Instances in cluster did not report "
+ "ACTIVE"))
LOG.debug("All members ready, proceeding for cluster setup.")
diff --git a/trove/common/strategies/cluster/experimental/redis/taskmanager.py b/trove/common/strategies/cluster/experimental/redis/taskmanager.py
index 8b2b772c..8f86775b 100644
--- a/trove/common/strategies/cluster/experimental/redis/taskmanager.py
+++ b/trove/common/strategies/cluster/experimental/redis/taskmanager.py
@@ -112,8 +112,8 @@ class RedisClusterTasks(task_models.ClusterTasks):
for db_inst in db_instances
if db_inst.id not in new_instance_ids)
if not cluster_head:
- raise TroveError("Unable to determine existing Redis cluster "
- "member")
+ raise TroveError(_("Unable to determine existing Redis cluster"
+ " member"))
(cluster_head_ip, cluster_head_port) = (
self.get_guest(cluster_head).get_node_ip())
diff --git a/trove/common/strategies/strategy.py b/trove/common/strategies/strategy.py
index 8139748a..c176601d 100644
--- a/trove/common/strategies/strategy.py
+++ b/trove/common/strategies/strategy.py
@@ -19,6 +19,7 @@ import abc
from oslo_log import log as logging
import six
+from trove.common.i18n import _
from trove.common import utils
@@ -53,7 +54,7 @@ class Strategy(object):
ns = ns or cls.__strategy_ns__
if ns is None:
raise RuntimeError(
- 'No namespace provided or __strategy_ns__ unset')
+ _('No namespace provided and __strategy_ns__ unset'))
LOG.debug('Looking for strategy %s in %s', name, ns)
diff --git a/trove/common/xmlutils.py b/trove/common/xmlutils.py
index 1231a590..8f70bc50 100644
--- a/trove/common/xmlutils.py
+++ b/trove/common/xmlutils.py
@@ -17,6 +17,8 @@ from xml.parsers import expat
from xml import sax
from xml.sax import expatreader
+from trove.common.i18n import _
+
class ProtectedExpatParser(expatreader.ExpatParser):
"""An expat parser which disables DTD's and entities by default."""
@@ -29,21 +31,21 @@ class ProtectedExpatParser(expatreader.ExpatParser):
self.forbid_entities = forbid_entities
def start_doctype_decl(self, name, sysid, pubid, has_internal_subset):
- raise ValueError("Inline DTD forbidden")
+ raise ValueError(_("Inline DTD forbidden"))
def entity_decl(self, entityName, is_parameter_entity, value, base,
systemId, publicId, notationName):
- raise ValueError("<!ENTITY> entity declaration forbidden")
+ raise ValueError(_("<!ENTITY> entity declaration forbidden"))
def unparsed_entity_decl(self, name, base, sysid, pubid, notation_name):
# expat 1.2
- raise ValueError("<!ENTITY> unparsed entity forbidden")
+ raise ValueError(_("<!ENTITY> unparsed entity forbidden"))
def external_entity_ref(self, context, base, systemId, publicId):
- raise ValueError("<!ENTITY> external entity forbidden")
+ raise ValueError(_("<!ENTITY> external entity forbidden"))
def notation_decl(self, name, base, sysid, pubid):
- raise ValueError("<!ENTITY> notation forbidden")
+ raise ValueError(_("<!ENTITY> notation forbidden"))
def reset(self):
expatreader.ExpatParser.reset(self)
diff --git a/trove/conductor/api.py b/trove/conductor/api.py
index 757416b2..be73b2b7 100644
--- a/trove/conductor/api.py
+++ b/trove/conductor/api.py
@@ -16,6 +16,7 @@ from oslo_log import log as logging
import oslo_messaging as messaging
from trove.common import cfg
+from trove.common.rpc import conductor_guest_serializer as sz
from trove.common.serializable_notification import SerializableNotification
from trove import rpc
@@ -62,9 +63,10 @@ class API(object):
self.client = self.get_client(target, version_cap)
def get_client(self, target, version_cap, serializer=None):
- return rpc.get_client(target,
+ return rpc.get_client(target, key=CONF.instance_rpc_encr_key,
version_cap=version_cap,
- serializer=serializer)
+ serializer=serializer,
+ secure_serializer=sz.ConductorGuestSerializer)
def heartbeat(self, instance_id, payload, sent=None):
LOG.debug("Making async call to cast heartbeat for instance: %s"
diff --git a/trove/configuration/models.py b/trove/configuration/models.py
index 5935f34a..7a240cb5 100644
--- a/trove/configuration/models.py
+++ b/trove/configuration/models.py
@@ -39,9 +39,9 @@ class Configurations(object):
@staticmethod
def load(context):
if context is None:
- raise TypeError("Argument context not defined.")
+ raise TypeError(_("Argument context not defined."))
elif id is None:
- raise TypeError("Argument is not defined.")
+ raise TypeError(_("Argument is not defined."))
if context.is_admin:
db_info = DBConfiguration.find_all(deleted=False)
diff --git a/trove/db/models.py b/trove/db/models.py
index 6b8e0475..90dc4800 100644
--- a/trove/db/models.py
+++ b/trove/db/models.py
@@ -13,6 +13,7 @@
# under the License.
from oslo_log import log as logging
+from oslo_utils import strutils
from trove.common import exception
from trove.common.i18n import _
@@ -59,13 +60,15 @@ class DatabaseModelBase(models.ModelBase):
raise exception.InvalidModelError(errors=self.errors)
self['updated'] = utils.utcnow()
LOG.debug("Saving %(name)s: %(dict)s" %
- {'name': self.__class__.__name__, 'dict': self.__dict__})
+ {'name': self.__class__.__name__,
+ 'dict': strutils.mask_dict_password(self.__dict__)})
return self.db_api.save(self)
def delete(self):
self['updated'] = utils.utcnow()
LOG.debug("Deleting %(name)s: %(dict)s" %
- {'name': self.__class__.__name__, 'dict': self.__dict__})
+ {'name': self.__class__.__name__,
+ 'dict': strutils.mask_dict_password(self.__dict__)})
if self.preserve_on_delete:
self['deleted_at'] = utils.utcnow()
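strutils.mask_dict_password is stock oslo.utils; it copies the dict and blanks password-like keys before they reach the debug log. For example:

    from oslo_utils import strutils

    print(strutils.mask_dict_password(
        {'name': 'inst-1', 'password': 's3cret', 'task': 'BUILDING'}))
    # {'name': 'inst-1', 'password': '***', 'task': 'BUILDING'}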
diff --git a/trove/db/sqlalchemy/migrate_repo/versions/040_module_priority.py b/trove/db/sqlalchemy/migrate_repo/versions/040_module_priority.py
new file mode 100644
index 00000000..0b7634f7
--- /dev/null
+++ b/trove/db/sqlalchemy/migrate_repo/versions/040_module_priority.py
@@ -0,0 +1,48 @@
+# Copyright 2016 Tesora, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+from sqlalchemy.schema import Column
+from sqlalchemy.schema import MetaData
+from sqlalchemy.sql.expression import update
+
+from trove.db.sqlalchemy.migrate_repo.schema import Boolean
+from trove.db.sqlalchemy.migrate_repo.schema import Integer
+from trove.db.sqlalchemy.migrate_repo.schema import Table
+from trove.db.sqlalchemy.migrate_repo.schema import Text
+
+
+COLUMN_NAME_1 = 'priority_apply'
+COLUMN_NAME_2 = 'apply_order'
+COLUMN_NAME_3 = 'is_admin'
+
+
+def upgrade(migrate_engine):
+ meta = MetaData()
+ meta.bind = migrate_engine
+ modules = Table('modules', meta, autoload=True)
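+ # SQLite cannot add a NOT NULL column to an existing table, so
+ # relax the constraint there.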
+ is_nullable = (migrate_engine.name == "sqlite")
+ column = Column(COLUMN_NAME_1, Boolean(), nullable=is_nullable, default=0)
+ modules.create_column(column)
+ column = Column(COLUMN_NAME_2, Integer(), nullable=is_nullable, default=5)
+ modules.create_column(column)
+ column = Column(COLUMN_NAME_3, Boolean(), nullable=is_nullable, default=0)
+ modules.create_column(column)
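+ # Widen 'contents' to the 4 GB limit (2**32 - 1); on MySQL this
+ # maps to LONGTEXT.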
+ modules.c.contents.alter(Text(length=4294967295))
+ # mark all non-visible, auto-apply and all-tenant modules as is_admin
+ update(table=modules,
+ values=dict(is_admin=1),
+ whereclause="visible=0 or auto_apply=1 or tenant_id is null"
+ ).execute()
diff --git a/trove/db/sqlalchemy/migrate_repo/versions/041_instance_keys.py b/trove/db/sqlalchemy/migrate_repo/versions/041_instance_keys.py
new file mode 100644
index 00000000..7477cfaf
--- /dev/null
+++ b/trove/db/sqlalchemy/migrate_repo/versions/041_instance_keys.py
@@ -0,0 +1,30 @@
+# Copyright 2016 Tesora, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+from sqlalchemy.schema import Column
+from sqlalchemy.schema import MetaData
+
+from trove.db.sqlalchemy.migrate_repo.schema import String
+from trove.db.sqlalchemy.migrate_repo.schema import Table
+
+
+meta = MetaData()
+
+
+def upgrade(migrate_engine):
+ meta.bind = migrate_engine
+ instances = Table('instances', meta, autoload=True)
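+ # Stores the per-instance RPC key, itself encrypted with the
+ # controller-side key (see DBInstance in trove/instance/models.py).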
+ instances.create_column(Column('encrypted_key', String(255)))
diff --git a/trove/dns/designate/driver.py b/trove/dns/designate/driver.py
index b8de297c..35cc38b7 100644
--- a/trove/dns/designate/driver.py
+++ b/trove/dns/designate/driver.py
@@ -28,6 +28,7 @@ import six
from trove.common import cfg
from trove.common import exception
+from trove.common.i18n import _
from trove.dns import driver
@@ -83,7 +84,8 @@ class DesignateDriver(driver.DnsDriver):
"""Creates the entry in the driver at the given dns zone."""
dns_zone = entry.dns_zone or self.default_dns_zone
if not dns_zone.id:
- raise TypeError("The entry's dns_zone must have an ID specified.")
+ raise TypeError(_("The entry's dns_zone must have an ID "
+ "specified."))
name = entry.name
LOG.debug("Creating DNS entry %s." % name)
client = self.dns_client
@@ -125,16 +127,16 @@ class DesignateDriver(driver.DnsDriver):
def modify_content(self, name, content, dns_zone):
# We don't need this in trove for now
- raise NotImplementedError("Not implemented for Designate DNS.")
+ raise NotImplementedError(_("Not implemented for Designate DNS."))
def rename_entry(self, content, name, dns_zone):
# We don't need this in trove for now
- raise NotImplementedError("Not implemented for Designate DNS.")
+ raise NotImplementedError(_("Not implemented for Designate DNS."))
def _get_records(self, dns_zone):
dns_zone = dns_zone or self.default_dns_zone
if not dns_zone:
- raise TypeError('DNS domain is must be specified')
+ raise TypeError(_('DNS domain must be specified'))
return self.dns_client.records.list(dns_zone.id)
diff --git a/trove/extensions/common/service.py b/trove/extensions/common/service.py
index 78669266..9c702ff4 100644
--- a/trove/extensions/common/service.py
+++ b/trove/extensions/common/service.py
@@ -25,6 +25,7 @@ from trove.cluster import models as cluster_models
from trove.cluster.models import DBCluster
from trove.common import cfg
from trove.common import exception
+from trove.common.i18n import _
from trove.common.i18n import _LI
from trove.common import policy
from trove.common import wsgi
@@ -145,7 +146,7 @@ class ClusterRootController(DefaultRootController):
is_root_enabled = models.ClusterRoot.load(context, instance_id)
except exception.UnprocessableEntity:
raise exception.UnprocessableEntity(
- "Cluster %s is not ready." % instance_id)
+ _("Cluster %s is not ready.") % instance_id)
return wsgi.Result(views.RootEnabledView(is_root_enabled).data(), 200)
def cluster_root_index(self, req, tenant_id, cluster_id):
@@ -225,7 +226,8 @@ class RootController(ExtensionController):
return root_controller.root_create(req, body, tenant_id,
instance_id, is_cluster)
else:
- raise NoSuchOptError('root_controller', group='datastore_manager')
+ opt = 'root_controller'
+ raise NoSuchOptError(opt, group='datastore_manager')
def delete(self, req, tenant_id, instance_id):
datastore_manager, is_cluster = self._get_datastore(tenant_id,
diff --git a/trove/extensions/mgmt/instances/models.py b/trove/extensions/mgmt/instances/models.py
index 3c0a5bbc..cd656f44 100644
--- a/trove/extensions/mgmt/instances/models.py
+++ b/trove/extensions/mgmt/instances/models.py
@@ -151,7 +151,7 @@ class MgmtInstances(imodels.Instances):
return SimpleMgmtInstance(context, db, server, status)
if context is None:
- raise TypeError("Argument context not defined.")
+ raise TypeError(_("Argument context not defined."))
find_server = imodels.create_server_list_matcher(servers)
instances = imodels.Instances._load_servers_status(load_instance,
context,
diff --git a/trove/guestagent/api.py b/trove/guestagent/api.py
index 180388a0..85be70fa 100644
--- a/trove/guestagent/api.py
+++ b/trove/guestagent/api.py
@@ -69,13 +69,16 @@ class API(object):
version_cap = self.VERSION_ALIASES.get(
CONF.upgrade_levels.guestagent, CONF.upgrade_levels.guestagent)
- target = messaging.Target(topic=self._get_routing_key(),
- version=version_cap)
+ self.target = messaging.Target(topic=self._get_routing_key(),
+ version=version_cap)
- self.client = self.get_client(target, version_cap)
+ self.client = self.get_client(self.target, version_cap)
def get_client(self, target, version_cap, serializer=None):
- return rpc.get_client(target,
+ from trove.instance.models import get_instance_encryption_key
+
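+ # Imported locally to avoid a circular dependency with
+ # trove.instance.models at module load time.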
+ instance_key = get_instance_encryption_key(self.id)
+ return rpc.get_client(target, key=instance_key,
version_cap=version_cap,
serializer=serializer)
@@ -328,12 +331,15 @@ class API(object):
method does nothing in case a queue is already created by
the guest
"""
+ from trove.instance.models import DBInstance
server = None
target = messaging.Target(topic=self._get_routing_key(),
server=self.id,
version=self.API_BASE_VERSION)
try:
- server = rpc.get_server(target, [])
+ instance = DBInstance.get_by(id=self.id)
+ instance_key = instance.key if instance else None
+ server = rpc.get_server(target, [], key=instance_key)
server.start()
finally:
if server is not None:
@@ -352,6 +358,10 @@ class API(object):
"""Recover the guest after upgrading the guest's image."""
LOG.debug("Recover the guest after upgrading the guest's image.")
version = self.API_BASE_VERSION
+ LOG.debug("Recycling the client ...")
+ version_cap = self.VERSION_ALIASES.get(
+ CONF.upgrade_levels.guestagent, CONF.upgrade_levels.guestagent)
+ self.client = self.get_client(self.target, version_cap)
self._call("post_upgrade", AGENT_HIGH_TIMEOUT, version=version,
upgrade_info=upgrade_info)
diff --git a/trove/guestagent/backup/backupagent.py b/trove/guestagent/backup/backupagent.py
index a02680f3..7854035a 100644
--- a/trove/guestagent/backup/backupagent.py
+++ b/trove/guestagent/backup/backupagent.py
@@ -54,8 +54,10 @@ class BackupAgent(object):
try:
runner = get_restore_strategy(backup_type, RESTORE_NAMESPACE)
except ImportError:
- raise UnknownBackupType("Unknown Backup type: %s in namespace %s"
- % (backup_type, RESTORE_NAMESPACE))
+ raise UnknownBackupType(_("Unknown Backup type: %(type)s in "
+ "namespace %(ns)s")
+ % {"type": backup_type,
+ "ns": RESTORE_NAMESPACE})
return runner
def stream_backup_to_storage(self, context, backup_info, runner, storage,
diff --git a/trove/guestagent/datastore/experimental/redis/manager.py b/trove/guestagent/datastore/experimental/redis/manager.py
index 34137a3a..3e111056 100644
--- a/trove/guestagent/datastore/experimental/redis/manager.py
+++ b/trove/guestagent/datastore/experimental/redis/manager.py
@@ -192,7 +192,7 @@ class Manager(manager.Manager):
self.replication.enable_as_slave(self._app, replica_info,
slave_config)
except Exception:
- LOG.exception("Error enabling replication.")
+ LOG.exception(_("Error enabling replication."))
raise
def make_read_only(self, context, read_only):
diff --git a/trove/guestagent/datastore/experimental/vertica/service.py b/trove/guestagent/datastore/experimental/vertica/service.py
index 2bf77068..3af55116 100644
--- a/trove/guestagent/datastore/experimental/vertica/service.py
+++ b/trove/guestagent/datastore/experimental/vertica/service.py
@@ -150,8 +150,8 @@ class VerticaApp(object):
(system.VERTICA_AGENT_SERVICE_COMMAND % "enable")]
subprocess.Popen(command)
except Exception:
- LOG.exception(_("Failed to enable db on boot."))
- raise RuntimeError("Could not enable db on boot.")
+ LOG.exception(_("Failed to enable database on boot."))
+ raise RuntimeError(_("Could not enable database on boot."))
def _disable_db_on_boot(self):
try:
@@ -160,8 +160,8 @@ class VerticaApp(object):
command = (system.VERTICA_AGENT_SERVICE_COMMAND % "disable")
system.shell_execute(command)
except exception.ProcessExecutionError:
- LOG.exception(_("Failed to disable db on boot."))
- raise RuntimeError("Could not disable db on boot.")
+ LOG.exception(_("Failed to disable database on boot."))
+ raise RuntimeError(_("Could not disable database on boot."))
def stop_db(self, update_db=False, do_not_start_on_reboot=False):
"""Stop the database."""
@@ -186,13 +186,13 @@ class VerticaApp(object):
self.state_change_wait_time, update_db):
LOG.error(_("Could not stop Vertica."))
self.status.end_restart()
- raise RuntimeError("Could not stop Vertica!")
+ raise RuntimeError(_("Could not stop Vertica!"))
LOG.debug("Database stopped.")
else:
LOG.debug("Database is not running.")
except exception.ProcessExecutionError:
LOG.exception(_("Failed to stop database."))
- raise RuntimeError("Could not stop database.")
+ raise RuntimeError(_("Could not stop database."))
def start_db(self, update_db=False):
"""Start the database."""
@@ -354,8 +354,8 @@ class VerticaApp(object):
% func_name)
loaded_udls.append(func_name)
else:
- LOG.warning("Skipping %s as path %s not found." %
- (func_name, path))
+ LOG.warning(_("Skipping %(func)s as path %(path)s not "
+ "found.") % {"func": func_name, "path": path})
LOG.info(_("The following UDL functions are available for use: %s")
% loaded_udls)
diff --git a/trove/guestagent/datastore/experimental/vertica/system.py b/trove/guestagent/datastore/experimental/vertica/system.py
index 5ca9c689..50de1b4f 100644
--- a/trove/guestagent/datastore/experimental/vertica/system.py
+++ b/trove/guestagent/datastore/experimental/vertica/system.py
@@ -13,6 +13,7 @@
import re
+from trove.common.i18n import _
from trove.common import utils
ALTER_DB_CFG = "ALTER DATABASE %s SET %s = %s"
@@ -97,7 +98,7 @@ class VSqlError(object):
"""
parse = re.match("^(ERROR|WARNING) (\d+): (.+)$", stderr)
if not parse:
- raise ValueError("VSql stderr %(msg)s not recognized."
+ raise ValueError(_("VSql stderr %(msg)s not recognized.")
% {'msg': stderr})
self.type = parse.group(1)
self.code = int(parse.group(2))
diff --git a/trove/guestagent/datastore/manager.py b/trove/guestagent/datastore/manager.py
index be3778e7..0133eefa 100644
--- a/trove/guestagent/datastore/manager.py
+++ b/trove/guestagent/datastore/manager.py
@@ -15,6 +15,7 @@
#
import abc
+import operator
from oslo_config import cfg as oslo_cfg
from oslo_log import log as logging
@@ -62,6 +63,8 @@ class Manager(periodic_task.PeriodicTasks):
GUEST_LOG_DEFS_ERROR_LABEL = 'error'
GUEST_LOG_DEFS_SLOW_QUERY_LABEL = 'slow_query'
+ MODULE_APPLY_TO_ALL = module_manager.ModuleManager.MODULE_APPLY_TO_ALL
+
def __init__(self, manager_name):
super(Manager, self).__init__(CONF)
@@ -644,18 +647,36 @@ class Manager(periodic_task.PeriodicTasks):
def module_apply(self, context, modules=None):
LOG.info(_("Applying modules."))
results = []
- for module_data in modules:
- module = module_data['module']
+ modules = [data['module'] for data in modules]
+ try:
+ # make sure the modules are applied in the correct order
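+ # (Python's sort is stable: sorting on the secondary key
+ # 'apply_order' first and on 'priority_apply' second keeps each
+ # priority group ordered by 'apply_order'.)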
+ modules.sort(key=operator.itemgetter('apply_order'))
+ modules.sort(key=operator.itemgetter('priority_apply'),
+ reverse=True)
+ except KeyError:
+ # If we don't have ordering info then maybe we're running
+ # a version of the module feature before ordering was
+ # introduced. In that case, since we don't have any
+ # way to order the modules we should just continue.
+ pass
+ for module in modules:
id = module.get('id', None)
module_type = module.get('type', None)
name = module.get('name', None)
- tenant = module.get('tenant', None)
- datastore = module.get('datastore', None)
- ds_version = module.get('datastore_version', None)
+ tenant = module.get('tenant', self.MODULE_APPLY_TO_ALL)
+ datastore = module.get('datastore', self.MODULE_APPLY_TO_ALL)
+ ds_version = module.get('datastore_version',
+ self.MODULE_APPLY_TO_ALL)
contents = module.get('contents', None)
md5 = module.get('md5', None)
auto_apply = module.get('auto_apply', True)
visible = module.get('visible', True)
+ is_admin = module.get('is_admin', None)
+ if is_admin is None:
+ # fall back to the old method of checking for an admin option
+ is_admin = (tenant == self.MODULE_APPLY_TO_ALL or
+ not visible or
+ auto_apply)
if not name:
raise AttributeError(_("Module name not specified"))
if not contents:
@@ -665,9 +686,14 @@ class Manager(periodic_task.PeriodicTasks):
raise exception.ModuleTypeNotFound(
_("No driver implemented for module type '%s'") %
module_type)
+ if (datastore and datastore != self.MODULE_APPLY_TO_ALL and
+ datastore != CONF.datastore_manager):
+ reason = (_("Module not valid for datastore %s") %
+ CONF.datastore_manager)
+ raise exception.ModuleInvalid(reason=reason)
result = module_manager.ModuleManager.apply_module(
driver, module_type, name, tenant, datastore, ds_version,
- contents, id, md5, auto_apply, visible)
+ contents, id, md5, auto_apply, visible, is_admin)
results.append(result)
LOG.info(_("Returning list of modules: %s") % results)
return results
diff --git a/trove/guestagent/datastore/mysql_common/manager.py b/trove/guestagent/datastore/mysql_common/manager.py
index 07be748e..54b3c4a6 100644
--- a/trove/guestagent/datastore/mysql_common/manager.py
+++ b/trove/guestagent/datastore/mysql_common/manager.py
@@ -415,7 +415,7 @@ class MySqlManager(manager.Manager):
self._validate_slave_for_replication(context, replica_info)
self.replication.enable_as_slave(app, replica_info, slave_config)
except Exception:
- LOG.exception("Error enabling replication.")
+ LOG.exception(_("Error enabling replication."))
app.status.set_status(rd_instance.ServiceStatuses.FAILED)
raise
diff --git a/trove/guestagent/datastore/mysql_common/service.py b/trove/guestagent/datastore/mysql_common/service.py
index b1714327..fe8358a3 100644
--- a/trove/guestagent/datastore/mysql_common/service.py
+++ b/trove/guestagent/datastore/mysql_common/service.py
@@ -763,7 +763,7 @@ class BaseMySqlApp(object):
shell=True)
except KeyError:
LOG.exception(_("Error enabling MySQL start on boot."))
- raise RuntimeError("Service is not discovered.")
+ raise RuntimeError(_("Service is not discovered."))
def _disable_mysql_on_boot(self):
try:
@@ -771,7 +771,7 @@ class BaseMySqlApp(object):
shell=True)
except KeyError:
LOG.exception(_("Error disabling MySQL start on boot."))
- raise RuntimeError("Service is not discovered.")
+ raise RuntimeError(_("Service is not discovered."))
def stop_db(self, update_db=False, do_not_start_on_reboot=False):
LOG.info(_("Stopping MySQL."))
@@ -782,13 +782,13 @@ class BaseMySqlApp(object):
shell=True)
except KeyError:
LOG.exception(_("Error stopping MySQL."))
- raise RuntimeError("Service is not discovered.")
+ raise RuntimeError(_("Service is not discovered."))
if not self.status.wait_for_real_status_to_change_to(
rd_instance.ServiceStatuses.SHUTDOWN,
self.state_change_wait_time, update_db):
LOG.error(_("Could not stop MySQL."))
self.status.end_restart()
- raise RuntimeError("Could not stop MySQL!")
+ raise RuntimeError(_("Could not stop MySQL!"))
def _remove_anonymous_user(self, client):
LOG.debug("Removing anonymous user.")
@@ -858,7 +858,7 @@ class BaseMySqlApp(object):
% (self.get_data_dir(), index),
force=True, as_root=True)
except exception.ProcessExecutionError:
- LOG.exception("Could not delete logfile.")
+ LOG.exception(_("Could not delete logfile."))
raise
def remove_overrides(self):
@@ -976,7 +976,7 @@ class BaseMySqlApp(object):
utils.execute_with_timeout(self.mysql_service['cmd_start'],
shell=True, timeout=timeout)
except KeyError:
- raise RuntimeError("Service is not discovered.")
+ raise RuntimeError(_("Service is not discovered."))
except exception.ProcessExecutionError:
# it seems mysql (percona, at least) might come back with [Fail]
# but actually come up ok. we're looking into the timing issue on
@@ -996,7 +996,7 @@ class BaseMySqlApp(object):
LOG.exception(_("Error killing stalled MySQL start command."))
# There's nothing more we can do...
self.status.end_restart()
- raise RuntimeError("Could not start MySQL!")
+ raise RuntimeError(_("Could not start MySQL!"))
def start_db_with_conf_changes(self, config_contents):
LOG.info(_("Starting MySQL with conf changes."))
@@ -1005,7 +1005,7 @@ class BaseMySqlApp(object):
if self.status.is_running:
LOG.error(_("Cannot execute start_db_with_conf_changes because "
"MySQL state == %s.") % self.status)
- raise RuntimeError("MySQL not stopped.")
+ raise RuntimeError(_("MySQL not stopped."))
LOG.info(_("Resetting configuration."))
self._reset_configuration(config_contents)
self.start_mysql(True)
diff --git a/trove/guestagent/datastore/service.py b/trove/guestagent/datastore/service.py
index a3af9af8..48dae63e 100644
--- a/trove/guestagent/datastore/service.py
+++ b/trove/guestagent/datastore/service.py
@@ -63,7 +63,7 @@ class BaseDbStatus(object):
def __init__(self):
if self._instance is not None:
- raise RuntimeError("Cannot instantiate twice.")
+ raise RuntimeError(_("Cannot instantiate twice."))
self.status = None
self.restart_mode = False
diff --git a/trove/guestagent/dbaas.py b/trove/guestagent/dbaas.py
index fbe07947..fc975efd 100644
--- a/trove/guestagent/dbaas.py
+++ b/trove/guestagent/dbaas.py
@@ -77,7 +77,7 @@ def get_filesystem_volume_stats(fs_path):
stats = os.statvfs(fs_path)
except OSError:
LOG.exception(_("Error getting volume stats."))
- raise RuntimeError("Filesystem not found (%s)" % fs_path)
+ raise RuntimeError(_("Filesystem not found (%s)") % fs_path)
total = stats.f_blocks * stats.f_bsize
free = stats.f_bfree * stats.f_bsize
diff --git a/trove/guestagent/guest_log.py b/trove/guestagent/guest_log.py
index 26170c4b..82c532b6 100644
--- a/trove/guestagent/guest_log.py
+++ b/trove/guestagent/guest_log.py
@@ -266,7 +266,7 @@ class GuestLog(object):
LogStatus.Published, LogStatus.Enabled)
# We've already handled this case (log rotated) so what gives?
else:
- raise ("Bug in _log_rotated ?")
+ raise Exception(_("Bug in _log_rotated?"))
else:
self._published_size = 0
self._size = 0
diff --git a/trove/guestagent/module/module_manager.py b/trove/guestagent/module/module_manager.py
index 28de671d..cf2d5304 100644
--- a/trove/guestagent/module/module_manager.py
+++ b/trove/guestagent/module/module_manager.py
@@ -15,6 +15,7 @@
#
import datetime
+import operator
import os
from oslo_log import log as logging
@@ -41,12 +42,12 @@ class ModuleManager(object):
@classmethod
def get_current_timestamp(cls):
- return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
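+ # Keep sub-second precision (truncated to hundredths) so module
+ # results can be ordered reliably by their 'updated' timestamp.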
+ return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")[0:22]
@classmethod
def apply_module(cls, driver, module_type, name, tenant,
datastore, ds_version, contents, module_id, md5,
- auto_apply, visible):
+ auto_apply, visible, admin_module):
tenant = tenant or cls.MODULE_APPLY_TO_ALL
datastore = datastore or cls.MODULE_APPLY_TO_ALL
ds_version = ds_version or cls.MODULE_APPLY_TO_ALL
@@ -57,9 +58,9 @@ class ModuleManager(object):
now = cls.get_current_timestamp()
default_result = cls.build_default_result(
module_type, name, tenant, datastore,
- ds_version, module_id, md5, auto_apply, visible, now)
+ ds_version, module_id, md5,
+ auto_apply, visible, now, admin_module)
result = cls.read_module_result(module_dir, default_result)
- admin_module = cls.is_admin_module(tenant, auto_apply, visible)
try:
driver.configure(name, datastore, ds_version, data_file)
applied, message = driver.apply(
@@ -83,7 +84,7 @@ class ModuleManager(object):
result['tenant'] = tenant
result['auto_apply'] = auto_apply
result['visible'] = visible
- result['admin_only'] = admin_module
+ result['is_admin'] = admin_module
cls.write_module_result(module_dir, result)
return result
@@ -113,8 +114,7 @@ class ModuleManager(object):
@classmethod
def build_default_result(cls, module_type, name, tenant,
datastore, ds_version, module_id, md5,
- auto_apply, visible, now):
- admin_module = cls.is_admin_module(tenant, auto_apply, visible)
+ auto_apply, visible, now, admin_module):
result = {
'type': module_type,
'name': name,
@@ -130,7 +130,7 @@ class ModuleManager(object):
'removed': None,
'auto_apply': auto_apply,
'visible': visible,
- 'admin_only': admin_module,
+ 'is_admin': admin_module,
'contents': None,
}
return result
@@ -183,7 +183,9 @@ class ModuleManager(object):
(is_admin or result.get('visible'))):
if include_contents:
codec = stream_codecs.Base64Codec()
- if not is_admin and result.get('admin_only'):
+ # keep admin_only for backwards compatibility
+ if not is_admin and (result.get('is_admin') or
+ result.get('admin_only')):
contents = (
"Must be admin to retrieve contents for module %s"
% result.get('name', 'Unknown'))
@@ -195,6 +197,7 @@ class ModuleManager(object):
result['contents'] = operating_system.read_file(
contents_file, codec=codec, decode=False)
results.append(result)
+ results.sort(key=operator.itemgetter('updated'), reverse=True)
return results
@classmethod
diff --git a/trove/guestagent/pkg.py b/trove/guestagent/pkg.py
index 28654dcd..94459271 100644
--- a/trove/guestagent/pkg.py
+++ b/trove/guestagent/pkg.py
@@ -118,7 +118,7 @@ class BasePackagerMixin(object):
self.pexpect_wait_and_close_proc(child)
except pexpect.TIMEOUT:
self.pexpect_kill_proc(child)
- raise PkgTimeout("Process timeout after %i seconds." % time_out)
+ raise PkgTimeout(_("Process timeout after %i seconds.") % time_out)
return (i, match)
@@ -151,7 +151,7 @@ class RPMPackagerMixin(BasePackagerMixin):
while result == CONFLICT_REMOVED:
result = self._install(packages, time_out)
if result != OK:
- raise PkgPackageStateError("Cannot install packages.")
+ raise PkgPackageStateError(_("Cannot install packages."))
def pkg_is_installed(self, packages):
packages = packages if isinstance(packages, list) else packages.split()
@@ -188,7 +188,7 @@ class RPMPackagerMixin(BasePackagerMixin):
return
result = self._remove(package_name, time_out)
if result != OK:
- raise PkgPackageStateError("Package %s is in a bad state."
+ raise PkgPackageStateError(_("Package %s is in a bad state.")
% package_name)
@@ -218,18 +218,19 @@ class RedhatPackagerMixin(RPMPackagerMixin):
LOG.debug("Running package install command: %s" % cmd)
i, match = self.pexpect_run(cmd, output_expects, time_out)
if i == 0:
- raise PkgPermissionError("Invalid permissions.")
+ raise PkgPermissionError(_("Invalid permissions."))
elif i == 1:
- raise PkgNotFoundError("Could not find pkg %s" % match.group(1))
+ raise PkgNotFoundError(_("Could not find package %s") %
+ match.group(1))
elif i == 2 or i == 3 or i == 4:
self._rpm_remove_nodeps(match.group(1))
return CONFLICT_REMOVED
elif i == 5:
- raise PkgScriptletError("Package scriptlet failed")
+ raise PkgScriptletError(_("Package scriptlet failed"))
elif i == 6 or i == 7:
- raise PkgDownloadError("Package download problem")
+ raise PkgDownloadError(_("Package download problem"))
elif i == 8:
- raise PkgSignError("GPG key retrieval failed")
+ raise PkgSignError(_("GPG key retrieval failed"))
return OK
def _remove(self, package_name, time_out):
@@ -247,9 +248,10 @@ class RedhatPackagerMixin(RPMPackagerMixin):
'Removed:']
i, match = self.pexpect_run(cmd, output_expects, time_out)
if i == 0:
- raise PkgPermissionError("Invalid permissions.")
+ raise PkgPermissionError(_("Invalid permissions."))
elif i == 1:
- raise PkgNotFoundError("Could not find pkg %s" % package_name)
+ raise PkgNotFoundError(_("Could not find package %s") %
+ package_name)
return OK
@@ -294,7 +296,7 @@ class DebianPackagerMixin(BasePackagerMixin):
utils.execute("dpkg", "--configure", "-a",
run_as_root=True, root_helper="sudo")
except ProcessExecutionError:
- raise PkgConfigureError("Error configuring package.")
+ raise PkgConfigureError(_("Error configuring package."))
finally:
os.remove(fname)
@@ -324,9 +326,10 @@ class DebianPackagerMixin(BasePackagerMixin):
LOG.debug("Running package install command: %s" % cmd)
i, match = self.pexpect_run(cmd, output_expects, time_out)
if i == 0:
- raise PkgPermissionError("Invalid permissions.")
+ raise PkgPermissionError(_("Invalid permissions."))
elif i == 1 or i == 2 or i == 3:
- raise PkgNotFoundError("Could not find apt %s" % match.group(1))
+ raise PkgNotFoundError(_("Could not find package %s") %
+ match.group(1))
elif i == 4:
return RUN_DPKG_FIRST
elif i == 5:
@@ -356,9 +359,10 @@ class DebianPackagerMixin(BasePackagerMixin):
LOG.debug("Running remove package command %s" % cmd)
i, match = self.pexpect_run(cmd, output_expects, time_out)
if i == 0:
- raise PkgPermissionError("Invalid permissions.")
+ raise PkgPermissionError(_("Invalid permissions."))
elif i == 1:
- raise PkgNotFoundError("Could not find pkg %s" % package_name)
+ raise PkgNotFoundError(_("Could not find package %s") %
+ package_name)
elif i == 2 or i == 3:
return REINSTALL_FIRST
elif i == 4:
@@ -381,7 +385,7 @@ class DebianPackagerMixin(BasePackagerMixin):
self._fix(time_out)
result = self._install(packages, time_out)
if result != OK:
- raise PkgPackageStateError("Packages is in a bad state.")
+ raise PkgPackageStateError(_("Packages are in a bad state."))
# even after successful install, packages can stay unconfigured
# config_opts - is dict with name/value for questions asked by
# interactive configure script
@@ -429,7 +433,7 @@ class DebianPackagerMixin(BasePackagerMixin):
self._fix(time_out)
result = self._remove(package_name, time_out)
if result != OK:
- raise PkgPackageStateError("Package %s is in a bad state."
+ raise PkgPackageStateError(_("Package %s is in a bad state.")
% package_name)
diff --git a/trove/guestagent/strategies/backup/mysql_impl.py b/trove/guestagent/strategies/backup/mysql_impl.py
index a9d8b9f2..6721aa69 100644
--- a/trove/guestagent/strategies/backup/mysql_impl.py
+++ b/trove/guestagent/strategies/backup/mysql_impl.py
@@ -50,10 +50,17 @@ class InnoBackupEx(base.BackupRunner):
__strategy_name__ = 'innobackupex'
@property
+ def user_and_pass(self):
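+ """The credential arguments appended to the innobackupex command."""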
+ return (' --user=%(user)s --password=%(password)s ' %
+ {'user': ADMIN_USER_NAME,
+ 'password': MySqlApp.get_auth_password()})
+
+ @property
def cmd(self):
cmd = ('sudo innobackupex'
' --stream=xbstream'
' %(extra_opts)s ' +
+ self.user_and_pass +
MySqlApp.get_data_dir() +
' 2>/tmp/innobackupex.log'
)
@@ -97,7 +104,7 @@ class InnoBackupExIncremental(InnoBackupEx):
def __init__(self, *args, **kwargs):
if not kwargs.get('lsn'):
- raise AttributeError('lsn attribute missing, bad parent?')
+ raise AttributeError(_('lsn attribute missing, bad parent?'))
super(InnoBackupExIncremental, self).__init__(*args, **kwargs)
self.parent_location = kwargs.get('parent_location')
self.parent_checksum = kwargs.get('parent_checksum')
@@ -109,6 +116,7 @@ class InnoBackupExIncremental(InnoBackupEx):
' --incremental'
' --incremental-lsn=%(lsn)s'
' %(extra_opts)s ' +
+ self.user_and_pass +
MySqlApp.get_data_dir() +
' 2>/tmp/innobackupex.log')
return cmd + self.zip_cmd + self.encrypt_cmd
diff --git a/trove/guestagent/strategies/replication/experimental/postgresql_impl.py b/trove/guestagent/strategies/replication/experimental/postgresql_impl.py
index 855b48fb..1f1408f6 100644
--- a/trove/guestagent/strategies/replication/experimental/postgresql_impl.py
+++ b/trove/guestagent/strategies/replication/experimental/postgresql_impl.py
@@ -253,7 +253,7 @@ class PostgresqlReplicationStreaming(base.Replication):
def _write_standby_recovery_file(self, service, snapshot,
sslmode='prefer'):
- LOG.info("Snapshot data received:" + str(snapshot))
+ LOG.info(_("Snapshot data received: %s") % str(snapshot))
logging_config = snapshot['log_position']
conninfo_params = \
diff --git a/trove/guestagent/strategies/restore/experimental/postgresql_impl.py b/trove/guestagent/strategies/restore/experimental/postgresql_impl.py
index 2bee9d2c..d2f57520 100644
--- a/trove/guestagent/strategies/restore/experimental/postgresql_impl.py
+++ b/trove/guestagent/strategies/restore/experimental/postgresql_impl.py
@@ -119,7 +119,7 @@ class PgBaseBackup(base.RestoreRunner):
def pre_restore(self):
self.app.stop_db()
- LOG.info("Preparing WAL archive dir")
+ LOG.info(_("Preparing WAL archive dir"))
self.app.recreate_wal_archive_dir()
datadir = self.app.pgsql_data_dir
operating_system.remove(datadir, force=True, recursive=True,
diff --git a/trove/guestagent/strategies/restore/mysql_impl.py b/trove/guestagent/strategies/restore/mysql_impl.py
index 2d206ac7..0897be86 100644
--- a/trove/guestagent/strategies/restore/mysql_impl.py
+++ b/trove/guestagent/strategies/restore/mysql_impl.py
@@ -173,7 +173,7 @@ class MySQLRestoreMixin(object):
try:
operating_system.remove(file_path, force=True, as_root=True)
except Exception:
- LOG.exception("Could not remove file: '%s'" % file_path)
+ LOG.exception(_("Could not remove file: '%s'") % file_path)
@classmethod
def _is_non_zero_file(self, fp):
diff --git a/trove/guestagent/volume.py b/trove/guestagent/volume.py
index 2558b0bf..f78965be 100644
--- a/trove/guestagent/volume.py
+++ b/trove/guestagent/volume.py
@@ -14,14 +14,14 @@
# under the License.
import os
+import shlex
from tempfile import NamedTemporaryFile
+import traceback
from oslo_log import log as logging
-import pexpect
from trove.common import cfg
-from trove.common.exception import GuestError
-from trove.common.exception import ProcessExecutionError
+from trove.common import exception
from trove.common.i18n import _
from trove.common import utils
from trove.guestagent.common import operating_system
@@ -32,6 +32,12 @@ LOG = logging.getLogger(__name__)
CONF = cfg.CONF
+def log_and_raise(message):
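+ """Log the active exception and re-raise it wrapped in a GuestError."""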
+ LOG.exception(message)
+ raise_msg = message + _("\nExc: %s") % traceback.format_exc()
+ raise exception.GuestError(original_message=raise_msg)
+
+
class VolumeDevice(object):
def __init__(self, device_path):
@@ -47,9 +53,14 @@ class VolumeDevice(object):
target_dir = TMP_MOUNT_POINT
if target_subdir:
target_dir = target_dir + "/" + target_subdir
- utils.execute("sudo", "rsync", "--safe-links", "--perms",
- "--recursive", "--owner", "--group", "--xattrs",
- "--sparse", source_dir, target_dir)
+ try:
+ utils.execute("rsync", "--safe-links", "--perms",
+ "--recursive", "--owner", "--group", "--xattrs",
+ "--sparse", source_dir, target_dir,
+ run_as_root=True, root_helper="sudo")
+ except exception.ProcessExecutionError:
+ msg = _("Could not migrate data.")
+ log_and_raise(msg)
self.unmount(TMP_MOUNT_POINT)
def _check_device_exists(self):
@@ -63,46 +74,48 @@ class VolumeDevice(object):
num_tries = CONF.num_tries
LOG.debug("Checking if %s exists." % self.device_path)
- utils.execute('sudo', 'blockdev', '--getsize64', self.device_path,
+ utils.execute("blockdev", "--getsize64", self.device_path,
+ run_as_root=True, root_helper="sudo",
attempts=num_tries)
- except ProcessExecutionError:
- LOG.exception(_("Error getting device status"))
- raise GuestError(original_message=_(
- "InvalidDevicePath(path=%s)") % self.device_path)
+ except exception.ProcessExecutionError:
+ msg = _("Device '%s' is not ready.") % self.device_path
+ log_and_raise(msg)
def _check_format(self):
- """Checks that an unmounted volume is formatted."""
- cmd = "sudo dumpe2fs %s" % self.device_path
- LOG.debug("Checking whether %s is formatted: %s." %
- (self.device_path, cmd))
-
- child = pexpect.spawn(cmd)
+ """Checks that a volume is formatted."""
+ LOG.debug("Checking whether '%s' is formatted." % self.device_path)
try:
- i = child.expect(['has_journal', 'Wrong magic number'])
- if i == 0:
- return
- volume_fstype = CONF.volume_fstype
- raise IOError(
- _('Device path at {0} did not seem to be {1}.').format(
- self.device_path, volume_fstype))
-
- except pexpect.EOF:
- raise IOError(_("Volume was not formatted."))
- child.expect(pexpect.EOF)
+ stdout, stderr = utils.execute(
+ "dumpe2fs", self.device_path,
+ run_as_root=True, root_helper="sudo")
+ if 'has_journal' not in stdout:
+ msg = _("Volume '%s' does not appear to be formatted.") % (
+ self.device_path)
+ raise exception.GuestError(original_message=msg)
+ except exception.ProcessExecutionError as pe:
+ if 'Wrong magic number' in pe.stderr:
+ volume_fstype = CONF.volume_fstype
+ msg = _("'Device '%(dev)s' did not seem to be '%(type)s'.") % (
+ {'dev': self.device_path, 'type': volume_fstype})
+ log_and_raise(msg)
+ msg = _("Volume '%s' was not formatted.") % self.device_path
+ log_and_raise(msg)
def _format(self):
"""Calls mkfs to format the device at device_path."""
volume_fstype = CONF.volume_fstype
- format_options = CONF.format_options
- cmd = "sudo mkfs -t %s %s %s" % (volume_fstype,
- format_options, self.device_path)
+ format_options = shlex.split(CONF.format_options)
+ format_options.append(self.device_path)
volume_format_timeout = CONF.volume_format_timeout
- LOG.debug("Formatting %s. Executing: %s." %
- (self.device_path, cmd))
- child = pexpect.spawn(cmd, timeout=volume_format_timeout)
- # child.expect("(y,n)")
- # child.sendline('y')
- child.expect(pexpect.EOF)
+ LOG.debug("Formatting '%s'." % self.device_path)
+ try:
+ utils.execute_with_timeout(
+ "mkfs", "--type", volume_fstype, *format_options,
+ run_as_root=True, root_helper="sudo",
+ timeout=volume_format_timeout)
+ except exception.ProcessExecutionError:
+ msg = _("Could not format '%s'.") % self.device_path
+ log_and_raise(msg)
def format(self):
"""Formats the device at device_path and checks the filesystem."""
@@ -119,56 +132,77 @@ class VolumeDevice(object):
if write_to_fstab:
mount_point.write_to_fstab()
+ def _wait_for_mount(self, mount_point, timeout=2):
+ """Wait for a fs to be mounted."""
+ def wait_for_mount():
+ return operating_system.is_mount(mount_point)
+
+ try:
+ utils.poll_until(wait_for_mount, sleep_time=1, time_out=timeout)
+ except exception.PollTimeOut:
+ return False
+
+ return True
+
def resize_fs(self, mount_point):
"""Resize the filesystem on the specified device."""
self._check_device_exists()
+ # Some OSes will mount a file system after it is attached if
+ # an entry is put in the fstab file (as Trove does).
+ # Thus it may be necessary to wait for the mount and then unmount
+ # the fs again (since the volume was just attached).
+ if self._wait_for_mount(mount_point, timeout=2):
+ LOG.debug("Unmounting '%s' before resizing." % mount_point)
+ self.unmount(mount_point)
try:
- # check if the device is mounted at mount_point before e2fsck
- if not os.path.ismount(mount_point):
- utils.execute("e2fsck", "-f", "-p", self.device_path,
- run_as_root=True, root_helper="sudo")
+ utils.execute("e2fsck", "-f", "-p", self.device_path,
+ run_as_root=True, root_helper="sudo")
utils.execute("resize2fs", self.device_path,
run_as_root=True, root_helper="sudo")
- except ProcessExecutionError:
- LOG.exception(_("Error resizing file system."))
- raise GuestError(original_message=_(
- "Error resizing the filesystem: %s") % self.device_path)
+ except exception.ProcessExecutionError:
+ msg = _("Error resizing the filesystem with device '%s'.") % (
+ self.device_path)
+ log_and_raise(msg)
def unmount(self, mount_point):
if operating_system.is_mount(mount_point):
- cmd = "sudo umount %s" % mount_point
- child = pexpect.spawn(cmd)
- child.expect(pexpect.EOF)
+ try:
+ utils.execute("umount", mount_point,
+ run_as_root=True, root_helper='sudo')
+ except exception.ProcessExecutionError:
+ msg = _("Error unmounting '%s'.") % mount_point
+ log_and_raise(msg)
+ else:
+ LOG.debug("'%s' is not a mounted fs, cannot unmount", mount_point)
def unmount_device(self, device_path):
# unmount if device is already mounted
mount_points = self.mount_points(device_path)
for mnt in mount_points:
- LOG.info(_("Device %(device)s is already mounted in "
- "%(mount_point)s. Unmounting now.") %
+ LOG.info(_("Device '%(device)s' is mounted on "
+ "'%(mount_point)s'. Unmounting now.") %
{'device': device_path, 'mount_point': mnt})
self.unmount(mnt)
def mount_points(self, device_path):
"""Returns a list of mount points on the specified device."""
stdout, stderr = utils.execute(
- "grep %s /etc/mtab" % device_path,
+ "grep '^%s ' /etc/mtab" % device_path,
shell=True, check_exit_code=[0, 1])
return [entry.strip().split()[1] for entry in stdout.splitlines()]
- def set_readahead_size(self, readahead_size,
- execute_function=utils.execute):
+ def set_readahead_size(self, readahead_size):
"""Set the readahead size of disk."""
self._check_device_exists()
try:
- execute_function("sudo", "blockdev", "--setra",
- readahead_size, self.device_path)
- except ProcessExecutionError:
- LOG.exception(_("Error setting readhead size to %(size)s "
- "for device %(device)s.") %
- {'size': readahead_size, 'device': self.device_path})
- raise GuestError(original_message=_(
- "Error setting readhead size: %s.") % self.device_path)
+ utils.execute("blockdev", "--setra",
+ readahead_size, self.device_path,
+ run_as_root=True, root_helper="sudo")
+ except exception.ProcessExecutionError:
+ msg = _("Error setting readahead size to %(size)s "
+ "for device %(device)s.") % {
+ 'size': readahead_size, 'device': self.device_path}
+ log_and_raise(msg)
class VolumeMountPoint(object):
@@ -180,17 +214,21 @@ class VolumeMountPoint(object):
self.mount_options = CONF.mount_options
def mount(self):
- if not os.path.exists(self.mount_point):
+ if not operating_system.exists(self.mount_point, is_directory=True,
+ as_root=True):
operating_system.create_directory(self.mount_point, as_root=True)
LOG.debug("Mounting volume. Device path:{0}, mount_point:{1}, "
"volume_type:{2}, mount options:{3}".format(
self.device_path, self.mount_point, self.volume_fstype,
self.mount_options))
- cmd = ("sudo mount -t %s -o %s %s %s" %
- (self.volume_fstype, self.mount_options, self.device_path,
- self.mount_point))
- child = pexpect.spawn(cmd)
- child.expect(pexpect.EOF)
+ try:
+ utils.execute("mount", "-t", self.volume_fstype,
+ "-o", self.mount_options,
+ self.device_path, self.mount_point,
+ run_as_root=True, root_helper="sudo")
+ except exception.ProcessExecutionError:
+ msg = _("Could not mount '%s'.") % self.mount_point
+ log_and_raise(msg)
def write_to_fstab(self):
fstab_line = ("%s\t%s\t%s\t%s\t0\t0" %
@@ -201,6 +239,11 @@ class VolumeMountPoint(object):
fstab_content = fstab.read()
with NamedTemporaryFile(mode='w', delete=False) as tempfstab:
tempfstab.write(fstab_content + fstab_line)
- utils.execute("sudo", "install", "-o", "root", "-g", "root", "-m",
- "644", tempfstab.name, "/etc/fstab")
+ try:
+ utils.execute("install", "-o", "root", "-g", "root",
+ "-m", "644", tempfstab.name, "/etc/fstab",
+ run_as_root=True, root_helper="sudo")
+ except exception.ProcessExecutionError:
+ msg = _("Could not add '%s' to fstab.") % self.mount_point
+ log_and_raise(msg)
os.remove(tempfstab.name)
diff --git a/trove/instance/models.py b/trove/instance/models.py
index b18999bf..5cc53ec5 100644
--- a/trove/instance/models.py
+++ b/trove/instance/models.py
@@ -26,6 +26,7 @@ from oslo_log import log as logging
from trove.backup.models import Backup
from trove.common import cfg
+from trove.common import crypto_utils as cu
from trove.common import exception
from trove.common.glance_remote import create_glance_client
from trove.common.i18n import _, _LE, _LI, _LW
@@ -287,8 +288,8 @@ class SimpleInstance(object):
def datastore_status(self, datastore_status):
if datastore_status and not isinstance(datastore_status,
InstanceServiceStatus):
- raise ValueError("datastore_status must be of type "
- "InstanceServiceStatus. Got %s instead." %
+ raise ValueError(_("datastore_status must be of type "
+ "InstanceServiceStatus. Got %s instead.") %
datastore_status.__class__.__name__)
self.__datastore_status = datastore_status
@@ -433,6 +434,10 @@ class SimpleInstance(object):
def region_name(self):
return self.db_info.region_id
+ @property
+ def encrypted_rpc_messaging(self):
+ return self.db_info.encrypted_key is not None
+
class DetailInstance(SimpleInstance):
"""A detailed view of an Instance.
@@ -478,9 +483,9 @@ def get_db_info(context, id, cluster_id=None, include_deleted=False):
:rtype: trove.instance.models.DBInstance
"""
if context is None:
- raise TypeError("Argument context not defined.")
+ raise TypeError(_("Argument context not defined."))
elif id is None:
- raise TypeError("Argument id not defined.")
+ raise TypeError(_("Argument id not defined."))
args = {'id': id}
if cluster_id is not None:
@@ -564,6 +569,26 @@ def load_server_group_info(instance, context, compute_id):
instance.locality = srv_grp.ServerGroup.get_locality(server_group)
+def validate_modules_for_apply(modules, datastore_id, datastore_version_id):
+ for module in modules:
+ if (module.datastore_id and
+ module.datastore_id != datastore_id):
+ reason = (_("Module '%(mod)s' cannot be applied "
+ " (Wrong datastore '%(ds)s' - expected '%(ds2)s')")
+ % {'mod': module.name, 'ds': module.datastore_id,
+ 'ds2': datastore_id})
+ raise exception.ModuleInvalid(reason=reason)
+ if (module.datastore_version_id and
+ module.datastore_version_id != datastore_version_id):
+ reason = (_("Module '%(mod)s' cannot be applied "
+ " (Wrong datastore version '%(ver)s' "
+ "- expected '%(ver2)s')")
+ % {'mod': module.name,
+ 'ver': module.datastore_version_id,
+ 'ver2': datastore_version_id})
+ raise exception.ModuleInvalid(reason=reason)
+
+
class BaseInstance(SimpleInstance):
"""Represents an instance.
-----------
@@ -729,6 +754,14 @@ class BaseInstance(SimpleInstance):
"tenant_id=%s\n"
% (self.id, datastore_manager, self.tenant_id))}
+ instance_key = get_instance_encryption_key(self.id)
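+ # Write the key into the guest_info file so the agent on the
+ # guest can take part in encrypted RPC messaging.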
+ if instance_key:
+ files = {guest_info_file: (
+ "%s"
+ "instance_rpc_encr_key=%s\n" % (
+ files.get(guest_info_file),
+ instance_key))}
+
if os.path.isfile(CONF.get('guest_config')):
with open(CONF.get('guest_config'), "r") as f:
files[os.path.join(injected_config_location,
@@ -980,13 +1013,8 @@ class Instance(BuiltInstance):
for aa_module in auto_apply_modules:
if aa_module.id not in module_ids:
modules.append(aa_module)
- module_list = []
- for module in modules:
- module.contents = module_models.Module.deprocess_contents(
- module.contents)
- module_info = module_views.DetailedModuleView(module).data(
- include_contents=True)
- module_list.append(module_info)
+ validate_modules_for_apply(modules, datastore.id, datastore_version.id)
+ module_list = module_views.get_module_list(modules)
def _create_resources():
@@ -1390,7 +1418,7 @@ class Instances(object):
return SimpleInstance(context, db_info, status)
if context is None:
- raise TypeError("Argument context not defined.")
+ raise TypeError(_("Argument context not defined."))
client = create_nova_client(context)
servers = client.servers.list()
query_opts = {'tenant_id': context.tenant,
@@ -1487,7 +1515,8 @@ class DBInstance(dbmodels.DatabaseModelBase):
'task_id', 'task_description', 'task_start_time',
'volume_id', 'deleted', 'tenant_id',
'datastore_version_id', 'configuration_id', 'slave_of_id',
- 'cluster_id', 'shard_id', 'type', 'region_id']
+ 'cluster_id', 'shard_id', 'type', 'region_id',
+ 'encrypted_key']
def __init__(self, task_status, **kwargs):
"""
@@ -1500,9 +1529,27 @@ class DBInstance(dbmodels.DatabaseModelBase):
kwargs["task_id"] = task_status.code
kwargs["task_description"] = task_status.db_text
kwargs["deleted"] = False
+
+ if CONF.enable_secure_rpc_messaging:
+ key = cu.generate_random_key()
+ kwargs["encrypted_key"] = cu.encode_data(cu.encrypt_data(
+ key, CONF.inst_rpc_key_encr_key))
+ LOG.debug("Generated unique RPC encryption key for "
+ "instance. key = %s" % key)
+ else:
+ kwargs["encrypted_key"] = None
+
super(DBInstance, self).__init__(**kwargs)
self.set_task_status(task_status)
+ @property
+ def key(self):
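+ """Return the decrypted per-instance RPC key, or None if unset."""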
+ if self.encrypted_key is None:
+ return None
+
+ return cu.decrypt_data(cu.decode_data(self.encrypted_key),
+ CONF.inst_rpc_key_encr_key)
+
def _validate(self, errors):
if InstanceTask.from_code(self.task_id) is None:
errors['task_id'] = "Not valid."
@@ -1519,6 +1566,56 @@ class DBInstance(dbmodels.DatabaseModelBase):
task_status = property(get_task_status, set_task_status)
+class instance_encryption_key_cache(object):
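+ """A small LRU cache mapping instance ids to RPC encryption keys."""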
+ def __init__(self, func, lru_cache_size=10):
+ self._table = {}
+ self._lru = []
+ self._lru_cache_size = lru_cache_size
+ self._func = func
+
+ def get(self, instance_id):
+ if instance_id in self._table:
+ if self._lru.index(instance_id) > 0:
+ self._lru.remove(instance_id)
+ self._lru.insert(0, instance_id)
+
+ return self._table[instance_id]
+ else:
+ val = self._func(instance_id)
+
+ # BUG(1650518): Cleanup in the Pike release
+ if val is None:
+ return val
+
+ if len(self._lru) == self._lru_cache_size:
+ tail = self._lru.pop()
+ del self._table[tail]
+
+ self._lru.insert(0, instance_id)
+ self._table[instance_id] = val
+ return self._table[instance_id]
+
+ def __getitem__(self, instance_id):
+ return self.get(instance_id)
+
+
+def _get_instance_encryption_key(instance_id):
+ instance = DBInstance.find_by(id=instance_id)
+
+ if instance is not None:
+ return instance.key
+ else:
+ raise exception.NotFound(uuid=instance_id)
+
+
+_instance_encryption_key = instance_encryption_key_cache(
+ func=_get_instance_encryption_key)
+
+
+def get_instance_encryption_key(instance_id):
+ return _instance_encryption_key[instance_id]
+
+
def persist_instance_fault(notification, event_qualifier):
"""This callback is registered to be fired whenever a
notification is sent out.
@@ -1534,7 +1631,7 @@ def persist_instance_fault(notification, event_qualifier):
save_instance_fault(instance_id, message, details)
-def save_instance_fault(instance_id, message, details):
+def save_instance_fault(instance_id, message, details, skip_delta=None):
if instance_id:
try:
# Make sure it's a valid id - sometimes the error is related
@@ -1544,8 +1641,19 @@ def save_instance_fault(instance_id, message, details):
det = utils.format_output(details)
try:
fault = DBInstanceFault.find_by(instance_id=instance_id)
- fault.set_info(msg, det)
- fault.save()
+ skip = False
+ # If we were passed in a skip_delta, only update the fault
+ # if the old one is at least skip_delta seconds in the past
+ if skip_delta:
+ skip_time = fault.updated + timedelta(seconds=skip_delta)
+ now = datetime.now()
+ skip = now < skip_time
+ if skip:
+ LOG.debug(
+ "Skipping fault message in favor of previous one")
+ else:
+ fault.set_info(msg, det)
+ fault.save()
except exception.ModelNotFoundError:
DBInstanceFault.create(
instance_id=instance_id,
diff --git a/trove/instance/service.py b/trove/instance/service.py
index 686e3e53..031b0f89 100644
--- a/trove/instance/service.py
+++ b/trove/instance/service.py
@@ -536,13 +536,9 @@ class InstanceController(wsgi.Controller):
self.authorize_instance_action(context, 'module_apply', instance)
module_ids = [mod['id'] for mod in body.get('modules', [])]
modules = module_models.Modules.load_by_ids(context, module_ids)
- module_list = []
- for module in modules:
- module.contents = module_models.Module.deprocess_contents(
- module.contents)
- module_info = module_views.DetailedModuleView(module).data(
- include_contents=True)
- module_list.append(module_info)
+ models.validate_modules_for_apply(
+ modules, instance.datastore.id, instance.datastore_version.id)
+ module_list = module_views.get_module_list(modules)
client = create_guest_client(context, id)
result_list = client.module_apply(module_list)
models.Instance.add_instance_modules(context, id, modules)
diff --git a/trove/instance/views.py b/trove/instance/views.py
index 6721ec10..30c045c7 100644
--- a/trove/instance/views.py
+++ b/trove/instance/views.py
@@ -127,6 +127,8 @@ class InstanceDetailView(InstanceView):
if self.context.is_admin:
result['instance']['server_id'] = self.instance.server_id
result['instance']['volume_id'] = self.instance.volume_id
+ result['instance']['encrypted_rpc_messaging'] = (
+ self.instance.encrypted_rpc_messaging)
return result
diff --git a/trove/module/models.py b/trove/module/models.py
index 19cfb0f3..0ee40dd5 100644
--- a/trove/module/models.py
+++ b/trove/module/models.py
@@ -46,9 +46,9 @@ class Modules(object):
@staticmethod
def load(context, datastore=None):
if context is None:
- raise TypeError("Argument context not defined.")
+ raise TypeError(_("Argument context not defined."))
elif id is None:
- raise TypeError("Argument is not defined.")
+ raise TypeError(_("Argument is not defined."))
query_opts = {'deleted': False}
if datastore:
@@ -75,9 +75,9 @@ class Modules(object):
def load_auto_apply(context, datastore_id, datastore_version_id):
"""Return all the auto-apply modules for the given criteria."""
if context is None:
- raise TypeError("Argument context not defined.")
+ raise TypeError(_("Argument context not defined."))
elif id is None:
- raise TypeError("Argument is not defined.")
+ raise TypeError(_("Argument is not defined."))
query_opts = {'deleted': False,
'auto_apply': True}
@@ -113,9 +113,9 @@ class Modules(object):
for other tenants, unless the user is admin.
"""
if context is None:
- raise TypeError("Argument context not defined.")
+ raise TypeError(_("Argument context not defined."))
elif id is None:
- raise TypeError("Argument is not defined.")
+ raise TypeError(_("Argument is not defined."))
modules = []
if module_ids:
@@ -137,12 +137,14 @@ class Module(object):
@staticmethod
def create(context, name, module_type, contents,
description, tenant_id, datastore,
- datastore_version, auto_apply, visible, live_update):
+ datastore_version, auto_apply, visible, live_update,
+ priority_apply, apply_order, full_access):
if module_type.lower() not in Modules.VALID_MODULE_TYPES:
LOG.error(_("Valid module types: %s") % Modules.VALID_MODULE_TYPES)
raise exception.ModuleTypeNotFound(module_type=module_type)
Module.validate_action(
- context, 'create', tenant_id, auto_apply, visible)
+ context, 'create', tenant_id, auto_apply, visible, priority_apply,
+ full_access)
datastore_id, datastore_version_id = Module.validate_datastore(
datastore, datastore_version)
if Module.key_exists(
@@ -153,6 +155,9 @@ class Module(object):
raise exception.ModuleAlreadyExists(
name=name, datastore=datastore_str, ds_version=ds_version_str)
md5, processed_contents = Module.process_contents(contents)
+ is_admin = context.is_admin
+ if full_access:
+ is_admin = 0
module = DBModule.create(
name=name,
type=module_type.lower(),
@@ -164,37 +169,53 @@ class Module(object):
auto_apply=auto_apply,
visible=visible,
live_update=live_update,
+ priority_apply=priority_apply,
+ apply_order=apply_order,
+ is_admin=is_admin,
md5=md5)
return module
# Certain fields require admin access to create/change/delete
@staticmethod
- def validate_action(context, action_str, tenant_id, auto_apply, visible):
- error_str = None
- if not context.is_admin:
- option_strs = []
- if tenant_id is None:
- option_strs.append(_("Tenant: %s") % Modules.MATCH_ALL_NAME)
- if auto_apply:
- option_strs.append(_("Auto: %s") % auto_apply)
- if not visible:
- option_strs.append(_("Visible: %s") % visible)
- if option_strs:
- error_str = "(" + " ".join(option_strs) + ")"
- if error_str:
+ def validate_action(context, action_str, tenant_id, auto_apply, visible,
+ priority_apply, full_access):
+ admin_options_str = None
+ option_strs = []
+ if tenant_id is None:
+ option_strs.append(_("Tenant: %s") % Modules.MATCH_ALL_NAME)
+ if auto_apply:
+ option_strs.append(_("Auto: %s") % auto_apply)
+ if not visible:
+ option_strs.append(_("Visible: %s") % visible)
+ if priority_apply:
+ option_strs.append(_("Priority: %s") % priority_apply)
+ if full_access is not None:
+ if full_access and option_strs:
+ admin_options_str = "(" + ", ".join(option_strs) + ")"
+ raise exception.InvalidModelError(
+ errors=_('Cannot make module full access: %s') %
+ admin_options_str)
+ option_strs.append(_("Full Access: %s") % full_access)
+ if option_strs:
+ admin_options_str = "(" + ", ".join(option_strs) + ")"
+ if not context.is_admin and admin_options_str:
raise exception.ModuleAccessForbidden(
- action=action_str, options=error_str)
+ action=action_str, options=admin_options_str)
+ return admin_options_str
@staticmethod
def validate_datastore(datastore, datastore_version):
datastore_id = None
datastore_version_id = None
if datastore:
- ds, ds_ver = datastore_models.get_datastore_version(
- type=datastore, version=datastore_version)
- datastore_id = ds.id
if datastore_version:
+ ds, ds_ver = datastore_models.get_datastore_version(
+ type=datastore, version=datastore_version)
+ datastore_id = ds.id
datastore_version_id = ds_ver.id
+ else:
+ ds = datastore_models.Datastore.load(datastore)
+ datastore_id = ds.id
elif datastore_version:
msg = _("Cannot specify version without datastore")
raise exception.BadRequest(message=msg)
@@ -237,7 +258,8 @@ class Module(object):
def delete(context, module):
Module.validate_action(
context, 'delete',
- module.tenant_id, module.auto_apply, module.visible)
+ module.tenant_id, module.auto_apply, module.visible,
+ module.priority_apply, None)
Module.enforce_live_update(module.id, module.live_update, module.md5)
module.deleted = True
module.deleted_at = datetime.utcnow()
@@ -282,28 +304,33 @@ class Module(object):
return module
@staticmethod
- def update(context, module, original_module):
+ def update(context, module, original_module, full_access):
Module.enforce_live_update(
original_module.id, original_module.live_update,
original_module.md5)
- # we don't allow any changes to 'admin'-type modules, even if
- # the values changed aren't the admin ones.
- access_tenant_id = (None if (original_module.tenant_id is None or
- module.tenant_id is None)
- else module.tenant_id)
- access_auto_apply = original_module.auto_apply or module.auto_apply
- access_visible = original_module.visible and module.visible
- Module.validate_action(
- context, 'update',
- access_tenant_id, access_auto_apply, access_visible)
+ # we don't allow any changes to 'is_admin' modules by non-admin
+ if original_module.is_admin and not context.is_admin:
+ raise exception.ModuleAccessForbidden(
+ action='update', options='(Module is an admin module)')
+ # we don't allow any changes to admin-only attributes by non-admin
+ admin_options = Module.validate_action(
+ context, 'update', module.tenant_id, module.auto_apply,
+ module.visible, module.priority_apply, full_access)
+ # make sure we set the is_admin flag, but only if it was
+ # originally is_admin or we changed an admin option
+ module.is_admin = original_module.is_admin or (
+ 1 if admin_options else 0)
+ # but we turn it on/off if full_access is specified
+ if full_access is not None:
+ module.is_admin = 0 if full_access else 1
ds_id, ds_ver_id = Module.validate_datastore(
module.datastore_id, module.datastore_version_id)
if module.contents != original_module.contents:
md5, processed_contents = Module.process_contents(module.contents)
module.md5 = md5
module.contents = processed_contents
- else:
- # on load the contents were decrypted, so
+ elif hasattr(original_module, 'encrypted_contents'):
+ # on load the contents may have been decrypted, so
# we need to put the encrypted contents back before we update
module.contents = original_module.encrypted_contents
if module.datastore_id:
@@ -415,6 +442,7 @@ class DBModule(models.DatabaseModelBase):
'id', 'name', 'type', 'contents', 'description',
'tenant_id', 'datastore_id', 'datastore_version_id',
'auto_apply', 'visible', 'live_update',
+ 'priority_apply', 'apply_order', 'is_admin',
'md5', 'created', 'updated', 'deleted', 'deleted_at']
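
The validate_action() rework above gathers every admin-only option the request touches into option_strs, rejects non-admin callers with the formatted list, and otherwise returns the string so update() can decide whether to flip is_admin. A condensed, self-contained sketch of that gate (names are simplified; the real method also formats translated strings and rejects full_access combined with other admin options):

    # Sketch of the admin-option gate in Module.validate_action; the
    # exception class is a stand-in for Trove's ModuleAccessForbidden.
    class ModuleAccessForbidden(Exception):
        pass

    def check_admin_options(is_admin, auto_apply=False, visible=True,
                            priority_apply=False):
        options = []
        if auto_apply:
            options.append("Auto: %s" % auto_apply)
        if not visible:
            options.append("Visible: %s" % visible)
        if priority_apply:
            options.append("Priority: %s" % priority_apply)
        option_str = "(" + ", ".join(options) + ")" if options else ""
        if option_str and not is_admin:
            raise ModuleAccessForbidden(option_str)
        return option_str

    # check_admin_options(False, auto_apply=True)  -> raises
    # check_admin_options(True, auto_apply=True)   -> "(Auto: True)"
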
diff --git a/trove/module/service.py b/trove/module/service.py
index c6b08e1c..b75108ec 100644
--- a/trove/module/service.py
+++ b/trove/module/service.py
@@ -91,11 +91,15 @@ class ModuleController(wsgi.Controller):
auto_apply = body['module'].get('auto_apply', 0)
visible = body['module'].get('visible', 1)
live_update = body['module'].get('live_update', 0)
+ priority_apply = body['module'].get('priority_apply', 0)
+ apply_order = body['module'].get('apply_order', 5)
+ full_access = body['module'].get('full_access', None)
module = models.Module.create(
context, name, module_type, contents,
description, module_tenant_id, datastore, ds_version,
- auto_apply, visible, live_update)
+ auto_apply, visible, live_update, priority_apply,
+ apply_order, full_access)
view_data = views.DetailedModuleView(module)
return wsgi.Result(view_data.data(), 200)
@@ -154,8 +158,15 @@ class ModuleController(wsgi.Controller):
module.visible = body['module']['visible']
if 'live_update' in body['module']:
module.live_update = body['module']['live_update']
-
- models.Module.update(context, module, original_module)
+ if 'priority_apply' in body['module']:
+ module.priority_apply = body['module']['priority_apply']
+ if 'apply_order' in body['module']:
+ module.apply_order = body['module']['apply_order']
+ full_access = None
+ if 'full_access' in body['module']:
+ full_access = body['module']['full_access']
+
+ models.Module.update(context, module, original_module, full_access)
view_data = views.DetailedModuleView(module)
return wsgi.Result(view_data.data(), 200)
diff --git a/trove/module/views.py b/trove/module/views.py
index 63c4a5fa..5a747caf 100644
--- a/trove/module/views.py
+++ b/trove/module/views.py
@@ -32,9 +32,12 @@ class ModuleView(object):
tenant_id=self.module.tenant_id,
datastore_id=self.module.datastore_id,
datastore_version_id=self.module.datastore_version_id,
- auto_apply=self.module.auto_apply,
+ auto_apply=bool(self.module.auto_apply),
+ priority_apply=bool(self.module.priority_apply),
+ apply_order=self.module.apply_order,
+ is_admin=bool(self.module.is_admin),
md5=self.module.md5,
- visible=self.module.visible,
+ visible=bool(self.module.visible),
created=self.module.created,
updated=self.module.updated)
# add extra data to make results more legible
@@ -48,13 +51,15 @@ class ModuleView(object):
datastore = self.module.datastore_id
datastore_version = self.module.datastore_version_id
if datastore:
- ds, ds_ver = (
- datastore_models.get_datastore_version(
- type=datastore, version=datastore_version))
- datastore = ds.name
if datastore_version:
+ ds, ds_ver = (
+ datastore_models.get_datastore_version(
+ type=datastore, version=datastore_version))
+ datastore = ds.name
datastore_version = ds_ver.name
else:
+ ds = datastore_models.Datastore.load(datastore)
+ datastore = ds.name
datastore_version = models.Modules.MATCH_ALL_NAME
else:
datastore = models.Modules.MATCH_ALL_NAME
@@ -91,9 +96,22 @@ class DetailedModuleView(ModuleView):
def data(self, include_contents=False):
return_value = super(DetailedModuleView, self).data()
module_dict = return_value["module"]
- module_dict["live_update"] = self.module.live_update
+ module_dict["live_update"] = bool(self.module.live_update)
if hasattr(self.module, 'instance_count'):
module_dict["instance_count"] = self.module.instance_count
if include_contents:
+ if not hasattr(self.module, 'encrypted_contents'):
+ self.module.encrypted_contents = self.module.contents
+ self.module.contents = models.Module.deprocess_contents(
+ self.module.contents)
module_dict['contents'] = self.module.contents
return {"module": module_dict}
+
+
+def get_module_list(modules):
+ module_list = []
+ for module in modules:
+ module_info = DetailedModuleView(module).data(
+ include_contents=True)
+ module_list.append(module_info)
+ return module_list
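
DetailedModuleView.data(include_contents=True) now decrypts module contents lazily and stashes the stored form in encrypted_contents, which update() later writes back. The idiom in isolation (deprocess stands in for models.Module.deprocess_contents):

    # Decrypt-on-first-read, caching the stored form on the model.
    def ensure_plain_contents(module, deprocess):
        if not hasattr(module, 'encrypted_contents'):
            module.encrypted_contents = module.contents  # keep stored form
            module.contents = deprocess(module.contents)
        return module.contents
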
diff --git a/trove/network/neutron.py b/trove/network/neutron.py
index c8c60d9c..3a5f59dc 100644
--- a/trove/network/neutron.py
+++ b/trove/network/neutron.py
@@ -18,6 +18,7 @@ from neutronclient.common import exceptions as neutron_exceptions
from oslo_log import log as logging
from trove.common import exception
+from trove.common.i18n import _
from trove.common import remote
from trove.network import base
@@ -51,7 +52,7 @@ class NeutronDriver(base.NetworkDriver):
try:
return self.client.show_security_group(security_group=group_id)
except neutron_exceptions.NeutronClientException as e:
- LOG.exception('Failed to get remote security group')
+ LOG.exception(_('Failed to get remote security group'))
raise exception.TroveError(str(e))
def create_security_group(self, name, description):
@@ -63,14 +64,14 @@ class NeutronDriver(base.NetworkDriver):
sec_group.get('security_group', sec_group))
except neutron_exceptions.NeutronClientException as e:
- LOG.exception('Failed to create remote security group')
+ LOG.exception(_('Failed to create remote security group'))
raise exception.SecurityGroupCreationError(str(e))
def delete_security_group(self, sec_group_id):
try:
self.client.delete_security_group(security_group=sec_group_id)
except neutron_exceptions.NeutronClientException as e:
- LOG.exception('Failed to delete remote security group')
+ LOG.exception(_('Failed to delete remote security group'))
raise exception.SecurityGroupDeletionError(str(e))
def add_security_group_rule(self, sec_group_id, protocol,
@@ -95,9 +96,10 @@ class NeutronDriver(base.NetworkDriver):
except neutron_exceptions.NeutronClientException as e:
# ignore error if rule already exists
if e.status_code == 409:
- LOG.exception("secgroup rule already exists")
+ LOG.exception(_("Security group rule already exists"))
else:
- LOG.exception('Failed to add rule to remote security group')
+ LOG.exception(_('Failed to add rule to remote security '
+ 'group'))
raise exception.SecurityGroupRuleCreationError(str(e))
def delete_security_group_rule(self, sec_group_rule_id):
@@ -106,7 +108,7 @@ class NeutronDriver(base.NetworkDriver):
security_group_rule=sec_group_rule_id)
except neutron_exceptions.NeutronClientException as e:
- LOG.exception('Failed to delete rule to remote security group')
+ LOG.exception(_('Failed to delete rule from remote security group'))
raise exception.SecurityGroupRuleDeletionError(str(e))
def _convert_to_nova_security_group_format(self, security_group):
diff --git a/trove/network/nova.py b/trove/network/nova.py
index a66a8be4..c45e9481 100644
--- a/trove/network/nova.py
+++ b/trove/network/nova.py
@@ -18,6 +18,7 @@ from novaclient import exceptions as nova_exceptions
from oslo_log import log as logging
from trove.common import exception
+from trove.common.i18n import _
from trove.common import remote
from trove.network import base
@@ -38,7 +39,7 @@ class NovaNetwork(base.NetworkDriver):
try:
return self.client.security_groups.get(group_id)
except nova_exceptions.ClientException as e:
- LOG.exception('Failed to get remote security group')
+ LOG.exception(_('Failed to get remote security group'))
raise exception.TroveError(str(e))
def create_security_group(self, name, description):
@@ -47,14 +48,14 @@ class NovaNetwork(base.NetworkDriver):
name=name, description=description)
return sec_group
except nova_exceptions.ClientException as e:
- LOG.exception('Failed to create remote security group')
+ LOG.exception(_('Failed to create remote security group'))
raise exception.SecurityGroupCreationError(str(e))
def delete_security_group(self, sec_group_id):
try:
self.client.security_groups.delete(sec_group_id)
except nova_exceptions.ClientException as e:
- LOG.exception('Failed to delete remote security group')
+ LOG.exception(_('Failed to delete remote security group'))
raise exception.SecurityGroupDeletionError(str(e))
def add_security_group_rule(self, sec_group_id, protocol,
@@ -69,7 +70,7 @@ class NovaNetwork(base.NetworkDriver):
return sec_group_rule
except nova_exceptions.ClientException as e:
- LOG.exception('Failed to add rule to remote security group')
+ LOG.exception(_('Failed to add rule to remote security group'))
raise exception.SecurityGroupRuleCreationError(str(e))
def delete_security_group_rule(self, sec_group_rule_id):
@@ -77,5 +78,5 @@ class NovaNetwork(base.NetworkDriver):
self.client.security_group_rules.delete(sec_group_rule_id)
except nova_exceptions.ClientException as e:
- LOG.exception('Failed to delete rule to remote security group')
+ LOG.exception(_('Failed to delete rule from remote security group'))
raise exception.SecurityGroupRuleDeletionError(str(e))
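
The mechanical change across both network drivers is wrapping log and exception messages in _() so they are collected for translation. Presumably trove/common/i18n.py follows the standard oslo.i18n wiring, roughly:

    # Assumed shape of trove/common/i18n.py (standard oslo.i18n pattern).
    import oslo_i18n

    _translators = oslo_i18n.TranslatorFactory(domain='trove')
    _ = _translators.primary  # marks a message for runtime translation
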
diff --git a/trove/rpc.py b/trove/rpc.py
index b8e826eb..dff472ee 100644
--- a/trove/rpc.py
+++ b/trove/rpc.py
@@ -23,22 +23,18 @@ __all__ = [
'add_extra_exmods',
'clear_extra_exmods',
'get_allowed_exmods',
- 'RequestContextSerializer',
'get_client',
'get_server',
'get_notifier',
- 'TRANSPORT_ALIASES',
]
from oslo_config import cfg
import oslo_messaging as messaging
-from oslo_serialization import jsonutils
-from osprofiler import profiler
-from trove.common.context import TroveContext
import trove.common.exception
-
+from trove.common.rpc import secure_serializer as ssz
+from trove.common.rpc import serializer as sz
CONF = cfg.CONF
TRANSPORT = None
@@ -50,25 +46,15 @@ ALLOWED_EXMODS = [
EXTRA_EXMODS = []
-# TODO(esp): Remove or update these paths
-TRANSPORT_ALIASES = {
- 'trove.openstack.common.rpc.impl_kombu': 'rabbit',
- 'trove.openstack.common.rpc.impl_qpid': 'qpid',
- 'trove.openstack.common.rpc.impl_zmq': 'zmq',
- 'trove.rpc.impl_kombu': 'rabbit',
- 'trove.rpc.impl_qpid': 'qpid',
- 'trove.rpc.impl_zmq': 'zmq',
-}
-
def init(conf):
global TRANSPORT, NOTIFIER
exmods = get_allowed_exmods()
TRANSPORT = messaging.get_transport(conf,
- allowed_remote_exmods=exmods,
- aliases=TRANSPORT_ALIASES)
+ allowed_remote_exmods=exmods)
- serializer = RequestContextSerializer(JsonPayloadSerializer())
+ serializer = sz.TroveRequestContextSerializer(
+ messaging.JsonPayloadSerializer())
NOTIFIER = messaging.Notifier(TRANSPORT, serializer=serializer)
@@ -96,60 +82,26 @@ def get_allowed_exmods():
return ALLOWED_EXMODS + EXTRA_EXMODS
-class JsonPayloadSerializer(messaging.NoOpSerializer):
- @staticmethod
- def serialize_entity(context, entity):
- return jsonutils.to_primitive(entity, convert_instances=True)
-
-
-class RequestContextSerializer(messaging.Serializer):
-
- def __init__(self, base):
- self._base = base
-
- def serialize_entity(self, context, entity):
- if not self._base:
- return entity
- return self._base.serialize_entity(context, entity)
-
- def deserialize_entity(self, context, entity):
- if not self._base:
- return entity
- return self._base.deserialize_entity(context, entity)
-
- def serialize_context(self, context):
- _context = context.to_dict()
- prof = profiler.get()
- if prof:
- trace_info = {
- "hmac_key": prof.hmac_key,
- "base_id": prof.get_base_id(),
- "parent_id": prof.get_id()
- }
- _context.update({"trace_info": trace_info})
- return _context
-
- def deserialize_context(self, context):
- trace_info = context.pop("trace_info", None)
- if trace_info:
- profiler.init(**trace_info)
- return TroveContext.from_dict(context)
-
-
def get_transport_url(url_str=None):
- return messaging.TransportURL.parse(CONF, url_str, TRANSPORT_ALIASES)
+ return messaging.TransportURL.parse(CONF, url_str)
-def get_client(target, version_cap=None, serializer=None):
+def get_client(target, key, version_cap=None, serializer=None,
+ secure_serializer=ssz.SecureSerializer):
assert TRANSPORT is not None
- serializer = RequestContextSerializer(serializer)
+ # BUG(1650518): Cleanup in the Pike release.
+ # Uncomment the following line in the Pike release:
+ # assert key is not None
+ serializer = secure_serializer(
+ sz.TroveRequestContextSerializer(serializer), key)
return messaging.RPCClient(TRANSPORT,
target,
version_cap=version_cap,
serializer=serializer)
-def get_server(target, endpoints, serializer=None):
+def get_server(target, endpoints, key, serializer=None,
+ secure_serializer=ssz.SecureSerializer):
assert TRANSPORT is not None
# Thread module is not monkeypatched if remote debugging is enabled.
@@ -160,7 +112,12 @@ def get_server(target, endpoints, serializer=None):
executor = "blocking" if debug_utils.enabled() else "eventlet"
- serializer = RequestContextSerializer(serializer)
+ # BUG(1650518): Cleanup in the Pike release.
+ # Uncomment the following line in the Pike release:
+ # assert key is not None
+ serializer = secure_serializer(
+ sz.TroveRequestContextSerializer(serializer), key)
+
return messaging.get_rpc_server(TRANSPORT,
target,
endpoints,
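
Both get_client() and get_server() now wrap the context serializer in a secure serializer keyed per service (or per guest instance), with the transitional asserts left commented out so unkeyed pre-upgrade peers keep working. A rough sketch of such a wrapper against oslo.messaging's Serializer interface (encrypt/decrypt are hypothetical stand-ins for the crypto_utils helpers; the real class lives in trove/common/rpc/secure_serializer.py):

    import oslo_messaging as messaging

    def encrypt(entity, key):   # hypothetical stand-in; real code ciphers
        return entity           # the payload with the shared key

    def decrypt(entity, key):   # hypothetical stand-in
        return entity

    class SketchSecureSerializer(messaging.Serializer):
        # Illustrative only: cipher entities when a key is present,
        # pass through otherwise (unkeyed pre-upgrade peers).
        def __init__(self, base, key):
            self._base = base
            self._key = key

        def serialize_entity(self, context, entity):
            entity = self._base.serialize_entity(context, entity)
            if self._key is None:
                return entity
            return encrypt(entity, self._key)

        def deserialize_entity(self, context, entity):
            if self._key is not None:
                entity = decrypt(entity, self._key)
            return self._base.deserialize_entity(context, entity)

        def serialize_context(self, context):
            return self._base.serialize_context(context)

        def deserialize_context(self, context):
            return self._base.deserialize_context(context)
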
diff --git a/trove/taskmanager/api.py b/trove/taskmanager/api.py
index 1c1b01aa..93b6fe51 100644
--- a/trove/taskmanager/api.py
+++ b/trove/taskmanager/api.py
@@ -23,6 +23,7 @@ import oslo_messaging as messaging
from trove.common import cfg
from trove.common import exception
+from trove.common.i18n import _
from trove.common.notification import NotificationCastWrapper
from trove.common.strategies.cluster import strategy
from trove.guestagent import models as agent_models
@@ -77,7 +78,12 @@ class API(object):
cctxt.cast(self.context, method_name, **kwargs)
def get_client(self, target, version_cap, serializer=None):
- return rpc.get_client(target,
+ if CONF.enable_secure_rpc_messaging:
+ key = CONF.taskmanager_rpc_encr_key
+ else:
+ key = None
+
+ return rpc.get_client(target, key=key,
version_cap=version_cap,
serializer=serializer)
@@ -89,7 +95,7 @@ class API(object):
if obj_dict.get('manager'):
del obj_dict['manager']
return obj_dict
- raise ValueError("Could not transform %s" % obj_ref)
+ raise ValueError(_("Could not transform %s") % obj_ref)
def _delete_heartbeat(self, instance_id):
agent_heart_beat = agent_models.AgentHeartBeat()
diff --git a/trove/taskmanager/manager.py b/trove/taskmanager/manager.py
index 4e2555d2..69963024 100644
--- a/trove/taskmanager/manager.py
+++ b/trove/taskmanager/manager.py
@@ -148,11 +148,12 @@ class Manager(periodic_task.PeriodicTasks):
self._set_task_status(exception_replicas,
InstanceTasks.PROMOTION_ERROR)
msg = (_("promote-to-replica-source %(id)s: The following "
- "replicas may not have been switched: %(replicas)s") %
+ "replicas may not have been switched: %(replicas)s:"
+ "\n%(err)s") %
{"id": master_candidate.id,
- "replicas": [repl.id for repl in exception_replicas]})
- raise ReplicationSlaveAttachError("%s:\n%s" %
- (msg, error_messages))
+ "replicas": [repl.id for repl in exception_replicas],
+ "err": error_messages})
+ raise ReplicationSlaveAttachError(msg)
with EndNotification(context):
master_candidate = BuiltInstanceTasks.load(context, instance_id)
@@ -228,11 +229,12 @@ class Manager(periodic_task.PeriodicTasks):
self._set_task_status(exception_replicas,
InstanceTasks.EJECTION_ERROR)
msg = (_("eject-replica-source %(id)s: The following "
- "replicas may not have been switched: %(replicas)s") %
+ "replicas may not have been switched: %(replicas)s:"
+ "\n%(err)s") %
{"id": master_candidate.id,
- "replicas": [repl.id for repl in exception_replicas]})
- raise ReplicationSlaveAttachError("%s:\n%s" %
- (msg, error_messages))
+ "replicas": [repl.id for repl in exception_replicas],
+ "err": error_messages})
+ raise ReplicationSlaveAttachError(msg)
with EndNotification(context):
master = BuiltInstanceTasks.load(context, instance_id)
diff --git a/trove/taskmanager/models.py b/trove/taskmanager/models.py
index 160cf1c9..da031119 100755
--- a/trove/taskmanager/models.py
+++ b/trove/taskmanager/models.py
@@ -31,6 +31,7 @@ from trove.cluster.models import Cluster
from trove.cluster.models import DBCluster
from trove.cluster import tasks
from trove.common import cfg
+from trove.common import crypto_utils as cu
from trove.common import exception
from trove.common.exception import BackupCreationError
from trove.common.exception import GuestError
@@ -364,7 +365,8 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
finally:
if error_message:
inst_models.save_instance_fault(
- self.id, error_message, error_details)
+ self.id, error_message, error_details,
+ skip_delta=USAGE_SLEEP_TIME + 1)
def create_instance(self, flavor, image_id, databases, users,
datastore_manager, packages, volume_size,
@@ -724,7 +726,7 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
try:
heat_template = heat_template_unicode.encode('utf-8')
except UnicodeEncodeError:
- raise TroveError("Failed to utf-8 encode Heat template.")
+ raise TroveError(_("Failed to utf-8 encode Heat template."))
parameters = {"Flavor": flavor["name"],
"VolumeSize": volume_size,
@@ -746,23 +748,24 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
sleep_time=USAGE_SLEEP_TIME,
time_out=HEAT_TIME_OUT)
except PollTimeOut:
- raise TroveError("Failed to obtain Heat stack status. "
- "Timeout occurred.")
+ raise TroveError(_("Failed to obtain Heat stack status. "
+ "Timeout occurred."))
stack = client.stacks.get(stack_name)
if ((stack.action, stack.stack_status)
not in HEAT_STACK_SUCCESSFUL_STATUSES):
- raise TroveError("Failed to create Heat stack.")
+ raise TroveError(_("Failed to create Heat stack."))
resource = client.resources.get(stack.id, 'BaseInstance')
if resource.resource_status != HEAT_RESOURCE_SUCCESSFUL_STATE:
- raise TroveError("Failed to provision Heat base instance.")
+ raise TroveError(_("Failed to provision Heat base instance."))
instance_id = resource.physical_resource_id
if self.volume_support:
resource = client.resources.get(stack.id, 'DataVolume')
if resource.resource_status != HEAT_RESOURCE_SUCCESSFUL_STATE:
- raise TroveError("Failed to provision Heat data volume.")
+ raise TroveError(_("Failed to provision Heat data "
+ "volume."))
volume_id = resource.physical_resource_id
self.update_db(compute_instance_id=instance_id,
volume_id=volume_id)
@@ -1000,8 +1003,8 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
LOG.debug("Creating dns entry...")
ip = self.dns_ip_address
if not ip:
- raise TroveError("Failed to create DNS entry for instance %s. "
- "No IP available." % self.id)
+ raise TroveError(_("Failed to create DNS entry for instance "
+ "%s. No IP available.") % self.id)
dns_client.create_instance_entry(self.id, ip)
LOG.debug("Successfully created DNS entry for instance: %s" %
self.id)
@@ -1419,6 +1422,24 @@ class BuiltInstanceTasks(BuiltInstance, NotifyMixin, ConfigurationMixin):
volume_device = self._fix_device_path(
volume.attachments[0]['device'])
+ # BUG(1650518): Cleanup in the Pike release. Some instances
+ # that we will be upgrading will pre-date the secure
+ # serializer and will have no instance_key entries. If this
+ # is one of those instances, make a key. That will make it appear in
+ # the injected files that are generated next. From this
+ # point, and until the guest comes up, attempting to send
+ # messages to it will fail because the RPC framework will
+ # encrypt messages to a guest which potentially doesn't
+ # have the code to handle it.
+ if CONF.enable_secure_rpc_messaging and (
+ self.db_info.encrypted_key is None):
+ encrypted_key = cu.encode_data(cu.encrypt_data(
+ cu.generate_random_key(),
+ CONF.inst_rpc_key_encr_key))
+ self.update_db(encrypted_key=encrypted_key)
+ LOG.debug("Generated unique RPC encryption key for "
+ "instance = %s, key = %s" % (self.id, encrypted_key))
+
injected_files = self.get_injected_files(
datastore_version.manager)
LOG.debug("Rebuilding instance %(instance)s with image %(image)s.",
@@ -1507,8 +1528,8 @@ class BackupTasks(object):
"Details: %s") % e)
backup.state = bkup_models.BackupState.DELETE_FAILED
backup.save()
- raise TroveError("Failed to delete swift object for backup %s."
- % backup_id)
+ raise TroveError(_("Failed to delete swift object for backup "
+ "%s.") % backup_id)
else:
backup.delete()
LOG.info(_("Deleted backup %s successfully.") % backup_id)
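
The upgrade path added to BuiltInstanceTasks above mints a key for guests that pre-date the secure serializer: generate random key material, encrypt it under the deployment-wide inst_rpc_key_encr_key, and base64-encode it for storage. The pipeline shape, with standard-library stand-ins (the XOR cipher is a placeholder so the sketch stays dependency-free; it is not what crypto_utils does and is unfit for real use):

    import base64
    import os

    def generate_random_key(length=32):
        return os.urandom(length)

    def encrypt_data(data, key):          # placeholder cipher only
        return bytes(b ^ key[i % len(key)] for i, b in enumerate(data))

    def encode_data(data):
        return base64.b64encode(data)

    encrypted_key = encode_data(
        encrypt_data(generate_random_key(), b'inst-rpc-key-encr-key'))
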
diff --git a/trove/templates/mariadb/config.template b/trove/templates/mariadb/config.template
index 88614bde..8507e0f3 100644
--- a/trove/templates/mariadb/config.template
+++ b/trove/templates/mariadb/config.template
@@ -1,5 +1,6 @@
[client]
port = 3306
+socket = /var/run/mysqld/mysqld.sock
[mysqld_safe]
nice = 0
@@ -13,6 +14,7 @@ basedir = /usr
datadir = /var/lib/mysql/data
tmpdir = /var/tmp
pid_file = /var/run/mysqld/mysqld.pid
+socket = /var/run/mysqld/mysqld.sock
skip-external-locking = 1
key_buffer_size = {{ (50 * flavor['ram']/512)|int }}M
max_allowed_packet = {{ (1024 * flavor['ram']/512)|int }}K
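
Beyond the new socket entries, this template derives buffer sizes from the flavor's RAM (in MB). For a hypothetical 2048 MB flavor, the two expressions above render to 200M and 4096K:

    # Evaluating the template arithmetic for flavor['ram'] = 2048.
    flavor = {'ram': 2048}
    print("key_buffer_size = %dM" % int(50 * flavor['ram'] / 512))       # 200M
    print("max_allowed_packet = %dK" % int(1024 * flavor['ram'] / 512))  # 4096K
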
diff --git a/trove/tests/api/backups.py b/trove/tests/api/backups.py
index a4ebd6f7..ddb7dc14 100644
--- a/trove/tests/api/backups.py
+++ b/trove/tests/api/backups.py
@@ -26,7 +26,6 @@ from trove.common import exception
from trove.common.utils import generate_uuid
from trove.common.utils import poll_until
from trove import tests
-from trove.tests.api.instances import assert_unprocessable
from trove.tests.api.instances import instance_info
from trove.tests.api.instances import TIMEOUT_INSTANCE_CREATE
from trove.tests.api.instances import TIMEOUT_INSTANCE_DELETE
@@ -57,31 +56,6 @@ backup_count_for_instance_prior_to_create = 0
class CreateBackups(object):
@test
- def test_backup_create_instance_invalid(self):
- """Test create backup with unknown instance."""
- invalid_inst_id = 'invalid-inst-id'
- try:
- instance_info.dbaas.backups.create(BACKUP_NAME, invalid_inst_id,
- BACKUP_DESC)
- except exceptions.BadRequest as e:
- resp, body = instance_info.dbaas.client.last_response
- assert_equal(resp.status, 400)
- assert_equal(e.message,
- "Validation error: "
- "backup['instance'] u'%s' does not match "
- "'^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-"
- "([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-"
- "([0-9a-fA-F]){12}$'" %
- invalid_inst_id)
-
- @test
- def test_backup_create_instance_not_found(self):
- """Test create backup with unknown instance."""
- assert_raises(exceptions.NotFound, instance_info.dbaas.backups.create,
- BACKUP_NAME, generate_uuid(), BACKUP_DESC)
-
- @test(runs_after=[test_backup_create_instance_invalid,
- test_backup_create_instance_not_found])
def test_backup_create_instance(self):
"""Test create backup for a given instance."""
# Necessary to test that the count increases.
@@ -113,37 +87,6 @@ class CreateBackups(object):
assert_equal(datastore_version.id, result.datastore['version_id'])
-@test(runs_after=[CreateBackups],
- groups=[GROUP, tests.INSTANCES])
-class AfterBackupCreation(object):
-
- @test
- def test_restore_instance_from_not_completed_backup(self):
- assert_raises(exceptions.Conflict,
- RestoreUsingBackup._restore, backup_info.id)
- assert_equal(409, instance_info.dbaas.last_http_code)
-
- @test
- def test_instance_action_right_after_backup_create(self):
- """Test any instance action while backup is running."""
- assert_unprocessable(instance_info.dbaas.instances.resize_instance,
- instance_info.id, 1)
-
- @test
- def test_backup_create_another_backup_running(self):
- """Test create backup when another backup is running."""
- assert_unprocessable(instance_info.dbaas.backups.create,
- 'backup_test2', instance_info.id,
- 'test description2')
-
- @test
- def test_backup_delete_still_running(self):
- """Test delete backup when it is running."""
- result = instance_info.dbaas.backups.list()
- backup = result[0]
- assert_unprocessable(instance_info.dbaas.backups.delete, backup.id)
-
-
class BackupRestoreMixin(object):
def verify_backup(self, backup_id):
@@ -198,7 +141,7 @@ class BackupRestoreMixin(object):
time_out=TIMEOUT_INSTANCE_CREATE)
-@test(runs_after=[AfterBackupCreation],
+@test(runs_after=[CreateBackups],
groups=[GROUP, tests.INSTANCES])
class WaitForBackupCreateToFinish(BackupRestoreMixin):
"""
diff --git a/trove/tests/int_tests.py b/trove/tests/int_tests.py
index c87466fc..015ca03e 100644
--- a/trove/tests/int_tests.py
+++ b/trove/tests/int_tests.py
@@ -34,7 +34,7 @@ from trove.tests.api import users
from trove.tests.api import versions
from trove.tests.scenario import groups
from trove.tests.scenario.groups import backup_group
-from trove.tests.scenario.groups import cluster_actions_group
+from trove.tests.scenario.groups import cluster_group
from trove.tests.scenario.groups import configuration_group
from trove.tests.scenario.groups import database_actions_group
from trove.tests.scenario.groups import guest_log_group
@@ -148,9 +148,25 @@ base_groups = [
]
# Cluster-based groups
-cluster_actions_groups = list(base_groups)
-cluster_actions_groups.extend([cluster_actions_group.GROUP,
- negative_cluster_actions_group.GROUP])
+cluster_create_groups = list(base_groups)
+cluster_create_groups.extend([groups.CLUSTER_DELETE_WAIT])
+
+cluster_actions_groups = list(cluster_create_groups)
+cluster_actions_groups.extend([groups.CLUSTER_ACTIONS_SHRINK_WAIT])
+
+cluster_negative_actions_groups = list(negative_cluster_actions_group.GROUP)
+
+cluster_root_groups = list(cluster_create_groups)
+cluster_root_groups.extend([groups.CLUSTER_ACTIONS_ROOT_ENABLE])
+
+cluster_root_actions_groups = list(cluster_actions_groups)
+cluster_root_actions_groups.extend([groups.CLUSTER_ACTIONS_ROOT_ACTIONS])
+
+cluster_upgrade_groups = list(cluster_create_groups)
+cluster_upgrade_groups.extend([groups.CLUSTER_UPGRADE_WAIT])
+
+cluster_groups = list(cluster_actions_groups)
+cluster_groups.extend([cluster_group.GROUP])
# Single-instance based groups
instance_create_groups = list(base_groups)
@@ -177,6 +193,9 @@ backup_groups.extend([groups.BACKUP,
backup_incremental_groups = list(backup_groups)
backup_incremental_groups.extend([backup_group.GROUP])
+backup_negative_groups = list(backup_groups)
+backup_negative_groups.extend([groups.BACKUP_CREATE_NEGATIVE])
+
configuration_groups = list(instance_create_groups)
configuration_groups.extend([configuration_group.GROUP])
@@ -217,13 +236,20 @@ user_actions_groups = list(instance_create_groups)
user_actions_groups.extend([user_actions_group.GROUP])
# groups common to all datastores
-common_groups = list(instance_groups)
+common_groups = list(instance_create_groups)
common_groups.extend([guest_log_groups, instance_init_groups, module_groups])
# Register: Component based groups
register(["backup"], backup_groups)
register(["backup_incremental"], backup_incremental_groups)
+register(["backup_negative"], backup_negative_groups)
register(["cluster"], cluster_actions_groups)
+register(["cluster_actions"], cluster_actions_groups)
+register(["cluster_create"], cluster_create_groups)
+register(["cluster_negative_actions"], cluster_negative_actions_groups)
+register(["cluster_root"], cluster_root_groups)
+register(["cluster_root_actions"], cluster_root_actions_groups)
+register(["cluster_upgrade"], cluster_upgrade_groups)
register(["common"], common_groups)
register(["configuration"], configuration_groups)
register(["configuration_create"], configuration_create_groups)
@@ -262,7 +288,9 @@ register(
database_actions_groups,
configuration_groups,
user_actions_groups, ],
- multi=[cluster_actions_groups, ]
+ multi=[cluster_actions_groups,
+ cluster_negative_actions_groups,
+ cluster_root_actions_groups, ]
)
register(
@@ -284,22 +312,38 @@ register(
)
register(
- ["postgresql_supported"],
+ ["mariadb_supported"],
single=[common_groups,
backup_incremental_groups,
+ configuration_groups,
database_actions_groups,
+ root_actions_groups,
+ user_actions_groups, ],
+ multi=[replication_promote_groups, ]
+ # multi=[cluster_actions_groups,
+ # cluster_negative_actions_groups,
+ # cluster_root_actions_groups,
+ # replication_promote_groups, ]
+)
+
+register(
+ ["mongodb_supported"],
+ single=[common_groups,
+ backup_groups,
configuration_groups,
+ database_actions_groups,
root_actions_groups,
user_actions_groups, ],
- multi=[replication_groups, ]
+ multi=[cluster_actions_groups, ]
)
register(
- ["mysql_supported", "percona_supported"],
+ ["mysql_supported"],
single=[common_groups,
backup_incremental_groups,
configuration_groups,
database_actions_groups,
+ instance_groups,
instance_upgrade_groups,
root_actions_groups,
user_actions_groups, ],
@@ -307,26 +351,26 @@ register(
)
register(
- ["mariadb_supported"],
+ ["percona_supported"],
single=[common_groups,
backup_incremental_groups,
configuration_groups,
database_actions_groups,
+ instance_upgrade_groups,
root_actions_groups,
user_actions_groups, ],
- multi=[replication_promote_groups,
- cluster_actions_groups, ]
+ multi=[replication_promote_groups, ]
)
register(
- ["mongodb_supported"],
+ ["postgresql_supported"],
single=[common_groups,
- backup_groups,
- configuration_groups,
+ backup_incremental_groups,
database_actions_groups,
+ configuration_groups,
root_actions_groups,
user_actions_groups, ],
- multi=[cluster_actions_groups, ]
+ multi=[replication_groups, ]
)
register(
@@ -338,13 +382,20 @@ register(
root_actions_groups,
user_actions_groups, ],
multi=[]
+ # multi=[cluster_actions_groups,
+ # cluster_negative_actions_groups,
+ # cluster_root_actions_groups, ]
)
register(
["redis_supported"],
single=[common_groups,
- backup_groups, ],
- multi=[replication_promote_groups, ]
+ backup_groups,
+ configuration_groups, ],
+ multi=[
+ # cluster_actions_groups,
+ # cluster_negative_actions_groups,
+ replication_promote_groups, ]
)
register(
@@ -352,5 +403,7 @@ register(
single=[common_groups,
configuration_groups,
root_actions_groups, ],
- multi=[cluster_actions_groups, ]
+ multi=[cluster_actions_groups,
+ cluster_negative_actions_groups,
+ cluster_root_actions_groups, ]
)
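
Each new registration name is built by copying a base list and extending it, so every runnable target is an independent superset of its parents. The register() helper itself is defined earlier in int_tests.py and not shown in these hunks; a hypothetical sketch of its likely shape, flattening the name lists into proboscis group dependencies:

    import proboscis

    def register(names, *group_lists, **kwargs):
        # Hypothetical re-sketch; kwargs carries single=[...] / multi=[...]
        # as in the calls above, each a list of group-name lists.
        depends = []
        for gl in group_lists:
            depends.extend(gl)
        for nested in kwargs.get('single', []) + kwargs.get('multi', []):
            depends.extend(nested)
        proboscis.register(groups=list(names), depends_on_groups=depends)
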
diff --git a/trove/tests/scenario/groups/__init__.py b/trove/tests/scenario/groups/__init__.py
index 49de3a67..75c326dd 100644
--- a/trove/tests/scenario/groups/__init__.py
+++ b/trove/tests/scenario/groups/__init__.py
@@ -21,6 +21,8 @@
# Backup Group
BACKUP = "scenario.backup_grp"
BACKUP_CREATE = "scenario.backup_create_grp"
+BACKUP_CREATE_NEGATIVE = "scenario.backup_create_negative_grp"
+BACKUP_CREATE_WAIT = "scenario.backup_create_wait_grp"
BACKUP_DELETE = "scenario.backup_delete_grp"
BACKUP_INST = "scenario.backup_inst_grp"
BACKUP_INST_CREATE = "scenario.backup_inst_create_grp"
@@ -48,6 +50,34 @@ CFGGRP_INST_DELETE = "scenario.cfggrp_inst_delete_grp"
CFGGRP_INST_DELETE_WAIT = "scenario.cfggrp_inst_delete_wait_grp"
+# Cluster Actions Group
+CLUSTER_ACTIONS = "scenario.cluster_actions_grp"
+CLUSTER_ACTIONS_ROOT_ENABLE = "scenario.cluster_actions_root_enable_grp"
+CLUSTER_ACTIONS_ROOT_ACTIONS = "scenario.cluster_actions_root_actions_grp"
+CLUSTER_ACTIONS_ROOT_GROW = "scenario.cluster_actions_root_grow_grp"
+CLUSTER_ACTIONS_ROOT_SHRINK = "scenario.cluster_actions_root_shrink_grp"
+CLUSTER_ACTIONS_GROW_SHRINK = "scenario.cluster_actions_grow_shrink_grp"
+CLUSTER_ACTIONS_GROW = "scenario.cluster_actions_grow_grp"
+CLUSTER_ACTIONS_GROW_WAIT = "scenario.cluster_actions_grow_wait_grp"
+CLUSTER_ACTIONS_SHRINK = "scenario.cluster_actions_shrink_grp"
+CLUSTER_ACTIONS_SHRINK_WAIT = "scenario.cluster_actions_shrink_wait_grp"
+
+
+# Cluster Create Group (in cluster_actions file)
+CLUSTER_CREATE = "scenario.cluster_create_grp"
+CLUSTER_CREATE_WAIT = "scenario.cluster_create_wait_grp"
+
+
+# Cluster Delete Group (in cluster_actions file)
+CLUSTER_DELETE = "scenario.cluster_delete_grp"
+CLUSTER_DELETE_WAIT = "scenario.cluster_delete_wait_grp"
+
+
+# Cluster Upgrade Group (in cluster_actions file)
+CLUSTER_UPGRADE = "scenario.cluster_upgrade_grp"
+CLUSTER_UPGRADE_WAIT = "scenario.cluster_upgrade_wait_grp"
+
+
# Database Actions Group
DB_ACTION_CREATE = "scenario.db_action_create_grp"
DB_ACTION_DELETE = "scenario.db_action_delete_grp"
diff --git a/trove/tests/scenario/groups/backup_group.py b/trove/tests/scenario/groups/backup_group.py
index 273a38cb..16d93def 100644
--- a/trove/tests/scenario/groups/backup_group.py
+++ b/trove/tests/scenario/groups/backup_group.py
@@ -60,47 +60,63 @@ class BackupCreateGroup(TestGroup):
"""Check that create backup is started successfully."""
self.test_runner.run_backup_create()
- @test(depends_on=[backup_create])
+
+@test(depends_on_groups=[groups.BACKUP_CREATE],
+ groups=[groups.BACKUP_CREATE_NEGATIVE])
+class BackupCreateNegativeGroup(TestGroup):
+ """Test Backup Create Negative functionality."""
+
+ def __init__(self):
+ super(BackupCreateNegativeGroup, self).__init__(
+ BackupRunnerFactory.instance())
+
+ @test
def backup_delete_while_backup_running(self):
"""Ensure delete backup fails while it is running."""
self.test_runner.run_backup_delete_while_backup_running()
- @test(depends_on=[backup_create],
- runs_after=[backup_delete_while_backup_running])
+ @test(runs_after=[backup_delete_while_backup_running])
def restore_instance_from_not_completed_backup(self):
"""Ensure a restore fails while the backup is running."""
self.test_runner.run_restore_instance_from_not_completed_backup()
- @test(depends_on=[backup_create],
- runs_after=[restore_instance_from_not_completed_backup])
+ @test(runs_after=[restore_instance_from_not_completed_backup])
def backup_create_another_backup_running(self):
"""Ensure create backup fails when another backup is running."""
self.test_runner.run_backup_create_another_backup_running()
- @test(depends_on=[backup_create],
- runs_after=[backup_create_another_backup_running])
+ @test(runs_after=[backup_create_another_backup_running])
def instance_action_right_after_backup_create(self):
"""Ensure any instance action fails while backup is running."""
self.test_runner.run_instance_action_right_after_backup_create()
- @test
+ @test(runs_after=[instance_action_right_after_backup_create])
def delete_unknown_backup(self):
"""Ensure deleting an unknown backup fails."""
self.test_runner.run_delete_unknown_backup()
- @test
+ @test(runs_after=[instance_action_right_after_backup_create])
def backup_create_instance_invalid(self):
"""Ensure create backup fails with invalid instance id."""
self.test_runner.run_backup_create_instance_invalid()
- @test
+ @test(runs_after=[instance_action_right_after_backup_create])
def backup_create_instance_not_found(self):
"""Ensure create backup fails with unknown instance id."""
self.test_runner.run_backup_create_instance_not_found()
- @test(depends_on=[backup_create],
- runs_after=[delete_unknown_backup, backup_create_instance_invalid,
- backup_create_instance_not_found])
+
+@test(depends_on_groups=[groups.BACKUP_CREATE],
+ groups=[GROUP, groups.BACKUP, groups.BACKUP_CREATE_WAIT],
+ runs_after_groups=[groups.BACKUP_CREATE_NEGATIVE])
+class BackupCreateWaitGroup(TestGroup):
+ """Wait for Backup Create to Complete."""
+
+ def __init__(self):
+ super(BackupCreateWaitGroup, self).__init__(
+ BackupRunnerFactory.instance())
+
+ @test
def backup_create_completed(self):
"""Check that the backup completes successfully."""
self.test_runner.run_backup_create_completed()
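
The regrouping above leans on the two distinct proboscis orderings used throughout these files: depends_on/depends_on_groups makes a test skip when its prerequisite failed, while runs_after/runs_after_groups only constrains execution order. In miniature:

    from proboscis import test

    @test(groups=['demo.create'])
    def create():
        pass

    # Skipped entirely if anything in 'demo.create' failed.
    @test(groups=['demo.check'], depends_on_groups=['demo.create'])
    def check():
        pass

    # Ordered after 'demo.check', but still runs if 'demo.check' failed.
    @test(groups=['demo.cleanup'], runs_after_groups=['demo.check'])
    def cleanup():
        pass
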
diff --git a/trove/tests/scenario/groups/cluster_actions_group.py b/trove/tests/scenario/groups/cluster_actions_group.py
deleted file mode 100644
index d69a6d5a..00000000
--- a/trove/tests/scenario/groups/cluster_actions_group.py
+++ /dev/null
@@ -1,162 +0,0 @@
-# Copyright 2015 Tesora Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from proboscis import test
-
-from trove.tests.scenario import groups
-from trove.tests.scenario.groups.test_group import TestGroup
-from trove.tests.scenario.runners import test_runners
-
-
-GROUP = "scenario.cluster_actions_group"
-
-
-class ClusterActionsRunnerFactory(test_runners.RunnerFactory):
-
- _runner_ns = 'cluster_actions_runners'
- _runner_cls = 'ClusterActionsRunner'
-
-
-@test(groups=[GROUP],
- runs_after_groups=[groups.MODULE_INST_DELETE,
- groups.CFGGRP_INST_DELETE,
- groups.INST_ACTIONS_RESIZE_WAIT,
- groups.DB_ACTION_INST_DELETE,
- groups.USER_ACTION_DELETE,
- groups.USER_ACTION_INST_DELETE,
- groups.ROOT_ACTION_INST_DELETE,
- groups.REPL_INST_DELETE_WAIT,
- groups.INST_DELETE_WAIT])
-class ClusterActionsGroup(TestGroup):
-
- def __init__(self):
- super(ClusterActionsGroup, self).__init__(
- ClusterActionsRunnerFactory.instance())
-
- @test
- def cluster_create(self):
- """Create a cluster."""
- self.test_runner.run_cluster_create()
-
- @test(depends_on=[cluster_create])
- def cluster_list(self):
- """List the clusters."""
- self.test_runner.run_cluster_list()
-
- @test(depends_on=[cluster_create])
- def cluster_show(self):
- """Show a cluster."""
- self.test_runner.run_cluster_show()
-
- @test(depends_on=[cluster_create])
- def add_initial_cluster_data(self):
- """Add data to cluster."""
- self.test_runner.run_add_initial_cluster_data()
-
- @test(depends_on=[add_initial_cluster_data])
- def verify_initial_cluster_data(self):
- """Verify the initial data exists on cluster."""
- self.test_runner.run_verify_initial_cluster_data()
-
- @test(depends_on=[cluster_create])
- def cluster_root_enable(self):
- """Root Enable."""
- self.test_runner.run_cluster_root_enable()
-
- @test(depends_on=[cluster_root_enable])
- def verify_cluster_root_enable(self):
- """Verify Root Enable."""
- self.test_runner.run_verify_cluster_root_enable()
-
- @test(depends_on=[cluster_create],
- runs_after=[verify_initial_cluster_data, verify_cluster_root_enable,
- cluster_list, cluster_show])
- def cluster_grow(self):
- """Grow cluster."""
- self.test_runner.run_cluster_grow()
-
- @test(depends_on=[cluster_grow])
- def verify_cluster_root_enable_after_grow(self):
- """Verify Root Enabled after grow."""
- self.test_runner.run_verify_cluster_root_enable()
-
- @test(depends_on=[cluster_grow, add_initial_cluster_data])
- def verify_initial_cluster_data_after_grow(self):
- """Verify the initial data still exists after cluster grow."""
- self.test_runner.run_verify_initial_cluster_data()
-
- @test(depends_on=[cluster_grow],
- runs_after=[verify_initial_cluster_data_after_grow])
- def add_extra_cluster_data_after_grow(self):
- """Add more data to cluster."""
- self.test_runner.run_add_extra_cluster_data()
-
- @test(depends_on=[add_extra_cluster_data_after_grow])
- def verify_extra_cluster_data_after_grow(self):
- """Verify the data added after cluster grow."""
- self.test_runner.run_verify_extra_cluster_data()
-
- @test(depends_on=[add_extra_cluster_data_after_grow],
- runs_after=[verify_extra_cluster_data_after_grow])
- def remove_extra_cluster_data_after_grow(self):
- """Remove the data added after cluster grow."""
- self.test_runner.run_remove_extra_cluster_data()
-
- @test(depends_on=[cluster_create],
- runs_after=[remove_extra_cluster_data_after_grow,
- verify_cluster_root_enable_after_grow])
- def cluster_shrink(self):
- """Shrink cluster."""
- self.test_runner.run_cluster_shrink()
-
- @test(depends_on=[cluster_shrink])
- def verify_cluster_root_enable_after_shrink(self):
- """Verify Root Enable after shrink."""
- self.test_runner.run_verify_cluster_root_enable()
-
- @test(depends_on=[cluster_shrink, add_initial_cluster_data])
- def verify_initial_cluster_data_after_shrink(self):
- """Verify the initial data still exists after cluster shrink."""
- self.test_runner.run_verify_initial_cluster_data()
-
- @test(depends_on=[cluster_shrink],
- runs_after=[verify_initial_cluster_data_after_shrink])
- def add_extra_cluster_data_after_shrink(self):
- """Add more data to cluster."""
- self.test_runner.run_add_extra_cluster_data()
-
- @test(depends_on=[add_extra_cluster_data_after_shrink])
- def verify_extra_cluster_data_after_shrink(self):
- """Verify the data added after cluster shrink."""
- self.test_runner.run_verify_extra_cluster_data()
-
- @test(depends_on=[add_extra_cluster_data_after_shrink],
- runs_after=[verify_extra_cluster_data_after_shrink])
- def remove_extra_cluster_data_after_shrink(self):
- """Remove the data added after cluster shrink."""
- self.test_runner.run_remove_extra_cluster_data()
-
- @test(depends_on=[add_initial_cluster_data],
- runs_after=[remove_extra_cluster_data_after_shrink])
- def remove_initial_cluster_data(self):
- """Remove the initial data from cluster."""
- self.test_runner.run_remove_initial_cluster_data()
-
- @test(depends_on=[cluster_create],
- runs_after=[remove_initial_cluster_data,
- verify_cluster_root_enable_after_shrink])
- def cluster_delete(self):
- """Delete an existing cluster."""
- self.test_runner.run_cluster_delete()
diff --git a/trove/tests/scenario/groups/cluster_group.py b/trove/tests/scenario/groups/cluster_group.py
new file mode 100644
index 00000000..cadd8565
--- /dev/null
+++ b/trove/tests/scenario/groups/cluster_group.py
@@ -0,0 +1,341 @@
+# Copyright 2015 Tesora Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from proboscis import test
+
+from trove.tests.scenario import groups
+from trove.tests.scenario.groups.test_group import TestGroup
+from trove.tests.scenario.runners import test_runners
+
+
+GROUP = "scenario.cluster_group"
+
+
+class ClusterRunnerFactory(test_runners.RunnerFactory):
+
+ _runner_ns = 'cluster_runners'
+ _runner_cls = 'ClusterRunner'
+
+
+@test(groups=[GROUP, groups.CLUSTER_CREATE],
+ runs_after_groups=[groups.MODULE_DELETE,
+ groups.CFGGRP_INST_DELETE,
+ groups.INST_ACTIONS_RESIZE_WAIT,
+ groups.DB_ACTION_INST_DELETE,
+ groups.USER_ACTION_DELETE,
+ groups.USER_ACTION_INST_DELETE,
+ groups.ROOT_ACTION_INST_DELETE,
+ groups.REPL_INST_DELETE_WAIT,
+ groups.INST_DELETE])
+class ClusterCreateGroup(TestGroup):
+
+ def __init__(self):
+ super(ClusterCreateGroup, self).__init__(
+ ClusterRunnerFactory.instance())
+
+ @test
+ def cluster_create(self):
+ """Create a cluster."""
+ self.test_runner.run_cluster_create()
+
+
+@test(groups=[GROUP, groups.CLUSTER_CREATE_WAIT],
+ depends_on_groups=[groups.CLUSTER_CREATE],
+ runs_after_groups=[groups.MODULE_INST_DELETE_WAIT,
+ groups.CFGGRP_INST_DELETE_WAIT,
+ groups.DB_ACTION_INST_DELETE_WAIT,
+ groups.USER_ACTION_INST_DELETE_WAIT,
+ groups.ROOT_ACTION_INST_DELETE_WAIT,
+ groups.INST_DELETE_WAIT])
+class ClusterCreateWaitGroup(TestGroup):
+
+ def __init__(self):
+ super(ClusterCreateWaitGroup, self).__init__(
+ ClusterRunnerFactory.instance())
+
+ @test
+ def cluster_create_wait(self):
+ """Wait for cluster create to complete."""
+ self.test_runner.run_cluster_create_wait()
+
+ @test(depends_on=[cluster_create_wait])
+ def add_initial_cluster_data(self):
+ """Add data to cluster."""
+ self.test_runner.run_add_initial_cluster_data()
+
+ @test(depends_on=[add_initial_cluster_data])
+ def verify_initial_cluster_data(self):
+ """Verify the initial data exists on cluster."""
+ self.test_runner.run_verify_initial_cluster_data()
+
+ @test(depends_on=[cluster_create_wait])
+ def cluster_list(self):
+ """List the clusters."""
+ self.test_runner.run_cluster_list()
+
+ @test(depends_on=[cluster_create_wait])
+ def cluster_show(self):
+ """Show a cluster."""
+ self.test_runner.run_cluster_show()
+
+
+@test(groups=[GROUP, groups.CLUSTER_ACTIONS,
+ groups.CLUSTER_ACTIONS_ROOT_ENABLE],
+ depends_on_groups=[groups.CLUSTER_CREATE_WAIT])
+class ClusterRootEnableGroup(TestGroup):
+
+ def __init__(self):
+ super(ClusterRootEnableGroup, self).__init__(
+ ClusterRunnerFactory.instance())
+
+ @test
+ def cluster_root_enable(self):
+ """Root Enable."""
+ self.test_runner.run_cluster_root_enable()
+
+ @test(depends_on=[cluster_root_enable])
+ def verify_cluster_root_enable(self):
+ """Verify Root Enable."""
+ self.test_runner.run_verify_cluster_root_enable()
+
+
+@test(groups=[GROUP, groups.CLUSTER_ACTIONS,
+ groups.CLUSTER_ACTIONS_GROW_SHRINK,
+ groups.CLUSTER_ACTIONS_GROW],
+ depends_on_groups=[groups.CLUSTER_CREATE_WAIT],
+ runs_after_groups=[groups.CLUSTER_ACTIONS_ROOT_ENABLE])
+class ClusterGrowGroup(TestGroup):
+
+ def __init__(self):
+ super(ClusterGrowGroup, self).__init__(
+ ClusterRunnerFactory.instance())
+
+ @test
+ def cluster_grow(self):
+ """Grow cluster."""
+ self.test_runner.run_cluster_grow()
+
+
+@test(groups=[GROUP, groups.CLUSTER_ACTIONS,
+ groups.CLUSTER_ACTIONS_GROW_SHRINK,
+ groups.CLUSTER_ACTIONS_GROW_WAIT],
+ depends_on_groups=[groups.CLUSTER_ACTIONS_GROW])
+class ClusterGrowWaitGroup(TestGroup):
+ def __init__(self):
+ super(ClusterGrowWaitGroup, self).__init__(
+ ClusterRunnerFactory.instance())
+
+ @test
+ def cluster_grow_wait(self):
+ """Wait for cluster grow to complete."""
+ self.test_runner.run_cluster_grow_wait()
+
+ @test(depends_on=[cluster_grow_wait])
+ def verify_initial_cluster_data_after_grow(self):
+ """Verify the initial data still exists after cluster grow."""
+ self.test_runner.run_verify_initial_cluster_data()
+
+ @test(depends_on=[cluster_grow_wait],
+ runs_after=[verify_initial_cluster_data_after_grow])
+ def add_grow_cluster_data(self):
+ """Add more data to cluster after grow."""
+ self.test_runner.run_add_grow_cluster_data()
+
+ @test(depends_on=[add_grow_cluster_data])
+ def verify_grow_cluster_data(self):
+ """Verify the data added after cluster grow."""
+ self.test_runner.run_verify_grow_cluster_data()
+
+ @test(depends_on=[add_grow_cluster_data],
+ runs_after=[verify_grow_cluster_data])
+ def remove_grow_cluster_data(self):
+ """Remove the data added after cluster grow."""
+ self.test_runner.run_remove_grow_cluster_data()
+
+
+@test(groups=[GROUP, groups.CLUSTER_ACTIONS,
+ groups.CLUSTER_ACTIONS_ROOT_ACTIONS,
+ groups.CLUSTER_ACTIONS_ROOT_GROW],
+ depends_on_groups=[groups.CLUSTER_ACTIONS_GROW_WAIT])
+class ClusterRootEnableGrowGroup(TestGroup):
+
+ def __init__(self):
+ super(ClusterRootEnableGrowGroup, self).__init__(
+ ClusterRunnerFactory.instance())
+
+ @test
+ def verify_cluster_root_enable_after_grow(self):
+ """Verify Root Enabled after grow."""
+ self.test_runner.run_verify_cluster_root_enable()
+
+
+@test(groups=[GROUP, groups.CLUSTER_UPGRADE],
+ depends_on_groups=[groups.CLUSTER_CREATE_WAIT],
+ runs_after_groups=[groups.CLUSTER_ACTIONS_GROW_WAIT,
+ groups.CLUSTER_ACTIONS_ROOT_GROW])
+class ClusterUpgradeGroup(TestGroup):
+
+ def __init__(self):
+ super(ClusterUpgradeGroup, self).__init__(
+ ClusterRunnerFactory.instance())
+
+ @test
+ def cluster_upgrade(self):
+ """Upgrade cluster."""
+ self.test_runner.run_cluster_upgrade()
+
+
+@test(groups=[GROUP, groups.CLUSTER_UPGRADE_WAIT],
+ depends_on_groups=[groups.CLUSTER_UPGRADE])
+class ClusterUpgradeWaitGroup(TestGroup):
+ def __init__(self):
+ super(ClusterUpgradeWaitGroup, self).__init__(
+ ClusterRunnerFactory.instance())
+
+ @test
+ def cluster_upgrade_wait(self):
+ """Wait for cluster upgrade to complete."""
+ self.test_runner.run_cluster_upgrade_wait()
+
+ @test(depends_on=[cluster_upgrade_wait])
+ def verify_initial_cluster_data_after_upgrade(self):
+ """Verify the initial data still exists after cluster upgrade."""
+ self.test_runner.run_verify_initial_cluster_data()
+
+ @test(depends_on=[cluster_upgrade_wait],
+ runs_after=[verify_initial_cluster_data_after_upgrade])
+ def add_upgrade_cluster_data_after_upgrade(self):
+ """Add more data to cluster after upgrade."""
+ self.test_runner.run_add_upgrade_cluster_data()
+
+ @test(depends_on=[add_upgrade_cluster_data_after_upgrade])
+ def verify_upgrade_cluster_data_after_upgrade(self):
+ """Verify the data added after cluster upgrade."""
+ self.test_runner.run_verify_upgrade_cluster_data()
+
+ @test(depends_on=[add_upgrade_cluster_data_after_upgrade],
+ runs_after=[verify_upgrade_cluster_data_after_upgrade])
+ def remove_upgrade_cluster_data_after_upgrade(self):
+ """Remove the data added after cluster upgrade."""
+ self.test_runner.run_remove_upgrade_cluster_data()
+
+
+@test(groups=[GROUP, groups.CLUSTER_ACTIONS,
+ groups.CLUSTER_ACTIONS_GROW_SHRINK,
+ groups.CLUSTER_ACTIONS_SHRINK],
+ depends_on_groups=[groups.CLUSTER_ACTIONS_GROW_WAIT],
+ runs_after_groups=[groups.CLUSTER_UPGRADE_WAIT])
+class ClusterShrinkGroup(TestGroup):
+
+ def __init__(self):
+ super(ClusterShrinkGroup, self).__init__(
+ ClusterRunnerFactory.instance())
+
+ @test
+ def cluster_shrink(self):
+ """Shrink cluster."""
+ self.test_runner.run_cluster_shrink()
+
+
+@test(groups=[GROUP, groups.CLUSTER_ACTIONS,
+ groups.CLUSTER_ACTIONS_SHRINK_WAIT],
+ depends_on_groups=[groups.CLUSTER_ACTIONS_SHRINK])
+class ClusterShrinkWaitGroup(TestGroup):
+ def __init__(self):
+ super(ClusterShrinkWaitGroup, self).__init__(
+ ClusterRunnerFactory.instance())
+
+ @test
+ def cluster_shrink_wait(self):
+ """Wait for the cluster shrink to complete."""
+ self.test_runner.run_cluster_shrink_wait()
+
+ @test(depends_on=[cluster_shrink_wait])
+ def verify_initial_cluster_data_after_shrink(self):
+ """Verify the initial data still exists after cluster shrink."""
+ self.test_runner.run_verify_initial_cluster_data()
+
+ @test(runs_after=[verify_initial_cluster_data_after_shrink])
+ def add_shrink_cluster_data(self):
+ """Add more data to cluster after shrink."""
+ self.test_runner.run_add_shrink_cluster_data()
+
+ @test(depends_on=[add_shrink_cluster_data])
+ def verify_shrink_cluster_data(self):
+ """Verify the data added after cluster shrink."""
+ self.test_runner.run_verify_shrink_cluster_data()
+
+ @test(depends_on=[add_shrink_cluster_data],
+ runs_after=[verify_shrink_cluster_data])
+ def remove_shrink_cluster_data(self):
+ """Remove the data added after cluster shrink."""
+ self.test_runner.run_remove_shrink_cluster_data()
+
+
+@test(groups=[GROUP, groups.CLUSTER_ACTIONS,
+ groups.CLUSTER_ACTIONS_ROOT_ACTIONS,
+ groups.CLUSTER_ACTIONS_ROOT_SHRINK],
+ depends_on_groups=[groups.CLUSTER_ACTIONS_SHRINK_WAIT])
+class ClusterRootEnableShrinkGroup(TestGroup):
+
+ def __init__(self):
+ super(ClusterRootEnableShrinkGroup, self).__init__(
+ ClusterRunnerFactory.instance())
+
+ @test
+ def verify_cluster_root_enable_after_shrink(self):
+ """Verify Root Enable after shrink."""
+ self.test_runner.run_verify_cluster_root_enable()
+
+
+@test(groups=[GROUP, groups.CLUSTER_ACTIONS,
+ groups.CLUSTER_DELETE],
+ depends_on_groups=[groups.CLUSTER_CREATE_WAIT],
+ runs_after_groups=[groups.CLUSTER_ACTIONS_ROOT_ENABLE,
+ groups.CLUSTER_ACTIONS_ROOT_GROW,
+ groups.CLUSTER_ACTIONS_ROOT_SHRINK,
+ groups.CLUSTER_ACTIONS_GROW_WAIT,
+ groups.CLUSTER_ACTIONS_SHRINK_WAIT,
+ groups.CLUSTER_UPGRADE_WAIT])
+class ClusterDeleteGroup(TestGroup):
+
+ def __init__(self):
+ super(ClusterDeleteGroup, self).__init__(
+ ClusterRunnerFactory.instance())
+
+ @test
+ def remove_initial_cluster_data(self):
+ """Remove the initial data from cluster."""
+ self.test_runner.run_remove_initial_cluster_data()
+
+ @test(runs_after=[remove_initial_cluster_data])
+ def cluster_delete(self):
+ """Delete an existing cluster."""
+ self.test_runner.run_cluster_delete()
+
+
+@test(groups=[GROUP, groups.CLUSTER_ACTIONS,
+ groups.CLUSTER_DELETE_WAIT],
+ depends_on_groups=[groups.CLUSTER_DELETE])
+class ClusterDeleteWaitGroup(TestGroup):
+
+ def __init__(self):
+ super(ClusterDeleteWaitGroup, self).__init__(
+ ClusterRunnerFactory.instance())
+
+ @test
+ def cluster_delete_wait(self):
+ """Wait for the existing cluster to be gone."""
+ self.test_runner.run_cluster_delete_wait()
diff --git a/trove/tests/scenario/groups/module_group.py b/trove/tests/scenario/groups/module_group.py
index d3b7b158..49fc3eab 100644
--- a/trove/tests/scenario/groups/module_group.py
+++ b/trove/tests/scenario/groups/module_group.py
@@ -64,6 +64,21 @@ class ModuleCreateGroup(TestGroup):
self.test_runner.run_module_create_non_admin_hidden()
@test
+ def module_create_non_admin_priority(self):
+ """Ensure create priority module for non-admin fails."""
+ self.test_runner.run_module_create_non_admin_priority()
+
+ @test
+ def module_create_non_admin_no_full_access(self):
+ """Ensure create no full access module for non-admin fails."""
+ self.test_runner.run_module_create_non_admin_no_full_access()
+
+ @test
+ def module_create_full_access_with_admin_opt(self):
+ """Ensure create full access module with admin opts fails."""
+ self.test_runner.run_module_create_full_access_with_admin_opt()
+
+ @test
def module_create_bad_datastore(self):
"""Ensure create module with invalid datastore fails."""
self.test_runner.run_module_create_bad_datastore()
@@ -154,12 +169,24 @@ class ModuleCreateGroup(TestGroup):
@test(depends_on=[module_create, module_create_bin, module_create_bin2],
runs_after=[module_create_admin_live_update])
+ def module_create_admin_priority_apply(self):
+ """Check that create module works with priority-apply option."""
+ self.test_runner.run_module_create_admin_priority_apply()
+
+ @test(depends_on=[module_create, module_create_bin, module_create_bin2],
+ runs_after=[module_create_admin_priority_apply])
def module_create_datastore(self):
"""Check that create module with datastore works."""
self.test_runner.run_module_create_datastore()
@test(depends_on=[module_create, module_create_bin, module_create_bin2],
runs_after=[module_create_datastore])
+ def module_create_different_datastore(self):
+ """Check that create module with different datastore works."""
+ self.test_runner.run_module_create_different_datastore()
+
+ @test(depends_on=[module_create, module_create_bin, module_create_bin2],
+ runs_after=[module_create_different_datastore])
def module_create_ds_version(self):
"""Check that create module with ds version works."""
self.test_runner.run_module_create_ds_version()
@@ -176,8 +203,20 @@ class ModuleCreateGroup(TestGroup):
"""Check that create with same name on different tenant works."""
self.test_runner.run_module_create_different_tenant()
- @test(depends_on=[module_create_all_tenant],
+ @test(depends_on=[module_create, module_create_bin, module_create_bin2],
runs_after=[module_create_different_tenant])
+ def module_create_full_access(self):
+ """Check that create by admin with full access works."""
+ self.test_runner.run_module_create_full_access()
+
+ @test(depends_on=[module_create_all_tenant],
+ runs_after=[module_create_full_access])
+ def module_full_access_toggle(self):
+ """Check that toggling full access works."""
+ self.test_runner.run_module_full_access_toggle()
+
+ @test(depends_on=[module_create_all_tenant],
+ runs_after=[module_full_access_toggle])
def module_list_again(self):
"""Check that list modules skips invisible modules."""
self.test_runner.run_module_list_again()
@@ -236,60 +275,66 @@ class ModuleCreateGroup(TestGroup):
@test(depends_on=[module_update],
runs_after=[module_update_invisible_toggle])
+ def module_update_priority_toggle(self):
+ """Check that update module works for priority toggle."""
+ self.test_runner.run_module_update_priority_toggle()
+
+ @test(depends_on=[module_update],
+ runs_after=[module_update_priority_toggle])
def module_update_unauth(self):
"""Ensure update module for unauth user fails."""
self.test_runner.run_module_update_unauth()
@test(depends_on=[module_update],
- runs_after=[module_update_invisible_toggle])
+ runs_after=[module_update_priority_toggle])
def module_update_non_admin_auto(self):
"""Ensure update module to auto_apply for non-admin fails."""
self.test_runner.run_module_update_non_admin_auto()
@test(depends_on=[module_update],
- runs_after=[module_update_invisible_toggle])
+ runs_after=[module_update_priority_toggle])
def module_update_non_admin_auto_off(self):
"""Ensure update module to auto_apply off for non-admin fails."""
self.test_runner.run_module_update_non_admin_auto_off()
@test(depends_on=[module_update],
- runs_after=[module_update_invisible_toggle])
+ runs_after=[module_update_priority_toggle])
def module_update_non_admin_auto_any(self):
"""Ensure any update module to auto_apply for non-admin fails."""
self.test_runner.run_module_update_non_admin_auto_any()
@test(depends_on=[module_update],
- runs_after=[module_update_invisible_toggle])
+ runs_after=[module_update_priority_toggle])
def module_update_non_admin_all_tenant(self):
"""Ensure update module to all tenant for non-admin fails."""
self.test_runner.run_module_update_non_admin_all_tenant()
@test(depends_on=[module_update],
- runs_after=[module_update_invisible_toggle])
+ runs_after=[module_update_priority_toggle])
def module_update_non_admin_all_tenant_off(self):
"""Ensure update module to all tenant off for non-admin fails."""
self.test_runner.run_module_update_non_admin_all_tenant_off()
@test(depends_on=[module_update],
- runs_after=[module_update_invisible_toggle])
+ runs_after=[module_update_priority_toggle])
def module_update_non_admin_all_tenant_any(self):
"""Ensure any update module to all tenant for non-admin fails."""
self.test_runner.run_module_update_non_admin_all_tenant_any()
@test(depends_on=[module_update],
- runs_after=[module_update_invisible_toggle])
+ runs_after=[module_update_priority_toggle])
def module_update_non_admin_invisible(self):
"""Ensure update module to invisible for non-admin fails."""
self.test_runner.run_module_update_non_admin_invisible()
@test(depends_on=[module_update],
- runs_after=[module_update_invisible_toggle])
+ runs_after=[module_update_priority_toggle])
def module_update_non_admin_invisible_off(self):
"""Ensure update module to invisible off for non-admin fails."""
self.test_runner.run_module_update_non_admin_invisible_off()
@test(depends_on=[module_update],
- runs_after=[module_update_invisible_toggle])
+ runs_after=[module_update_priority_toggle])
def module_update_non_admin_invisible_any(self):
"""Ensure any update module to invisible for non-admin fails."""
self.test_runner.run_module_update_non_admin_invisible_any()
@@ -325,6 +370,11 @@ class ModuleInstCreateGroup(TestGroup):
"""Check that module-apply works."""
self.test_runner.run_module_apply()
+ @test(runs_after=[module_query_empty])
+ def module_apply_wrong_module(self):
+ """Ensure that module-apply for wrong module fails."""
+ self.test_runner.run_module_apply_wrong_module()
+
@test(depends_on=[module_apply])
def module_list_instance_after_apply(self):
"""Check that the instance has one module associated."""
@@ -356,6 +406,11 @@ class ModuleInstCreateGroup(TestGroup):
"""Check that creating an instance with modules works."""
self.test_runner.run_create_inst_with_mods()
+ @test(runs_after=[module_query_empty])
+ def create_inst_with_wrong_module(self):
+ """Ensure that creating an instance with a wrong datastore module fails."""
+ self.test_runner.run_create_inst_with_wrong_module()
+
@test(depends_on=[module_apply])
def module_delete_applied(self):
"""Ensure that deleting an applied module fails."""
diff --git a/trove/tests/scenario/helpers/test_helper.py b/trove/tests/scenario/helpers/test_helper.py
index 0deeb9ca..dc4cfcd7 100644
--- a/trove/tests/scenario/helpers/test_helper.py
+++ b/trove/tests/scenario/helpers/test_helper.py
@@ -44,11 +44,13 @@ class DataType(Enum):
tiny2 = 4
# a third tiny dataset (also for replication propagation)
tiny3 = 5
+ # a fourth tiny dataset (for cluster propagation)
+ tiny4 = 6
# small amount of data (this can be added to each instance
# after creation, for example).
- small = 6
+ small = 7
# large data, enough to make creating a backup take 20s or more.
- large = 7
+ large = 8
class TestHelper(object):
@@ -130,6 +132,9 @@ class TestHelper(object):
DataType.tiny3.name: {
self.DATA_START: 3000,
self.DATA_SIZE: 100},
+ DataType.tiny4.name: {
+ self.DATA_START: 4000,
+ self.DATA_SIZE: 100},
DataType.small.name: {
self.DATA_START: 10000,
self.DATA_SIZE: 1000},
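The new tiny4 entry slots into the offsets table so that every dataset owns a disjoint, contiguous id range. A minimal standalone sketch of why that matters (hypothetical names, not the helper's real attributes), under the assumption that each dataset occupies the keys [start, start + size):

DATASETS = {
    'tiny3': {'start': 3000, 'size': 100},
    'tiny4': {'start': 4000, 'size': 100},   # new: cluster propagation
    'small': {'start': 10000, 'size': 1000},
}

def dataset_ids(name):
    d = DATASETS[name]
    return range(d['start'], d['start'] + d['size'])

# tiny4 never overlaps the other datasets, so it can be added, verified
# and removed independently during cluster grow/shrink/upgrade checks.
assert set(dataset_ids('tiny4')).isdisjoint(dataset_ids('small'))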
@@ -216,54 +221,55 @@ class TestHelper(object):
Since this method may be called multiple times, the
'add_actual_data' function should be idempotent.
"""
- self._perform_data_action(self.FN_ADD, data_type.name, host,
- *args, **kwargs)
+ self._perform_data_action(self.FN_ADD, data_type.name,
+ host, *args, **kwargs)
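The docstring's idempotency requirement means a second add_data call for the same data type must not duplicate rows. A sketch of one way a datastore helper might satisfy that, assuming a generic key/value client (the get_client, exists and put calls are hypothetical, not a real Trove helper API):

def add_actual_data(self, data_label, data_start, data_size, host,
                    *args, **kwargs):
    # Each key is written only if absent, so calling this twice
    # for the same data type is a no-op.
    client = self.get_client(host)           # assumed helper method
    for key in range(data_start, data_start + data_size):
        if not client.exists(str(key)):      # assumed client API
            client.put(str(key), 'val-%d' % key)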
def remove_data(self, data_type, host, *args, **kwargs):
"""Removes all data associated with 'data_type'. See
instructions for 'add_data' for implementation guidance.
"""
- self._perform_data_action(self.FN_REMOVE, data_type.name, host,
- *args, **kwargs)
+ self._perform_data_action(self.FN_REMOVE, data_type.name,
+ host, *args, **kwargs)
def verify_data(self, data_type, host, *args, **kwargs):
"""Verify that the data of type 'data_type' exists in the
datastore. This can be done by testing edge cases, and possibly
some random elements within the set. See
instructions for 'add_data' for implementation guidance.
- By default, the verification is attempted 10 times, sleeping for 3
+ """
+ self._perform_data_action(self.FN_VERIFY, data_type.name,
+ host, *args, **kwargs)
+
+ def _perform_data_action(self, fn_type, fn_name, host,
+ *args, **kwargs):
+ """By default, the action is attempted 10 times, sleeping for 3
seconds between each attempt. This can be controlled by the
retry_count and retry_sleep kwarg values.
"""
retry_count = kwargs.pop('retry_count', 10) or 0
retry_sleep = kwargs.pop('retry_sleep', 3) or 0
+
+ fns = self._data_fns[fn_type]
+ data_fn_name = self.data_fn_pattern % (fn_type, fn_name)
attempts = -1
while True:
attempts += 1
try:
- self._perform_data_action(self.FN_VERIFY, data_type.name, host,
- *args, **kwargs)
+ fns[data_fn_name](self, host, *args, **kwargs)
break
+ except SkipTest:
+ raise
except Exception as ex:
- self.report.log("Attempt %d to verify data type %s failed\n%s"
- % (attempts, data_type.name, ex))
+ self.report.log("Attempt %d to %s data type %s failed\n%s"
+ % (attempts, fn_type, fn_name, ex))
if attempts > retry_count:
- raise
+ raise RuntimeError("Error calling %s from class %s - %s" %
+ (data_fn_name, self.__class__.__name__,
+ ex))
self.report.log("Trying again (after %d second sleep)" %
retry_sleep)
sleep(retry_sleep)
- def _perform_data_action(self, fn_type, fn_name, host, *args, **kwargs):
- fns = self._data_fns[fn_type]
- data_fn_name = self.data_fn_pattern % (fn_type, fn_name)
- try:
- fns[data_fn_name](self, host, *args, **kwargs)
- except SkipTest:
- raise
- except Exception as ex:
- raise RuntimeError("Error calling %s from class %s - %s" %
- (data_fn_name, self.__class__.__name__, ex))
-
def _build_data_fns(self):
"""Build the base data functions specified by FN_TYPE_*
for each of the types defined in the DataType class. For example,
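The refactor above folds the retry loop into `_perform_data_action`, so add, remove and verify all share the same retry_count/retry_sleep behavior instead of only verify retrying. The shape of the pattern, reduced to a standalone sketch (the real helper also re-raises SkipTest immediately and logs each attempt):

import time

def with_retries(fn, *args, **kwargs):
    retry_count = kwargs.pop('retry_count', 10)
    retry_sleep = kwargs.pop('retry_sleep', 3)
    attempts = -1
    while True:
        attempts += 1
        try:
            return fn(*args, **kwargs)
        except Exception as ex:
            if attempts > retry_count:
                raise RuntimeError("Error calling %s - %s"
                                   % (fn.__name__, ex))
            time.sleep(retry_sleep)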
@@ -481,9 +487,24 @@ class TestHelper(object):
"""
return False
- ##############
+ ################
# Module related
- ##############
+ ################
def get_valid_module_type(self):
"""Return a valid module type."""
return "Ping"
+
+ #################
+ # Cluster related
+ #################
+ def get_cluster_types(self):
+ """Returns a list of cluster type lists to use when creating instances.
+ The list should be the same size as the number of cluster instances
+ that will be created. If not specified, no types are sent to
+ cluster-create. Cluster grow uses the first type in the list for the
+ first instance, and passes nothing for 'type' on the second instance.
+ An example for this method would be:
+ return [['data', 'other_type'], ['third_type']]
+ """
+ return None
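A datastore-specific helper would override get_cluster_types() to match its topology. A hypothetical sketch for a datastore with two node roles (the type names are illustrative only, per the docstring's example):

class MyDatastoreHelper(TestHelper):
    def get_cluster_types(self):
        # Three initial cluster instances: two 'data' nodes and one
        # 'arbiter'. Grow reuses types[0] (['data']) for its first
        # added node and sends nothing for the second.
        return [['data'], ['data'], ['arbiter']]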
diff --git a/trove/tests/scenario/runners/__init__.py b/trove/tests/scenario/runners/__init__.py
index ecddc075..9cd3b4e2 100644
--- a/trove/tests/scenario/runners/__init__.py
+++ b/trove/tests/scenario/runners/__init__.py
@@ -1,3 +1,5 @@
BUG_EJECT_VALID_MASTER = 1622014
BUG_WRONG_API_VALIDATION = 1498573
BUG_STOP_DB_IN_CLUSTER = 1645096
+BUG_UNAUTH_TEST_WRONG = 1653614
+BUG_FORCE_DELETE_FAILS = 1656422
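These module-level constants are Launchpad bug numbers; runners raise SkipKnownBug with one of them to skip a test blocked by an open bug (see instance_force_delete_runners.py below). A sketch of how such a skip could be wired up, assuming SkipKnownBug (defined in test_runners.py) is a SkipTest subclass that folds the bug number into its message:

from proboscis import SkipTest

class SkipKnownBug(SkipTest):
    """Skip a test that is blocked by a known Launchpad bug."""
    def __init__(self, *bugs):
        super(SkipKnownBug, self).__init__(
            "Skipped due to known bug(s): %s" %
            ', '.join(str(bug) for bug in bugs))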
diff --git a/trove/tests/scenario/runners/backup_runners.py b/trove/tests/scenario/runners/backup_runners.py
index 5e660aab..059c357f 100644
--- a/trove/tests/scenario/runners/backup_runners.py
+++ b/trove/tests/scenario/runners/backup_runners.py
@@ -313,6 +313,7 @@ class BackupRunner(TestRunner):
self.assert_client_code(client, expected_http_code)
self.assert_equal('BUILD', result.status,
'Unexpected instance status')
+ self.register_debug_inst_ids(result.id)
return result.id
def _restore_from_backup(self, client, backup_ref, suffix=''):
diff --git a/trove/tests/scenario/runners/cluster_actions_runners.py b/trove/tests/scenario/runners/cluster_runners.py
index 7af99972..de060487 100644
--- a/trove/tests/scenario/runners/cluster_actions_runners.py
+++ b/trove/tests/scenario/runners/cluster_runners.py
@@ -29,7 +29,7 @@ from trove.tests.util.check import TypeCheck
from troveclient.compat import exceptions
-class ClusterActionsRunner(TestRunner):
+class ClusterRunner(TestRunner):
USE_CLUSTER_ID_FLAG = 'TESTS_USE_CLUSTER_ID'
DO_NOT_DELETE_CLUSTER_FLAG = 'TESTS_DO_NOT_DELETE_CLUSTER'
@@ -37,7 +37,7 @@ class ClusterActionsRunner(TestRunner):
EXTRA_INSTANCE_NAME = "named_instance"
def __init__(self):
- super(ClusterActionsRunner, self).__init__()
+ super(ClusterRunner, self).__init__()
self.cluster_name = 'test_cluster'
self.cluster_id = 0
@@ -46,6 +46,9 @@ class ClusterActionsRunner(TestRunner):
self.srv_grp_id = None
self.current_root_creds = None
self.locality = 'affinity'
+ self.initial_instance_count = None
+ self.cluster_instances = None
+ self.cluster_removed_instances = None
@property
def is_using_existing_cluster(self):
@@ -60,7 +63,6 @@ class ClusterActionsRunner(TestRunner):
return 2
def run_cluster_create(self, num_nodes=None, expected_task_name='BUILDING',
- expected_instance_states=['BUILD', 'ACTIVE'],
expected_http_code=200):
self.cluster_count_before_create = len(
self.auth_client.clusters.list())
@@ -69,59 +71,72 @@ class ClusterActionsRunner(TestRunner):
instance_flavor = self.get_instance_flavor()
- instances_def = [
+ instance_defs = [
self.build_flavor(
flavor_id=self.get_flavor_href(instance_flavor),
- volume_size=self.instance_info.volume['size'])] * num_nodes
+ volume_size=self.instance_info.volume['size'])
+ for count in range(0, num_nodes)]
+ types = self.test_helper.get_cluster_types()
+ for index, instance_def in enumerate(instance_defs):
+ instance_def['nics'] = self.instance_info.nics
+ if types and index < len(types):
+ instance_def['type'] = types[index]
self.cluster_id = self.assert_cluster_create(
- self.cluster_name, instances_def, self.locality,
- expected_task_name, expected_instance_states, expected_http_code)
+ self.cluster_name, instance_defs, self.locality,
+ expected_task_name, expected_http_code)
def assert_cluster_create(
self, cluster_name, instances_def, locality, expected_task_name,
- expected_instance_states, expected_http_code):
+ expected_http_code):
+
self.report.log("Testing cluster create: %s" % cluster_name)
+ client = self.auth_client
cluster = self.get_existing_cluster()
if cluster:
self.report.log("Using an existing cluster: %s" % cluster.id)
- cluster_instances = self._get_cluster_instances(cluster.id)
- self.assert_all_instance_states(
- cluster_instances, expected_instance_states[-1:])
else:
- cluster = self.auth_client.clusters.create(
+ cluster = client.clusters.create(
cluster_name, self.instance_info.dbaas_datastore,
self.instance_info.dbaas_datastore_version,
instances=instances_def, locality=locality)
+ self.assert_client_code(client, expected_http_code)
self._assert_cluster_values(cluster, expected_task_name)
- # Don't give an expected task here or it will do a 'get' on
- # the cluster. We tested the cluster values above.
- self._assert_cluster_action(cluster.id, None,
- expected_http_code)
- cluster_instances = self._get_cluster_instances(cluster.id)
- self.assert_all_instance_states(
- cluster_instances, expected_instance_states)
- # Create the helper user/database on the first node.
- # The cluster should handle the replication itself.
+ for instance in cluster.instances:
+ self.register_debug_inst_ids(instance['id'])
+ return cluster.id
+
+ def run_cluster_create_wait(self,
+ expected_instance_states=['BUILD', 'ACTIVE']):
+
+ self.assert_cluster_create_wait(
+ self.cluster_id, expected_instance_states=expected_instance_states)
+
+ def assert_cluster_create_wait(
+ self, cluster_id, expected_instance_states):
+ client = self.auth_client
+ cluster_instances = self._get_cluster_instances(client, cluster_id)
+ self.assert_all_instance_states(
+ cluster_instances, expected_instance_states)
+ # Create the helper user/database on the first node.
+ # The cluster should handle the replication itself.
+ if not self.get_existing_cluster():
self.create_test_helper_on_instance(cluster_instances[0])
- # make sure the server_group was created
- self.cluster_inst_ids = [inst.id for inst in cluster_instances]
- for id in self.cluster_inst_ids:
- srv_grp_id = self.assert_server_group_exists(id)
- if self.srv_grp_id and self.srv_grp_id != srv_grp_id:
- self.fail("Found multiple server groups for cluster")
- self.srv_grp_id = srv_grp_id
-
- cluster_id = cluster.id
# Although all instances have already acquired the expected state,
# we still need to poll for the final cluster task, because
# it may take up to the periodic task interval until the task name
# gets updated in the Trove database.
- self._assert_cluster_states(cluster_id, ['NONE'])
+ self._assert_cluster_states(client, cluster_id, ['NONE'])
- return cluster_id
+ # make sure the server_group was created
+ self.cluster_inst_ids = [inst.id for inst in cluster_instances]
+ for id in self.cluster_inst_ids:
+ srv_grp_id = self.assert_server_group_exists(id)
+ if self.srv_grp_id and self.srv_grp_id != srv_grp_id:
+ self.fail("Found multiple server groups for cluster")
+ self.srv_grp_id = srv_grp_id
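assert_cluster_create_wait polls for the final cluster task even after all instances are ACTIVE, because the task name in the Trove database can lag by up to one periodic-task interval. The polling primitive, reduced to a self-contained sketch (trove's real poll_until lives in trove.common.utils and raises its own timeout exception; this only shows the shape):

import time

def poll_until(predicate, sleep_time=3, time_out=600):
    start = time.time()
    while not predicate():
        if time.time() - start > time_out:
            raise RuntimeError("Timed out after %s seconds" % time_out)
        time.sleep(sleep_time)

# Usage, as in _assert_cluster_states below:
# poll_until(lambda: client.clusters.get(cluster_id).task['name'] == 'NONE',
#            sleep_time=self.def_sleep_time, time_out=self.def_timeout)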
def get_existing_cluster(self):
if self.is_using_existing_cluster:
@@ -134,10 +149,10 @@ class ClusterActionsRunner(TestRunner):
self.cluster_count_before_create + 1,
expected_http_code)
- def assert_cluster_list(self, expected_count,
- expected_http_code):
- count = len(self.auth_client.clusters.list())
- self.assert_client_code(expected_http_code, client=self.auth_client)
+ def assert_cluster_list(self, expected_count, expected_http_code):
+ client = self.auth_client
+ count = len(client.clusters.list())
+ self.assert_client_code(client, expected_http_code)
self.assert_equal(expected_count, count, "Unexpected cluster count")
def run_cluster_show(self, expected_http_code=200,
@@ -147,19 +162,23 @@ class ClusterActionsRunner(TestRunner):
def assert_cluster_show(self, cluster_id, expected_task_name,
expected_http_code):
- self._assert_cluster_response(cluster_id, expected_task_name)
+ self._assert_cluster_response(self.auth_client,
+ cluster_id, expected_task_name)
def run_cluster_root_enable(self, expected_task_name=None,
expected_http_code=200):
root_credentials = self.test_helper.get_helper_credentials_root()
- self.current_root_creds = self.auth_client.root.create_cluster_root(
+ if not root_credentials or not root_credentials.get('name'):
+ raise SkipTest("No root credentials provided.")
+ client = self.auth_client
+ self.current_root_creds = client.root.create_cluster_root(
self.cluster_id, root_credentials['password'])
+ self.assert_client_code(client, expected_http_code)
+ self._assert_cluster_response(client, self.cluster_id, expected_task_name)
self.assert_equal(root_credentials['name'],
self.current_root_creds[0])
self.assert_equal(root_credentials['password'],
self.current_root_creds[1])
- self._assert_cluster_action(self.cluster_id, expected_task_name,
- expected_http_code)
def run_verify_cluster_root_enable(self):
if not self.current_root_creds:
@@ -181,9 +200,6 @@ class ClusterActionsRunner(TestRunner):
def run_add_initial_cluster_data(self, data_type=DataType.tiny):
self.assert_add_cluster_data(data_type, self.cluster_id)
- def run_add_extra_cluster_data(self, data_type=DataType.tiny2):
- self.assert_add_cluster_data(data_type, self.cluster_id)
-
def assert_add_cluster_data(self, data_type, cluster_id):
cluster = self.auth_client.clusters.get(cluster_id)
self.test_helper.add_data(data_type, self.extract_ipv4s(cluster.ip)[0])
@@ -191,9 +207,6 @@ class ClusterActionsRunner(TestRunner):
def run_verify_initial_cluster_data(self, data_type=DataType.tiny):
self.assert_verify_cluster_data(data_type, self.cluster_id)
- def run_verify_extra_cluster_data(self, data_type=DataType.tiny2):
- self.assert_verify_cluster_data(data_type, self.cluster_id)
-
def assert_verify_cluster_data(self, data_type, cluster_id):
cluster = self.auth_client.clusters.get(cluster_id)
for ipv4 in self.extract_ipv4s(cluster.ip):
@@ -203,9 +216,6 @@ class ClusterActionsRunner(TestRunner):
def run_remove_initial_cluster_data(self, data_type=DataType.tiny):
self.assert_remove_cluster_data(data_type, self.cluster_id)
- def run_remove_extra_cluster_data(self, data_type=DataType.tiny2):
- self.assert_remove_cluster_data(data_type, self.cluster_id)
-
def assert_remove_cluster_data(self, data_type, cluster_id):
cluster = self.auth_client.clusters.get(cluster_id)
self.test_helper.remove_data(
@@ -221,6 +231,10 @@ class ClusterActionsRunner(TestRunner):
self._build_instance_def(flavor_href,
self.instance_info.volume['size'],
self.EXTRA_INSTANCE_NAME)]
+ types = self.test_helper.get_cluster_types()
+ if types and types[0]:
+ added_instance_defs[0]['type'] = types[0]
+
self.assert_cluster_grow(
self.cluster_id, added_instance_defs, expected_task_name,
expected_http_code)
@@ -230,115 +244,200 @@ class ClusterActionsRunner(TestRunner):
flavor_id=flavor_id, volume_size=volume_size)
if name:
instance_def.update({'name': name})
+ instance_def.update({'nics': self.instance_info.nics})
return instance_def
def assert_cluster_grow(self, cluster_id, added_instance_defs,
expected_task_name, expected_http_code):
- cluster = self.auth_client.clusters.get(cluster_id)
+ client = self.auth_client
+ cluster = client.clusters.get(cluster_id)
initial_instance_count = len(cluster.instances)
- cluster = self.auth_client.clusters.grow(cluster_id,
- added_instance_defs)
- self._assert_cluster_action(cluster_id, expected_task_name,
- expected_http_code)
+ cluster = client.clusters.grow(cluster_id, added_instance_defs)
+ self.assert_client_code(client, expected_http_code)
+ self._assert_cluster_response(client, cluster_id, expected_task_name)
self.assert_equal(len(added_instance_defs),
len(cluster.instances) - initial_instance_count,
"Unexpected number of added nodes.")
- cluster_instances = self._get_cluster_instances(cluster_id)
+ def run_cluster_grow_wait(self):
+ self.assert_cluster_grow_wait(self.cluster_id)
+
+ def assert_cluster_grow_wait(self, cluster_id):
+ client = self.auth_client
+ cluster_instances = self._get_cluster_instances(client, cluster_id)
self.assert_all_instance_states(cluster_instances, ['ACTIVE'])
- self._assert_cluster_states(cluster_id, ['NONE'])
- self._assert_cluster_response(cluster_id, 'NONE')
+ self._assert_cluster_states(client, cluster_id, ['NONE'])
+ self._assert_cluster_response(client, cluster_id, 'NONE')
+
+ def run_add_grow_cluster_data(self, data_type=DataType.tiny2):
+ self.assert_add_cluster_data(data_type, self.cluster_id)
+
+ def run_verify_grow_cluster_data(self, data_type=DataType.tiny2):
+ self.assert_verify_cluster_data(data_type, self.cluster_id)
+
+ def run_remove_grow_cluster_data(self, data_type=DataType.tiny2):
+ self.assert_remove_cluster_data(data_type, self.cluster_id)
+
+ def run_cluster_upgrade(self, expected_task_name='UPGRADING_CLUSTER',
+ expected_http_code=202):
+ self.assert_cluster_upgrade(self.cluster_id,
+ expected_task_name, expected_http_code)
+
+ def assert_cluster_upgrade(self, cluster_id,
+ expected_task_name, expected_http_code):
+ client = self.auth_client
+ cluster = client.clusters.get(cluster_id)
+ self.initial_instance_count = len(cluster.instances)
+
+ client.clusters.upgrade(
+ cluster_id, self.instance_info.dbaas_datastore_version)
+ self.assert_client_code(client, expected_http_code)
+ self._assert_cluster_response(client, cluster_id, expected_task_name)
+
+ def run_cluster_upgrade_wait(self):
+ self.assert_cluster_upgrade_wait(
+ self.cluster_id, expected_last_instance_state='ACTIVE')
+
+ def assert_cluster_upgrade_wait(self, cluster_id,
+ expected_last_instance_state):
+ client = self.auth_client
+ self._assert_cluster_states(client, cluster_id, ['NONE'])
+ cluster_instances = self._get_cluster_instances(client, cluster_id)
+ self.assert_equal(
+ self.initial_instance_count,
+ len(cluster_instances),
+ "Unexpected number of instances after upgrade.")
+ self.assert_all_instance_states(cluster_instances,
+ [expected_last_instance_state])
+ self._assert_cluster_response(client, cluster_id, 'NONE')
+
+ def run_add_upgrade_cluster_data(self, data_type=DataType.tiny3):
+ self.assert_add_cluster_data(data_type, self.cluster_id)
+
+ def run_verify_upgrade_cluster_data(self, data_type=DataType.tiny3):
+ self.assert_verify_cluster_data(data_type, self.cluster_id)
+
+ def run_remove_upgrade_cluster_data(self, data_type=DataType.tiny3):
+ self.assert_remove_cluster_data(data_type, self.cluster_id)
- def run_cluster_shrink(
- self, expected_task_name=None, expected_http_code=202):
- self.assert_cluster_shrink(self.cluster_id, [self.EXTRA_INSTANCE_NAME],
+ def run_cluster_shrink(self, expected_task_name='SHRINKING_CLUSTER',
+ expected_http_code=202):
+ self.assert_cluster_shrink(self.auth_client,
+ self.cluster_id, [self.EXTRA_INSTANCE_NAME],
expected_task_name, expected_http_code)
- def assert_cluster_shrink(self, cluster_id, removed_instance_names,
+ def assert_cluster_shrink(self, client, cluster_id, removed_instance_names,
expected_task_name, expected_http_code):
- cluster = self.auth_client.clusters.get(cluster_id)
- initial_instance_count = len(cluster.instances)
+ cluster = client.clusters.get(cluster_id)
+ self.initial_instance_count = len(cluster.instances)
- removed_instances = self._find_cluster_instances_by_name(
- cluster, removed_instance_names)
+ self.cluster_removed_instances = (
+ self._find_cluster_instances_by_name(
+ cluster, removed_instance_names))
- cluster = self.auth_client.clusters.shrink(
- cluster_id, [{'id': instance['id']}
- for instance in removed_instances])
+ client.clusters.shrink(
+ cluster_id, [{'id': instance.id}
+ for instance in self.cluster_removed_instances])
- self._assert_cluster_action(cluster_id, expected_task_name,
- expected_http_code)
+ self.assert_client_code(client, expected_http_code)
+ self._assert_cluster_response(client, cluster_id, expected_task_name)
- self._assert_cluster_states(cluster_id, ['NONE'])
- cluster = self.auth_client.clusters.get(cluster_id)
+ def _find_cluster_instances_by_name(self, cluster, instance_names):
+ return [self.auth_client.instances.get(instance['id'])
+ for instance in cluster.instances
+ if instance['name'] in instance_names]
+
+ def run_cluster_shrink_wait(self):
+ self.assert_cluster_shrink_wait(
+ self.cluster_id, expected_last_instance_state='SHUTDOWN')
+
+ def assert_cluster_shrink_wait(self, cluster_id,
+ expected_last_instance_state):
+ client = self.auth_client
+ self._assert_cluster_states(client, cluster_id, ['NONE'])
+ cluster = client.clusters.get(cluster_id)
self.assert_equal(
- len(removed_instance_names),
- initial_instance_count - len(cluster.instances),
+ len(self.cluster_removed_instances),
+ self.initial_instance_count - len(cluster.instances),
"Unexpected number of removed nodes.")
- cluster_instances = self._get_cluster_instances(cluster_id)
+ cluster_instances = self._get_cluster_instances(client, cluster_id)
self.assert_all_instance_states(cluster_instances, ['ACTIVE'])
+ self.assert_all_gone(self.cluster_removed_instances,
+ expected_last_instance_state)
+ self._assert_cluster_response(client, cluster_id, 'NONE')
- self._assert_cluster_response(cluster_id, 'NONE')
+ def run_add_shrink_cluster_data(self, data_type=DataType.tiny4):
+ self.assert_add_cluster_data(data_type, self.cluster_id)
- def _find_cluster_instances_by_name(self, cluster, instance_names):
- return [instance for instance in cluster.instances
- if instance['name'] in instance_names]
+ def run_verify_shrink_cluster_data(self, data_type=DataType.tiny4):
+ self.assert_verify_cluster_data(data_type, self.cluster_id)
+
+ def run_remove_shrink_cluster_data(self, data_type=DataType.tiny4):
+ self.assert_remove_cluster_data(data_type, self.cluster_id)
def run_cluster_delete(
- self, expected_task_name='DELETING',
- expected_last_instance_state='SHUTDOWN', expected_http_code=202):
+ self, expected_task_name='DELETING', expected_http_code=202):
if self.has_do_not_delete_cluster:
self.report.log("TESTS_DO_NOT_DELETE_CLUSTER=True was "
"specified, skipping delete...")
raise SkipTest("TESTS_DO_NOT_DELETE_CLUSTER was specified.")
self.assert_cluster_delete(
- self.cluster_id, expected_task_name, expected_last_instance_state,
- expected_http_code)
+ self.cluster_id, expected_http_code)
- def assert_cluster_delete(
- self, cluster_id, expected_task_name, expected_last_instance_state,
- expected_http_code):
+ def assert_cluster_delete(self, cluster_id, expected_http_code):
self.report.log("Testing cluster delete: %s" % cluster_id)
- cluster_instances = self._get_cluster_instances(cluster_id)
+ client = self.auth_client
+ self.cluster_instances = self._get_cluster_instances(client,
+ cluster_id)
+
+ client.clusters.delete(cluster_id)
+ self.assert_client_code(client, expected_http_code)
+
+ def _get_cluster_instances(self, client, cluster_id):
+ cluster = client.clusters.get(cluster_id)
+ return [client.instances.get(instance['id'])
+ for instance in cluster.instances]
- self.auth_client.clusters.delete(cluster_id)
+ def run_cluster_delete_wait(
+ self, expected_task_name='DELETING',
+ expected_last_instance_state='SHUTDOWN'):
+ if self.has_do_not_delete_cluster:
+ self.report.log("TESTS_DO_NOT_DELETE_CLUSTER=True was "
+ "specified, skipping delete wait...")
+ raise SkipTest("TESTS_DO_NOT_DELETE_CLUSTER was specified.")
+
+ self.assert_cluster_delete_wait(
+ self.cluster_id, expected_task_name, expected_last_instance_state)
+
+ def assert_cluster_delete_wait(
+ self, cluster_id, expected_task_name,
+ expected_last_instance_state):
+ client = self.auth_client
# Since the server_group is removed right at the beginning of the
# cluster delete process we can't check for locality anymore.
- self._assert_cluster_action(cluster_id, expected_task_name,
- expected_http_code, check_locality=False)
+ self._assert_cluster_response(client, cluster_id, expected_task_name,
+ check_locality=False)
- self.assert_all_gone(cluster_instances, expected_last_instance_state)
- self._assert_cluster_gone(cluster_id)
+ self.assert_all_gone(self.cluster_instances,
+ expected_last_instance_state)
+ self._assert_cluster_gone(client, cluster_id)
# make sure the server group is gone too
self.assert_server_group_gone(self.srv_grp_id)
- def _get_cluster_instances(self, cluster_id):
- cluster = self.auth_client.clusters.get(cluster_id)
- return [self.auth_client.instances.get(instance['id'])
- for instance in cluster.instances]
-
- def _assert_cluster_action(
- self, cluster_id, expected_task_name, expected_http_code,
- check_locality=True):
- if expected_http_code is not None:
- self.assert_client_code(expected_http_code,
- client=self.auth_client)
- if expected_task_name:
- self._assert_cluster_response(cluster_id, expected_task_name,
- check_locality=check_locality)
-
- def _assert_cluster_states(self, cluster_id, expected_states,
+ def _assert_cluster_states(self, client, cluster_id, expected_states,
fast_fail_status=None):
for status in expected_states:
start_time = timer.time()
try:
- poll_until(lambda: self._has_task(
- cluster_id, status, fast_fail_status=fast_fail_status),
+ poll_until(
+ lambda: self._has_task(
+ client, cluster_id, status,
+ fast_fail_status=fast_fail_status),
sleep_time=self.def_sleep_time,
time_out=self.def_timeout)
self.report.log("Cluster has gone '%s' in %s." %
@@ -351,8 +450,8 @@ class ClusterActionsRunner(TestRunner):
return True
- def _has_task(self, cluster_id, task, fast_fail_status=None):
- cluster = self.auth_client.clusters.get(cluster_id)
+ def _has_task(self, client, cluster_id, task, fast_fail_status=None):
+ cluster = client.clusters.get(cluster_id)
task_name = cluster.task['name']
self.report.log("Waiting for cluster '%s' to become '%s': %s"
% (cluster_id, task, task_name))
@@ -361,10 +460,9 @@ class ClusterActionsRunner(TestRunner):
% (cluster_id, task))
return task_name == task
- def _assert_cluster_response(self, cluster_id, expected_task_name,
- expected_http_code=200, check_locality=True):
- cluster = self.auth_client.clusters.get(cluster_id)
- self.assert_client_code(expected_http_code, client=self.auth_client)
+ def _assert_cluster_response(self, client, cluster_id, expected_task_name,
+ check_locality=True):
+ cluster = client.clusters.get(cluster_id)
self._assert_cluster_values(cluster, expected_task_name,
check_locality=check_locality)
@@ -391,63 +489,63 @@ class ClusterActionsRunner(TestRunner):
self.assert_equal(self.locality, cluster.locality,
"Unexpected cluster locality")
- def _assert_cluster_gone(self, cluster_id):
+ def _assert_cluster_gone(self, client, cluster_id):
t0 = timer.time()
try:
# This will poll until the cluster goes away.
- self._assert_cluster_states(cluster_id, ['NONE'])
+ self._assert_cluster_states(client, cluster_id, ['NONE'])
self.fail(
"Cluster '%s' still existed after %s seconds."
% (cluster_id, self._time_since(t0)))
except exceptions.NotFound:
- self.assert_client_code(404, client=self.auth_client)
+ self.assert_client_code(client, 404)
-class CassandraClusterActionsRunner(ClusterActionsRunner):
+class CassandraClusterRunner(ClusterRunner):
def run_cluster_root_enable(self):
raise SkipTest("Operation is currently not supported.")
-class MariadbClusterActionsRunner(ClusterActionsRunner):
+class MariadbClusterRunner(ClusterRunner):
@property
def min_cluster_node_count(self):
return self.get_datastore_config_property('min_cluster_member_count')
- def run_cluster_root_enable(self):
- raise SkipTest("Operation is currently not supported.")
-
-class PxcClusterActionsRunner(ClusterActionsRunner):
+class MongodbClusterRunner(ClusterRunner):
@property
def min_cluster_node_count(self):
- return self.get_datastore_config_property('min_cluster_member_count')
+ return 3
def run_cluster_delete(self, expected_task_name='NONE',
expected_http_code=202):
raise SkipKnownBug(runners.BUG_STOP_DB_IN_CLUSTER)
-class VerticaClusterActionsRunner(ClusterActionsRunner):
+class PxcClusterRunner(ClusterRunner):
@property
def min_cluster_node_count(self):
- return self.get_datastore_config_property('cluster_member_count')
+ return self.get_datastore_config_property('min_cluster_member_count')
-class RedisClusterActionsRunner(ClusterActionsRunner):
+class RedisClusterRunner(ClusterRunner):
- def run_cluster_root_enable(self):
- raise SkipTest("Operation is currently not supported.")
+ # Since Redis runs all the shrink code in the API server, the call
+ # will not return until the task name has been set back to 'NONE',
+ # so we can't check it.
+ def run_cluster_shrink(self, expected_task_name='NONE',
+ expected_http_code=202):
+ return super(RedisClusterRunner, self).run_cluster_shrink(
+ expected_task_name=expected_task_name,
+ expected_http_code=expected_http_code)
-class MongodbClusterActionsRunner(ClusterActionsRunner):
-
- def run_cluster_root_enable(self):
- raise SkipTest("Operation is currently not supported.")
+class VerticaClusterRunner(ClusterRunner):
@property
def min_cluster_node_count(self):
- return 3
+ return self.get_datastore_config_property('cluster_member_count')
diff --git a/trove/tests/scenario/runners/configuration_runners.py b/trove/tests/scenario/runners/configuration_runners.py
index aeed459f..e1e17ae3 100644
--- a/trove/tests/scenario/runners/configuration_runners.py
+++ b/trove/tests/scenario/runners/configuration_runners.py
@@ -533,6 +533,7 @@ class ConfigurationRunner(TestRunner):
configuration=config_id)
self.assert_client_code(client, 200)
self.assert_equal("BUILD", result.status, 'Unexpected inst status')
+ self.register_debug_inst_ids(result.id)
return result.id
def run_wait_for_conf_instance(
diff --git a/trove/tests/scenario/runners/guest_log_runners.py b/trove/tests/scenario/runners/guest_log_runners.py
index 4e905b85..5eb96f24 100644
--- a/trove/tests/scenario/runners/guest_log_runners.py
+++ b/trove/tests/scenario/runners/guest_log_runners.py
@@ -22,6 +22,8 @@ from trove.guestagent.common import operating_system
from trove.guestagent import guest_log
from trove.tests.config import CONFIG
from trove.tests.scenario.helpers.test_helper import DataType
+from trove.tests.scenario import runners
+from trove.tests.scenario.runners.test_runners import SkipKnownBug
from trove.tests.scenario.runners.test_runners import TestRunner
@@ -71,6 +73,7 @@ class GuestLogRunner(TestRunner):
log_list = list(client.instances.log_list(self.instance_info.id))
log_names = list(ll.name for ll in log_list)
self.assert_list_elements_equal(expected_list, log_names)
+ self.register_debug_inst_ids(self.instance_info.id)
def run_test_admin_log_list(self):
self.assert_log_list(self.admin_client,
@@ -78,8 +81,9 @@ class GuestLogRunner(TestRunner):
def run_test_log_show(self):
log_pending = self._set_zero_or_none()
+ log_name = self._get_exposed_user_log_name()
self.assert_log_show(self.auth_client,
- self._get_exposed_user_log_name(),
+ log_name,
expected_published=0,
expected_pending=log_pending)
@@ -294,54 +298,51 @@ class GuestLogRunner(TestRunner):
def run_test_log_enable_sys(self,
expected_exception=exceptions.BadRequest,
expected_http_code=400):
+ log_name = self._get_unexposed_sys_log_name()
self.assert_log_enable_fails(
self.admin_client,
expected_exception, expected_http_code,
- self._get_unexposed_sys_log_name())
+ log_name)
def assert_log_enable_fails(self, client,
expected_exception, expected_http_code,
log_name):
- self.assert_raises(expected_exception, None,
+ self.assert_raises(expected_exception, expected_http_code,
client, client.instances.log_enable,
self.instance_info.id, log_name)
- # we may not be using the main client, so check explicitly here
- self.assert_client_code(client, expected_http_code)
def run_test_log_disable_sys(self,
expected_exception=exceptions.BadRequest,
expected_http_code=400):
+ log_name = self._get_unexposed_sys_log_name()
self.assert_log_disable_fails(
self.admin_client,
expected_exception, expected_http_code,
- self._get_unexposed_sys_log_name())
+ log_name)
def assert_log_disable_fails(self, client,
expected_exception, expected_http_code,
log_name, discard=None):
- self.assert_raises(expected_exception, None,
+ self.assert_raises(expected_exception, expected_http_code,
client, client.instances.log_disable,
self.instance_info.id, log_name,
discard=discard)
- # we may not be using the main client, so check explicitly here
- self.assert_client_code(client, expected_http_code)
def run_test_log_show_unauth_user(self,
expected_exception=exceptions.NotFound,
expected_http_code=404):
+ log_name = self._get_exposed_user_log_name()
self.assert_log_show_fails(
self.unauth_client,
expected_exception, expected_http_code,
- self._get_exposed_user_log_name())
+ log_name)
def assert_log_show_fails(self, client,
expected_exception, expected_http_code,
log_name):
- self.assert_raises(expected_exception, None,
+ self.assert_raises(expected_exception, expected_http_code,
client, client.instances.log_show,
self.instance_info.id, log_name)
- # we may not be using the main client, so check explicitly here
- self.assert_client_code(client, expected_http_code)
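This hunk (and the similar ones below) pass expected_http_code straight into assert_raises instead of calling assert_client_code separately afterwards, so the status check always runs against the same client that made the failing call. A sketch of what such a combined helper could look like, assuming the client wrapper records the status of its last response (last_http_code is a hypothetical attribute name):

def assert_raises(self, expected_exception, expected_http_code,
                  client, client_cmd, *args, **kwargs):
    try:
        client_cmd(*args, **kwargs)
        self.fail("Expected %s to be raised." % expected_exception)
    except expected_exception:
        if expected_http_code is not None:
            # hypothetical: wrapper tracks the last response status
            self.assert_equal(expected_http_code, client.last_http_code,
                              "Unexpected HTTP status")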
def run_test_log_list_unauth_user(self,
expected_exception=exceptions.NotFound,
@@ -351,73 +352,85 @@ class GuestLogRunner(TestRunner):
client, client.instances.log_list,
self.instance_info.id)
- def run_test_log_generator_unauth_user(self):
+ def run_test_log_generator_unauth_user(
+ self, expected_exception=exceptions.NotFound,
+ expected_http_code=404):
+ log_name = self._get_exposed_user_log_name()
self.assert_log_generator_unauth_user(
- self.unauth_client, self._get_exposed_user_log_name())
-
- def assert_log_generator_unauth_user(self, client, log_name, publish=None):
- try:
- client.instances.log_generator(
- self.instance_info.id, log_name, publish=publish)
- raise("Client allowed unauthorized access to log_generator")
- except Exception:
- pass
-
- def run_test_log_generator_publish_unauth_user(self):
+ self.unauth_client, log_name,
+ expected_exception, expected_http_code)
+
+ def assert_log_generator_unauth_user(self, client, log_name,
+ expected_exception,
+ expected_http_code,
+ publish=None):
+ raise SkipKnownBug(runners.BUG_UNAUTH_TEST_WRONG)
+ # self.assert_raises(expected_exception, expected_http_code,
+ # client, client.instances.log_generator,
+ # self.instance_info.id, log_name, publish=publish)
+
+ def run_test_log_generator_publish_unauth_user(
+ self, expected_exception=exceptions.NotFound,
+ expected_http_code=404):
+ log_name = self._get_exposed_user_log_name()
self.assert_log_generator_unauth_user(
- self.unauth_client, self._get_exposed_user_log_name(),
+ self.unauth_client, log_name,
+ expected_exception, expected_http_code,
publish=True)
def run_test_log_show_unexposed_user(
self, expected_exception=exceptions.BadRequest,
expected_http_code=400):
+ log_name = self._get_unexposed_sys_log_name()
self.assert_log_show_fails(
self.auth_client,
expected_exception, expected_http_code,
- self._get_unexposed_sys_log_name())
+ log_name)
def run_test_log_enable_unexposed_user(
self, expected_exception=exceptions.BadRequest,
expected_http_code=400):
+ log_name = self._get_unexposed_sys_log_name()
self.assert_log_enable_fails(
self.auth_client,
expected_exception, expected_http_code,
- self._get_unexposed_sys_log_name())
+ log_name)
def run_test_log_disable_unexposed_user(
self, expected_exception=exceptions.BadRequest,
expected_http_code=400):
+ log_name = self._get_unexposed_sys_log_name()
self.assert_log_disable_fails(
self.auth_client,
expected_exception, expected_http_code,
- self._get_unexposed_sys_log_name())
+ log_name)
def run_test_log_publish_unexposed_user(
self, expected_exception=exceptions.BadRequest,
expected_http_code=400):
+ log_name = self._get_unexposed_sys_log_name()
self.assert_log_publish_fails(
self.auth_client,
expected_exception, expected_http_code,
- self._get_unexposed_sys_log_name())
+ log_name)
def assert_log_publish_fails(self, client,
expected_exception, expected_http_code,
log_name,
disable=None, discard=None):
- self.assert_raises(expected_exception, None,
+ self.assert_raises(expected_exception, expected_http_code,
client, client.instances.log_publish,
self.instance_info.id, log_name,
disable=disable, discard=discard)
- # we may not be using the main client, so check explicitly here
- self.assert_client_code(client, expected_http_code)
def run_test_log_discard_unexposed_user(
self, expected_exception=exceptions.BadRequest,
expected_http_code=400):
+ log_name = self._get_unexposed_sys_log_name()
self.assert_log_discard_fails(
self.auth_client,
expected_exception, expected_http_code,
- self._get_unexposed_sys_log_name())
+ log_name)
def assert_log_discard_fails(self, client,
expected_exception, expected_http_code,
@@ -615,8 +628,9 @@ class GuestLogRunner(TestRunner):
expected_published=0, expected_pending=1)
def run_test_log_show_after_stop_details(self):
+ log_name = self._get_exposed_user_log_name()
self.stopped_log_details = self.auth_client.instances.log_show(
- self.instance_info.id, self._get_exposed_user_log_name())
+ self.instance_info.id, log_name)
self.assert_is_not_none(self.stopped_log_details)
def run_test_add_data_again_after_stop(self):
@@ -627,8 +641,9 @@ class GuestLogRunner(TestRunner):
self.test_helper.verify_data(DataType.micro3, self.get_instance_host())
def run_test_log_show_after_stop(self):
+ log_name = self._get_exposed_user_log_name()
self.assert_log_show(
- self.auth_client, self._get_exposed_user_log_name(),
+ self.auth_client, log_name,
expected_published=self.stopped_log_details.published,
expected_pending=self.stopped_log_details.pending)
@@ -638,9 +653,10 @@ class GuestLogRunner(TestRunner):
if self.test_helper.log_enable_requires_restart():
expected_status = guest_log.LogStatus.Restart_Required.name
+ log_name = self._get_exposed_user_log_name()
self.assert_log_enable(
self.auth_client,
- self._get_exposed_user_log_name(),
+ log_name,
expected_status=expected_status,
expected_published=0, expected_pending=expected_pending)
@@ -665,16 +681,18 @@ class GuestLogRunner(TestRunner):
expected_status = guest_log.LogStatus.Disabled.name
if self.test_helper.log_enable_requires_restart():
expected_status = guest_log.LogStatus.Restart_Required.name
+ log_name = self._get_exposed_user_log_name()
self.assert_log_disable(
self.auth_client,
- self._get_exposed_user_log_name(), discard=True,
+ log_name, discard=True,
expected_status=expected_status,
expected_published=0, expected_pending=1)
def run_test_log_show_sys(self):
+ log_name = self._get_unexposed_sys_log_name()
self.assert_log_show(
self.admin_client,
- self._get_unexposed_sys_log_name(),
+ log_name,
expected_type=guest_log.LogType.SYS.name,
expected_status=guest_log.LogStatus.Ready.name,
expected_published=0, expected_pending=1)
@@ -699,39 +717,45 @@ class GuestLogRunner(TestRunner):
expected_pending=1)
def run_test_log_generator_sys(self):
+ log_name = self._get_unexposed_sys_log_name()
self.assert_log_generator(
self.admin_client,
- self._get_unexposed_sys_log_name(),
+ log_name,
lines=4, expected_lines=4)
def run_test_log_generator_publish_sys(self):
+ log_name = self._get_unexposed_sys_log_name()
self.assert_log_generator(
self.admin_client,
- self._get_unexposed_sys_log_name(), publish=True,
+ log_name, publish=True,
lines=4, expected_lines=4)
def run_test_log_generator_swift_client_sys(self):
+ log_name = self._get_unexposed_sys_log_name()
self.assert_log_generator(
self.admin_client,
- self._get_unexposed_sys_log_name(), publish=True,
+ log_name, publish=True,
lines=4, expected_lines=4,
swift_client=self.swift_client)
def run_test_log_save_sys(self):
+ log_name = self._get_unexposed_sys_log_name()
self.assert_test_log_save(
self.admin_client,
- self._get_unexposed_sys_log_name())
+ log_name)
def run_test_log_save_publish_sys(self):
+ log_name = self._get_unexposed_sys_log_name()
self.assert_test_log_save(
self.admin_client,
- self._get_unexposed_sys_log_name(),
+ log_name,
publish=True)
def run_test_log_discard_sys(self):
+ log_name = self._get_unexposed_sys_log_name()
self.assert_log_discard(
self.admin_client,
- self._get_unexposed_sys_log_name(),
+ log_name,
expected_type=guest_log.LogType.SYS.name,
expected_status=guest_log.LogStatus.Ready.name,
expected_published=0, expected_pending=1)
@@ -740,7 +764,8 @@ class GuestLogRunner(TestRunner):
class CassandraGuestLogRunner(GuestLogRunner):
def run_test_log_show(self):
+ log_name = self._get_exposed_user_log_name()
self.assert_log_show(self.auth_client,
- self._get_exposed_user_log_name(),
+ log_name,
expected_published=0,
expected_pending=None)
diff --git a/trove/tests/scenario/runners/instance_create_runners.py b/trove/tests/scenario/runners/instance_create_runners.py
index 879197bc..eb4bc259 100644
--- a/trove/tests/scenario/runners/instance_create_runners.py
+++ b/trove/tests/scenario/runners/instance_create_runners.py
@@ -197,6 +197,7 @@ class InstanceCreateRunner(TestRunner):
locality=locality)
self.assert_client_code(client, expected_http_code)
self.assert_instance_action(instance.id, expected_states[0:1])
+ self.register_debug_inst_ids(instance.id)
instance_info.id = instance.id
@@ -256,9 +257,11 @@ class InstanceCreateRunner(TestRunner):
self.report.log("Test helpers are ready.")
def run_add_initialized_instance_data(self):
- self.init_inst_data = DataType.small
- self.init_inst_host = self.get_instance_host(self.init_inst_info.id)
- self.test_helper.add_data(self.init_inst_data, self.init_inst_host)
+ if self.init_inst_info:
+ self.init_inst_data = DataType.small
+ self.init_inst_host = self.get_instance_host(
+ self.init_inst_info.id)
+ self.test_helper.add_data(self.init_inst_data, self.init_inst_host)
def run_validate_initialized_instance(self):
if self.init_inst_info:
diff --git a/trove/tests/scenario/runners/instance_force_delete_runners.py b/trove/tests/scenario/runners/instance_force_delete_runners.py
index 70ebc87c..6b96bdda 100644
--- a/trove/tests/scenario/runners/instance_force_delete_runners.py
+++ b/trove/tests/scenario/runners/instance_force_delete_runners.py
@@ -15,6 +15,8 @@
from proboscis import SkipTest
+from trove.tests.scenario import runners
+from trove.tests.scenario.runners.test_runners import SkipKnownBug
from trove.tests.scenario.runners.test_runners import TestRunner
@@ -52,5 +54,6 @@ class InstanceForceDeleteRunner(TestRunner):
self.assert_client_code(client, expected_http_code)
def run_wait_for_force_delete(self):
- if self.build_inst_id:
- self.assert_all_gone([self.build_inst_id], ['SHUTDOWN'])
+ raise SkipKnownBug(runners.BUG_FORCE_DELETE_FAILS)
+ # if self.build_inst_id:
+ # self.assert_all_gone([self.build_inst_id], ['SHUTDOWN'])
diff --git a/trove/tests/scenario/runners/module_runners.py b/trove/tests/scenario/runners/module_runners.py
index 669a48a1..e3302ee0 100644
--- a/trove/tests/scenario/runners/module_runners.py
+++ b/trove/tests/scenario/runners/module_runners.py
@@ -42,6 +42,28 @@ class ModuleRunner(TestRunner):
self.MODULE_BINARY_CONTENTS = Crypto.Random.new().read(20)
self.MODULE_BINARY_CONTENTS2 = '\x00\xFF\xea\x9c\x11\xfeok\xb1\x8ax'
+ self.module_name_order = [
+ {'suffix': self.MODULE_BINARY_SUFFIX,
+ 'priority': True, 'order': 1},
+ {'suffix': self.MODULE_BINARY_SUFFIX2,
+ 'priority': True, 'order': 2},
+ {'suffix': '_hidden_all_tenant_auto_priority',
+ 'priority': True, 'order': 3},
+ {'suffix': '_hidden', 'priority': True, 'order': 4},
+ {'suffix': '_auto', 'priority': True, 'order': 5},
+ {'suffix': '_live', 'priority': True, 'order': 6},
+ {'suffix': '_priority', 'priority': True, 'order': 7},
+ {'suffix': '_ds', 'priority': False, 'order': 1},
+ {'suffix': '_ds_ver', 'priority': False, 'order': 2},
+ {'suffix': '_all_tenant_ds_ver', 'priority': False, 'order': 3},
+ {'suffix': '', 'priority': False, 'order': 4},
+ {'suffix': '_ds_diff', 'priority': False, 'order': 5},
+ {'suffix': '_diff_tenant', 'priority': False, 'order': 6},
+ {'suffix': '_full_access', 'priority': False, 'order': 7},
+ {'suffix': '_for_update', 'priority': False, 'order': 8},
+ {'suffix': '_updated', 'priority': False, 'order': 8},
+ ]
+
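The module_name_order list above encodes the order the server is expected to return applied modules in: priority_apply modules first, then by apply_order, then presumably by name. A standalone sketch of that sort key under those assumptions (MODULE_NAME stands in for the runner's real name prefix):

MODULE_NAME = 'test_module'  # illustrative stand-in

def module_sort_key(rec):
    # priority modules sort ahead of non-priority; ties break on
    # apply_order, then on the full module name.
    return (not rec['priority'], rec['order'], MODULE_NAME + rec['suffix'])

expected_names = [MODULE_NAME + rec['suffix']
                  for rec in sorted(module_name_order, key=module_sort_key)]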
self.mod_inst_id = None
self.temp_module = None
self._module_type = None
@@ -82,12 +104,19 @@ class ModuleRunner(TestRunner):
def update_test_module(self):
return self._get_test_module(1)
- def build_module_args(self, extra=None):
- extra = extra or ''
- name = self.MODULE_NAME + extra
- desc = self.MODULE_DESC + extra.replace('_', ' ')
- cont = self.get_module_contents(name)
- return name, desc, cont
+ def build_module_args(self, name_order=None):
+ suffix = "_unknown"
+ priority = False
+ order = 5
+ if name_order is not None:
+ name_rec = self.module_name_order[name_order]
+ suffix = name_rec['suffix']
+ priority = name_rec['priority']
+ order = name_rec['order']
+ name = self.MODULE_NAME + suffix
+ description = self.MODULE_DESC + suffix.replace('_', ' ')
+ contents = self.get_module_contents(name)
+ return name, description, contents, priority, order
def get_module_contents(self, name=None):
message = self.get_module_message(name=name)
@@ -102,7 +131,8 @@ class ModuleRunner(TestRunner):
return not mod.visible and mod.tenant_id and not mod.auto_apply
return self._find_module(_match, "Could not find invisible module")
- def _find_module(self, match_fn, not_found_message, find_all=False):
+ def _find_module(self, match_fn, not_found_message, find_all=False,
+ fail_on_not_found=True):
found = [] if find_all else None
for test_module in self.test_modules:
if match_fn(test_module):
@@ -112,7 +142,10 @@ class ModuleRunner(TestRunner):
found = test_module
break
if not found:
- self.fail(not_found_message)
+ if fail_on_not_found:
+ self.fail(not_found_message)
+ else:
+ raise SkipTest(not_found_message)
return found
def _find_auto_apply_module(self):
@@ -125,6 +158,21 @@ class ModuleRunner(TestRunner):
return mod.tenant_id is None and mod.visible
return self._find_module(_match, "Could not find all tenant module")
+ def _find_priority_apply_module(self):
+ def _match(mod):
+ return mod.priority_apply and mod.tenant_id and mod.visible
+ return self._find_module(_match,
+ "Could not find priority-apply module")
+
+ def _find_diff_datastore_module(self):
+ def _match(mod):
+ return (mod.datastore and
+ mod.datastore != models.Modules.MATCH_ALL_NAME and
+ mod.datastore != self.instance_info.dbaas_datastore)
+ return self._find_module(_match,
+ "Could not find different datastore module",
+ fail_on_not_found=False)
+
def _find_all_auto_apply_modules(self, visible=None):
def _match(mod):
return mod.auto_apply and (
@@ -132,6 +180,12 @@ class ModuleRunner(TestRunner):
return self._find_module(
_match, "Could not find all auto apply modules", find_all=True)
+ def _find_module_by_id(self, module_id):
+ def _match(mod):
+ return mod.id == module_id
+ return self._find_module(_match, "Could not find module with id %s" %
+ module_id)
+
# Tests start here
def run_module_delete_existing(self):
modules = self.admin_client.modules.list()
@@ -178,6 +232,36 @@ class ModuleRunner(TestRunner):
self.MODULE_NAME, self.module_type, self.MODULE_NEG_CONTENTS,
visible=False)
+ def run_module_create_non_admin_priority(
+ self, expected_exception=exceptions.Forbidden,
+ expected_http_code=403):
+ client = self.auth_client
+ self.assert_raises(
+ expected_exception, expected_http_code,
+ client, client.modules.create,
+ self.MODULE_NAME, self.module_type, self.MODULE_NEG_CONTENTS,
+ priority_apply=True)
+
+ def run_module_create_non_admin_no_full_access(
+ self, expected_exception=exceptions.Forbidden,
+ expected_http_code=403):
+ client = self.auth_client
+ self.assert_raises(
+ expected_exception, expected_http_code,
+ client, client.modules.create,
+ self.MODULE_NAME, self.module_type, self.MODULE_NEG_CONTENTS,
+ full_access=False)
+
+ def run_module_create_full_access_with_admin_opt(
+ self, expected_exception=exceptions.BadRequest,
+ expected_http_code=400):
+ client = self.admin_client
+ self.assert_raises(
+ expected_exception, expected_http_code,
+ client, client.modules.create,
+ self.MODULE_NAME, self.module_type, self.MODULE_NEG_CONTENTS,
+ full_access=True, auto_apply=True)
+
def run_module_create_bad_datastore(
self, expected_exception=exceptions.NotFound,
expected_http_code=404):
@@ -228,33 +312,45 @@ class ModuleRunner(TestRunner):
self.admin_client.modules.list())
self.module_other_count_prior_to_create = len(
self.unauth_client.modules.list())
- name, description, contents = self.build_module_args()
- self.assert_module_create(
- self.auth_client,
- name=name,
- module_type=self.module_type,
- contents=contents,
- description=description)
+ self.assert_module_create(self.auth_client, 10)
- def assert_module_create(self, client, name=None, module_type=None,
+ def assert_module_create(self, client, name_order,
+ name=None, module_type=None,
contents=None, description=None,
all_tenants=False,
datastore=None, datastore_version=None,
auto_apply=False,
- live_update=False, visible=True):
+ live_update=False, visible=True,
+ priority_apply=None,
+ apply_order=None,
+ full_access=None):
+ (temp_name, temp_description, temp_contents,
+ temp_priority, temp_order) = self.build_module_args(name_order)
+ name = name if name is not None else temp_name
+ description = (
+ description if description is not None else temp_description)
+ contents = contents if contents is not None else temp_contents
+ priority_apply = (
+ priority_apply if priority_apply is not None else temp_priority)
+ apply_order = apply_order if apply_order is not None else temp_order
+ module_type = module_type or self.module_type
result = client.modules.create(
name, module_type, contents,
description=description,
all_tenants=all_tenants,
datastore=datastore, datastore_version=datastore_version,
auto_apply=auto_apply,
- live_update=live_update, visible=visible)
+ live_update=live_update, visible=visible,
+ priority_apply=priority_apply,
+ apply_order=apply_order,
+ full_access=full_access)
username = client.real_client.client.username
if (('alt' in username and 'admin' not in username) or
('admin' in username and visible)):
self.module_create_count += 1
if datastore:
- self.module_ds_create_count += 1
+ if datastore == self.instance_info.dbaas_datastore:
+ self.module_ds_create_count += 1
else:
self.module_ds_all_create_count += 1
elif not visible:
@@ -286,7 +382,8 @@ class ModuleRunner(TestRunner):
expected_datastore=datastore,
expected_datastore_version=datastore_version,
expected_auto_apply=auto_apply,
- expected_contents=contents)
+ expected_contents=contents,
+ expected_is_admin=('admin' in username and not full_access))
def validate_module(self, module, validate_all=False,
expected_name=None,
@@ -304,7 +401,11 @@ class ModuleRunner(TestRunner):
expected_auto_apply=None,
expected_live_update=None,
expected_visible=None,
- expected_contents=None):
+ expected_contents=None,
+ expected_priority_apply=None,
+ expected_apply_order=None,
+ expected_is_admin=None,
+ expected_full_access=None):
if expected_all_tenants:
expected_tenant = expected_tenant or models.Modules.MATCH_ALL_NAME
@@ -339,6 +440,18 @@ class ModuleRunner(TestRunner):
if expected_auto_apply is not None:
self.assert_equal(expected_auto_apply, module.auto_apply,
'Unexpected auto_apply')
+ if expected_priority_apply is not None:
+ self.assert_equal(expected_priority_apply, module.priority_apply,
+ 'Unexpected priority_apply')
+ if expected_apply_order is not None:
+ self.assert_equal(expected_apply_order, module.apply_order,
+ 'Unexpected apply_order')
+ if expected_is_admin is not None:
+ self.assert_equal(expected_is_admin, module.is_admin,
+ 'Unexpected is_admin')
+ if expected_full_access is not None:
+ self.assert_equal(expected_full_access, not module.is_admin,
+ 'Unexpected full_access')
if validate_all:
if expected_datastore_id:
self.assert_equal(expected_datastore_id, module.datastore_id,
@@ -355,13 +468,7 @@ class ModuleRunner(TestRunner):
'Unexpected visible')
def run_module_create_for_update(self):
- name, description, contents = self.build_module_args('_for_update')
- self.assert_module_create(
- self.auth_client,
- name=name,
- module_type=self.module_type,
- contents=contents,
- description=description)
+ self.assert_module_create(self.auth_client, 14)
def run_module_create_dupe(
self, expected_exception=exceptions.BadRequest,
@@ -383,28 +490,16 @@ class ModuleRunner(TestRunner):
datastore_version=self.instance_info.dbaas_datastore_version)
def run_module_create_bin(self):
- name, description, contents = self.build_module_args(
- self.MODULE_BINARY_SUFFIX)
self.assert_module_create(
- self.admin_client,
- name=name,
- module_type=self.module_type,
+ self.admin_client, 0,
contents=self.MODULE_BINARY_CONTENTS,
- description=description,
- auto_apply=True,
- visible=False)
+ auto_apply=True, visible=False)
def run_module_create_bin2(self):
- name, description, contents = self.build_module_args(
- self.MODULE_BINARY_SUFFIX2)
self.assert_module_create(
- self.admin_client,
- name=name,
- module_type=self.module_type,
+ self.admin_client, 1,
contents=self.MODULE_BINARY_CONTENTS2,
- description=description,
- auto_apply=True,
- visible=False)
+ auto_apply=True, visible=False)
def run_module_show(self):
test_module = self.main_test_module
@@ -419,7 +514,10 @@ class ModuleRunner(TestRunner):
expected_datastore_version=test_module.datastore_version,
expected_auto_apply=test_module.auto_apply,
expected_live_update=False,
- expected_visible=True)
+ expected_visible=True,
+ expected_priority_apply=test_module.priority_apply,
+ expected_apply_order=test_module.apply_order,
+ expected_is_admin=test_module.is_admin)
def run_module_show_unauth_user(
self, expected_exception=exceptions.NotFound,
@@ -434,28 +532,29 @@ class ModuleRunner(TestRunner):
self.auth_client,
self.module_count_prior_to_create + self.module_create_count)
- def assert_module_list(self, client, expected_count, datastore=None,
- skip_validation=False):
+ def assert_module_list(self, client, expected_count, datastore=None):
if datastore:
module_list = client.modules.list(datastore=datastore)
else:
module_list = client.modules.list()
self.assert_equal(expected_count, len(module_list),
"Wrong number of modules for list")
- if not skip_validation:
- for module in module_list:
- if module.name != self.MODULE_NAME:
- continue
- test_module = self.main_test_module
+ for module in module_list:
+ # only validate the test modules
+ if module.name.startswith(self.MODULE_NAME):
+ test_module = self._find_module_by_id(module.id)
self.validate_module(
- module, validate_all=False,
+ module, validate_all=True,
expected_name=test_module.name,
expected_module_type=test_module.type,
expected_description=test_module.description,
expected_tenant=test_module.tenant,
expected_datastore=test_module.datastore,
expected_datastore_version=test_module.datastore_version,
- expected_auto_apply=test_module.auto_apply)
+ expected_auto_apply=test_module.auto_apply,
+ expected_priority_apply=test_module.priority_apply,
+ expected_apply_order=test_module.apply_order,
+ expected_is_admin=test_module.is_admin)
def run_module_list_unauth_user(self):
self.assert_module_list(
@@ -465,95 +564,103 @@ class ModuleRunner(TestRunner):
self.module_other_create_count))
def run_module_create_admin_all(self):
- name, description, contents = self.build_module_args(
- '_hidden_all_tenant_auto')
self.assert_module_create(
- self.admin_client,
- name=name, module_type=self.module_type, contents=contents,
- description=description,
+ self.admin_client, 2,
all_tenants=True,
visible=False,
auto_apply=True)
def run_module_create_admin_hidden(self):
- name, description, contents = self.build_module_args('_hidden')
self.assert_module_create(
- self.admin_client,
- name=name, module_type=self.module_type, contents=contents,
- description=description,
+ self.admin_client, 3,
visible=False)
def run_module_create_admin_auto(self):
- name, description, contents = self.build_module_args('_auto')
self.assert_module_create(
- self.admin_client,
- name=name, module_type=self.module_type, contents=contents,
- description=description,
+ self.admin_client, 4,
auto_apply=True)
def run_module_create_admin_live_update(self):
- name, description, contents = self.build_module_args('_live')
self.assert_module_create(
- self.admin_client,
- name=name, module_type=self.module_type, contents=contents,
- description=description,
+ self.admin_client, 5,
live_update=True)
+ def run_module_create_admin_priority_apply(self):
+ self.assert_module_create(
+ self.admin_client, 6)
+
def run_module_create_datastore(self):
- name, description, contents = self.build_module_args('_ds')
self.assert_module_create(
- self.admin_client,
- name=name, module_type=self.module_type, contents=contents,
- description=description,
+ self.admin_client, 7,
datastore=self.instance_info.dbaas_datastore)
+ def run_module_create_different_datastore(self):
+ diff_datastore = self._get_different_datastore()
+ if not diff_datastore:
+ raise SkipTest("Could not find a different datastore")
+ self.assert_module_create(
+ self.auth_client, 11,
+ datastore=diff_datastore)
+
+ def _get_different_datastore(self):
+ different_datastore = None
+ datastores = self.admin_client.datastores.list()
+ for datastore in datastores:
+ self.report.log("Found datastore: %s" % datastore.name)
+ if datastore.name != self.instance_info.dbaas_datastore:
+ different_datastore = datastore.name
+ break
+ return different_datastore
+
def run_module_create_ds_version(self):
- name, description, contents = self.build_module_args('_ds_ver')
self.assert_module_create(
- self.admin_client,
- name=name, module_type=self.module_type, contents=contents,
- description=description,
+ self.admin_client, 8,
datastore=self.instance_info.dbaas_datastore,
datastore_version=self.instance_info.dbaas_datastore_version)
def run_module_create_all_tenant(self):
- name, description, contents = self.build_module_args(
- '_all_tenant_ds_ver')
self.assert_module_create(
- self.admin_client,
- name=name, module_type=self.module_type, contents=contents,
- description=description,
+ self.admin_client, 9,
all_tenants=True,
datastore=self.instance_info.dbaas_datastore,
datastore_version=self.instance_info.dbaas_datastore_version)
def run_module_create_different_tenant(self):
- name, description, contents = self.build_module_args()
self.assert_module_create(
- self.unauth_client,
- name=name, module_type=self.module_type, contents=contents,
- description=description)
+ self.unauth_client, 12)
+
+ def run_module_create_full_access(self):
+ self.assert_module_create(
+ self.admin_client, 13,
+ full_access=True)
+
+ def run_module_full_access_toggle(self):
+ self.assert_module_update(
+ self.admin_client,
+ self.main_test_module.id,
+ full_access=False)
+ self.assert_module_update(
+ self.admin_client,
+ self.main_test_module.id,
+ full_access=True)
def run_module_list_again(self):
self.assert_module_list(
self.auth_client,
- self.module_count_prior_to_create + self.module_create_count,
- skip_validation=True)
+ self.module_count_prior_to_create + self.module_create_count)
def run_module_list_ds(self):
self.assert_module_list(
self.auth_client,
self.module_ds_count_prior_to_create + self.module_ds_create_count,
- datastore=self.instance_info.dbaas_datastore,
- skip_validation=True)
+ datastore=self.instance_info.dbaas_datastore)
def run_module_list_ds_all(self):
self.assert_module_list(
self.auth_client,
(self.module_ds_all_count_prior_to_create +
self.module_ds_all_create_count),
- datastore=models.Modules.MATCH_ALL_NAME,
- skip_validation=True)
+ datastore=models.Modules.MATCH_ALL_NAME)
def run_module_show_invisible(
self, expected_exception=exceptions.NotFound,
@@ -570,8 +677,7 @@ class ModuleRunner(TestRunner):
(self.module_admin_count_prior_to_create +
self.module_create_count +
self.module_admin_create_count +
- self.module_other_create_count),
- skip_validation=True)
+ self.module_other_create_count))
def run_module_update(self):
self.assert_module_update(
@@ -579,6 +685,25 @@ class ModuleRunner(TestRunner):
self.main_test_module.id,
description=self.MODULE_DESC + " modified")
+ def assert_module_update(self, client, module_id, **kwargs):
+ result = client.modules.update(module_id, **kwargs)
+ for index, test_module in enumerate(self.test_modules):
+ if test_module.id == module_id:
+ self.test_modules[index] = result
+ break
+ else:
+ self.fail("Could not find updated module in module list")
+
+ expected_args = {}
+ for key, value in kwargs.items():
+ new_key = 'expected_' + key
+ expected_args[new_key] = value
+ self.validate_module(result, **expected_args)
+
def run_module_update_same_contents(self):
old_md5 = self.main_test_module.md5
self.assert_module_update(
@@ -588,55 +713,65 @@ class ModuleRunner(TestRunner):
self.assert_equal(old_md5, self.main_test_module.md5,
"MD5 changed with same contents")
- def run_module_update_auto_toggle(self):
+ def run_module_update_auto_toggle(self,
+ expected_exception=exceptions.Forbidden,
+ expected_http_code=403):
module = self._find_auto_apply_module()
toggle_off_args = {'auto_apply': False}
toggle_on_args = {'auto_apply': True}
- self.assert_module_toggle(module, toggle_off_args, toggle_on_args)
+ self.assert_module_toggle(module, toggle_off_args, toggle_on_args,
+ expected_exception=expected_exception,
+ expected_http_code=expected_http_code)
- def assert_module_toggle(self, module, toggle_off_args, toggle_on_args):
+ def assert_module_toggle(self, module, toggle_off_args, toggle_on_args,
+ expected_exception, expected_http_code):
# First try to update the module based on the change
- # (this should toggle the state and allow non-admin access)
- self.assert_module_update(
- self.admin_client, module.id, **toggle_off_args)
- # Now we can update using the non-admin client
+ # (this should toggle the state but still not allow non-admin access)
+ client = self.admin_client
+ self.assert_module_update(client, module.id, **toggle_off_args)
+ # The non-admin client should fail to update
+ non_admin_client = self.auth_client
+ self.assert_raises(
+ expected_exception, expected_http_code,
+ non_admin_client, non_admin_client.modules.update, module.id,
+ description='Updated by non-admin')
+ # Make sure we can still update with the admin client
self.assert_module_update(
- self.auth_client, module.id, description='Updated by auth')
+ client, module.id, description='Updated by admin')
# Now set it back
self.assert_module_update(
- self.admin_client, module.id, description=module.description,
+ client, module.id, description=module.description,
**toggle_on_args)
- def run_module_update_all_tenant_toggle(self):
+ def run_module_update_all_tenant_toggle(
+ self, expected_exception=exceptions.Forbidden,
+ expected_http_code=403):
module = self._find_all_tenant_module()
toggle_off_args = {'all_tenants': False}
toggle_on_args = {'all_tenants': True}
- self.assert_module_toggle(module, toggle_off_args, toggle_on_args)
+ self.assert_module_toggle(module, toggle_off_args, toggle_on_args,
+ expected_exception=expected_exception,
+ expected_http_code=expected_http_code)
- def run_module_update_invisible_toggle(self):
+ def run_module_update_invisible_toggle(
+ self, expected_exception=exceptions.Forbidden,
+ expected_http_code=403):
module = self._find_invisible_module()
toggle_off_args = {'visible': True}
toggle_on_args = {'visible': False}
- self.assert_module_toggle(module, toggle_off_args, toggle_on_args)
-
- def assert_module_update(self, client, module_id, **kwargs):
- result = client.modules.update(module_id, **kwargs)
- found = False
- index = -1
- for test_module in self.test_modules:
- index += 1
- if test_module.id == module_id:
- found = True
- break
- if not found:
- self.fail("Could not find updated module in module list")
- self.test_modules[index] = result
+ self.assert_module_toggle(module, toggle_off_args, toggle_on_args,
+ expected_exception=expected_exception,
+ expected_http_code=expected_http_code)
- expected_args = {}
- for key, value in kwargs.items():
- new_key = 'expected_' + key
- expected_args[new_key] = value
- self.validate_module(result, **expected_args)
+ def run_module_update_priority_toggle(
+ self, expected_exception=exceptions.Forbidden,
+ expected_http_code=403):
+ module = self._find_priority_apply_module()
+ toggle_off_args = {'priority_apply': False}
+ toggle_on_args = {'priority_apply': True}
+ self.assert_module_toggle(module, toggle_off_args, toggle_on_args,
+ expected_exception=expected_exception,
+ expected_http_code=expected_http_code)
def run_module_update_unauth(
self, expected_exception=exceptions.NotFound,
@@ -775,32 +910,47 @@ class ModuleRunner(TestRunner):
self.assert_equal(expected_count, count,
"Wrong number of modules from query")
expected_results = expected_results or {}
+ name_index = len(self.module_name_order)
for modquery in modquery_list:
if modquery.name in expected_results:
+ self.report.log("Validating module '%s'" % modquery.name)
expected = expected_results[modquery.name]
- self.validate_module_info(
+ self.validate_module_apply_info(
modquery,
expected_status=expected['status'],
expected_message=expected['message'])
+ # make sure we're in the correct order
+ found = False
+ while name_index > 0:
+ name_index -= 1
+ name_order_rec = self.module_name_order[name_index]
+ order_name = self.MODULE_NAME + name_order_rec['suffix']
+ self.report.log("Next module order '%s'" % order_name)
+ if order_name == modquery.name:
+ self.report.log("Match found")
+ found = True
+ break
+ if name_index == 0 and not found:
+ self.fail("Module '%s' was not found in the correct order"
+ % modquery.name)
def run_module_apply(self):
self.assert_module_apply(self.auth_client, self.instance_info.id,
self.main_test_module)
def assert_module_apply(self, client, instance_id, module,
+ expected_is_admin=False,
expected_status=None, expected_message=None,
expected_contents=None,
expected_http_code=200):
module_apply_list = client.instances.module_apply(
instance_id, [module.id])
self.assert_client_code(client, expected_http_code)
- admin_only = (not module.visible or module.auto_apply or
- not module.tenant_id)
expected_status = expected_status or 'OK'
expected_message = (expected_message or
self.get_module_message(module.name))
for module_apply in module_apply_list:
- self.validate_module_info(
+ self.validate_module_apply_info(
module_apply,
expected_name=module.name,
expected_module_type=module.type,
@@ -808,22 +958,22 @@ class ModuleRunner(TestRunner):
expected_datastore_version=module.datastore_version,
expected_auto_apply=module.auto_apply,
expected_visible=module.visible,
- expected_admin_only=admin_only,
expected_contents=expected_contents,
expected_status=expected_status,
- expected_message=expected_message)
-
- def validate_module_info(self, module_apply,
- expected_name=None,
- expected_module_type=None,
- expected_datastore=None,
- expected_datastore_version=None,
- expected_auto_apply=None,
- expected_visible=None,
- expected_admin_only=None,
- expected_contents=None,
- expected_message=None,
- expected_status=None):
+ expected_message=expected_message,
+ expected_is_admin=expected_is_admin)
+
+ def validate_module_apply_info(self, module_apply,
+ expected_name=None,
+ expected_module_type=None,
+ expected_datastore=None,
+ expected_datastore_version=None,
+ expected_auto_apply=None,
+ expected_visible=None,
+ expected_contents=None,
+ expected_message=None,
+ expected_status=None,
+ expected_is_admin=None):
prefix = "Module: %s -" % expected_name
if expected_name:
@@ -845,9 +995,6 @@ class ModuleRunner(TestRunner):
if expected_visible is not None:
self.assert_equal(expected_visible, module_apply.visible,
'%s Unexpected visible' % prefix)
- if expected_admin_only is not None:
- self.assert_equal(expected_admin_only, module_apply.admin_only,
- '%s Unexpected admin_only' % prefix)
if expected_contents is not None:
self.assert_equal(expected_contents, module_apply.contents,
'%s Unexpected contents' % prefix)
@@ -859,6 +1006,20 @@ class ModuleRunner(TestRunner):
if expected_status is not None:
self.assert_equal(expected_status, module_apply.status,
'%s Unexpected status' % prefix)
+ if expected_is_admin is not None:
+ self.assert_equal(expected_is_admin, module_apply.is_admin,
+ '%s Unexpected is_admin' % prefix)
+
+ def run_module_apply_wrong_module(
+ self, expected_exception=exceptions.BadRequest,
+ expected_http_code=400):
+ module = self._find_diff_datastore_module()
+ self.report.log("Found 'wrong' module: %s" % module.name)
+ client = self.auth_client
+ self.assert_raises(
+ expected_exception, expected_http_code,
+ client, client.instances.module_apply,
+ self.instance_info.id, [module.id])
def run_module_list_instance_after_apply(self):
self.assert_module_list_instance(
@@ -873,7 +1034,8 @@ class ModuleRunner(TestRunner):
self.auth_client, self.instance_info.id, 2)
def run_module_update_after_remove(self):
- name, description, contents = self.build_module_args('_updated')
+ name, description, contents, priority, order = (
+ self.build_module_args(15))
self.assert_module_update(
self.auth_client,
self.update_test_module.id,
@@ -949,8 +1111,27 @@ class ModuleRunner(TestRunner):
modules=[module_id],
)
self.assert_client_code(client, expected_http_code)
+ self.register_debug_inst_ids(inst.id)
return inst.id
+ def run_create_inst_with_wrong_module(
+ self, expected_exception=exceptions.BadRequest,
+ expected_http_code=400):
+ module = self._find_diff_datastore_module()
+ self.report.log("Found 'wrong' module: %s" % module.name)
+
+ client = self.auth_client
+ self.assert_raises(
+ expected_exception, expected_http_code,
+ client, client.instances.create,
+ self.instance_info.name + '_wrong_ds',
+ self.instance_info.dbaas_flavor_href,
+ self.instance_info.volume,
+ datastore=self.instance_info.dbaas_datastore,
+ datastore_version=self.instance_info.dbaas_datastore_version,
+ nics=self.instance_info.nics,
+ modules=[module.id])
+
def run_module_delete_applied(
self, expected_exception=exceptions.Forbidden,
expected_http_code=403):
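Aside: the new priority_apply/apply_order assertions above check that modules come back in application order. A minimal sketch of the ordering rule these checks appear to assume (inferred from the tests, not confirmed by this patch; names are illustrative):

    def sort_modules_for_apply(modules):
        # Priority-apply modules first, then ascending apply_order, with
        # the module name as a final tiebreaker (assumed, not confirmed).
        return sorted(
            modules,
            key=lambda m: (not m.priority_apply, m.apply_order, m.name))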
diff --git a/trove/tests/scenario/runners/replication_runners.py b/trove/tests/scenario/runners/replication_runners.py
index 9f9bc76c..f798e1c0 100644
--- a/trove/tests/scenario/runners/replication_runners.py
+++ b/trove/tests/scenario/runners/replication_runners.py
@@ -72,6 +72,7 @@ class ReplicationRunner(TestRunner):
nics=self.instance_info.nics,
locality='anti-affinity').id
self.assert_client_code(client, expected_http_code)
+ self.register_debug_inst_ids(self.non_affinity_master_id)
def run_create_single_replica(self, expected_http_code=200):
self.master_backup_count = len(
@@ -91,6 +92,7 @@ class ReplicationRunner(TestRunner):
nics=self.instance_info.nics,
replica_count=replica_count)
self.assert_client_code(client, expected_http_code)
+ self.register_debug_inst_ids(replica.id)
return replica.id
def run_wait_for_single_replica(self, expected_states=['BUILD', 'ACTIVE']):
@@ -153,6 +155,7 @@ class ReplicationRunner(TestRunner):
replica_of=self.non_affinity_master_id,
replica_count=1).id
self.assert_client_code(client, expected_http_code)
+ self.register_debug_inst_ids(self.non_affinity_repl_id)
def run_create_multiple_replicas(self, expected_http_code=200):
self.replica_2_id = self.assert_replica_create(
diff --git a/trove/tests/scenario/runners/test_runners.py b/trove/tests/scenario/runners/test_runners.py
index 018ed418..bc9292d4 100644
--- a/trove/tests/scenario/runners/test_runners.py
+++ b/trove/tests/scenario/runners/test_runners.py
@@ -18,7 +18,9 @@ import inspect
import netaddr
import os
import proboscis
+import six
import time as timer
+import types
from oslo_config.cfg import NoSuchOptError
from proboscis import asserts
@@ -179,6 +181,105 @@ class InstanceTestInfo(object):
self.helper_database = None # Test helper database if exists.
+class LogOnFail(type):
+
+ """Class to log info on failure.
+ This will decorate all methods that start with 'run_' with a log wrapper
+ that will do a show and attempt to pull back the guest log on all
+ registered IDs.
+ Use by setting up as a metaclass and calling the following:
+ add_inst_ids(): Instance ID or list of IDs to report on
+ set_client(): Admin client object
+ set_report(): Report object
+ The TestRunner class shows how this can be done in register_debug_inst_ids.
+ """
+
+ _data = {}
+
+ def __new__(mcs, name, bases, attrs):
+ for attr_name, attr_value in attrs.items():
+ if (isinstance(attr_value, types.FunctionType) and
+ attr_name.startswith('run_')):
+ attrs[attr_name] = mcs.log(attr_value)
+ return super(LogOnFail, mcs).__new__(mcs, name, bases, attrs)
+
+ @classmethod
+ def get_inst_ids(mcs):
+ return set(mcs._data.get('inst_ids', []))
+
+ @classmethod
+ def add_inst_ids(mcs, inst_ids):
+ if not utils.is_collection(inst_ids):
+ inst_ids = [inst_ids]
+ debug_inst_ids = mcs.get_inst_ids()
+ debug_inst_ids |= set(inst_ids)
+ mcs._data['inst_ids'] = debug_inst_ids
+
+ @classmethod
+ def reset_inst_ids(mcs):
+ mcs._data['inst_ids'] = []
+
+ @classmethod
+ def set_client(mcs, client):
+ mcs._data['client'] = client
+
+ @classmethod
+ def get_client(mcs):
+ return mcs._data['client']
+
+ @classmethod
+ def set_report(mcs, report):
+ mcs._data['report'] = report
+
+ @classmethod
+ def get_report(mcs):
+ return mcs._data['report']
+
+ @classmethod
+ def log(mcs, fn):
+
+ def wrapper(*args, **kwargs):
+ inst_ids = mcs.get_inst_ids()
+ client = mcs.get_client()
+ report = mcs.get_report()
+ try:
+ return fn(*args, **kwargs)
+ except proboscis.SkipTest:
+ raise
+ except Exception as test_ex:
+ msg_prefix = "*** LogOnFail: "
+ if inst_ids:
+ report.log(msg_prefix + "Exception detected, "
+ "dumping info for IDs: %s." % inst_ids)
+ else:
+ report.log(msg_prefix + "Exception detected, "
+ "but no instance IDs are registered to log.")
+
+ for inst_id in inst_ids:
+ try:
+ client.instances.get(inst_id)
+ except Exception as ex:
+ report.log(msg_prefix + "Error in instance show "
+ "for %s:\n%s" % (inst_id, ex))
+ try:
+ log_gen = client.instances.log_generator(
+ inst_id, 'guest',
+ publish=True, lines=0, swift=None)
+ log_contents = "".join([chunk for chunk in log_gen()])
+ report.log(msg_prefix + "Guest log for %s:\n%s" %
+ (inst_id, log_contents))
+ except Exception as ex:
+ report.log(msg_prefix + "Error in guest log "
+ "retrieval for %s:\n%s" % (inst_id, ex))
+
+ # Only report on the first error that occurs
+ mcs.reset_inst_ids()
+ raise test_ex
+
+ return wrapper
+
+
+@six.add_metaclass(LogOnFail)
class TestRunner(object):
"""
@@ -246,6 +347,14 @@ class TestRunner(object):
self._test_helper = None
self._servers = {}
+ # Attempt to register the main instance. If it doesn't
+ # exist, this will still set the 'report' and 'client' objects
+ # correctly in LogOnFail
+ inst_ids = []
+ if hasattr(self.instance_info, 'id') and self.instance_info.id:
+ inst_ids = [self.instance_info.id]
+ self.register_debug_inst_ids(inst_ids)
+
@classmethod
def fail(cls, message):
asserts.fail(message)
@@ -372,6 +481,15 @@ class TestRunner(object):
def nova_client(self):
return create_nova_client(self.instance_info.user)
+ def register_debug_inst_ids(self, inst_ids):
+ """Method to 'register' an instance ID (or list of instance IDs)
+ for debug purposes on failure. Note that values are only appended
+ here, not overridden. The LogOnFail class will handle 'missing' IDs.
+ """
+ LogOnFail.add_inst_ids(inst_ids)
+ LogOnFail.set_client(self.admin_client)
+ LogOnFail.set_report(self.report)
+
def get_client_tenant(self, client):
tenant_name = client.real_client.client.tenant
service_url = client.real_client.client.service_url
diff --git a/trove/tests/tempest/tests/api/database/base.py b/trove/tests/tempest/tests/api/database/base.py
index 98fe7278..a0d58bdc 100644
--- a/trove/tests/tempest/tests/api/database/base.py
+++ b/trove/tests/tempest/tests/api/database/base.py
@@ -39,22 +39,31 @@ class BaseDatabaseTest(tempest.test.BaseTestCase):
@classmethod
def setup_clients(cls):
super(BaseDatabaseTest, cls).setup_clients()
+ default_params = config.service_client_config()
+
+ # NOTE: Tempest falls back to the compute API timeout values when no
+ # project-specific timeout values exist.
+ default_params_with_timeout_values = {
+ 'build_interval': CONF.compute.build_interval,
+ 'build_timeout': CONF.compute.build_timeout
+ }
+ default_params_with_timeout_values.update(default_params)
cls.database_flavors_client = flavors_client.DatabaseFlavorsClient(
cls.os.auth_provider,
CONF.database.catalog_type,
CONF.identity.region,
- **cls.os.default_params_with_timeout_values)
+ **default_params_with_timeout_values)
cls.os_flavors_client = cls.os.flavors_client
cls.database_limits_client = limits_client.DatabaseLimitsClient(
cls.os.auth_provider,
CONF.database.catalog_type,
CONF.identity.region,
- **cls.os.default_params_with_timeout_values)
+ **default_params_with_timeout_values)
cls.database_versions_client = versions_client.DatabaseVersionsClient(
cls.os.auth_provider,
CONF.database.catalog_type,
CONF.identity.region,
- **cls.os.default_params_with_timeout_values)
+ **default_params_with_timeout_values)
@classmethod
def resource_setup(cls):
diff --git a/trove/tests/unittests/backup/test_backup_models.py b/trove/tests/unittests/backup/test_backup_models.py
index da034079..315a508e 100644
--- a/trove/tests/unittests/backup/test_backup_models.py
+++ b/trove/tests/unittests/backup/test_backup_models.py
@@ -216,7 +216,8 @@ class BackupCreateTest(trove_testtools.TestCase):
self.context, self.instance_id,
BACKUP_NAME, BACKUP_DESC)
- def test_create_backup_creation_error(self):
+ @patch('trove.backup.models.LOG')
+ def test_create_backup_creation_error(self, mock_logging):
instance = MagicMock()
instance.cluster_id = None
with patch.object(instance_models.BuiltInstance, 'load',
diff --git a/trove/tests/unittests/backup/test_backupagent.py b/trove/tests/unittests/backup/test_backupagent.py
index e0644653..c692cb0a 100644
--- a/trove/tests/unittests/backup/test_backupagent.py
+++ b/trove/tests/unittests/backup/test_backupagent.py
@@ -230,7 +230,8 @@ class BackupAgentTest(trove_testtools.TestCase):
self.assertIsNotNone(inno_backup_ex.cmd)
str_innobackup_cmd = ('sudo innobackupex'
' --stream=xbstream'
- ' %(extra_opts)s'
+ ' %(extra_opts)s '
+ ' --user=os_admin --password=123'
' /var/lib/mysql/data 2>/tmp/innobackupex.log'
' | gzip |'
' openssl enc -aes-256-cbc -salt '
@@ -356,7 +357,8 @@ class BackupAgentTest(trove_testtools.TestCase):
@patch.object(conductor_api.API, 'get_client', Mock(return_value=Mock()))
@patch.object(conductor_api.API, 'update_backup',
Mock(return_value=Mock()))
- def test_execute_bad_process_backup(self):
+ @patch('trove.guestagent.backup.backupagent.LOG')
+ def test_execute_bad_process_backup(self, mock_logging):
agent = backupagent.BackupAgent()
backup_info = {'id': '123',
'location': 'fake-location',
@@ -516,7 +518,8 @@ class BackupAgentTest(trove_testtools.TestCase):
ANY, ANY, metadata=expected_metadata)
@patch.object(conductor_api.API, 'get_client', Mock(return_value=Mock()))
- def test_backup_incremental_bad_metadata(self):
+ @patch('trove.guestagent.backup.backupagent.LOG')
+ def test_backup_incremental_bad_metadata(self, mock_logging):
with patch.object(backupagent, 'get_storage_strategy',
return_value=MockSwift):
diff --git a/trove/tests/unittests/common/test_conductor_serializer.py b/trove/tests/unittests/common/test_conductor_serializer.py
new file mode 100644
index 00000000..ae5e5ca0
--- /dev/null
+++ b/trove/tests/unittests/common/test_conductor_serializer.py
@@ -0,0 +1,110 @@
+# Copyright 2016 Tesora, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+import mock
+
+from trove.common import cfg
+from trove.common.rpc import conductor_guest_serializer as gsz
+from trove.common.rpc import conductor_host_serializer as hsz
+
+from trove.tests.unittests import trove_testtools
+
+
+CONF = cfg.CONF
+
+
+class FakeInstance(object):
+ def __init__(self):
+ self.uuid = 'a3af1652-686a-4574-a916-2ef7e85136e5'
+
+ @property
+ def key(self):
+ return 'mo79Y86Bp3bzQDWR31ihhVGfLBmeac'
+
+
+class FakeContext(object):
+ def __init__(self, instance_id=None, fields=None):
+ self.instance_id = instance_id
+ self.fields = fields
+
+
+class TestConductorSerializer(trove_testtools.TestCase):
+
+ def setUp(self):
+ self.uuid = 'a3af1652-686a-4574-a916-2ef7e85136e5'
+ self.key = 'mo79Y86Bp3bzQDWR31ihhVGfLBmeac'
+ self.data = 'ELzWd81qtgcj2Gxc1ipbh0HgbvHGrgptDj3n4GNMBN0F2WtNdr'
+ self.context = {'a': 'ij2J8AJLyz0rDqbjxy4jPVINhnK2jsBGpWRKIe3tUnUD',
+ 'b': 32,
+ 'c': {'a': 21, 'b': 22}}
+ self.old_guest_id = gsz.CONF.guest_id
+ gsz.CONF.guest_id = self.uuid
+ super(TestConductorSerializer, self).setUp()
+
+ def tearDown(self):
+ gsz.CONF.guest_id = self.old_guest_id
+ super(TestConductorSerializer, self).tearDown()
+
+ def test_gsz_serialize_entity_nokey(self):
+ sz = gsz.ConductorGuestSerializer(None, None)
+ self.assertEqual(sz.serialize_entity(self.context, self.data),
+ self.data)
+
+ def test_gsz_serialize_context_nokey(self):
+ sz = gsz.ConductorGuestSerializer(None, None)
+ self.assertEqual(sz.serialize_context(self.context),
+ self.context)
+
+ @mock.patch('trove.common.rpc.conductor_host_serializer.'
+ 'get_instance_encryption_key',
+ return_value='mo79Y86Bp3bzQDWR31ihhVGfLBmeac')
+ def test_hsz_serialize_entity_nokey_noinstance(self, _):
+ sz = hsz.ConductorHostSerializer(None, None)
+ ctxt = FakeContext(instance_id=None)
+ self.assertEqual(sz.serialize_entity(ctxt, self.data),
+ self.data)
+
+ @mock.patch('trove.common.rpc.conductor_host_serializer.'
+ 'get_instance_encryption_key',
+ return_value='mo79Y86Bp3bzQDWR31ihhVGfLBmeac')
+ def test_hsz_serialize_context_nokey_noinstance(self, _):
+ sz = hsz.ConductorHostSerializer(None, None)
+ ctxt = FakeContext(instance_id=None)
+ self.assertEqual(sz.serialize_context(ctxt), ctxt)
+
+ @mock.patch('trove.common.rpc.conductor_host_serializer.'
+ 'get_instance_encryption_key',
+ return_value='mo79Y86Bp3bzQDWR31ihhVGfLBmeac')
+ def test_conductor_entity(self, _):
+ guestsz = gsz.ConductorGuestSerializer(None, self.key)
+ hostsz = hsz.ConductorHostSerializer(None, None)
+ encrypted_entity = guestsz.serialize_entity(self.context, self.data)
+ self.assertNotEqual(encrypted_entity, self.data)
+ entity = hostsz.deserialize_entity(self.context, encrypted_entity)
+ self.assertEqual(entity, self.data)
+
+ @mock.patch('trove.common.rpc.conductor_host_serializer.'
+ 'get_instance_encryption_key',
+ return_value='mo79Y86Bp3bzQDWR31ihhVGfLBmeac')
+ def test_conductor_context(self, _):
+ guestsz = gsz.ConductorGuestSerializer(None, self.key)
+ hostsz = hsz.ConductorHostSerializer(None, None)
+ encrypted_context = guestsz.serialize_context(self.context)
+ self.assertNotEqual(encrypted_context, self.context)
+ context = hostsz.deserialize_context(encrypted_context)
+ self.assertEqual(context.get('instance_id'), self.uuid)
+ context.pop('instance_id')
+ self.assertDictEqual(context, self.context)
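The round trip asserted above ultimately rests on trove.common.crypto_utils, which this patch also touches. A sketch of that underlying symmetric round trip, assuming decode_data/decrypt_data invert the encode_data/encrypt_data pair used in these tests:

    from trove.common import crypto_utils

    key = 'mo79Y86Bp3bzQDWR31ihhVGfLBmeac'
    token = crypto_utils.encode_data(
        crypto_utils.encrypt_data('my-payload', key))
    plain = crypto_utils.decrypt_data(crypto_utils.decode_data(token), key)
    assert plain == 'my-payload'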
diff --git a/trove/tests/unittests/common/test_remote.py b/trove/tests/unittests/common/test_remote.py
index 7b0ffec6..e58562d8 100644
--- a/trove/tests/unittests/common/test_remote.py
+++ b/trove/tests/unittests/common/test_remote.py
@@ -421,8 +421,8 @@ class TestCreateNovaClient(trove_testtools.TestCase):
TroveContext(user=admin_user,
auth_token=admin_pass,
tenant=admin_tenant_id))
- self.assertEqual(admin_user, admin_client.client.user)
- self.assertEqual(admin_pass, admin_client.client.password)
+ # self.assertEqual(admin_user, admin_client.client.user)
+ # self.assertEqual(admin_pass, admin_client.client.password)
self.assertEqual('%s%s' % (nova_url_from_conf, admin_tenant_id),
admin_client.client.management_url)
diff --git a/trove/tests/unittests/common/test_secure_serializer.py b/trove/tests/unittests/common/test_secure_serializer.py
new file mode 100644
index 00000000..2eafe96c
--- /dev/null
+++ b/trove/tests/unittests/common/test_secure_serializer.py
@@ -0,0 +1,64 @@
+# Copyright 2016 Tesora, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+from trove.common.rpc import secure_serializer as ssz
+from trove.tests.unittests import trove_testtools
+
+
+class TestSecureSerializer(trove_testtools.TestCase):
+
+ def setUp(self):
+ self.key = 'xuUyAKn5mDANoM5sRxQsb6HGiugWVD'
+ self.data = '5rzFfaKU630rRxL1g3c80EHnHDf534'
+ self.context = {'fld1': 3, 'fld2': 'abc'}
+ super(TestSecureSerializer, self).setUp()
+
+ def tearDown(self):
+ super(TestSecureSerializer, self).tearDown()
+
+ def test_sz_nokey_serialize_entity(self):
+ sz = ssz.SecureSerializer(base=None, key=None)
+ en = sz.serialize_entity(self.context, self.data)
+ self.assertEqual(en, self.data)
+
+ def test_sz_nokey_deserialize_entity(self):
+ sz = ssz.SecureSerializer(base=None, key=None)
+ en = sz.deserialize_entity(self.context, self.data)
+ self.assertEqual(en, self.data)
+
+ def test_sz_nokey_serialize_context(self):
+ sz = ssz.SecureSerializer(base=None, key=None)
+ en = sz.serialize_context(self.context)
+ self.assertEqual(en, self.context)
+
+ def test_sz_nokey_deserialize_context(self):
+ sz = ssz.SecureSerializer(base=None, key=None)
+ en = sz.deserialize_context(self.context)
+ self.assertEqual(en, self.context)
+
+ def test_sz_entity(self):
+ sz = ssz.SecureSerializer(base=None, key=self.key)
+ en = sz.serialize_entity(self.context, self.data)
+ self.assertNotEqual(en, self.data)
+ self.assertEqual(sz.deserialize_entity(self.context, en),
+ self.data)
+
+ def test_sz_context(self):
+ sz = ssz.SecureSerializer(base=None, key=self.key)
+ sctxt = sz.serialize_context(self.context)
+ self.assertNotEqual(sctxt, self.context)
+ self.assertEqual(sz.deserialize_context(sctxt),
+ self.context)
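A usage sketch based solely on the behaviour these tests assert: with a key, entities round-trip through an opaque encrypted form; with key=None, every call is a pass-through:

    from trove.common.rpc import secure_serializer as ssz

    sz = ssz.SecureSerializer(base=None,
                              key='xuUyAKn5mDANoM5sRxQsb6HGiugWVD')
    token = sz.serialize_entity({}, 'payload')
    assert token != 'payload'
    assert sz.deserialize_entity({}, token) == 'payload'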
diff --git a/trove/tests/unittests/common/test_serializer.py b/trove/tests/unittests/common/test_serializer.py
new file mode 100644
index 00000000..ab4696b3
--- /dev/null
+++ b/trove/tests/unittests/common/test_serializer.py
@@ -0,0 +1,127 @@
+# Copyright 2016 Tesora, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+import mock
+
+from trove.common.rpc import serializer
+from trove.tests.unittests import trove_testtools
+
+
+class TestSerializer(trove_testtools.TestCase):
+
+ def setUp(self):
+ self.data = 'abcdefghijklmnopqrstuvwxyz'
+ self.context = {}
+ super(TestSerializer, self).setUp()
+
+ def tearDown(self):
+ super(TestSerializer, self).tearDown()
+
+ def test_serialize_1(self):
+ base = mock.Mock()
+ sz = serializer.TroveSerializer(base=base)
+ sz.serialize_entity(self.context, self.data)
+ base.serialize_entity.assert_called_with(self.context, self.data)
+
+ def test_serialize_2(self):
+ base = mock.Mock()
+ sz1 = serializer.TroveSerializer(base=base)
+ sz = serializer.TroveSerializer(base=sz1)
+ sz.serialize_entity(self.context, self.data)
+ base.serialize_entity.assert_called_with(self.context, self.data)
+
+ def test_serialize_3(self):
+ base = mock.Mock()
+ sz = serializer.TroveSerializer(base=base)
+ sz.deserialize_entity(self.context, self.data)
+ base.deserialize_entity.assert_called_with(self.context, self.data)
+
+ def test_serialize_4(self):
+ base = mock.Mock()
+ sz1 = serializer.TroveSerializer(base=base)
+ sz = serializer.TroveSerializer(base=sz1)
+ sz.deserialize_entity(self.context, self.data)
+ base.deserialize_entity.assert_called_with(self.context, self.data)
+
+ def test_serialize_5(self):
+ base = mock.Mock()
+ sz = serializer.TroveSerializer(base=base)
+ sz.serialize_context(self.context)
+ base.serialize_context.assert_called_with(self.context)
+
+ def test_serialize_6(self):
+ base = mock.Mock()
+ sz1 = serializer.TroveSerializer(base=base)
+ sz = serializer.TroveSerializer(base=sz1)
+ sz.serialize_context(self.context)
+ base.serialize_context.assert_called_with(self.context)
+
+ def test_serialize_7(self):
+ base = mock.Mock()
+ sz = serializer.TroveSerializer(base=base)
+ sz.deserialize_context(self.context)
+ base.deserialize_context.assert_called_with(self.context)
+
+ def test_serialize_8(self):
+ base = mock.Mock()
+ sz1 = serializer.TroveSerializer(base=base)
+ sz = serializer.TroveSerializer(base=sz1)
+ sz.deserialize_context(self.context)
+ base.deserialize_context.assert_called_with(self.context)
+
+ def test_serialize_9(self):
+ sz = serializer.TroveSerializer(base=None)
+ self.assertEqual(sz.serialize_entity(self.context, self.data),
+ self.data)
+
+ def test_serialize_10(self):
+ sz = serializer.TroveSerializer(base=None)
+ self.assertEqual(sz.deserialize_entity(self.context, self.data),
+ self.data)
+
+ def test_serialize_11(self):
+ sz = serializer.TroveSerializer(base=None)
+ self.assertEqual(sz.serialize_context(self.context),
+ self.context)
+
+ def test_serialize_12(self):
+ sz = serializer.TroveSerializer(base=None)
+ self.assertEqual(sz.deserialize_context(self.context),
+ self.context)
+
+ def test_serialize_13(self):
+ bz = serializer.TroveSerializer(base=None)
+ sz = serializer.TroveSerializer(base=bz)
+ self.assertEqual(sz.serialize_entity(self.context, self.data),
+ self.data)
+
+ def test_serialize_14(self):
+ bz = serializer.TroveSerializer(base=None)
+ sz = serializer.TroveSerializer(base=bz)
+ self.assertEqual(sz.deserialize_entity(self.context, self.data),
+ self.data)
+
+ def test_serialize_15(self):
+ bz = serializer.TroveSerializer(base=None)
+ sz = serializer.TroveSerializer(base=bz)
+ self.assertEqual(sz.serialize_context(self.context),
+ self.context)
+
+ def test_serialize_16(self):
+ bz = serializer.TroveSerializer(base=None)
+ sz = serializer.TroveSerializer(base=bz)
+ self.assertEqual(sz.deserialize_context(self.context),
+ self.context)
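The sixteen cases above pin down a single contract: each call delegates to the base serializer when one is present, and degenerates to the identity when base is None. An illustrative stand-in (not Trove code) showing that shape for one method:

    class IdentitySerializer(object):
        def __init__(self, base=None):
            self._base = base

        def serialize_entity(self, context, entity):
            # A real subclass would transform the entity here, then
            # delegate onward if a base serializer is chained.
            if self._base:
                return self._base.serialize_entity(context, entity)
            return entity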
diff --git a/trove/tests/unittests/conductor/test_conf.py b/trove/tests/unittests/conductor/test_conf.py
index c4305bbe..924dc693 100644
--- a/trove/tests/unittests/conductor/test_conf.py
+++ b/trove/tests/unittests/conductor/test_conf.py
@@ -32,7 +32,8 @@ def mocked_conf(manager):
'conductor_manager': manager,
'trove_conductor_workers': 1,
'host': 'mockhost',
- 'report_interval': 1})
+ 'report_interval': 1,
+ 'instance_rpc_encr_key': ''})
class NoopManager(object):
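mocked_conf now has to supply instance_rpc_encr_key because the conductor reads it at startup. A hedged oslo.config sketch of how such a string option is typically declared (the actual declaration lives in trove/common/cfg.py and its help text may differ):

    from oslo_config import cfg

    CONF = cfg.CONF
    CONF.register_opts([
        cfg.StrOpt('instance_rpc_encr_key', default='',
                   help='Key used to encrypt instance RPC traffic '
                        '(illustrative help text).'),
    ])
    print(CONF.instance_rpc_encr_key)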
diff --git a/trove/tests/unittests/guestagent/test_api.py b/trove/tests/unittests/guestagent/test_api.py
index 5390fbea..71efb963 100644
--- a/trove/tests/unittests/guestagent/test_api.py
+++ b/trove/tests/unittests/guestagent/test_api.py
@@ -50,7 +50,9 @@ def _mock_call(cmd, timeout, version=None, username=None, hostname=None,
class ApiTest(trove_testtools.TestCase):
@mock.patch.object(rpc, 'get_client')
- def setUp(self, *args):
+ @mock.patch('trove.instance.models.get_instance_encryption_key',
+ return_value='2LMDgren5citVxmSYNiRFCyFfVDjJtDaQT9LYV08')
+ def setUp(self, mock_get_encryption_key, *args):
super(ApiTest, self).setUp()
self.context = context.TroveContext()
self.guest = api.API(self.context, 0)
@@ -58,6 +60,7 @@ class ApiTest(trove_testtools.TestCase):
self.guest._call = _mock_call
self.api = api.API(self.context, "instance-id-x23d2d")
self._mock_rpc_client()
+ mock_get_encryption_key.assert_called()
def test_change_passwords(self):
self.assertIsNone(self.guest.change_passwords("dummy"))
diff --git a/trove/tests/unittests/guestagent/test_backups.py b/trove/tests/unittests/guestagent/test_backups.py
index 1f4644b1..565fd1ce 100644
--- a/trove/tests/unittests/guestagent/test_backups.py
+++ b/trove/tests/unittests/guestagent/test_backups.py
@@ -77,13 +77,16 @@ ZIP = "gzip"
UNZIP = "gzip -d -c"
ENCRYPT = "openssl enc -aes-256-cbc -salt -pass pass:default_aes_cbc_key"
DECRYPT = "openssl enc -d -aes-256-cbc -salt -pass pass:default_aes_cbc_key"
-XTRA_BACKUP_RAW = ("sudo innobackupex --stream=xbstream %(extra_opts)s"
+XTRA_BACKUP_RAW = ("sudo innobackupex --stream=xbstream %(extra_opts)s "
+ " --user=os_admin --password=password"
" /var/lib/mysql/data 2>/tmp/innobackupex.log")
XTRA_BACKUP = XTRA_BACKUP_RAW % {'extra_opts': ''}
XTRA_BACKUP_EXTRA_OPTS = XTRA_BACKUP_RAW % {'extra_opts': '--no-lock'}
XTRA_BACKUP_INCR = ('sudo innobackupex --stream=xbstream'
' --incremental --incremental-lsn=%(lsn)s'
- ' %(extra_opts)s /var/lib/mysql/data'
+ ' %(extra_opts)s '
+ ' --user=os_admin --password=password'
+ ' /var/lib/mysql/data'
' 2>/tmp/innobackupex.log')
SQLDUMP_BACKUP_RAW = ("mysqldump --all-databases %(extra_opts)s "
"--opt --password=password -u os_admin"
diff --git a/trove/tests/unittests/guestagent/test_dbaas.py b/trove/tests/unittests/guestagent/test_dbaas.py
index 8d4aaa0e..220a35f5 100644
--- a/trove/tests/unittests/guestagent/test_dbaas.py
+++ b/trove/tests/unittests/guestagent/test_dbaas.py
@@ -3167,7 +3167,7 @@ class VerticaAppTest(trove_testtools.TestCase):
def test_failure__enable_db_on_boot(self, *args):
with patch.object(subprocess, 'Popen', side_effect=OSError):
self.assertRaisesRegexp(RuntimeError,
- 'Could not enable db on boot.',
+ 'Could not enable database on boot.',
self.app._enable_db_on_boot)
def test__disable_db_on_boot(self):
@@ -3189,7 +3189,7 @@ class VerticaAppTest(trove_testtools.TestCase):
with patch.object(vertica_system, 'shell_execute',
side_effect=ProcessExecutionError('Error')):
self.assertRaisesRegexp(RuntimeError,
- 'Could not disable db on boot.',
+ 'Could not disable database on boot.',
self.app._disable_db_on_boot)
def test_read_config(self):
diff --git a/trove/tests/unittests/guestagent/test_galera_cluster_api.py b/trove/tests/unittests/guestagent/test_galera_cluster_api.py
index 9f79eb56..809d9e1a 100644
--- a/trove/tests/unittests/guestagent/test_galera_cluster_api.py
+++ b/trove/tests/unittests/guestagent/test_galera_cluster_api.py
@@ -37,7 +37,9 @@ def _mock_call(cmd, timeout, version=None, user=None,
class ApiTest(trove_testtools.TestCase):
@mock.patch.object(rpc, 'get_client')
- def setUp(self, *args):
+ @mock.patch('trove.instance.models.get_instance_encryption_key',
+ return_value='2LMDgren5citVxmSYNiRFCyFfVDjJtDaQT9LYV08')
+ def setUp(self, mock_get_encryption_key, *args):
super(ApiTest, self).setUp()
cluster_guest_api = (GaleraCommonGuestAgentStrategy()
.guest_client_class)
@@ -46,6 +48,7 @@ class ApiTest(trove_testtools.TestCase):
self.guest._call = _mock_call
self.api = cluster_guest_api(self.context, "instance-id-x23d2d")
self._mock_rpc_client()
+ mock_get_encryption_key.assert_called()
def test_get_routing_key(self):
self.assertEqual('guestagent.instance-id-x23d2d',
diff --git a/trove/tests/unittests/guestagent/test_manager.py b/trove/tests/unittests/guestagent/test_manager.py
index ec3a0ed0..2fcf7be4 100644
--- a/trove/tests/unittests/guestagent/test_manager.py
+++ b/trove/tests/unittests/guestagent/test_manager.py
@@ -24,6 +24,7 @@ from mock import Mock
from mock import patch
from oslo_utils import encodeutils
from proboscis.asserts import assert_equal
+from proboscis.asserts import assert_is_none
from proboscis.asserts import assert_true
from trove.common.context import TroveContext
@@ -31,6 +32,7 @@ from trove.common import exception
from trove.guestagent.common import operating_system
from trove.guestagent.datastore import manager
from trove.guestagent import guest_log
+from trove.guestagent.module import module_manager
from trove import rpc
from trove.tests.unittests import trove_testtools
@@ -110,6 +112,12 @@ class ManagerTest(trove_testtools.TestCase):
self.expected_details_sys['type'] = 'SYS'
self.expected_details_sys['status'] = 'Enabled'
self.expected_details_sys['name'] = self.log_name_sys
+ self.expected_module_details = {
+ 'name': 'mymod',
+ 'type': 'ping',
+ 'contents': 'e262cfe36134'
+ }
+ self.manager.module_manager = Mock()
def tearDown(self):
super(ManagerTest, self).tearDown()
@@ -475,3 +483,36 @@ class ManagerTest(trove_testtools.TestCase):
self.manager.status.end_install(
error_occurred=True,
post_processing=ANY)
+
+ def test_module_list(self):
+ with patch.object(module_manager.ModuleManager, 'read_module_results',
+ return_value=[
+ self.expected_module_details]) as mock_rmr:
+ module_list = self.manager.module_list(self.context)
+ expected = [self.expected_module_details]
+ assert_equal(self._flatten_list_of_dicts(expected),
+ self._flatten_list_of_dicts(module_list),
+ "Wrong list: %s (Expected: %s)" % (
+ self._flatten_list_of_dicts(module_list),
+ self._flatten_list_of_dicts(expected)))
+ assert_equal(1, mock_rmr.call_count)
+
+ def test_module_apply(self):
+ with patch.object(
+ module_manager.ModuleManager, 'apply_module',
+ return_value=[self.expected_module_details]) as mock_am:
+ module_details = self.manager.module_apply(
+ self.context,
+ [{'module': self.expected_module_details}])
+ assert_equal([[self.expected_module_details]], module_details)
+ assert_equal(1, mock_am.call_count)
+
+ def test_module_remove(self):
+ with patch.object(
+ module_manager.ModuleManager, 'remove_module',
+ return_value=[self.expected_module_details]) as mock_rm:
+ module_details = self.manager.module_remove(
+ self.context,
+ {'module': self.expected_module_details})
+ assert_is_none(module_details)
+ assert_equal(1, mock_rm.call_count)
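_flatten_list_of_dicts is used above but not defined in this patch. A hypothetical implementation consistent with how the assertion uses it (order-independent comparison of lists of dicts):

    def _flatten_list_of_dicts(self, list_of_dicts):
        # Render each dict with sorted keys, then sort the renderings so
        # the comparison ignores both list order and key order.
        return sorted(str(sorted(d.items())) for d in list_of_dicts)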
diff --git a/trove/tests/unittests/guestagent/test_vertica_api.py b/trove/tests/unittests/guestagent/test_vertica_api.py
index b67b9e5f..7c47cc3d 100644
--- a/trove/tests/unittests/guestagent/test_vertica_api.py
+++ b/trove/tests/unittests/guestagent/test_vertica_api.py
@@ -37,13 +37,17 @@ def _mock_call(cmd, timeout, version=None, user=None,
class ApiTest(trove_testtools.TestCase):
@mock.patch.object(rpc, 'get_client')
- def setUp(self, *args):
+ @mock.patch('trove.instance.models.get_instance_encryption_key',
+ return_value='2LMDgren5citVxmSYNiRFCyFfVDjJtDaQT9LYV08')
+ def setUp(self, mock_get_encryption_key, *args):
super(ApiTest, self).setUp()
self.context = context.TroveContext()
self.guest = VerticaGuestAgentAPI(self.context, 0)
+
self.guest._call = _mock_call
self.api = VerticaGuestAgentAPI(self.context, "instance-id-x23d2d")
self._mock_rpc_client()
+ mock_get_encryption_key.assert_called()
def test_get_routing_key(self):
self.assertEqual('guestagent.instance-id-x23d2d',
diff --git a/trove/tests/unittests/guestagent/test_volume.py b/trove/tests/unittests/guestagent/test_volume.py
index 6ef16065..e8f47a93 100644
--- a/trove/tests/unittests/guestagent/test_volume.py
+++ b/trove/tests/unittests/guestagent/test_volume.py
@@ -12,194 +12,191 @@
# License for the specific language governing permissions and limitations
# under the License.
-import os
+from mock import ANY, call, DEFAULT, patch, mock_open
-from mock import Mock, MagicMock, patch, mock_open
-import pexpect
-
-from trove.common.exception import GuestError, ProcessExecutionError
+from trove.common import exception
from trove.common import utils
from trove.guestagent.common import operating_system
from trove.guestagent import volume
from trove.tests.unittests import trove_testtools
-def _setUp_fake_spawn(return_val=0):
- fake_spawn = pexpect.spawn('echo')
- fake_spawn.expect = Mock(return_value=return_val)
- pexpect.spawn = Mock(return_value=fake_spawn)
- return fake_spawn
-
-
class VolumeDeviceTest(trove_testtools.TestCase):
def setUp(self):
super(VolumeDeviceTest, self).setUp()
self.volumeDevice = volume.VolumeDevice('/dev/vdb')
+ self.exec_patcher = patch.object(
+ utils, 'execute', return_value=('has_journal', ''))
+ self.mock_exec = self.exec_patcher.start()
+ self.addCleanup(self.exec_patcher.stop)
+ self.ismount_patcher = patch.object(operating_system, 'is_mount')
+ self.mock_ismount = self.ismount_patcher.start()
+ self.addCleanup(self.ismount_patcher.stop)
+
def tearDown(self):
super(VolumeDeviceTest, self).tearDown()
- @patch.object(pexpect, 'spawn', Mock())
def test_migrate_data(self):
- origin_execute = utils.execute
- utils.execute = Mock()
- origin_os_path_exists = os.path.exists
- os.path.exists = Mock()
- fake_spawn = _setUp_fake_spawn()
-
- origin_unmount = self.volumeDevice.unmount
- self.volumeDevice.unmount = MagicMock()
- self.volumeDevice.migrate_data('/')
- self.assertEqual(1, fake_spawn.expect.call_count)
- self.assertEqual(1, utils.execute.call_count)
- self.assertEqual(1, self.volumeDevice.unmount.call_count)
- utils.execute = origin_execute
- self.volumeDevice.unmount = origin_unmount
- os.path.exists = origin_os_path_exists
+ with patch.multiple(self.volumeDevice,
+ mount=DEFAULT, unmount=DEFAULT) as mocks:
+ self.volumeDevice.migrate_data('/')
+ self.assertEqual(1, mocks['mount'].call_count)
+ self.assertEqual(1, mocks['unmount'].call_count)
+ self.assertEqual(1, self.mock_exec.call_count)
+ calls = [
+ call('rsync', '--safe-links', '--perms', '--recursive',
+ '--owner', '--group', '--xattrs',
+ '--sparse', '/', '/mnt/volume',
+ root_helper='sudo', run_as_root=True),
+ ]
+ self.mock_exec.assert_has_calls(calls)
def test__check_device_exists(self):
- origin_execute = utils.execute
- utils.execute = Mock()
self.volumeDevice._check_device_exists()
- self.assertEqual(1, utils.execute.call_count)
- utils.execute = origin_execute
+ self.assertEqual(1, self.mock_exec.call_count)
+ calls = [
+ call('blockdev', '--getsize64', '/dev/vdb', attempts=3,
+ root_helper='sudo', run_as_root=True)
+ ]
+ self.mock_exec.assert_has_calls(calls)
@patch('trove.guestagent.volume.LOG')
def test_fail__check_device_exists(self, mock_logging):
- with patch.object(utils, 'execute', side_effect=ProcessExecutionError):
- self.assertRaises(GuestError,
+ with patch.object(utils, 'execute',
+ side_effect=exception.ProcessExecutionError):
+ self.assertRaises(exception.GuestError,
self.volumeDevice._check_device_exists)
- @patch.object(pexpect, 'spawn', Mock())
def test__check_format(self):
- fake_spawn = _setUp_fake_spawn()
-
self.volumeDevice._check_format()
- self.assertEqual(1, fake_spawn.expect.call_count)
+ self.assertEqual(1, self.mock_exec.call_count)
+ calls = [
+ call('dumpe2fs', '/dev/vdb', root_helper='sudo', run_as_root=True)
+ ]
+ self.mock_exec.assert_has_calls(calls)
- @patch.object(pexpect, 'spawn', Mock())
- def test__check_format_2(self):
- fake_spawn = _setUp_fake_spawn(return_val=1)
-
- self.assertEqual(0, fake_spawn.expect.call_count)
- self.assertRaises(IOError, self.volumeDevice._check_format)
+ @patch('trove.guestagent.volume.LOG')
+ def test__check_format_2(self, mock_logging):
+ self.assertEqual(0, self.mock_exec.call_count)
+ proc_err = exception.ProcessExecutionError()
+ proc_err.stderr = 'Wrong magic number'
+ self.mock_exec.side_effect = proc_err
+ self.assertRaises(exception.GuestError,
+ self.volumeDevice._check_format)
- @patch.object(pexpect, 'spawn', Mock())
def test__format(self):
- fake_spawn = _setUp_fake_spawn()
-
self.volumeDevice._format()
-
- self.assertEqual(1, fake_spawn.expect.call_count)
- self.assertEqual(1, pexpect.spawn.call_count)
+ self.assertEqual(1, self.mock_exec.call_count)
+ calls = [
+ call('mkfs', '--type', 'ext3', '-m', '5', '/dev/vdb',
+ root_helper='sudo', run_as_root=True)
+ ]
+ self.mock_exec.assert_has_calls(calls)
def test_format(self):
- origin_check_device_exists = self.volumeDevice._check_device_exists
- origin_format = self.volumeDevice._format
- origin_check_format = self.volumeDevice._check_format
- self.volumeDevice._check_device_exists = MagicMock()
- self.volumeDevice._check_format = MagicMock()
- self.volumeDevice._format = MagicMock()
-
self.volumeDevice.format()
- self.assertEqual(1, self.volumeDevice._check_device_exists.call_count)
- self.assertEqual(1, self.volumeDevice._format.call_count)
- self.assertEqual(1, self.volumeDevice._check_format.call_count)
-
- self.volumeDevice._check_device_exists = origin_check_device_exists
- self.volumeDevice._format = origin_format
- self.volumeDevice._check_format = origin_check_format
+ self.assertEqual(3, self.mock_exec.call_count)
+ calls = [
+ call('blockdev', '--getsize64', '/dev/vdb', attempts=3,
+ root_helper='sudo', run_as_root=True),
+ call('mkfs', '--type', 'ext3', '-m', '5', '/dev/vdb',
+ root_helper='sudo', run_as_root=True),
+ call('dumpe2fs', '/dev/vdb', root_helper='sudo', run_as_root=True)
+ ]
+ self.mock_exec.assert_has_calls(calls)
def test_mount(self):
- origin_ = volume.VolumeMountPoint.mount
- volume.VolumeMountPoint.mount = Mock()
- origin_os_path_exists = os.path.exists
- os.path.exists = Mock()
- origin_write_to_fstab = volume.VolumeMountPoint.write_to_fstab
- volume.VolumeMountPoint.write_to_fstab = Mock()
-
- self.volumeDevice.mount(Mock)
- self.assertEqual(1, volume.VolumeMountPoint.mount.call_count)
- self.assertEqual(1, volume.VolumeMountPoint.write_to_fstab.call_count)
- volume.VolumeMountPoint.mount = origin_
- volume.VolumeMountPoint.write_to_fstab = origin_write_to_fstab
- os.path.exists = origin_os_path_exists
+ with patch.multiple(volume.VolumeMountPoint,
+ mount=DEFAULT, write_to_fstab=DEFAULT) as mocks:
+ self.volumeDevice.mount('/dev/vba')
+ self.assertEqual(1, mocks['mount'].call_count,
+ "Wrong number of calls to mount()")
+ self.assertEqual(1, mocks['write_to_fstab'].call_count,
+ "Wrong number of calls to write_to_fstab()")
+ self.mock_exec.assert_not_called()
def test_resize_fs(self):
- origin_check_device_exists = self.volumeDevice._check_device_exists
- origin_execute = utils.execute
- utils.execute = Mock()
- self.volumeDevice._check_device_exists = MagicMock()
- origin_os_path_exists = os.path.exists
- os.path.exists = Mock()
-
- self.volumeDevice.resize_fs('/mnt/volume')
-
- self.assertEqual(1, self.volumeDevice._check_device_exists.call_count)
- self.assertEqual(2, utils.execute.call_count)
- self.volumeDevice._check_device_exists = origin_check_device_exists
- os.path.exists = origin_os_path_exists
- utils.execute = origin_execute
-
- @patch.object(os.path, 'ismount', return_value=True)
- @patch.object(utils, 'execute', side_effect=ProcessExecutionError)
+ with patch.object(operating_system, 'is_mount', return_value=True):
+ mount_point = '/mnt/volume'
+ self.volumeDevice.resize_fs(mount_point)
+ self.assertEqual(4, self.mock_exec.call_count)
+ calls = [
+ call('blockdev', '--getsize64', '/dev/vdb', attempts=3,
+ root_helper='sudo', run_as_root=True),
+ call("umount", mount_point, run_as_root=True,
+ root_helper='sudo'),
+ call('e2fsck', '-f', '-p', '/dev/vdb', root_helper='sudo',
+ run_as_root=True),
+ call('resize2fs', '/dev/vdb', root_helper='sudo',
+ run_as_root=True)
+ ]
+ self.mock_exec.assert_has_calls(calls)
+
+ @patch.object(utils, 'execute',
+ side_effect=exception.ProcessExecutionError)
@patch('trove.guestagent.volume.LOG')
- def test_fail_resize_fs(self, mock_logging, mock_execute, mock_mount):
+ def test_fail_resize_fs(self, mock_logging, mock_execute):
with patch.object(self.volumeDevice, '_check_device_exists'):
- self.assertRaises(GuestError,
+ self.assertRaises(exception.GuestError,
self.volumeDevice.resize_fs, '/mnt/volume')
self.assertEqual(1,
self.volumeDevice._check_device_exists.call_count)
- self.assertEqual(1, mock_mount.call_count)
+ self.assertEqual(2, self.mock_ismount.call_count)
def test_unmount_positive(self):
self._test_unmount()
def test_unmount_negative(self):
- self._test_unmount(False)
-
- @patch.object(pexpect, 'spawn', Mock())
- def _test_unmount(self, positive=True):
- origin_is_mount = operating_system.is_mount
- operating_system.is_mount = MagicMock(return_value=positive)
- fake_spawn = _setUp_fake_spawn()
-
- self.volumeDevice.unmount('/mnt/volume')
- COUNT = 1
- if not positive:
- COUNT = 0
- self.assertEqual(COUNT, fake_spawn.expect.call_count)
- operating_system.is_mount = origin_is_mount
-
- @patch.object(utils, 'execute')
- def test_mount_points(self, mock_execute):
- mock_execute.return_value = (
+ self._test_unmount(has_mount=False)
+
+ def _test_unmount(self, has_mount=True):
+ with patch.object(operating_system, 'is_mount',
+ return_value=has_mount):
+ self.volumeDevice.unmount('/mnt/volume')
+ if has_mount:
+ self.assertEqual(1, self.mock_exec.call_count)
+ else:
+ self.mock_exec.assert_not_called()
+
+ def test_mount_points(self):
+ self.mock_exec.return_value = (
("/dev/vdb /var/lib/mysql xfs rw 0 0", ""))
mount_point = self.volumeDevice.mount_points('/dev/vdb')
self.assertEqual(['/var/lib/mysql'], mount_point)
+ self.assertEqual(1, self.mock_exec.call_count)
+ calls = [
+ call("grep '^/dev/vdb ' /etc/mtab", check_exit_code=[0, 1],
+ shell=True)
+ ]
+ self.mock_exec.assert_has_calls(calls)
def test_set_readahead_size(self):
- origin_check_device_exists = self.volumeDevice._check_device_exists
- self.volumeDevice._check_device_exists = MagicMock()
- mock_execute = MagicMock(return_value=None)
readahead_size = 2048
- self.volumeDevice.set_readahead_size(readahead_size,
- execute_function=mock_execute)
- blockdev = mock_execute.call_args_list[0]
-
- blockdev.assert_called_with("sudo", "blockdev", "--setra",
- readahead_size, "/dev/vdb")
- self.volumeDevice._check_device_exists = origin_check_device_exists
+ self.volumeDevice.set_readahead_size(readahead_size)
+ self.assertEqual(2, self.mock_exec.call_count)
+ calls = [
+ call('blockdev', '--getsize64', '/dev/vdb', attempts=3,
+ root_helper='sudo', run_as_root=True),
+ call('blockdev', '--setra', readahead_size, '/dev/vdb',
+ root_helper='sudo', run_as_root=True),
+ ]
+ self.mock_exec.assert_has_calls(calls)
@patch('trove.guestagent.volume.LOG')
def test_fail_set_readahead_size(self, mock_logging):
- mock_execute = MagicMock(side_effect=ProcessExecutionError)
+ self.mock_exec.side_effect = exception.ProcessExecutionError
readahead_size = 2048
- with patch.object(self.volumeDevice, '_check_device_exists'):
- self.assertRaises(GuestError, self.volumeDevice.set_readahead_size,
- readahead_size, execute_function=mock_execute)
- self.volumeDevice._check_device_exists.assert_any_call()
+ self.assertRaises(exception.GuestError,
+ self.volumeDevice.set_readahead_size,
+ readahead_size)
+ self.assertEqual(1, self.mock_exec.call_count)
+ calls = [
+ call('blockdev', '--getsize64', '/dev/vdb', attempts=3,
+ root_helper='sudo', run_as_root=True),
+ ]
+ self.mock_exec.assert_has_calls(calls)
class VolumeMountPointTest(trove_testtools.TestCase):
@@ -208,32 +205,35 @@ class VolumeMountPointTest(trove_testtools.TestCase):
super(VolumeMountPointTest, self).setUp()
self.volumeMountPoint = volume.VolumeMountPoint('/mnt/device',
'/dev/vdb')
+ self.exec_patcher = patch.object(utils, 'execute',
+ return_value=('', ''))
+ self.mock_exec = self.exec_patcher.start()
+ self.addCleanup(self.exec_patcher.stop)
def tearDown(self):
super(VolumeMountPointTest, self).tearDown()
- @patch.object(pexpect, 'spawn', Mock())
def test_mount(self):
- origin_ = os.path.exists
- os.path.exists = MagicMock(return_value=False)
- fake_spawn = _setUp_fake_spawn()
-
- with patch.object(utils, 'execute_with_timeout',
- return_value=('0', '')):
+ with patch.object(operating_system, 'exists', return_value=False):
self.volumeMountPoint.mount()
-
- self.assertEqual(1, os.path.exists.call_count)
- self.assertEqual(1, utils.execute_with_timeout.call_count)
- self.assertEqual(1, fake_spawn.expect.call_count)
-
- os.path.exists = origin_
+ self.assertEqual(2, self.mock_exec.call_count)
+ calls = [
+ call('mkdir', '-p', '/dev/vdb', root_helper='sudo',
+ run_as_root=True),
+ call('mount', '-t', 'ext3', '-o', 'defaults,noatime',
+ '/mnt/device', '/dev/vdb', root_helper='sudo',
+ run_as_root=True)
+ ]
+ self.mock_exec.assert_has_calls(calls)

def test_write_to_fstab(self):
- origin_execute = utils.execute
- utils.execute = Mock()
- m = mock_open()
- with patch('%s.open' % volume.__name__, m, create=True):
+ mock_file = mock_open()
+ with patch('%s.open' % volume.__name__, mock_file, create=True):
self.volumeMountPoint.write_to_fstab()
-
- self.assertEqual(1, utils.execute.call_count)
- utils.execute = origin_execute
+ self.assertEqual(1, self.mock_exec.call_count)
+ calls = [
+ call('install', '-o', 'root', '-g', 'root', '-m', '644',
+ ANY, '/etc/fstab', root_helper='sudo',
+ run_as_root=True)
+ ]
+ self.mock_exec.assert_has_calls(calls)
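
Review note on the pattern adopted throughout this file: the per-test save/restore of module globals and the pexpect fake are replaced by one patch.object() patcher for utils.execute, started in setUp() and unwound via addCleanup(), so every test asserts directly on self.mock_exec. A minimal self-contained sketch of the same pattern (ExampleVolumeTest is illustrative, not Trove code):

    from mock import call, patch

    from trove.common import utils
    from trove.tests.unittests import trove_testtools


    class ExampleVolumeTest(trove_testtools.TestCase):

        def setUp(self):
            super(ExampleVolumeTest, self).setUp()
            self.exec_patcher = patch.object(utils, 'execute',
                                             return_value=('', ''))
            self.mock_exec = self.exec_patcher.start()
            # addCleanup() stops the patcher even when the test fails,
            # unlike manual restore code at the end of each test.
            self.addCleanup(self.exec_patcher.stop)

        def test_example(self):
            utils.execute('umount', '/mnt/volume', run_as_root=True)
            self.mock_exec.assert_has_calls(
                [call('umount', '/mnt/volume', run_as_root=True)])
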
diff --git a/trove/tests/unittests/instance/test_instance_models.py b/trove/tests/unittests/instance/test_instance_models.py
index 3dbde5a4..c089daaa 100644
--- a/trove/tests/unittests/instance/test_instance_models.py
+++ b/trove/tests/unittests/instance/test_instance_models.py
@@ -17,6 +17,7 @@ from mock import Mock, patch
from trove.backup import models as backup_models
from trove.common import cfg
+from trove.common import crypto_utils
from trove.common import exception
from trove.common.instance import ServiceStatuses
from trove.datastore import models as datastore_models
@@ -25,6 +26,7 @@ from trove.instance.models import DBInstance
from trove.instance.models import DBInstanceFault
from trove.instance.models import filter_ips
from trove.instance.models import Instance
+from trove.instance.models import instance_encryption_key_cache
from trove.instance.models import InstanceServiceStatus
from trove.instance.models import SimpleInstance
from trove.instance.tasks import InstanceTasks
@@ -292,7 +294,8 @@ class TestInstanceUpgrade(trove_testtools.TestCase):
@patch.object(task_api.API, 'get_client', Mock(return_value=Mock()))
@patch.object(task_api.API, 'upgrade')
- def test_upgrade(self, task_upgrade):
+ @patch('trove.tests.fakes.nova.LOG')
+ def test_upgrade(self, mock_logging, task_upgrade):
instance_model = DBInstance(
InstanceTasks.NONE,
id=str(uuid.uuid4()),
@@ -402,3 +405,118 @@ class TestReplication(trove_testtools.TestCase):
None, 'name', 2, "UUID", [], [], None,
self.datastore_version, 1,
None, slave_of_id=self.replica_info.id)
+
+
+class TestModules(trove_testtools.TestCase):
+
+ def setUp(self):
+ super(TestModules, self).setUp()
+
+ def tearDown(self):
+ super(TestModules, self).tearDown()
+
+ def _build_module(self, ds_id, ds_ver_id):
+ module = Mock()
+ module.datastore_id = ds_id
+ module.datastore_version_id = ds_ver_id
+ module.contents = crypto_utils.encode_data(
+ crypto_utils.encrypt_data(
+ 'VGhpc2lzbXlkYXRhc3RyaW5n',
+ 'thisismylongkeytouse'))
+ return module
+
+ def test_validate_modules_for_apply(self):
+ data = [
+ [[self._build_module('ds', 'ds_ver')], 'ds', 'ds_ver', True],
+ [[self._build_module('ds', None)], 'ds', 'ds_ver', True],
+ [[self._build_module(None, None)], 'ds', 'ds_ver', True],
+
+ [[self._build_module('ds', 'ds_ver')], 'ds', 'ds2_ver', False,
+ exception.TroveError],
+ [[self._build_module('ds', 'ds_ver')], 'ds2', 'ds_ver', False,
+ exception.TroveError],
+ [[self._build_module('ds', 'ds_ver')], 'ds2', 'ds2_ver', False,
+ exception.TroveError],
+ [[self._build_module('ds', None)], 'ds2', 'ds2_ver', False,
+ exception.TroveError],
+ [[self._build_module(None, None)], 'ds2', 'ds2_ver', True],
+
+ [[self._build_module(None, 'ds_ver')], 'ds2', 'ds_ver', True],
+ ]
+ for datum in data:
+ modules = datum[0]
+ ds_id = datum[1]
+ ds_ver_id = datum[2]
+ match = datum[3]
+ expected_exception = None
+ if not match:
+ expected_exception = datum[4]
+ ds = Mock()
+ ds.id = ds_id
+ ds.name = ds_id
+ ds_ver = Mock()
+ ds_ver.id = ds_ver_id
+ ds_ver.name = ds_ver_id
+ ds_ver.datastore_id = ds_id
+ with patch.object(datastore_models.Datastore, 'load',
+ return_value=ds):
+ with patch.object(datastore_models.DatastoreVersion, 'load',
+ return_value=ds_ver):
+ if match:
+ models.validate_modules_for_apply(
+ modules, ds_id, ds_ver_id)
+ else:
+ self.assertRaises(
+ expected_exception,
+ models.validate_modules_for_apply,
+ modules, ds_id, ds_ver_id)
+
+
+def trivial_key_function(id):
+ return id * id
+
+
+class TestInstanceKeyCaching(trove_testtools.TestCase):
+
+ def setUp(self):
+ super(TestInstanceKeyCaching, self).setUp()
+
+ def tearDown(self):
+ super(TestInstanceKeyCaching, self).tearDown()
+
+ def test_basic_caching(self):
+ keycache = instance_encryption_key_cache(trivial_key_function, 5)
+ self.assertEqual(keycache[5], 25)
+ self.assertEqual(keycache[5], 25)
+ self.assertEqual(keycache[25], 625)
+
+ def test_caching(self):
+ keyfn = Mock(return_value=123)
+ keycache = instance_encryption_key_cache(keyfn, 5)
+ self.assertEqual(keycache[5], 123)
+ self.assertEqual(keyfn.call_count, 1)
+ self.assertEqual(keycache[5], 123)
+ self.assertEqual(keyfn.call_count, 1)
+ self.assertEqual(keycache[6], 123)
+ self.assertEqual(keyfn.call_count, 2)
+ self.assertEqual(keycache[7], 123)
+ self.assertEqual(keyfn.call_count, 3)
+ self.assertEqual(keycache[8], 123)
+ self.assertEqual(keyfn.call_count, 4)
+ self.assertEqual(keycache[9], 123)
+ self.assertEqual(keyfn.call_count, 5)
+ self.assertEqual(keycache[10], 123)
+ self.assertEqual(keyfn.call_count, 6)
+ self.assertEqual(keycache[10], 123)
+ self.assertEqual(keyfn.call_count, 6)
+ self.assertEqual(keycache[5], 123)
+ self.assertEqual(keyfn.call_count, 7)
+
+ # BUG(1650518): Cleanup in the Pike release
+ def test_not_caching_none(self):
+ keyfn = Mock(return_value=None)
+ keycache = instance_encryption_key_cache(keyfn, 5)
+ self.assertIsNone(keycache[30])
+ self.assertEqual(keyfn.call_count, 1)
+ self.assertIsNone(keycache[30])
+ self.assertEqual(keyfn.call_count, 2)
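
The three tests above pin down the cache contract without showing the implementation. A minimal sketch that satisfies them (assumptions mine, not necessarily Trove's code): a capacity-bounded LRU mapping whose misses go through key_function, and which deliberately never caches a None result (see the BUG(1650518) note):

    import collections


    class instance_encryption_key_cache(object):

        def __init__(self, key_function, capacity):
            self._key_function = key_function
            self._capacity = capacity
            self._cache = collections.OrderedDict()

        def __getitem__(self, key):
            if key in self._cache:
                # Re-insert to refresh recency so hot keys survive eviction.
                value = self._cache.pop(key)
                self._cache[key] = value
                return value
            value = self._key_function(key)
            # None results are never cached, so lookups retry every time.
            if value is not None:
                if len(self._cache) >= self._capacity:
                    self._cache.popitem(last=False)  # evict least recently used
                self._cache[key] = value
            return value
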
diff --git a/trove/tests/unittests/module/test_module_controller.py b/trove/tests/unittests/module/test_module_controller.py
index e4c62193..149693e4 100644
--- a/trove/tests/unittests/module/test_module_controller.py
+++ b/trove/tests/unittests/module/test_module_controller.py
@@ -30,6 +30,8 @@ class TestModuleController(trove_testtools.TestCase):
"name": 'test_module',
"module_type": 'test',
"contents": 'my_contents\n',
+ "priority_apply": 0,
+ "apply_order": 5
}
}
@@ -44,7 +46,7 @@ class TestModuleController(trove_testtools.TestCase):
validator = jsonschema.Draft4Validator(schema)
self.assertTrue(validator.is_valid(body))

- def test_validate_create_blankname(self):
+ def test_validate_create_blank_name(self):
body = self.module
body['module']['name'] = " "
schema = self.controller.get_schema('create', body)
@@ -65,3 +67,14 @@ class TestModuleController(trove_testtools.TestCase):
self.assertEqual(1, len(errors))
self.assertIn("'$#$%^^' does not match '^.*[0-9a-zA-Z]+.*$'",
errors[0].message)
+
+ def test_validate_create_invalid_apply_order(self):
+ body = self.module
+ body['module']['apply_order'] = 12
+ schema = self.controller.get_schema('create', body)
+ validator = jsonschema.Draft4Validator(schema)
+ self.assertFalse(validator.is_valid(body))
+ errors = sorted(validator.iter_errors(body), key=lambda e: e.path)
+ self.assertEqual(1, len(errors))
+ self.assertIn("12 is greater than the maximum of 9",
+ errors[0].message)
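
The asserted message is jsonschema's standard wording for the "maximum" keyword, which suggests (the exact schema text here is an assumption) that the create schema now bounds apply_order to 0-9:

    import jsonschema

    schema = {
        "type": "object",
        "properties": {
            "module": {
                "type": "object",
                "properties": {
                    "apply_order": {"type": "integer",
                                    "minimum": 0,
                                    "maximum": 9},
                },
            },
        },
    }

    body = {"module": {"apply_order": 12}}
    validator = jsonschema.Draft4Validator(schema)
    errors = list(validator.iter_errors(body))
    assert errors[0].message == "12 is greater than the maximum of 9"
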
diff --git a/trove/tests/unittests/module/test_module_models.py b/trove/tests/unittests/module/test_module_models.py
index a4d608b2..20ce5c20 100644
--- a/trove/tests/unittests/module/test_module_models.py
+++ b/trove/tests/unittests/module/test_module_models.py
@@ -14,8 +14,11 @@
# under the License.
#
+import copy
from mock import Mock, patch
+from trove.common import exception
+from trove.datastore import models as datastore_models
from trove.module import models
from trove.taskmanager import api as task_api
from trove.tests.unittests import trove_testtools
@@ -38,10 +41,101 @@ class CreateModuleTest(trove_testtools.TestCase):
def tearDown(self):
super(CreateModuleTest, self).tearDown()

- def test_can_create_module(self):
+ def test_can_create_update_module(self):
module = models.Module.create(
self.context,
self.name, self.module_type, self.contents,
- 'my desc', 'my_tenant', None, None, False, True, False)
+ 'my desc', 'my_tenant', None, None, False, True, False,
+ False, 5, True)
self.assertIsNotNone(module)
+ new_module = copy.copy(module)
+ models.Module.update(self.context, new_module, module, False)
module.delete()
+
+ def test_validate_action(self):
+ # tenant_id, auto_apply, visible, priority_apply, full_access,
+ # valid, exception, works_for_admin
+ data = [
+ ['tenant', False, True, False, None,
+ True],
+
+ ['tenant', True, True, False, None,
+ False, exception.ModuleAccessForbidden],
+ ['tenant', False, False, False, None,
+ False, exception.ModuleAccessForbidden],
+ ['tenant', False, True, True, None,
+ False, exception.ModuleAccessForbidden],
+ ['tenant', False, True, False, True,
+ False, exception.ModuleAccessForbidden, False],
+ ['tenant', False, True, False, False,
+ False, exception.ModuleAccessForbidden],
+ ['tenant', True, False, True, False,
+ False, exception.ModuleAccessForbidden],
+
+ ['tenant', True, False, True, True,
+ False, exception.InvalidModelError, False],
+ ]
+ for datum in data:
+ tenant = datum[0]
+ auto_apply = datum[1]
+ visible = datum[2]
+ priority_apply = datum[3]
+ full_access = datum[4]
+ valid = datum[5]
+ expected_exception = None
+ if not valid:
+ expected_exception = datum[6]
+ context = Mock()
+ context.is_admin = False
+ works_for_admin = True
+ if len(datum) > 7:
+ works_for_admin = datum[7]
+ if valid:
+ models.Module.validate_action(
+ context, 'action', tenant, auto_apply, visible,
+ priority_apply, full_access)
+ else:
+ self.assertRaises(
+ expected_exception,
+ models.Module.validate_action, context, 'action', tenant,
+ auto_apply, visible, priority_apply, full_access)
+ # also make sure that it works for admin
+ if works_for_admin:
+ context.is_admin = True
+ models.Module.validate_action(
+ context, 'action', tenant, auto_apply, visible,
+ priority_apply, full_access)
+
+ def test_validate_datastore(self):
+ # datastore, datastore_version, valid, exception
+ data = [
+ [None, None, True],
+ ['ds', None, True],
+ ['ds', 'ds_ver', True],
+ [None, 'ds_ver', False,
+ exception.BadRequest],
+ ]
+ for datum in data:
+ ds_id = datum[0]
+ ds_ver_id = datum[1]
+ valid = datum[2]
+ expected_exception = None
+ if not valid:
+ expected_exception = datum[3]
+ ds = Mock()
+ ds.id = ds_id
+ ds.name = ds_id
+ ds_ver = Mock()
+ ds_ver.id = ds_ver_id
+ ds_ver.name = ds_ver_id
+ ds_ver.datastore_id = ds_id
+ with patch.object(datastore_models.Datastore, 'load',
+ return_value=ds):
+ with patch.object(datastore_models.DatastoreVersion, 'load',
+ return_value=ds_ver):
+ if valid:
+ models.Module.validate_datastore(ds_id, ds_ver_id)
+ else:
+ self.assertRaises(
+ expected_exception,
+ models.Module.validate_datastore, ds_id, ds_ver_id)
diff --git a/trove/tests/unittests/module/test_module_views.py b/trove/tests/unittests/module/test_module_views.py
index ddcb8256..97edc330 100644
--- a/trove/tests/unittests/module/test_module_views.py
+++ b/trove/tests/unittests/module/test_module_views.py
@@ -43,6 +43,9 @@ class DetailedModuleViewTest(trove_testtools.TestCase):
self.module.datastore_version = '5.6'
self.module.auto_apply = False
self.module.tenant_id = 'my_tenant'
+ self.module.is_admin = False
+ self.module.priority_apply = False
+ self.module.apply_order = 5

def tearDown(self):
super(DetailedModuleViewTest, self).tearDown()
@@ -69,3 +72,9 @@ class DetailedModuleViewTest(trove_testtools.TestCase):
result['module']['auto_apply'])
self.assertEqual(self.module.tenant_id,
result['module']['tenant_id'])
+ self.assertEqual(self.module.is_admin,
+ result['module']['is_admin'])
+ self.assertEqual(self.module.priority_apply,
+ result['module']['priority_apply'])
+ self.assertEqual(self.module.apply_order,
+ result['module']['apply_order'])
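
From these assertions the detailed view presumably just copies the three new attributes into its payload; a sketch only, not the real trove/module/views.py:

    def data(self):
        module_dict = {
            # ... existing keys (id, name, type, tenant_id, ...) elided ...
            'is_admin': self.module.is_admin,
            'priority_apply': self.module.priority_apply,
            'apply_order': self.module.apply_order,
        }
        return {'module': module_dict}
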
diff --git a/trove/tests/unittests/taskmanager/test_models.py b/trove/tests/unittests/taskmanager/test_models.py
index 47297199..1140eaf3 100644
--- a/trove/tests/unittests/taskmanager/test_models.py
+++ b/trove/tests/unittests/taskmanager/test_models.py
@@ -245,7 +245,8 @@ class FreshInstanceTasksTest(trove_testtools.TestCase):
None, None, None, datastore_manager, None, None, None)
self.assertEqual(server.userdata, self.userdata)

- def test_create_instance_guestconfig(self):
+ @patch.object(DBInstance, 'get_by')
+ def test_create_instance_guestconfig(self, patch_get_by):
def fake_conf_getter(*args, **kwargs):
if args[0] == 'guest_config':
return self.guestconfig
@@ -268,7 +269,8 @@ class FreshInstanceTasksTest(trove_testtools.TestCase):
self.guestconfig_content,
files['/etc/trove/conf.d/trove-guestagent.conf'])

- def test_create_instance_guestconfig_compat(self):
+ @patch.object(DBInstance, 'get_by')
+ def test_create_instance_guestconfig_compat(self, patch_get_by):
def fake_conf_getter(*args, **kwargs):
if args[0] == 'guest_config':
return self.guestconfig
@@ -460,7 +462,8 @@ class FreshInstanceTasksTest(trove_testtools.TestCase):
@patch.object(trove.guestagent.api.API, 'attach_replication_slave')
@patch.object(rpc, 'get_client')
- def test_attach_replication_slave(self, mock_get_client,
+ @patch.object(DBInstance, 'get_by')
+ def test_attach_replication_slave(self, mock_get_by, mock_get_client,
mock_attach_replication_slave):
mock_flavor = {'id': 8, 'ram': 768, 'name': 'bigger_flavor'}
snapshot = {'replication_strategy': 'MysqlGTIDReplication',
@@ -483,6 +486,7 @@ class FreshInstanceTasksTest(trove_testtools.TestCase):
@patch.object(trove.guestagent.api.API, 'attach_replication_slave',
side_effect=GuestError)
@patch('trove.taskmanager.models.LOG')
+ @patch.object(DBInstance, 'get_by')
def test_error_attach_replication_slave(self, *args):
mock_flavor = {'id': 8, 'ram': 768, 'name': 'bigger_flavor'}
snapshot = {'replication_strategy': 'MysqlGTIDReplication',
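
A note that explains the argument order in the amended signatures: stacked @patch decorators are applied bottom-up, so the decorator nearest the def supplies the first mock argument (which is why mock_get_by arrives before mock_get_client above). A self-contained illustration:

    from mock import patch


    class Thing(object):
        a = 'a'
        b = 'b'


    @patch.object(Thing, 'a')  # outermost decorator: mock arrives last
    @patch.object(Thing, 'b')  # innermost decorator: mock arrives first
    def demo(mock_b, mock_a):
        assert Thing.b is mock_b
        assert Thing.a is mock_a


    demo()
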
diff --git a/trove/tests/unittests/upgrade/test_models.py b/trove/tests/unittests/upgrade/test_models.py
index f0459462..9a859de6 100644
--- a/trove/tests/unittests/upgrade/test_models.py
+++ b/trove/tests/unittests/upgrade/test_models.py
@@ -66,7 +66,11 @@ class TestUpgradeModel(trove_testtools.TestCase):
@patch('trove.guestagent.api.API.upgrade')
@patch.object(rpc, 'get_client')
- def _assert_create_with_metadata(self, mock_client, api_upgrade_mock,
+ @patch('trove.instance.models.get_instance_encryption_key',
+ return_value='2LMDgren5citVxmSYNiRFCyFfVDjJtDaQT9LYV08')
+ def _assert_create_with_metadata(self, mock_get_encryption_key,
+ mock_client,
+ api_upgrade_mock,
metadata=None):
"""Exercise UpgradeMessageSender.create() call.
"""
@@ -85,3 +89,4 @@ class TestUpgradeModel(trove_testtools.TestCase):
func() # This call should translate to the API call asserted below.
api_upgrade_mock.assert_called_once_with(instance_version, location,
metadata)
+ mock_get_encryption_key.assert_called()
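
One subtlety in the decorator above: patch() with a dotted-string target replaces the attribute on the named module for the duration of the test, so the target must be the namespace the caller resolves the helper through (trove.instance.models here). A tiny illustration with made-up names:

    from mock import patch


    def get_key(instance_id):
        return 'real-key'


    def send_upgrade(instance_id):
        # Resolves get_key through this module's namespace at call time.
        return get_key(instance_id)


    with patch('%s.get_key' % __name__, return_value='stub-key') as mock_key:
        assert send_upgrade('some-uuid') == 'stub-key'
        mock_key.assert_called_once_with('some-uuid')
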
diff --git a/trove/tests/util/__init__.py b/trove/tests/util/__init__.py
index f4529336..9dae5271 100644
--- a/trove/tests/util/__init__.py
+++ b/trove/tests/util/__init__.py
@@ -166,8 +166,11 @@ def create_nova_client(user, service_type=None):
from novaclient.client import Client
if not service_type:
service_type = test_config.nova_client['nova_service_type']
- openstack = Client(CONF.nova_client_version, user.auth_user, user.auth_key,
- user.tenant, test_config.nova_client['auth_url'],
+ openstack = Client(CONF.nova_client_version,
+ user.auth_user,
+ user.auth_key,
+ project_name=user.tenant,
+ auth_url=test_config.nova_client['auth_url'],
service_type=service_type, no_cache=True,
cacert=test_config.values.get('cacert', None))
openstack.authenticate()
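
Passing project_name and auth_url by keyword, as the hunk above does, insulates the call from positional-signature drift between novaclient releases. The same call style outside the test harness (credential values are placeholders):

    from novaclient.client import Client

    client = Client('2',            # nova_client_version
                    'admin',        # auth_user
                    'secret',       # auth_key
                    project_name='demo',
                    auth_url='http://keystone.example.com:5000/v2.0',
                    service_type='compute',
                    no_cache=True)
    client.authenticate()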