author      Zuul <zuul@review.opendev.org>            2020-02-02 22:45:40 +0000
committer   Gerrit Code Review <review@openstack.org> 2020-02-02 22:45:40 +0000
commit      2d301d0a21863c6c0fbb9e854c7eb8ad8f19bbc1 (patch)
tree        a13795d2aa56dab51e4dd723954332c110ade546
parent      af5eea5c6cc203f2e2ab26f97defbe66069a2378 (diff)
parent      602c4d42de77d1be0397185b04f2790f4bae87b7 (diff)
download    trove-2d301d0a21863c6c0fbb9e854c7eb8ad8f19bbc1.tar.gz
Merge "Improve the function tests"
-rw-r--r--  .zuul.yaml | 7
-rw-r--r--  devstack/plugin.sh | 23
-rw-r--r--  devstack/settings | 2
-rw-r--r--  doc/source/contributor/how_to_create_a_trove_instance.rst | 360
-rw-r--r--  doc/source/contributor/index.rst | 1
-rw-r--r--  doc/source/index.rst | 19
-rw-r--r--  etc/tests/core.test.conf | 2
-rw-r--r--  integration/scripts/conf/test_begin.conf | 12
-rwxr-xr-x  integration/scripts/files/deprecated-elements/fedora-guest/extra-data.d/15-trove-dep (renamed from integration/scripts/files/elements/fedora-guest/extra-data.d/15-trove-dep) | 0
-rwxr-xr-x  integration/scripts/files/deprecated-elements/fedora-guest/extra-data.d/20-guest-systemd (renamed from integration/scripts/files/elements/fedora-guest/extra-data.d/20-guest-systemd) | 0
-rwxr-xr-x  integration/scripts/files/deprecated-elements/fedora-guest/extra-data.d/62-ssh-key (renamed from integration/scripts/files/elements/fedora-guest/extra-data.d/62-ssh-key) | 0
-rwxr-xr-x  integration/scripts/files/deprecated-elements/fedora-guest/install.d/15-trove-dep (renamed from integration/scripts/files/elements/fedora-guest/install.d/15-trove-dep) | 0
-rwxr-xr-x  integration/scripts/files/deprecated-elements/fedora-guest/install.d/20-etc (renamed from integration/scripts/files/elements/fedora-guest/install.d/20-etc) | 0
-rwxr-xr-x  integration/scripts/files/deprecated-elements/fedora-guest/install.d/21-use-fedora-certificates (renamed from integration/scripts/files/elements/fedora-guest/install.d/21-use-fedora-certificates) | 0
-rwxr-xr-x  integration/scripts/files/deprecated-elements/fedora-guest/install.d/50-user (renamed from integration/scripts/files/elements/fedora-guest/install.d/50-user) | 0
-rwxr-xr-x  integration/scripts/files/deprecated-elements/fedora-guest/install.d/62-ssh-key (renamed from integration/scripts/files/elements/fedora-guest/install.d/62-ssh-key) | 0
-rwxr-xr-x  integration/scripts/files/deprecated-elements/fedora-guest/post-install.d/05-ipforwarding (renamed from integration/scripts/files/elements/fedora-guest/post-install.d/05-ipforwarding) | 0
-rwxr-xr-x  integration/scripts/files/deprecated-elements/fedora-guest/post-install.d/62-trove-guest-sudoers (renamed from integration/scripts/files/elements/fedora-guest/post-install.d/62-trove-guest-sudoers) | 0
-rwxr-xr-x  integration/scripts/files/deprecated-elements/fedora-guest/post-install.d/90-yum-update (renamed from integration/scripts/files/elements/fedora-guest/post-install.d/90-yum-update) | 0
-rw-r--r--  integration/scripts/files/deprecated-elements/fedora-mariadb/README.md (renamed from integration/scripts/files/elements/fedora-mariadb/README.md) | 0
-rwxr-xr-x  integration/scripts/files/deprecated-elements/fedora-mariadb/install.d/10-mariadb (renamed from integration/scripts/files/elements/fedora-mariadb/install.d/10-mariadb) | 0
-rwxr-xr-x  integration/scripts/files/deprecated-elements/fedora-mariadb/pre-install.d/10-percona-copr (renamed from integration/scripts/files/elements/fedora-mariadb/pre-install.d/10-percona-copr) | 0
-rw-r--r--  integration/scripts/files/deprecated-elements/fedora-mongodb/README.md (renamed from integration/scripts/files/elements/fedora-mongodb/README.md) | 0
-rwxr-xr-x  integration/scripts/files/deprecated-elements/fedora-mongodb/install.d/10-mongodb (renamed from integration/scripts/files/elements/fedora-mongodb/install.d/10-mongodb) | 0
-rwxr-xr-x  integration/scripts/files/deprecated-elements/fedora-mongodb/install.d/25-trove-mongo-dep (renamed from integration/scripts/files/elements/fedora-mongodb/install.d/25-trove-mongo-dep) | 0
-rw-r--r--  integration/scripts/files/deprecated-elements/fedora-mysql/README.md (renamed from integration/scripts/files/elements/fedora-mysql/README.md) | 0
-rwxr-xr-x  integration/scripts/files/deprecated-elements/fedora-mysql/install.d/10-mysql (renamed from integration/scripts/files/elements/fedora-mysql/install.d/10-mysql) | 0
-rwxr-xr-x  integration/scripts/files/deprecated-elements/fedora-mysql/install.d/40-xtrabackup (renamed from integration/scripts/files/elements/fedora-mysql/install.d/40-xtrabackup) | 0
-rw-r--r--  integration/scripts/files/deprecated-elements/fedora-mysql/post-install.d/30-register-mysql-service (renamed from integration/scripts/files/elements/fedora-mysql/post-install.d/30-register-mysql-service) | 0
-rwxr-xr-x  integration/scripts/files/deprecated-elements/fedora-percona/install.d/05-percona-server (renamed from integration/scripts/files/elements/fedora-percona/install.d/05-percona-server) | 0
-rwxr-xr-x  integration/scripts/files/deprecated-elements/fedora-percona/install.d/10-mysql (renamed from integration/scripts/files/elements/fedora-percona/install.d/10-mysql) | 0
-rwxr-xr-x  integration/scripts/files/deprecated-elements/fedora-postgresql/install.d/10-postgresql (renamed from integration/scripts/files/elements/fedora-postgresql/install.d/10-postgresql) | 0
-rw-r--r--  integration/scripts/files/deprecated-elements/fedora-redis/README.md (renamed from integration/scripts/files/elements/fedora-redis/README.md) | 0
-rwxr-xr-x  integration/scripts/files/deprecated-elements/fedora-redis/install.d/10-redis (renamed from integration/scripts/files/elements/fedora-redis/install.d/10-redis) | 0
-rwxr-xr-x  integration/scripts/files/deprecated-elements/ubuntu-cassandra/install.d/10-cassandra (renamed from integration/scripts/files/elements/ubuntu-cassandra/install.d/10-cassandra) | 0
-rwxr-xr-x  integration/scripts/files/deprecated-elements/ubuntu-couchbase/install.d/10-couchbase (renamed from integration/scripts/files/elements/ubuntu-couchbase/install.d/10-couchbase) | 0
-rwxr-xr-x  integration/scripts/files/deprecated-elements/ubuntu-couchdb/install.d/10-couchdb (renamed from integration/scripts/files/elements/ubuntu-couchdb/install.d/10-couchdb) | 0
-rw-r--r--  integration/scripts/files/deprecated-elements/ubuntu-db2/README.md (renamed from integration/scripts/files/elements/ubuntu-db2/README.md) | 0
-rwxr-xr-x  integration/scripts/files/deprecated-elements/ubuntu-db2/extra-data.d/20-copy-db2-pkgs (renamed from integration/scripts/files/elements/ubuntu-db2/extra-data.d/20-copy-db2-pkgs) | 0
-rwxr-xr-x  integration/scripts/files/deprecated-elements/ubuntu-db2/install.d/10-db2 (renamed from integration/scripts/files/elements/ubuntu-db2/install.d/10-db2) | 0
-rw-r--r--  integration/scripts/files/deprecated-elements/ubuntu-mongodb/README.md (renamed from integration/scripts/files/elements/ubuntu-mongodb/README.md) | 0
-rwxr-xr-x  integration/scripts/files/deprecated-elements/ubuntu-mongodb/pre-install.d/10-mongodb-apt-key (renamed from integration/scripts/files/elements/ubuntu-mongodb/pre-install.d/10-mongodb-apt-key) | 0
-rwxr-xr-x  integration/scripts/files/deprecated-elements/ubuntu-percona/install.d/30-mysql (renamed from integration/scripts/files/elements/ubuntu-percona/install.d/30-mysql) | 0
-rwxr-xr-x  integration/scripts/files/deprecated-elements/ubuntu-percona/pre-install.d/10-percona-apt-key (renamed from integration/scripts/files/elements/ubuntu-percona/pre-install.d/10-percona-apt-key) | 0
-rwxr-xr-x  integration/scripts/files/deprecated-elements/ubuntu-percona/pre-install.d/20-apparmor-mysql-local (renamed from integration/scripts/files/elements/ubuntu-percona/pre-install.d/20-apparmor-mysql-local) | 0
-rwxr-xr-x  integration/scripts/files/deprecated-elements/ubuntu-pxc/install.d/30-mysql (renamed from integration/scripts/files/elements/ubuntu-pxc/install.d/30-mysql) | 0
-rwxr-xr-x  integration/scripts/files/deprecated-elements/ubuntu-pxc/pre-install.d/10-percona-apt-key (renamed from integration/scripts/files/elements/ubuntu-pxc/pre-install.d/10-percona-apt-key) | 0
-rwxr-xr-x  integration/scripts/files/deprecated-elements/ubuntu-pxc/pre-install.d/20-apparmor-mysql-local (renamed from integration/scripts/files/elements/ubuntu-pxc/pre-install.d/20-apparmor-mysql-local) | 0
-rw-r--r--  integration/scripts/files/deprecated-elements/ubuntu-redis/README.md (renamed from integration/scripts/files/elements/ubuntu-redis/README.md) | 0
-rwxr-xr-x  integration/scripts/files/deprecated-elements/ubuntu-redis/install.d/30-redis (renamed from integration/scripts/files/elements/ubuntu-redis/install.d/30-redis) | 0
-rwxr-xr-x  integration/scripts/files/deprecated-elements/ubuntu-redis/install.d/80-fix-in-guest-agent-env (renamed from integration/scripts/files/elements/ubuntu-redis/install.d/80-fix-in-guest-agent-env) | 0
-rw-r--r--  integration/scripts/files/deprecated-elements/ubuntu-vertica/README.md (renamed from integration/scripts/files/elements/ubuntu-vertica/README.md) | 0
-rwxr-xr-x  integration/scripts/files/deprecated-elements/ubuntu-vertica/extra-data.d/93-copy-vertica-deb (renamed from integration/scripts/files/elements/ubuntu-vertica/extra-data.d/93-copy-vertica-deb) | 0
-rwxr-xr-x  integration/scripts/files/deprecated-elements/ubuntu-vertica/install.d/97-vertica (renamed from integration/scripts/files/elements/ubuntu-vertica/install.d/97-vertica) | 0
-rw-r--r--  integration/scripts/files/deprecated-elements/ubuntu-xenial-cassandra/element-deps (renamed from integration/scripts/files/elements/ubuntu-xenial-cassandra/element-deps) | 0
-rw-r--r--  integration/scripts/files/deprecated-elements/ubuntu-xenial-couchbase/element-deps (renamed from integration/scripts/files/elements/ubuntu-xenial-couchbase/element-deps) | 0
-rw-r--r--  integration/scripts/files/deprecated-elements/ubuntu-xenial-couchdb/element-deps (renamed from integration/scripts/files/elements/ubuntu-xenial-couchdb/element-deps) | 0
-rw-r--r--  integration/scripts/files/deprecated-elements/ubuntu-xenial-mongodb/element-deps (renamed from integration/scripts/files/elements/ubuntu-xenial-mongodb/element-deps) | 0
-rwxr-xr-x  integration/scripts/files/deprecated-elements/ubuntu-xenial-mongodb/install.d/10-mongodb-thp (renamed from integration/scripts/files/elements/ubuntu-xenial-mongodb/install.d/10-mongodb-thp) | 0
-rwxr-xr-x  integration/scripts/files/deprecated-elements/ubuntu-xenial-mongodb/install.d/20-mongodb (renamed from integration/scripts/files/elements/ubuntu-xenial-mongodb/install.d/20-mongodb) | 0
-rwxr-xr-x  integration/scripts/files/deprecated-elements/ubuntu-xenial-mongodb/install.d/25-trove-mongo-dep (renamed from integration/scripts/files/elements/ubuntu-xenial-mongodb/install.d/25-trove-mongo-dep) | 0
-rwxr-xr-x  integration/scripts/files/deprecated-elements/ubuntu-xenial-mongodb/install.d/30-mongodb-conf (renamed from integration/scripts/files/elements/ubuntu-xenial-mongodb/install.d/30-mongodb-conf) | 0
-rwxr-xr-x  integration/scripts/files/deprecated-elements/ubuntu-xenial-mongodb/install.d/35-check-numa (renamed from integration/scripts/files/elements/ubuntu-xenial-mongodb/install.d/35-check-numa) | 0
-rwxr-xr-x  integration/scripts/files/deprecated-elements/ubuntu-xenial-mongodb/install.d/40-check-numa-systemd (renamed from integration/scripts/files/elements/ubuntu-xenial-mongodb/install.d/40-check-numa-systemd) | 0
-rwxr-xr-x  integration/scripts/files/deprecated-elements/ubuntu-xenial-mongodb/install.d/41-mongod-systemd (renamed from integration/scripts/files/elements/ubuntu-xenial-mongodb/install.d/41-mongod-systemd) | 0
-rwxr-xr-x  integration/scripts/files/deprecated-elements/ubuntu-xenial-mongodb/install.d/42-mongos-systemd (renamed from integration/scripts/files/elements/ubuntu-xenial-mongodb/install.d/42-mongos-systemd) | 0
-rw-r--r--  integration/scripts/files/deprecated-elements/ubuntu-xenial-percona/element-deps (renamed from integration/scripts/files/elements/ubuntu-xenial-percona/element-deps) | 0
-rwxr-xr-x  integration/scripts/files/deprecated-elements/ubuntu-xenial-percona/post-install.d/10-fix-mycnf (renamed from integration/scripts/files/elements/ubuntu-xenial-percona/post-install.d/10-fix-mycnf) | 0
-rw-r--r--  integration/scripts/files/deprecated-elements/ubuntu-xenial-pxc/element-deps (renamed from integration/scripts/files/elements/ubuntu-xenial-pxc/element-deps) | 0
-rwxr-xr-x  integration/scripts/files/deprecated-elements/ubuntu-xenial-pxc/install.d/31-fix-my-cnf (renamed from integration/scripts/files/elements/ubuntu-xenial-pxc/install.d/31-fix-my-cnf) | 0
-rw-r--r--  integration/scripts/files/deprecated-elements/ubuntu-xenial-redis/element-deps (renamed from integration/scripts/files/elements/ubuntu-xenial-redis/element-deps) | 0
-rwxr-xr-x  integration/scripts/files/deprecated-elements/ubuntu-xenial-redis/install.d/31-fix-init-file (renamed from integration/scripts/files/elements/ubuntu-xenial-redis/install.d/31-fix-init-file) | 0
-rw-r--r--  integration/scripts/files/elements/ubuntu-mysql/README.md | 3
-rwxr-xr-x  integration/scripts/files/elements/ubuntu-xenial-guest/install.d/20-etc | 2
-rwxr-xr-x  integration/scripts/files/elements/ubuntu-xenial-guest/install.d/21-use-ubuntu-certificates | 7
-rw-r--r--  integration/scripts/files/elements/ubuntu-xenial-mariadb/element-deps | 2
-rwxr-xr-x  integration/scripts/files/elements/ubuntu-xenial-mariadb/install.d/30-mariadb | 7
-rwxr-xr-x  integration/scripts/files/elements/ubuntu-xenial-mysql/install.d/30-mysql | 38
-rw-r--r--  integration/scripts/files/elements/ubuntu-xenial-postgresql/element-deps | 2
-rw-r--r--  integration/scripts/files/trove-guest.systemd.conf | 26
-rw-r--r--  integration/scripts/functions_qemu | 2
-rwxr-xr-x  integration/scripts/trovestack | 183
-rw-r--r--  integration/tests/integration/core.test.conf | 8
-rw-r--r--  integration/tests/integration/int_tests.py | 47
-rwxr-xr-x  integration/tests/integration/run_local.sh | 57
-rw-r--r--  integration/tests/integration/setup.py | 30
-rw-r--r--  integration/tests/integration/tests/README | 1
-rw-r--r--  integration/tests/integration/tests/api/__init__.py | 0
-rw-r--r--  integration/tests/integration/tests/api/delete_all.py | 32
-rw-r--r--  integration/tests/integration/tests/api/instances_pagination.py | 219
-rw-r--r--  integration/tests/integration/tests/api/instances_states.py | 76
-rw-r--r--  integration/tests/integration/tests/dns/__init__.py | 0
-rw-r--r--  integration/tests/integration/tests/dns/check_domain.py | 171
-rw-r--r--  integration/tests/integration/tests/dns/concurrency.py | 111
-rw-r--r--  integration/tests/integration/tests/dns/conversion.py | 103
-rw-r--r--  integration/tests/integration/tests/dns/dns.py | 104
-rw-r--r--  integration/tests/integration/tests/initialize.py | 107
-rw-r--r--  integration/tests/integration/tests/smoke/__init__.py | 0
-rw-r--r--  integration/tests/integration/tests/smoke/instance.py | 103
-rw-r--r--  integration/tests/integration/tests/volumes/__init__.py | 25
-rw-r--r--  integration/tests/integration/tests/volumes/driver.py | 547
-rw-r--r--  integration/tests/integration/tox.ini | 28
-rw-r--r--  roles/trove-devstack/defaults/main.yml | 3
-rw-r--r--  roles/trove-devstack/tasks/main.yml | 2
-rw-r--r--  run_tests.py | 6
-rw-r--r--  trove/cmd/guest.py | 3
-rw-r--r--  trove/common/cfg.py | 2
-rw-r--r--  trove/common/utils.py | 4
-rw-r--r--  trove/guestagent/datastore/mysql_common/manager.py | 2
-rw-r--r--  trove/guestagent/datastore/mysql_common/service.py | 50
-rw-r--r--  trove/guestagent/strategies/backup/experimental/mariadb_impl.py | 6
-rw-r--r--  trove/guestagent/strategies/backup/mysql_impl.py | 10
-rwxr-xr-x  trove/taskmanager/models.py | 33
-rw-r--r--  trove/tests/__init__.py | 17
-rw-r--r--  trove/tests/api/backups.py | 45
-rw-r--r--  trove/tests/api/configurations.py | 56
-rw-r--r--  trove/tests/api/databases.py | 34
-rw-r--r--  trove/tests/api/datastores.py | 37
-rw-r--r--  trove/tests/api/header.py | 55
-rw-r--r--  trove/tests/api/instances.py | 676
-rw-r--r--  trove/tests/api/instances_actions.py | 109
-rw-r--r--  trove/tests/api/instances_delete.py | 195
-rw-r--r--  trove/tests/api/instances_mysql_down.py | 129
-rw-r--r--  trove/tests/api/mgmt/admin_required.py | 77
-rw-r--r--  trove/tests/api/mgmt/configurations.py | 3
-rw-r--r--  trove/tests/api/mgmt/datastore_versions.py | 6
-rw-r--r--  trove/tests/api/mgmt/instances.py | 282
-rw-r--r--  trove/tests/api/mgmt/malformed_json.py | 345
-rw-r--r--  trove/tests/api/replication.py | 54
-rw-r--r--  trove/tests/api/root.py | 89
-rw-r--r--  trove/tests/api/root_on_create.py | 132
-rw-r--r--  trove/tests/api/user_access.py | 32
-rw-r--r--  trove/tests/api/users.py | 46
-rw-r--r--  trove/tests/api/versions.py | 6
-rw-r--r--  trove/tests/config.py | 1
-rw-r--r--  trove/tests/int_tests.py | 203
-rw-r--r--  trove/tests/scenario/groups/__init__.py | 4
-rw-r--r--  trove/tests/scenario/groups/backup_group.py | 73
-rw-r--r--  trove/tests/scenario/groups/configuration_group.py | 28
-rw-r--r--  trove/tests/scenario/groups/database_actions_group.py | 22
-rw-r--r--  trove/tests/scenario/groups/guest_log_group.py | 6
-rw-r--r--  trove/tests/scenario/groups/instance_actions_group.py | 23
-rw-r--r--  trove/tests/scenario/groups/instance_create_group.py | 23
-rw-r--r--  trove/tests/scenario/groups/instance_delete_group.py | 30
-rw-r--r--  trove/tests/scenario/groups/instance_error_create_group.py | 12
-rw-r--r--  trove/tests/scenario/groups/instance_force_delete_group.py | 7
-rw-r--r--  trove/tests/scenario/groups/negative_cluster_actions_group.py | 46
-rw-r--r--  trove/tests/scenario/groups/replication_group.py | 34
-rw-r--r--  trove/tests/scenario/groups/root_actions_group.py | 22
-rw-r--r--  trove/tests/scenario/groups/user_actions_group.py | 19
-rw-r--r--  trove/tests/scenario/runners/backup_runners.py | 5
-rw-r--r--  trove/tests/unittests/backup/test_backupagent.py | 5
-rw-r--r--  trove/tests/unittests/guestagent/test_backups.py | 10
-rw-r--r--  trove/tests/unittests/guestagent/test_dbaas.py | 18
-rw-r--r--  trove/tests/unittests/taskmanager/test_models.py | 13
-rw-r--r--  trove/tests/util/__init__.py | 2
-rw-r--r--  trove/tests/util/mysql.py | 7
157 files changed, 769 insertions, 4857 deletions
diff --git a/.zuul.yaml b/.zuul.yaml
index c9fd5ea0..10c1fc3b 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -142,6 +142,9 @@
devstack_localrc:
TROVE_RESIZE_TIME_OUT: 1800
trove_resize_time_out: 1800
+ trove_test_datastore: 'mysql'
+ trove_test_group: 'mysql'
+ trove_test_datastore_version: '5.7'
- job:
name: trove-grenade
@@ -196,6 +199,7 @@
vars:
trove_test_datastore: mariadb
trove_test_group: mariadb-supported-single
+ trove_test_datastore_version: 10.4
devstack_localrc:
TROVE_ENABLE_IMAGE_BUILD: false
@@ -205,6 +209,7 @@
vars:
trove_test_datastore: mariadb
trove_test_group: mariadb-supported-multi
+ trove_test_datastore_version: 10.4
devstack_localrc:
TROVE_ENABLE_IMAGE_BUILD: false
@@ -214,6 +219,7 @@
vars:
trove_test_datastore: mysql
trove_test_group: mysql-supported-single
+ trove_test_datastore_version: 5.7
- job:
name: trove-scenario-mysql-multi
@@ -221,6 +227,7 @@
vars:
trove_test_datastore: mysql
trove_test_group: mysql-supported-multi
+ trove_test_datastore_version: 5.7
- job:
name: trove-scenario-percona-multi
diff --git a/devstack/plugin.sh b/devstack/plugin.sh
index 12b63304..ecd861de 100644
--- a/devstack/plugin.sh
+++ b/devstack/plugin.sh
@@ -390,9 +390,11 @@ function setup_mgmt_network() {
die_if_not_set $LINENO network_id "Failed to create network: $NET_NAME, project: ${PROJECT_ID}"
if [[ "$IP_VERSION" =~ 4.* ]]; then
- NEW_SUBNET_ID=$(create_mgmt_subnet_v4 ${PROJECT_ID} ${network_id} ${SUBNET_NAME} ${SUBNET_RANGE})
- openstack router add subnet $ROUTER_ID $NEW_SUBNET_ID
+ net_subnet_id=$(create_mgmt_subnet_v4 ${PROJECT_ID} ${network_id} ${SUBNET_NAME} ${SUBNET_RANGE})
+ # 'openstack router add' has a bug that couldn't show the error message
+ # openstack router add subnet ${ROUTER_ID} ${net_subnet_id} --debug
fi
+
# Trove doesn't support IPv6 for now.
# if [[ "$IP_VERSION" =~ .*6 ]]; then
# NEW_IPV6_SUBNET_ID=$(create_subnet_v6 ${PROJECT_ID} ${network_id} ${IPV6_SUBNET_NAME})
@@ -454,32 +456,25 @@ function create_guest_image {
${TROVE_IMAGE_OS_RELEASE} \
true
- image_name="trove-${TROVE_IMAGE_OS}-${TROVE_IMAGE_OS_RELEASE}-${TROVE_DATASTORE_TYPE}"
+ image_name="trove-datastore-${TROVE_IMAGE_OS}-${TROVE_IMAGE_OS_RELEASE}-${TROVE_DATASTORE_TYPE}"
image_file=$HOME/images/${image_name}.qcow2
if [ ! -f ${image_file} ]; then
- echo "Image file was not found at ${image_file}. Probably it was not created."
+ echo "Image file was not found at ${image_file}"
return 1
fi
- ACTIVE=1
- INACTIVE=0
-
echo "Add the image to glance"
glance_image_id=$(openstack --os-region-name RegionOne --os-password ${SERVICE_PASSWORD} \
--os-project-name service --os-username trove \
- image create ${TROVE_IMAGE_OS}-${TROVE_IMAGE_OS_RELEASE}-${TROVE_DATASTORE_TYPE} \
+ image create ${image_name} \
--disk-format qcow2 --container-format bare --property hw_rng_model='virtio' --file ${image_file} \
-c id -f value)
echo "Register the image in datastore"
$TROVE_MANAGE datastore_update $TROVE_DATASTORE_TYPE ""
- $TROVE_MANAGE datastore_version_update $TROVE_DATASTORE_TYPE $TROVE_DATASTORE_VERSION $TROVE_DATASTORE_TYPE $glance_image_id "" $ACTIVE
+ $TROVE_MANAGE datastore_version_update $TROVE_DATASTORE_TYPE $TROVE_DATASTORE_VERSION $TROVE_DATASTORE_TYPE $glance_image_id "" 1
$TROVE_MANAGE datastore_update $TROVE_DATASTORE_TYPE $TROVE_DATASTORE_VERSION
- # just for tests
- $TROVE_MANAGE datastore_version_update "$TROVE_DATASTORE_TYPE" "inactive_version" "manager1" $glance_image_id "" $INACTIVE
- $TROVE_MANAGE datastore_update Test_Datastore_1 ""
-
echo "Add parameter validation rules if available"
if [ -f $DEST/trove/trove/templates/$TROVE_DATASTORE_TYPE/validation-rules.json ]; then
$TROVE_MANAGE db_load_datastore_config_parameters "$TROVE_DATASTORE_TYPE" "$TROVE_DATASTORE_VERSION" \
@@ -546,6 +541,8 @@ function config_trove_network {
openstack network list
echo "Neutron subnet list:"
openstack subnet list
+ echo "Neutron router:"
+ openstack router show ${ROUTER_ID} -f yaml
echo "ip route:"
sudo ip route
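
The create_guest_image changes above register the image under the new trove-datastore-* name and pass the active flag as a literal 1 instead of the removed $ACTIVE/$INACTIVE helpers. For reference, a minimal sketch of running the same registration by hand, assuming $TROVE_MANAGE resolves to the trove-manage CLI; the OS, release and datastore values are illustrative and the admin credential options from the hunk are omitted:

    image_name="trove-datastore-ubuntu-bionic-mysql"   # ${TROVE_IMAGE_OS}-${TROVE_IMAGE_OS_RELEASE}-${TROVE_DATASTORE_TYPE}
    image_file=$HOME/images/${image_name}.qcow2

    # Upload the guest image to Glance and capture its ID.
    glance_image_id=$(openstack image create ${image_name} \
        --disk-format qcow2 --container-format bare \
        --property hw_rng_model='virtio' --file ${image_file} \
        -c id -f value)

    # Register the datastore and mark the version active (the trailing 1).
    trove-manage datastore_update mysql ""
    trove-manage datastore_version_update mysql 5.7 mysql ${glance_image_id} "" 1
    trove-manage datastore_update mysql 5.7
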
diff --git a/devstack/settings b/devstack/settings
index 15623836..23d69a43 100644
--- a/devstack/settings
+++ b/devstack/settings
@@ -56,6 +56,8 @@ if is_service_enabled neutron; then
TROVE_MGMT_NETWORK_NAME=${TROVE_MGMT_NETWORK_NAME:-"trove-mgmt"}
TROVE_MGMT_SUBNET_NAME=${TROVE_MGMT_SUBNET_NAME:-${TROVE_MGMT_NETWORK_NAME}-subnet}
TROVE_MGMT_SUBNET_RANGE=${TROVE_MGMT_SUBNET_RANGE:-"192.168.254.0/24"}
+ TROVE_MGMT_SUBNET_START=${TROVE_MGMT_SUBNET_START:-"192.168.254.2"}
+ TROVE_MGMT_SUBNET_END=${TROVE_MGMT_SUBNET_END:-"192.168.254.200"}
else
TROVE_HOST_GATEWAY=${NETWORK_GATEWAY:-10.0.0.1}
fi
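
The two new variables bound the allocation pool of the Trove management subnet and, like the other TROVE_MGMT_* settings here, can be overridden per deployment. A small sketch of a DevStack local.conf override; the pool bounds below are example values, not anything mandated by this change:

    [[local|localrc]]
    TROVE_MGMT_SUBNET_RANGE=192.168.254.0/24
    TROVE_MGMT_SUBNET_START=192.168.254.10
    TROVE_MGMT_SUBNET_END=192.168.254.100
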
diff --git a/doc/source/contributor/how_to_create_a_trove_instance.rst b/doc/source/contributor/how_to_create_a_trove_instance.rst
deleted file mode 100644
index 9b169242..00000000
--- a/doc/source/contributor/how_to_create_a_trove_instance.rst
+++ /dev/null
@@ -1,360 +0,0 @@
-.. _create_trove_instance:
-
-==============================
-How to create a trove instance
-==============================
-
-While creating a trove instance, I often have problems with cinder
-volumes and nova servers due to my lack of knowledge in the area.
-This post describes my journey of creating a trove instance.
-
-----------------
-Installing trove
-----------------
-
-I use the integration tools provided by trove to install the required services.
-This is already covered in the install guide.
-
-Install trove
-
-.. code-block:: bash
-
- /trove/integration/scripts$ ./trovestack install
-
-Once that completes, I use the kick-start command, which prepares a datastore
-for us to use and target with our trove instance. This example uses the
-mysql datastore.
-
-.. code-block:: bash
-
- /trove/integration/scripts$ ./trovestack kick-start mysql
-
-Note: This command doesn't give you a completion message.
-
-You can view the available datastores by running the following command:
-
-.. code-block:: bash
-
- $ trove datastore-list
-
- +--------------------------------------+------------------+
- | ID | Name |
- +--------------------------------------+------------------+
- | 137c27ee-d491-4a54-90ab-06307e9f6bf6 | mysql |
- | aea3d4c5-9c2e-48ae-b100-527b18d4eb02 | Test_Datastore_1 |
- | b8583e8c-8177-480e-889e-a73c5290b558 | test_ds |
- +--------------------------------------+------------------+
-
-Once that is done, view the image that was built for the datastore you have
-kick-started and identify the resources required for it.
-
-.. code-block:: bash
-
- $ openstack image list
-
- +--------------------------------------+--------------------------+--------+
- | ID | Name | Status |
- +--------------------------------------+--------------------------+--------+
- | 37d4b996-14c2-4981-820e-3ac87bb4c5a2 | cirros-0.3.5-x86_64-disk | active |
- | 2d7d930a-d606-4934-8602-851207546fee | ubuntu_mysql | active |
- +--------------------------------------+--------------------------+--------+
-
-Grab the image name (or ID) from the list and run the following command to view
-the size of the image.
-
-.. code-block:: bash
-
- $ openstack image show ubuntu_mysql
-
- +------------------+------------------------------------------------------+
- | Field | Value |
- +------------------+------------------------------------------------------+
- | checksum | 9facdf0670ccb58ea27bf665e4fdcdf5 |
- | container_format | bare |
- | created_at | 2017-05-26T14:35:39Z |
- | disk_format | qcow2 |
- | file | /v2/images/2d7d930a-d606-4934-8602-851207546fee/file |
- | id | 2d7d930a-d606-4934-8602-851207546fee |
- | min_disk | 0 |
- | min_ram | 0 |
- | name | ubuntu_mysql |
- | owner | e765230cd96f47f294f910551ec3c1f4 |
- | protected | False |
- | schema | /v2/schemas/image |
- | size | 633423872 |
- | status | active |
- | tags | |
- | updated_at | 2017-05-26T14:35:42Z |
- | virtual_size | None |
- | visibility | public |
- +------------------+------------------------------------------------------+
-
-Take the size value, which is 633423872 bytes. Cinder volumes are sized
-in gigabytes, so 633423872 becomes:
-
-633423872 / 1024
-618578  # KB
-618578 / 1024
-604     # MB
-604 / 1024
-0       # < 1 GB, so we round up to 1 GB.
-
-Then test that you can create the cinder volume:
-
-.. code-block:: bash
-
- $ cinder create --name my-v 1
-
- +--------------------------------+--------------------------------------+
- | Property | Value |
- +--------------------------------+--------------------------------------+
- | attachments | [] |
- | availability_zone | nova |
- | bootable | false |
- | consistencygroup_id | None |
- | created_at | 2017-05-26T16:37:55.000000 |
- | description | None |
- | encrypted | False |
- | id | 7a2da60f-cc1b-4798-ba7a-1f0215c74615 |
- | metadata | {} |
- | migration_status | None |
- | multiattach | False |
- | name | my-v |
- | os-vol-host-attr:host | None |
- | os-vol-mig-status-attr:migstat | None |
- | os-vol-mig-status-attr:name_id | None |
- | os-vol-tenant-attr:tenant_id | e765230cd96f47f294f910551ec3c1f4 |
- | replication_status | None |
- | size | 1 |
- | snapshot_id | None |
- | source_volid | None |
- | status | creating |
- | updated_at | None |
- | user_id | cf1e59dc2e4d4aeca51aa050faac15c2 |
- | volume_type | lvmdriver-1 |
- +--------------------------------+--------------------------------------+
-
-Next, verify the cinder volume status has moved from creating to available.
-
-.. code-block:: bash
-
- $ cinder show my-v
-
- +--------------------------------+--------------------------------------+
- | Property | Value |
- +--------------------------------+--------------------------------------+
- | attachments | [] |
- | availability_zone | nova |
- | bootable | false |
- | consistencygroup_id | None |
- | created_at | 2017-05-26T16:37:55.000000 |
- | description | None |
- | encrypted | False |
- | id | 7a2da60f-cc1b-4798-ba7a-1f0215c74615 |
- | metadata | {} |
- | migration_status | None |
- | multiattach | False |
- | name | my-v |
- | os-vol-host-attr:host | ubuntu@lvmdriver-1#lvmdriver-1 |
- | os-vol-mig-status-attr:migstat | None |
- | os-vol-mig-status-attr:name_id | None |
- | os-vol-tenant-attr:tenant_id | e765230cd96f47f294f910551ec3c1f4 |
- | replication_status | None |
- | size | 1 |
- | snapshot_id | None |
- | source_volid | None |
- | status | available |
- | updated_at | 2017-05-26T16:37:56.000000 |
- | user_id | cf1e59dc2e4d4aeca51aa050faac15c2 |
- | volume_type | lvmdriver-1 |
- +--------------------------------+--------------------------------------+
-
-OK, now we know that works, so let's delete it.
-
-.. code-block:: bash
-
- $ cinder delete my-v
-
-Next, choose a server flavor that fits the requirements of your datastore
-and does not exceed your computer's hardware limitations.
-
-.. code-block:: bash
-
- $ trove flavor-list
-
- +------+--------------------------+--------+-------+------+-----------+
- | ID | Name | RAM | vCPUs | Disk | Ephemeral |
- +------+--------------------------+--------+-------+------+-----------+
- | 1 | m1.tiny | 512 | 1 | 1 | 0 |
- | 10 | test.tiny-3 | 512 | 1 | 3 | 0 |
- | 10e | test.eph.tiny-3 | 512 | 1 | 3 | 1 |
- | 10er | test.eph.tiny-3.resize | 528 | 2 | 3 | 1 |
- | 10r | test.tiny-3.resize | 528 | 2 | 3 | 0 |
- | 15 | test.small-3 | 768 | 1 | 3 | 0 |
- | 15e | test.eph.small-3 | 768 | 1 | 3 | 1 |
- | 15er | test.eph.small-3.resize | 784 | 2 | 3 | 1 |
- | 15r | test.small-3.resize | 784 | 2 | 3 | 0 |
- | 16 | test.small-4 | 768 | 1 | 4 | 0 |
- | 16e | test.eph.small-4 | 768 | 1 | 4 | 1 |
- | 16er | test.eph.small-4.resize | 784 | 2 | 4 | 1 |
- | 16r | test.small-4.resize | 784 | 2 | 4 | 0 |
- | 17 | test.small-5 | 768 | 1 | 5 | 0 |
- | 17e | test.eph.small-5 | 768 | 1 | 5 | 1 |
- | 17er | test.eph.small-5.resize | 784 | 2 | 5 | 1 |
- | 17r | test.small-5.resize | 784 | 2 | 5 | 0 |
- | 2 | m1.small | 2048 | 1 | 20 | 0 |
- | 20 | test.medium-4 | 1024 | 1 | 4 | 0 |
- | 20e | test.eph.medium-4 | 1024 | 1 | 4 | 1 |
- | 20er | test.eph.medium-4.resize | 1040 | 2 | 4 | 1 |
- | 20r | test.medium-4.resize | 1040 | 2 | 4 | 0 |
- | 21 | test.medium-5 | 1024 | 1 | 5 | 0 |
- | 21e | test.eph.medium-5 | 1024 | 1 | 5 | 1 |
- | 21er | test.eph.medium-5.resize | 1040 | 2 | 5 | 1 |
- | 21r | test.medium-5.resize | 1040 | 2 | 5 | 0 |
- | 25 | test.large-5 | 2048 | 1 | 5 | 0 |
- | 25e | test.eph.large-5 | 2048 | 1 | 5 | 1 |
- | 25er | test.eph.large-5.resize | 2064 | 2 | 5 | 1 |
- | 25r | test.large-5.resize | 2064 | 2 | 5 | 0 |
- | 26 | test.large-10 | 2048 | 1 | 10 | 0 |
- | 26e | test.eph.large-10 | 2048 | 1 | 10 | 1 |
- | 26er | test.eph.large-10.resize | 2064 | 2 | 10 | 1 |
- | 26r | test.large-10.resize | 2064 | 2 | 10 | 0 |
- | 27 | test.large-15 | 2048 | 1 | 15 | 0 |
- | 27e | test.eph.large-15 | 2048 | 1 | 15 | 1 |
- | 27er | test.eph.large-15.resize | 2064 | 2 | 15 | 1 |
- | 27r | test.large-15.resize | 2064 | 2 | 15 | 0 |
- | 3 | m1.medium | 4096 | 2 | 40 | 0 |
- | 30 | test.fault_1-1 | 512 | 1 | 1 | 0 |
- | 30e | test.eph.fault_1-1 | 512 | 1 | 1 | 1 |
- | 31 | test.fault_2-5 | 131072 | 1 | 5 | 0 |
- | 31e | test.eph.fault_2-5 | 131072 | 1 | 5 | 1 |
- | 4 | m1.large | 8192 | 4 | 80 | 0 |
- | 42 | m1.nano | 64 | 1 | 0 | 0 |
- | 451 | m1.heat | 512 | 1 | 0 | 0 |
- | 5 | m1.xlarge | 16384 | 8 | 160 | 0 |
- | 84 | m1.micro | 128 | 1 | 0 | 0 |
- | c1 | cirros256 | 256 | 1 | 0 | 0 |
- | d1 | ds512M | 512 | 1 | 5 | 0 |
- | d2 | ds1G | 1024 | 1 | 10 | 0 |
- | d3 | ds2G | 2048 | 2 | 10 | 0 |
- | d4 | ds4G | 4096 | 4 | 20 | 0 |
- +------+--------------------------+--------+-------+------+-----------+
-
-
-The flavor sizes are in megabytes. Check your computer's disk space and pick a
-flavor within your limitations.
-
-.. code-block:: bash
-
- $ df -h
-
- Filesystem Size Used Avail Use% Mounted on
- udev 7.9G 0 7.9G 0% /dev
- tmpfs 1.6G 162M 1.5G 11% /run
- /dev/mapper/ubuntu--vg-root 33G 11G 21G 34% /
- tmpfs 7.9G 4.0K 7.9G 1% /dev/shm
- tmpfs 5.0M 0 5.0M 0% /run/lock
- tmpfs 7.9G 0 7.9G 0% /sys/fs/cgroup
- /dev/vda1 472M 102M 346M 23% /boot
- tmpfs 1.6G 0 1.6G 0% /run/user/1000
- /dev/loop0 6.0G 650M 5.4G 11% /opt/stack/data/swift/drives/sdb1
-
-I have a lot of partitions I don't understand, but ubuntu--vg-root is the one
-set up by LVM during the install, and it is the largest, so I'm going to use 21G
-as my upper limit. I only need 1G now, but this information is still good to know
-when you are dealing with multiple instances, larger images, or limited disk space.
-
-Flavors also use RAM so it's important to check your free memory.
-
-.. code-block:: bash
-
- $ free -h
-
- total used free shared buff/cache available
- Mem: 15G 5.1G 5.0G 150M 5.5G 10G
- Swap: 15G 4.1M 15G
-
-I have given my VM 16GB of RAM and it shows 5GB free. So, to be safe,
-I will choose test.small-3 (ID 15), which has 768MB of RAM and a 3GB disk. The disk
-size must be greater than the 604MB required by the ubuntu_mysql image, but we
-rounded up to 1GB to be safe.
-
-After all of this we are ready to create our trove instance.
-
-.. code-block:: bash
-
- $ trove create my-inst 15 --size 1
-
- +-------------------------+--------------------------------------+
- | Property | Value |
- +-------------------------+--------------------------------------+
- | created | 2017-05-26T16:53:06 |
- | datastore | mysql |
- | datastore_version | 5.6 |
- | encrypted_rpc_messaging | True |
- | flavor | 15 |
- | id | 39f8ac9e-2935-40fb-8b09-8a963fb235bd |
- | name | my-inst |
- | region | RegionOne |
- | server_id | None |
- | status | BUILD |
- | tenant_id | e765230cd96f47f294f910551ec3c1f4 |
- | updated | 2017-05-26T16:53:06 |
- | volume | 1 |
- | volume_id | None |
- +-------------------------+--------------------------------------+
-
-Now we view the details to see if it is successful.
-
-.. code-block:: bash
-
- $ trove show my-inst
-
- +-------------------------+--------------------------------------+
- | Property | Value |
- +-------------------------+--------------------------------------+
- | created | 2017-05-26T16:53:07 |
- | datastore | mysql |
- | datastore_version | 5.6 |
- | encrypted_rpc_messaging | True |
- | flavor | 15 |
- | id | 39f8ac9e-2935-40fb-8b09-8a963fb235bd |
- | name | my-inst |
- | region | RegionOne |
- | server_id | 62399b7e-dec1-4606-9297-3b3711a62d68 |
- | status | BUILD |
- | tenant_id | e765230cd96f47f294f910551ec3c1f4 |
- | updated | 2017-05-26T16:53:13 |
- | volume | 1 |
- | volume_id | da3b3951-7f7a-4c71-86b9-f0059da814f8 |
- +-------------------------+--------------------------------------+
-
-Notice that the status still says BUILD, but we now have a server_id and volume_id.
-
-After waiting a few moments, check it again.
-
-.. code-block:: bash
-
- $ trove show my-inst
-
- +-------------------------+--------------------------------------+
- | Property | Value |
- +-------------------------+--------------------------------------+
- | created | 2017-05-26T16:53:07 |
- | datastore | mysql |
- | datastore_version | 5.6 |
- | encrypted_rpc_messaging | True |
- | flavor | 15 |
- | id | 39f8ac9e-2935-40fb-8b09-8a963fb235bd |
- | name | my-inst |
- | region | RegionOne |
- | server_id | 62399b7e-dec1-4606-9297-3b3711a62d68 |
- | status | ACTIVE |
- | tenant_id | e765230cd96f47f294f910551ec3c1f4 |
- | updated | 2017-05-26T16:53:13 |
- | volume | 1 |
- | volume_id | da3b3951-7f7a-4c71-86b9-f0059da814f8 |
- | volume_used | 0.1 |
- +-------------------------+--------------------------------------+
-
-The status is now set to ACTIVE and you are done!
\ No newline at end of file
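
The removed how-to walked through installing with trovestack, kick-starting a datastore, sizing the volume and flavor, and creating an instance. For quick reference, the same flow condensed into a shell sketch; the flavor ID (15) and the 1 GB volume size are the example values from the removed text:

    # Install DevStack with Trove and prepare the mysql datastore.
    cd /trove/integration/scripts        # path as shown in the removed doc's prompt
    ./trovestack install
    ./trovestack kick-start mysql

    trove datastore-list                 # confirm the mysql datastore is registered
    openstack image list                 # find the guest image that was built
    trove flavor-list                    # pick a flavor your host can accommodate

    # The image size reported by `openstack image show` was 633423872 bytes (~604 MB),
    # so a 1 GB Cinder volume (rounded up) is enough.
    trove create my-inst 15 --size 1
    trove show my-inst                   # wait for status to move from BUILD to ACTIVE
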
diff --git a/doc/source/contributor/index.rst b/doc/source/contributor/index.rst
index 36f1cfc1..1eef881f 100644
--- a/doc/source/contributor/index.rst
+++ b/doc/source/contributor/index.rst
@@ -11,4 +11,3 @@ functionality, the following resources are provided.
design
testing
- how_to_create_a_trove_instance.rst
diff --git a/doc/source/index.rst b/doc/source/index.rst
index 923cae16..7a65fccf 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -33,21 +33,8 @@ For an in-depth look at the project's design and structure, see the
- `Trove`_
- `Trove Client`_
-* `Trove Wiki`_ on OpenStack
* `Trove API Documentation`_ on docs.openstack.org
-* `Trove Blueprints`_ on storyboard.openstack.org
-* `Trove Bugs`_ on storyboard.openstack.org
-
-
-Guest Images
-============
-
-In order to use Trove, you need to have Guest Images for each
-datastore and version. These images are loaded into Glance and
-registered with Trove.
-
-For those wishing to develop guest images, please refer to the
-:ref:`build_guest_images` page.
+* `Trove storyboard`_ on storyboard.openstack.org
Search Trove Documentation
@@ -56,9 +43,7 @@ Search Trove Documentation
* :ref:`search`
-.. _Trove Wiki: https://wiki.openstack.org/wiki/Trove
.. _Trove: https://opendev.org/openstack/trove
.. _Trove Client: https://opendev.org/openstack/python-troveclient
.. _Trove API Documentation: https://docs.openstack.org/api-ref/database/
-.. _Trove Blueprints: https://storyboard.openstack.org/#!/project/openstack/trove
-.. _Trove Bugs: https://storyboard.openstack.org/#!/project/openstack/trove
+.. _Trove storyboard: https://storyboard.openstack.org/#!/project/openstack/trove
diff --git a/etc/tests/core.test.conf b/etc/tests/core.test.conf
index 2c3b563d..9b5f68df 100644
--- a/etc/tests/core.test.conf
+++ b/etc/tests/core.test.conf
@@ -1,6 +1,5 @@
{
"report_directory":"rdli-test-report",
- "start_services": false,
"test_mgmt":false,
"use_local_ovz":false,
@@ -16,7 +15,6 @@
"nova_conf":"/home/vagrant/nova.conf",
"keystone_code_root":"/opt/stack/keystone",
"keystone_conf":"/etc/keystone/keystone.conf",
- "keystone_use_combined":true,
"trove_code_root":"/opt/stack/trove",
"trove_conf":"/tmp/trove.conf",
"trove_version":"v1.0",
diff --git a/integration/scripts/conf/test_begin.conf b/integration/scripts/conf/test_begin.conf
index fa0a1e1f..c0b6c513 100644
--- a/integration/scripts/conf/test_begin.conf
+++ b/integration/scripts/conf/test_begin.conf
@@ -37,23 +37,11 @@
],
"flavors": null,
"white_box":false,
- "start_services": %startservices%,
"test_mgmt":false,
"use_local_ovz":false,
"use_venv":false,
- "glance_code_root":"%glance_path%",
- "glance_api_conf":"/vagrant/conf/glance-api.conf",
- "glance_reg_conf":"/vagrant/conf/glance-reg.conf",
- "glance_images_directory": "/glance_images",
- "glance_image": "debian-squeeze-x86_64-openvz.tar.gz",
"report_directory":"%report_directory%",
"usr_bin_dir":"%bin_path%",
- "nova_code_root":"%nova_path%",
- "nova_conf":"/home/vagrant/nova.conf",
- "keystone_code_root":"%keystone_path%",
- "keystone_conf":"/etc/keystone/keystone.conf",
- "keystone_use_combined":true,
- "trove_code_root":"%trove_path%",
"trove_conf":"/tmp/trove.conf",
"trove_version":"v1.0",
"trove_api_updated":"2012-08-01T00:00:00Z",
diff --git a/integration/scripts/files/elements/fedora-guest/extra-data.d/15-trove-dep b/integration/scripts/files/deprecated-elements/fedora-guest/extra-data.d/15-trove-dep
index 2c4b8202..2c4b8202 100755
--- a/integration/scripts/files/elements/fedora-guest/extra-data.d/15-trove-dep
+++ b/integration/scripts/files/deprecated-elements/fedora-guest/extra-data.d/15-trove-dep
diff --git a/integration/scripts/files/elements/fedora-guest/extra-data.d/20-guest-systemd b/integration/scripts/files/deprecated-elements/fedora-guest/extra-data.d/20-guest-systemd
index aeedfb6e..aeedfb6e 100755
--- a/integration/scripts/files/elements/fedora-guest/extra-data.d/20-guest-systemd
+++ b/integration/scripts/files/deprecated-elements/fedora-guest/extra-data.d/20-guest-systemd
diff --git a/integration/scripts/files/elements/fedora-guest/extra-data.d/62-ssh-key b/integration/scripts/files/deprecated-elements/fedora-guest/extra-data.d/62-ssh-key
index 63453a75..63453a75 100755
--- a/integration/scripts/files/elements/fedora-guest/extra-data.d/62-ssh-key
+++ b/integration/scripts/files/deprecated-elements/fedora-guest/extra-data.d/62-ssh-key
diff --git a/integration/scripts/files/elements/fedora-guest/install.d/15-trove-dep b/integration/scripts/files/deprecated-elements/fedora-guest/install.d/15-trove-dep
index 3bda0565..3bda0565 100755
--- a/integration/scripts/files/elements/fedora-guest/install.d/15-trove-dep
+++ b/integration/scripts/files/deprecated-elements/fedora-guest/install.d/15-trove-dep
diff --git a/integration/scripts/files/elements/fedora-guest/install.d/20-etc b/integration/scripts/files/deprecated-elements/fedora-guest/install.d/20-etc
index bec065ef..bec065ef 100755
--- a/integration/scripts/files/elements/fedora-guest/install.d/20-etc
+++ b/integration/scripts/files/deprecated-elements/fedora-guest/install.d/20-etc
diff --git a/integration/scripts/files/elements/fedora-guest/install.d/21-use-fedora-certificates b/integration/scripts/files/deprecated-elements/fedora-guest/install.d/21-use-fedora-certificates
index 8ef6c50a..8ef6c50a 100755
--- a/integration/scripts/files/elements/fedora-guest/install.d/21-use-fedora-certificates
+++ b/integration/scripts/files/deprecated-elements/fedora-guest/install.d/21-use-fedora-certificates
diff --git a/integration/scripts/files/elements/fedora-guest/install.d/50-user b/integration/scripts/files/deprecated-elements/fedora-guest/install.d/50-user
index a4b666bf..a4b666bf 100755
--- a/integration/scripts/files/elements/fedora-guest/install.d/50-user
+++ b/integration/scripts/files/deprecated-elements/fedora-guest/install.d/50-user
diff --git a/integration/scripts/files/elements/fedora-guest/install.d/62-ssh-key b/integration/scripts/files/deprecated-elements/fedora-guest/install.d/62-ssh-key
index 80c1e65c..80c1e65c 100755
--- a/integration/scripts/files/elements/fedora-guest/install.d/62-ssh-key
+++ b/integration/scripts/files/deprecated-elements/fedora-guest/install.d/62-ssh-key
diff --git a/integration/scripts/files/elements/fedora-guest/post-install.d/05-ipforwarding b/integration/scripts/files/deprecated-elements/fedora-guest/post-install.d/05-ipforwarding
index 4824cfcf..4824cfcf 100755
--- a/integration/scripts/files/elements/fedora-guest/post-install.d/05-ipforwarding
+++ b/integration/scripts/files/deprecated-elements/fedora-guest/post-install.d/05-ipforwarding
diff --git a/integration/scripts/files/elements/fedora-guest/post-install.d/62-trove-guest-sudoers b/integration/scripts/files/deprecated-elements/fedora-guest/post-install.d/62-trove-guest-sudoers
index 0581fd2b..0581fd2b 100755
--- a/integration/scripts/files/elements/fedora-guest/post-install.d/62-trove-guest-sudoers
+++ b/integration/scripts/files/deprecated-elements/fedora-guest/post-install.d/62-trove-guest-sudoers
diff --git a/integration/scripts/files/elements/fedora-guest/post-install.d/90-yum-update b/integration/scripts/files/deprecated-elements/fedora-guest/post-install.d/90-yum-update
index cd2992c1..cd2992c1 100755
--- a/integration/scripts/files/elements/fedora-guest/post-install.d/90-yum-update
+++ b/integration/scripts/files/deprecated-elements/fedora-guest/post-install.d/90-yum-update
diff --git a/integration/scripts/files/elements/fedora-mariadb/README.md b/integration/scripts/files/deprecated-elements/fedora-mariadb/README.md
index 757f00b8..757f00b8 100644
--- a/integration/scripts/files/elements/fedora-mariadb/README.md
+++ b/integration/scripts/files/deprecated-elements/fedora-mariadb/README.md
diff --git a/integration/scripts/files/elements/fedora-mariadb/install.d/10-mariadb b/integration/scripts/files/deprecated-elements/fedora-mariadb/install.d/10-mariadb
index a5cc2c0e..a5cc2c0e 100755
--- a/integration/scripts/files/elements/fedora-mariadb/install.d/10-mariadb
+++ b/integration/scripts/files/deprecated-elements/fedora-mariadb/install.d/10-mariadb
diff --git a/integration/scripts/files/elements/fedora-mariadb/pre-install.d/10-percona-copr b/integration/scripts/files/deprecated-elements/fedora-mariadb/pre-install.d/10-percona-copr
index bcc55205..bcc55205 100755
--- a/integration/scripts/files/elements/fedora-mariadb/pre-install.d/10-percona-copr
+++ b/integration/scripts/files/deprecated-elements/fedora-mariadb/pre-install.d/10-percona-copr
diff --git a/integration/scripts/files/elements/fedora-mongodb/README.md b/integration/scripts/files/deprecated-elements/fedora-mongodb/README.md
index 2518abf2..2518abf2 100644
--- a/integration/scripts/files/elements/fedora-mongodb/README.md
+++ b/integration/scripts/files/deprecated-elements/fedora-mongodb/README.md
diff --git a/integration/scripts/files/elements/fedora-mongodb/install.d/10-mongodb b/integration/scripts/files/deprecated-elements/fedora-mongodb/install.d/10-mongodb
index 02ea9535..02ea9535 100755
--- a/integration/scripts/files/elements/fedora-mongodb/install.d/10-mongodb
+++ b/integration/scripts/files/deprecated-elements/fedora-mongodb/install.d/10-mongodb
diff --git a/integration/scripts/files/elements/fedora-mongodb/install.d/25-trove-mongo-dep b/integration/scripts/files/deprecated-elements/fedora-mongodb/install.d/25-trove-mongo-dep
index 7be7ef95..7be7ef95 100755
--- a/integration/scripts/files/elements/fedora-mongodb/install.d/25-trove-mongo-dep
+++ b/integration/scripts/files/deprecated-elements/fedora-mongodb/install.d/25-trove-mongo-dep
diff --git a/integration/scripts/files/elements/fedora-mysql/README.md b/integration/scripts/files/deprecated-elements/fedora-mysql/README.md
index 39a6ab8c..39a6ab8c 100644
--- a/integration/scripts/files/elements/fedora-mysql/README.md
+++ b/integration/scripts/files/deprecated-elements/fedora-mysql/README.md
diff --git a/integration/scripts/files/elements/fedora-mysql/install.d/10-mysql b/integration/scripts/files/deprecated-elements/fedora-mysql/install.d/10-mysql
index ff7efa36..ff7efa36 100755
--- a/integration/scripts/files/elements/fedora-mysql/install.d/10-mysql
+++ b/integration/scripts/files/deprecated-elements/fedora-mysql/install.d/10-mysql
diff --git a/integration/scripts/files/elements/fedora-mysql/install.d/40-xtrabackup b/integration/scripts/files/deprecated-elements/fedora-mysql/install.d/40-xtrabackup
index 9c9709ca..9c9709ca 100755
--- a/integration/scripts/files/elements/fedora-mysql/install.d/40-xtrabackup
+++ b/integration/scripts/files/deprecated-elements/fedora-mysql/install.d/40-xtrabackup
diff --git a/integration/scripts/files/elements/fedora-mysql/post-install.d/30-register-mysql-service b/integration/scripts/files/deprecated-elements/fedora-mysql/post-install.d/30-register-mysql-service
index a7db5d92..a7db5d92 100644
--- a/integration/scripts/files/elements/fedora-mysql/post-install.d/30-register-mysql-service
+++ b/integration/scripts/files/deprecated-elements/fedora-mysql/post-install.d/30-register-mysql-service
diff --git a/integration/scripts/files/elements/fedora-percona/install.d/05-percona-server b/integration/scripts/files/deprecated-elements/fedora-percona/install.d/05-percona-server
index 9c43c6ef..9c43c6ef 100755
--- a/integration/scripts/files/elements/fedora-percona/install.d/05-percona-server
+++ b/integration/scripts/files/deprecated-elements/fedora-percona/install.d/05-percona-server
diff --git a/integration/scripts/files/elements/fedora-percona/install.d/10-mysql b/integration/scripts/files/deprecated-elements/fedora-percona/install.d/10-mysql
index 284e81ac..284e81ac 100755
--- a/integration/scripts/files/elements/fedora-percona/install.d/10-mysql
+++ b/integration/scripts/files/deprecated-elements/fedora-percona/install.d/10-mysql
diff --git a/integration/scripts/files/elements/fedora-postgresql/install.d/10-postgresql b/integration/scripts/files/deprecated-elements/fedora-postgresql/install.d/10-postgresql
index 0872d6e4..0872d6e4 100755
--- a/integration/scripts/files/elements/fedora-postgresql/install.d/10-postgresql
+++ b/integration/scripts/files/deprecated-elements/fedora-postgresql/install.d/10-postgresql
diff --git a/integration/scripts/files/elements/fedora-redis/README.md b/integration/scripts/files/deprecated-elements/fedora-redis/README.md
index 426072cf..426072cf 100644
--- a/integration/scripts/files/elements/fedora-redis/README.md
+++ b/integration/scripts/files/deprecated-elements/fedora-redis/README.md
diff --git a/integration/scripts/files/elements/fedora-redis/install.d/10-redis b/integration/scripts/files/deprecated-elements/fedora-redis/install.d/10-redis
index 8c23b5f6..8c23b5f6 100755
--- a/integration/scripts/files/elements/fedora-redis/install.d/10-redis
+++ b/integration/scripts/files/deprecated-elements/fedora-redis/install.d/10-redis
diff --git a/integration/scripts/files/elements/ubuntu-cassandra/install.d/10-cassandra b/integration/scripts/files/deprecated-elements/ubuntu-cassandra/install.d/10-cassandra
index 77233f4c..77233f4c 100755
--- a/integration/scripts/files/elements/ubuntu-cassandra/install.d/10-cassandra
+++ b/integration/scripts/files/deprecated-elements/ubuntu-cassandra/install.d/10-cassandra
diff --git a/integration/scripts/files/elements/ubuntu-couchbase/install.d/10-couchbase b/integration/scripts/files/deprecated-elements/ubuntu-couchbase/install.d/10-couchbase
index 1303fdfd..1303fdfd 100755
--- a/integration/scripts/files/elements/ubuntu-couchbase/install.d/10-couchbase
+++ b/integration/scripts/files/deprecated-elements/ubuntu-couchbase/install.d/10-couchbase
diff --git a/integration/scripts/files/elements/ubuntu-couchdb/install.d/10-couchdb b/integration/scripts/files/deprecated-elements/ubuntu-couchdb/install.d/10-couchdb
index b53f7faa..b53f7faa 100755
--- a/integration/scripts/files/elements/ubuntu-couchdb/install.d/10-couchdb
+++ b/integration/scripts/files/deprecated-elements/ubuntu-couchdb/install.d/10-couchdb
diff --git a/integration/scripts/files/elements/ubuntu-db2/README.md b/integration/scripts/files/deprecated-elements/ubuntu-db2/README.md
index 56a3479b..56a3479b 100644
--- a/integration/scripts/files/elements/ubuntu-db2/README.md
+++ b/integration/scripts/files/deprecated-elements/ubuntu-db2/README.md
diff --git a/integration/scripts/files/elements/ubuntu-db2/extra-data.d/20-copy-db2-pkgs b/integration/scripts/files/deprecated-elements/ubuntu-db2/extra-data.d/20-copy-db2-pkgs
index f82fd32a..f82fd32a 100755
--- a/integration/scripts/files/elements/ubuntu-db2/extra-data.d/20-copy-db2-pkgs
+++ b/integration/scripts/files/deprecated-elements/ubuntu-db2/extra-data.d/20-copy-db2-pkgs
diff --git a/integration/scripts/files/elements/ubuntu-db2/install.d/10-db2 b/integration/scripts/files/deprecated-elements/ubuntu-db2/install.d/10-db2
index 4e87733b..4e87733b 100755
--- a/integration/scripts/files/elements/ubuntu-db2/install.d/10-db2
+++ b/integration/scripts/files/deprecated-elements/ubuntu-db2/install.d/10-db2
diff --git a/integration/scripts/files/elements/ubuntu-mongodb/README.md b/integration/scripts/files/deprecated-elements/ubuntu-mongodb/README.md
index 5b9e33bb..5b9e33bb 100644
--- a/integration/scripts/files/elements/ubuntu-mongodb/README.md
+++ b/integration/scripts/files/deprecated-elements/ubuntu-mongodb/README.md
diff --git a/integration/scripts/files/elements/ubuntu-mongodb/pre-install.d/10-mongodb-apt-key b/integration/scripts/files/deprecated-elements/ubuntu-mongodb/pre-install.d/10-mongodb-apt-key
index 06aba978..06aba978 100755
--- a/integration/scripts/files/elements/ubuntu-mongodb/pre-install.d/10-mongodb-apt-key
+++ b/integration/scripts/files/deprecated-elements/ubuntu-mongodb/pre-install.d/10-mongodb-apt-key
diff --git a/integration/scripts/files/elements/ubuntu-percona/install.d/30-mysql b/integration/scripts/files/deprecated-elements/ubuntu-percona/install.d/30-mysql
index d5a8ac5b..d5a8ac5b 100755
--- a/integration/scripts/files/elements/ubuntu-percona/install.d/30-mysql
+++ b/integration/scripts/files/deprecated-elements/ubuntu-percona/install.d/30-mysql
diff --git a/integration/scripts/files/elements/ubuntu-percona/pre-install.d/10-percona-apt-key b/integration/scripts/files/deprecated-elements/ubuntu-percona/pre-install.d/10-percona-apt-key
index 4e6ec1a5..4e6ec1a5 100755
--- a/integration/scripts/files/elements/ubuntu-percona/pre-install.d/10-percona-apt-key
+++ b/integration/scripts/files/deprecated-elements/ubuntu-percona/pre-install.d/10-percona-apt-key
diff --git a/integration/scripts/files/elements/ubuntu-percona/pre-install.d/20-apparmor-mysql-local b/integration/scripts/files/deprecated-elements/ubuntu-percona/pre-install.d/20-apparmor-mysql-local
index a3e1dc7c..a3e1dc7c 100755
--- a/integration/scripts/files/elements/ubuntu-percona/pre-install.d/20-apparmor-mysql-local
+++ b/integration/scripts/files/deprecated-elements/ubuntu-percona/pre-install.d/20-apparmor-mysql-local
diff --git a/integration/scripts/files/elements/ubuntu-pxc/install.d/30-mysql b/integration/scripts/files/deprecated-elements/ubuntu-pxc/install.d/30-mysql
index d9f2f427..d9f2f427 100755
--- a/integration/scripts/files/elements/ubuntu-pxc/install.d/30-mysql
+++ b/integration/scripts/files/deprecated-elements/ubuntu-pxc/install.d/30-mysql
diff --git a/integration/scripts/files/elements/ubuntu-pxc/pre-install.d/10-percona-apt-key b/integration/scripts/files/deprecated-elements/ubuntu-pxc/pre-install.d/10-percona-apt-key
index 4e6ec1a5..4e6ec1a5 100755
--- a/integration/scripts/files/elements/ubuntu-pxc/pre-install.d/10-percona-apt-key
+++ b/integration/scripts/files/deprecated-elements/ubuntu-pxc/pre-install.d/10-percona-apt-key
diff --git a/integration/scripts/files/elements/ubuntu-pxc/pre-install.d/20-apparmor-mysql-local b/integration/scripts/files/deprecated-elements/ubuntu-pxc/pre-install.d/20-apparmor-mysql-local
index a3e1dc7c..a3e1dc7c 100755
--- a/integration/scripts/files/elements/ubuntu-pxc/pre-install.d/20-apparmor-mysql-local
+++ b/integration/scripts/files/deprecated-elements/ubuntu-pxc/pre-install.d/20-apparmor-mysql-local
diff --git a/integration/scripts/files/elements/ubuntu-redis/README.md b/integration/scripts/files/deprecated-elements/ubuntu-redis/README.md
index 426072cf..426072cf 100644
--- a/integration/scripts/files/elements/ubuntu-redis/README.md
+++ b/integration/scripts/files/deprecated-elements/ubuntu-redis/README.md
diff --git a/integration/scripts/files/elements/ubuntu-redis/install.d/30-redis b/integration/scripts/files/deprecated-elements/ubuntu-redis/install.d/30-redis
index 24d60d2c..24d60d2c 100755
--- a/integration/scripts/files/elements/ubuntu-redis/install.d/30-redis
+++ b/integration/scripts/files/deprecated-elements/ubuntu-redis/install.d/30-redis
diff --git a/integration/scripts/files/elements/ubuntu-redis/install.d/80-fix-in-guest-agent-env b/integration/scripts/files/deprecated-elements/ubuntu-redis/install.d/80-fix-in-guest-agent-env
index 088dc70c..088dc70c 100755
--- a/integration/scripts/files/elements/ubuntu-redis/install.d/80-fix-in-guest-agent-env
+++ b/integration/scripts/files/deprecated-elements/ubuntu-redis/install.d/80-fix-in-guest-agent-env
diff --git a/integration/scripts/files/elements/ubuntu-vertica/README.md b/integration/scripts/files/deprecated-elements/ubuntu-vertica/README.md
index 86202379..86202379 100644
--- a/integration/scripts/files/elements/ubuntu-vertica/README.md
+++ b/integration/scripts/files/deprecated-elements/ubuntu-vertica/README.md
diff --git a/integration/scripts/files/elements/ubuntu-vertica/extra-data.d/93-copy-vertica-deb b/integration/scripts/files/deprecated-elements/ubuntu-vertica/extra-data.d/93-copy-vertica-deb
index 56c02cda..56c02cda 100755
--- a/integration/scripts/files/elements/ubuntu-vertica/extra-data.d/93-copy-vertica-deb
+++ b/integration/scripts/files/deprecated-elements/ubuntu-vertica/extra-data.d/93-copy-vertica-deb
diff --git a/integration/scripts/files/elements/ubuntu-vertica/install.d/97-vertica b/integration/scripts/files/deprecated-elements/ubuntu-vertica/install.d/97-vertica
index 2af42834..2af42834 100755
--- a/integration/scripts/files/elements/ubuntu-vertica/install.d/97-vertica
+++ b/integration/scripts/files/deprecated-elements/ubuntu-vertica/install.d/97-vertica
diff --git a/integration/scripts/files/elements/ubuntu-xenial-cassandra/element-deps b/integration/scripts/files/deprecated-elements/ubuntu-xenial-cassandra/element-deps
index 28898cf7..28898cf7 100644
--- a/integration/scripts/files/elements/ubuntu-xenial-cassandra/element-deps
+++ b/integration/scripts/files/deprecated-elements/ubuntu-xenial-cassandra/element-deps
diff --git a/integration/scripts/files/elements/ubuntu-xenial-couchbase/element-deps b/integration/scripts/files/deprecated-elements/ubuntu-xenial-couchbase/element-deps
index fa85fc7e..fa85fc7e 100644
--- a/integration/scripts/files/elements/ubuntu-xenial-couchbase/element-deps
+++ b/integration/scripts/files/deprecated-elements/ubuntu-xenial-couchbase/element-deps
diff --git a/integration/scripts/files/elements/ubuntu-xenial-couchdb/element-deps b/integration/scripts/files/deprecated-elements/ubuntu-xenial-couchdb/element-deps
index abd5561b..abd5561b 100644
--- a/integration/scripts/files/elements/ubuntu-xenial-couchdb/element-deps
+++ b/integration/scripts/files/deprecated-elements/ubuntu-xenial-couchdb/element-deps
diff --git a/integration/scripts/files/elements/ubuntu-xenial-mongodb/element-deps b/integration/scripts/files/deprecated-elements/ubuntu-xenial-mongodb/element-deps
index 6a5964ec..6a5964ec 100644
--- a/integration/scripts/files/elements/ubuntu-xenial-mongodb/element-deps
+++ b/integration/scripts/files/deprecated-elements/ubuntu-xenial-mongodb/element-deps
diff --git a/integration/scripts/files/elements/ubuntu-xenial-mongodb/install.d/10-mongodb-thp b/integration/scripts/files/deprecated-elements/ubuntu-xenial-mongodb/install.d/10-mongodb-thp
index e949b06a..e949b06a 100755
--- a/integration/scripts/files/elements/ubuntu-xenial-mongodb/install.d/10-mongodb-thp
+++ b/integration/scripts/files/deprecated-elements/ubuntu-xenial-mongodb/install.d/10-mongodb-thp
diff --git a/integration/scripts/files/elements/ubuntu-xenial-mongodb/install.d/20-mongodb b/integration/scripts/files/deprecated-elements/ubuntu-xenial-mongodb/install.d/20-mongodb
index 6320964f..6320964f 100755
--- a/integration/scripts/files/elements/ubuntu-xenial-mongodb/install.d/20-mongodb
+++ b/integration/scripts/files/deprecated-elements/ubuntu-xenial-mongodb/install.d/20-mongodb
diff --git a/integration/scripts/files/elements/ubuntu-xenial-mongodb/install.d/25-trove-mongo-dep b/integration/scripts/files/deprecated-elements/ubuntu-xenial-mongodb/install.d/25-trove-mongo-dep
index c0488bd0..c0488bd0 100755
--- a/integration/scripts/files/elements/ubuntu-xenial-mongodb/install.d/25-trove-mongo-dep
+++ b/integration/scripts/files/deprecated-elements/ubuntu-xenial-mongodb/install.d/25-trove-mongo-dep
diff --git a/integration/scripts/files/elements/ubuntu-xenial-mongodb/install.d/30-mongodb-conf b/integration/scripts/files/deprecated-elements/ubuntu-xenial-mongodb/install.d/30-mongodb-conf
index 5ccf3259..5ccf3259 100755
--- a/integration/scripts/files/elements/ubuntu-xenial-mongodb/install.d/30-mongodb-conf
+++ b/integration/scripts/files/deprecated-elements/ubuntu-xenial-mongodb/install.d/30-mongodb-conf
diff --git a/integration/scripts/files/elements/ubuntu-xenial-mongodb/install.d/35-check-numa b/integration/scripts/files/deprecated-elements/ubuntu-xenial-mongodb/install.d/35-check-numa
index 78162cf5..78162cf5 100755
--- a/integration/scripts/files/elements/ubuntu-xenial-mongodb/install.d/35-check-numa
+++ b/integration/scripts/files/deprecated-elements/ubuntu-xenial-mongodb/install.d/35-check-numa
diff --git a/integration/scripts/files/elements/ubuntu-xenial-mongodb/install.d/40-check-numa-systemd b/integration/scripts/files/deprecated-elements/ubuntu-xenial-mongodb/install.d/40-check-numa-systemd
index b6f76858..b6f76858 100755
--- a/integration/scripts/files/elements/ubuntu-xenial-mongodb/install.d/40-check-numa-systemd
+++ b/integration/scripts/files/deprecated-elements/ubuntu-xenial-mongodb/install.d/40-check-numa-systemd
diff --git a/integration/scripts/files/elements/ubuntu-xenial-mongodb/install.d/41-mongod-systemd b/integration/scripts/files/deprecated-elements/ubuntu-xenial-mongodb/install.d/41-mongod-systemd
index 257e45a8..257e45a8 100755
--- a/integration/scripts/files/elements/ubuntu-xenial-mongodb/install.d/41-mongod-systemd
+++ b/integration/scripts/files/deprecated-elements/ubuntu-xenial-mongodb/install.d/41-mongod-systemd
diff --git a/integration/scripts/files/elements/ubuntu-xenial-mongodb/install.d/42-mongos-systemd b/integration/scripts/files/deprecated-elements/ubuntu-xenial-mongodb/install.d/42-mongos-systemd
index 5073a215..5073a215 100755
--- a/integration/scripts/files/elements/ubuntu-xenial-mongodb/install.d/42-mongos-systemd
+++ b/integration/scripts/files/deprecated-elements/ubuntu-xenial-mongodb/install.d/42-mongos-systemd
diff --git a/integration/scripts/files/elements/ubuntu-xenial-percona/element-deps b/integration/scripts/files/deprecated-elements/ubuntu-xenial-percona/element-deps
index bc5f9af6..bc5f9af6 100644
--- a/integration/scripts/files/elements/ubuntu-xenial-percona/element-deps
+++ b/integration/scripts/files/deprecated-elements/ubuntu-xenial-percona/element-deps
diff --git a/integration/scripts/files/elements/ubuntu-xenial-percona/post-install.d/10-fix-mycnf b/integration/scripts/files/deprecated-elements/ubuntu-xenial-percona/post-install.d/10-fix-mycnf
index 8ae8a91c..8ae8a91c 100755
--- a/integration/scripts/files/elements/ubuntu-xenial-percona/post-install.d/10-fix-mycnf
+++ b/integration/scripts/files/deprecated-elements/ubuntu-xenial-percona/post-install.d/10-fix-mycnf
diff --git a/integration/scripts/files/elements/ubuntu-xenial-pxc/element-deps b/integration/scripts/files/deprecated-elements/ubuntu-xenial-pxc/element-deps
index 7b1a84c9..7b1a84c9 100644
--- a/integration/scripts/files/elements/ubuntu-xenial-pxc/element-deps
+++ b/integration/scripts/files/deprecated-elements/ubuntu-xenial-pxc/element-deps
diff --git a/integration/scripts/files/elements/ubuntu-xenial-pxc/install.d/31-fix-my-cnf b/integration/scripts/files/deprecated-elements/ubuntu-xenial-pxc/install.d/31-fix-my-cnf
index d3347228..d3347228 100755
--- a/integration/scripts/files/elements/ubuntu-xenial-pxc/install.d/31-fix-my-cnf
+++ b/integration/scripts/files/deprecated-elements/ubuntu-xenial-pxc/install.d/31-fix-my-cnf
diff --git a/integration/scripts/files/elements/ubuntu-xenial-redis/element-deps b/integration/scripts/files/deprecated-elements/ubuntu-xenial-redis/element-deps
index 030d85ba..030d85ba 100644
--- a/integration/scripts/files/elements/ubuntu-xenial-redis/element-deps
+++ b/integration/scripts/files/deprecated-elements/ubuntu-xenial-redis/element-deps
diff --git a/integration/scripts/files/elements/ubuntu-xenial-redis/install.d/31-fix-init-file b/integration/scripts/files/deprecated-elements/ubuntu-xenial-redis/install.d/31-fix-init-file
index 08442f60..08442f60 100755
--- a/integration/scripts/files/elements/ubuntu-xenial-redis/install.d/31-fix-init-file
+++ b/integration/scripts/files/deprecated-elements/ubuntu-xenial-redis/install.d/31-fix-init-file
diff --git a/integration/scripts/files/elements/ubuntu-mysql/README.md b/integration/scripts/files/elements/ubuntu-mysql/README.md
deleted file mode 100644
index 39a6ab8c..00000000
--- a/integration/scripts/files/elements/ubuntu-mysql/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-Sets up a MySQL server install in the image.
-
-TODO: auto-tune settings based on host resources or metadata service.
diff --git a/integration/scripts/files/elements/ubuntu-xenial-guest/install.d/20-etc b/integration/scripts/files/elements/ubuntu-xenial-guest/install.d/20-etc
index 1a350153..c36eb31b 100755
--- a/integration/scripts/files/elements/ubuntu-xenial-guest/install.d/20-etc
+++ b/integration/scripts/files/elements/ubuntu-xenial-guest/install.d/20-etc
@@ -6,5 +6,3 @@
dd if=/tmp/in_target.d/trove-guest.service of=/etc/systemd/system/trove-guest.service
systemctl enable trove-guest.service
-
-
diff --git a/integration/scripts/files/elements/ubuntu-xenial-guest/install.d/21-use-ubuntu-certificates b/integration/scripts/files/elements/ubuntu-xenial-guest/install.d/21-use-ubuntu-certificates
index ab9469ed..b55a0ea2 100755
--- a/integration/scripts/files/elements/ubuntu-xenial-guest/install.d/21-use-ubuntu-certificates
+++ b/integration/scripts/files/elements/ubuntu-xenial-guest/install.d/21-use-ubuntu-certificates
@@ -6,6 +6,7 @@
GUEST_UNIT_DROPINS="/etc/systemd/system/trove-guest.service.d"
mkdir -v -p ${GUEST_UNIT_DROPINS}
-echo -e '[Service]\nEnvironment=REQUESTS_CA_BUNDLE=/etc/ssl/certs' > ${GUEST_UNIT_DROPINS}/30-use-system-certificates.conf
-
-
+cat <<EOF > ${GUEST_UNIT_DROPINS}/30-use-system-certificates.conf
+[Service]
+Environment=REQUESTS_CA_BUNDLE=/etc/ssl/certs
+EOF
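
The hunk above swaps the echo -e one-liner for a heredoc when writing the guest agent's systemd drop-in. A minimal sketch of the same pattern, plus an illustrative way to confirm the override inside a booted guest (the systemctl calls assume the unit is installed as trove-guest.service):

    GUEST_UNIT_DROPINS="/etc/systemd/system/trove-guest.service.d"
    mkdir -p "${GUEST_UNIT_DROPINS}"
    # The heredoc keeps the unit syntax readable compared to a single echo -e.
    cat <<EOF > "${GUEST_UNIT_DROPINS}/30-use-system-certificates.conf"
    [Service]
    Environment=REQUESTS_CA_BUNDLE=/etc/ssl/certs
    EOF
    # Illustrative check inside a booted guest:
    systemctl daemon-reload
    systemctl show trove-guest -p Environment
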
diff --git a/integration/scripts/files/elements/ubuntu-xenial-mariadb/element-deps b/integration/scripts/files/elements/ubuntu-xenial-mariadb/element-deps
index 5d7756f9..b215f584 100644
--- a/integration/scripts/files/elements/ubuntu-xenial-mariadb/element-deps
+++ b/integration/scripts/files/elements/ubuntu-xenial-mariadb/element-deps
@@ -1 +1 @@
-ubuntu-mariadb
+ubuntu-mariadb \ No newline at end of file
diff --git a/integration/scripts/files/elements/ubuntu-xenial-mariadb/install.d/30-mariadb b/integration/scripts/files/elements/ubuntu-xenial-mariadb/install.d/30-mariadb
index 50eb9256..6d12202c 100755
--- a/integration/scripts/files/elements/ubuntu-xenial-mariadb/install.d/30-mariadb
+++ b/integration/scripts/files/elements/ubuntu-xenial-mariadb/install.d/30-mariadb
@@ -14,17 +14,17 @@ apt-key adv --recv-keys --keyserver hkp://keyserver.ubuntu.com:80 0xF1656F24C74C
curl -sS https://downloads.mariadb.com/MariaDB/mariadb_repo_setup |
bash -s -- --mariadb-server-version="mariadb-10.4" --skip-key-import --skip-maxscale
+apt-get install -y -qq apt-transport-https ca-certificates gnupg2
+
# NOTE(lxkong): Refer to https://www.percona.com/doc/percona-xtrabackup/2.4/installation/apt_repo.html
wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
-apt-get install -y -qq apt-transport-https ca-certificates
-apt-get update -qq
-
# Disable password prompt
debconf-set-selections <<< "mariadb-server mysql-server/root_password password ''"
debconf-set-selections <<< "mariadb-server mysql-server/root_password_again password ''"
+apt-get update -qq
apt-get install -y -qq --allow-unauthenticated mariadb-server mariadb-client galera-4 libmariadb3 mariadb-backup mariadb-common
cat <<EOF >/etc/mysql/conf.d/no_perf_schema.cnf
@@ -34,5 +34,6 @@ EOF
chown mysql:mysql /etc/mysql/my.cnf
rm -f /etc/init.d/mysql
+
systemctl daemon-reload
systemctl enable mariadb \ No newline at end of file
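
Reading the +/- lines together, the new order in 30-mariadb is: MariaDB repo setup, apt prerequisites, Percona repo, debconf pre-seed, and only then apt-get update/install. A condensed sketch of that order (not a verbatim copy of the element):

    curl -sS https://downloads.mariadb.com/MariaDB/mariadb_repo_setup |
        bash -s -- --mariadb-server-version="mariadb-10.4" --skip-key-import --skip-maxscale
    apt-get install -y -qq apt-transport-https ca-certificates gnupg2
    wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
    dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
    # Pre-seed debconf so mariadb-server installs without prompting for a root password.
    debconf-set-selections <<< "mariadb-server mysql-server/root_password password ''"
    debconf-set-selections <<< "mariadb-server mysql-server/root_password_again password ''"
    apt-get update -qq
    apt-get install -y -qq --allow-unauthenticated mariadb-server mariadb-client galera-4 mariadb-backup
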
diff --git a/integration/scripts/files/elements/ubuntu-xenial-mysql/install.d/30-mysql b/integration/scripts/files/elements/ubuntu-xenial-mysql/install.d/30-mysql
index 9730decb..e9f2d8c6 100755
--- a/integration/scripts/files/elements/ubuntu-xenial-mysql/install.d/30-mysql
+++ b/integration/scripts/files/elements/ubuntu-xenial-mysql/install.d/30-mysql
@@ -8,44 +8,15 @@ set -o xtrace
export DEBIAN_FRONTEND=noninteractive
-apt-get --allow-unauthenticated -y install mysql-client mysql-server
-
-# Xenial provides mysql 5.7 which requires percona-xtrabackup-24
-PXB_VERSION_OVERRIDE=24
-#PKGS=$(apt-cache search percona-xtrabackup-${PXB_VERSION_OVERRIDE})
-#if [[ "$PKGS" == *"percona-xtrabackup-$PXB_VERSION_OVERRIDE"* ]]; then
-# apt-get --allow-unauthenticated -y install percona-xtrabackup-${PXB_VERSION_OVERRIDE}
-#else
-# # Architecture is not supported by percona website. Compile and install it
-# PXB_VERSION=${PXB_VERSION_OVERRIDE:0:1}.${PXB_VERSION_OVERRIDE:1:1}
-#
-# apt-get --allow-unauthenticated -y install build-essential flex bison automake autoconf \
-# libtool cmake libaio-dev mysql-client libncurses-dev zlib1g-dev \
-# libgcrypt11-dev libev-dev libcurl4-gnutls-dev vim-common
-#
-# pushd /tmp
-#
-# git clone https://github.com/percona/percona-xtrabackup.git
-# cd percona-xtrabackup
-# git checkout $PXB_VERSION
-#
-# mkdir /tmp/boost
-# cmake -DDOWNLOAD_BOOST=1 -DWITH_BOOST=/tmp/boost -DBUILD_CONFIG=xtrabackup_release -DWITH_MAN_PAGES=OFF && make -j4
-# make install
-# ln -s /usr/local/xtrabackup/bin/* /usr/bin/
-#
-# dpkg -P build-essential automake autoconf libtool cmake
-# apt-get -y clean
-#
-# popd
-#
-# rm -rf /tmp/boost /tmp/percona-xtrabackup
-#fi
+apt-get --allow-unauthenticated -y install mysql-client mysql-server gnupg2
# NOTE(lxkong): Refer to https://www.percona.com/doc/percona-xtrabackup/2.4/installation/apt_repo.html
wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
apt-get update
+
+# Xenial provides mysql 5.7 which requires percona-xtrabackup-24
+PXB_VERSION_OVERRIDE=24
apt-get --allow-unauthenticated -y install percona-xtrabackup-${PXB_VERSION_OVERRIDE}
cat >/etc/mysql/conf.d/no_perf_schema.cnf <<_EOF_
@@ -58,7 +29,6 @@ mv /etc/mysql/my.cnf.fallback /etc/mysql/my.cnf
chown mysql:mysql /etc/mysql/my.cnf
cat >/etc/mysql/my.cnf <<_EOF_
[mysql]
-
!includedir /etc/mysql/conf.d/
_EOF_
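
Besides dropping the compile-from-source fallback for xtrabackup, the element ends up writing a deliberately minimal my.cnf whose only job is to pull in the drop-in directory. A sketch of the resulting layout:

    # Minimal my.cnf: everything of substance lives under /etc/mysql/conf.d/,
    # including the no_perf_schema.cnf written earlier in the script.
    cat >/etc/mysql/my.cnf <<_EOF_
    [mysql]
    !includedir /etc/mysql/conf.d/
    _EOF_
    chown mysql:mysql /etc/mysql/my.cnf
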
diff --git a/integration/scripts/files/elements/ubuntu-xenial-postgresql/element-deps b/integration/scripts/files/elements/ubuntu-xenial-postgresql/element-deps
index 98e1bc19..6a0e1b09 100644
--- a/integration/scripts/files/elements/ubuntu-xenial-postgresql/element-deps
+++ b/integration/scripts/files/elements/ubuntu-xenial-postgresql/element-deps
@@ -1 +1 @@
-ubuntu-postgresql
+ubuntu-postgresql \ No newline at end of file
diff --git a/integration/scripts/files/trove-guest.systemd.conf b/integration/scripts/files/trove-guest.systemd.conf
index ac4182ec..9d1ed6f6 100644
--- a/integration/scripts/files/trove-guest.systemd.conf
+++ b/integration/scripts/files/trove-guest.systemd.conf
@@ -1,7 +1,9 @@
[Unit]
Description=Trove Guest
-After=syslog.target
-After=network.target
+After=syslog.target network.target
+
+[Install]
+WantedBy=multi-user.target
[Service]
Type=simple
@@ -13,25 +15,23 @@ Group=GUEST_USERNAME
# CONTROLLER=192.168.32.151
EnvironmentFile=/etc/trove/controller.conf
-ExecStartPre=/bin/bash -c "sudo mkdir -p GUEST_LOGDIR ; sudo chown GUEST_USERNAME:root GUEST_LOGDIR"
+ExecStartPre=/bin/bash -c "sudo mkdir -p GUEST_LOGDIR"
# If ~/trove-installed does not exist, copy the trove source from
# the user's development environment, then touch the sentinel file
-ExecStartPre=/bin/bash -c "test -e /home/GUEST_USERNAME/trove-installed || sudo -u GUEST_USERNAME rsync -e 'ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no' -avz --exclude='.*' HOST_SCP_USERNAME@$CONTROLLER:PATH_TROVE/ /home/GUEST_USERNAME/trove && touch /home/GUEST_USERNAME/trove-installed"
+ExecStartPre=/bin/bash -c "test -e /home/GUEST_USERNAME/trove-installed || sudo rsync -e 'ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i /home/GUEST_USERNAME/.ssh/id_rsa' -avz --exclude='.*' HOST_SCP_USERNAME@$CONTROLLER:PATH_TROVE/ /home/GUEST_USERNAME/trove && touch /home/GUEST_USERNAME/trove-installed"
# If /etc/trove does not exist, create it and then copy the trove-guestagent.conf
# from /etc/trove on the user's development environment,
-ExecStartPre=/bin/bash -c "test -d /etc/trove/conf.d || sudo mkdir -p /etc/trove/conf.d && sudo -u GUEST_USERNAME rsync -e 'ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no' -avz --exclude='.*' HOST_SCP_USERNAME@$CONTROLLER:/etc/trove/trove-guestagent.conf ~GUEST_USERNAME/ && sudo mv ~GUEST_USERNAME/trove-guestagent.conf /etc/trove/conf.d/trove-guestagent.conf"
+ExecStartPre=/bin/bash -c "test -d /etc/trove/conf.d || sudo mkdir -p /etc/trove/conf.d && sudo rsync -e 'ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i /home/GUEST_USERNAME/.ssh/id_rsa' -avz --exclude='.*' HOST_SCP_USERNAME@$CONTROLLER:/etc/trove/trove-guestagent.conf ~GUEST_USERNAME/ && sudo mv ~GUEST_USERNAME/trove-guestagent.conf /etc/trove/conf.d/trove-guestagent.conf"
-ExecStartPre=/bin/bash -c "sudo chown -R GUEST_USERNAME:root /etc/trove"
+ExecStartPre=/bin/bash -c "sudo chown -R GUEST_USERNAME:root /etc/trove /home/GUEST_USERNAME/trove GUEST_LOGDIR"
-ExecStart=/home/GUEST_USERNAME/trove/contrib/trove-guestagent --config-dir=/etc/trove/conf.d
+# Start trove-guest.service
+ExecStart=/bin/bash -c "/home/GUEST_USERNAME/trove/contrib/trove-guestagent --config-dir=/etc/trove/conf.d"
-# Give a reasonable amount of time for the server to start up/shut down
TimeoutSec=300
+Restart=on-failure
-# PgSql doesn't play nice with PrivateTmp
-PrivateTmp=false
-
-[Install]
-WantedBy=multi-user.target
+# PostgreSQL doesn't play nice with PrivateTmp
+PrivateTmp=false \ No newline at end of file
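
trove-guest.systemd.conf is a template: GUEST_USERNAME, GUEST_LOGDIR, HOST_SCP_USERNAME and PATH_TROVE are placeholders substituted at image-build time before the 20-etc script copies the unit into /etc/systemd/system. A purely illustrative rendering with example values (ubuntu guest user, stack host user, /opt/stack/trove source path, /var/log/trove log dir), not the command the build actually runs:

    sed -e 's/GUEST_USERNAME/ubuntu/g' \
        -e 's|GUEST_LOGDIR|/var/log/trove|g' \
        -e 's/HOST_SCP_USERNAME/stack/g' \
        -e 's|PATH_TROVE|/opt/stack/trove|g' \
        integration/scripts/files/trove-guest.systemd.conf \
        > /etc/systemd/system/trove-guest.service
    systemctl daemon-reload && systemctl enable trove-guest.service
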
diff --git a/integration/scripts/functions_qemu b/integration/scripts/functions_qemu
index 39dcd958..f43078d9 100644
--- a/integration/scripts/functions_qemu
+++ b/integration/scripts/functions_qemu
@@ -66,7 +66,9 @@ function build_vm() {
elementes="$elementes pip-cache"
elementes="$elementes guest-agent"
else
+ # Install guest agent dependencies, user, etc.
elementes="$elementes ${guest_os}-guest"
+ # Install guest agent service
elementes="$elementes ${guest_os}-${guest_release}-guest"
fi
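
build_vm collects diskimage-builder elements into the variable the script spells elementes; in dev mode the two guest elements add the agent user/dependencies and the agent service respectively. A hypothetical command line just to show where that list ends up (the real invocation is assembled elsewhere in the script, and the element set below is only an example for a xenial MySQL guest):

    elementes="ubuntu vm ubuntu-guest ubuntu-xenial-guest ubuntu-xenial-mysql"
    disk-image-create -a amd64 \
        -o $HOME/images/trove-datastore-ubuntu-xenial-mysql \
        $elementes
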
diff --git a/integration/scripts/trovestack b/integration/scripts/trovestack
index 4aa66a53..24ab6c70 100755
--- a/integration/scripts/trovestack
+++ b/integration/scripts/trovestack
@@ -514,10 +514,6 @@ function get_field() {
done
}
-function get_glance_id () {
- echo `$@ | grep ' id ' | get_field 2`
-}
-
function set_bin_path() {
if is_fedora; then
sed -i "s|%bin_path%|/usr/bin|g" $TEST_CONF
@@ -526,35 +522,16 @@ function set_bin_path() {
fi
}
-function set_mysql_pkg() {
- if is_fedora; then
- MYSQL_PKG="mysql-community-server"
- MYSQL_VER="5.6"
- else
- if [[ "$RELEASE" == "xenial" || "$RELEASE" == "bionic" ]]; then
- MYSQL_PKG="mysql-server-5.7"
- MYSQL_VER="5.7"
- else
- MYSQL_PKG="mysql-server-5.6"
- MYSQL_VER="5.6"
- fi
- fi
-}
-
-
function cmd_set_datastore() {
local IMAGEID=$1
local DATASTORE_TYPE=$2
- local RESTART_TROVE=${3:-$(get_bool RESTART_TROVE "true")}
# rd_manage datastore_update <datastore_name> <default_version>
rd_manage datastore_update "$DATASTORE_TYPE" ""
PACKAGES=${PACKAGES:-""}
if [ "$DATASTORE_TYPE" == "mysql" ]; then
- set_mysql_pkg
- PACKAGES=${PACKAGES:-$MYSQL_PKG}
- VERSION=$MYSQL_VER
+ VERSION="5.7"
elif [ "$DATASTORE_TYPE" == "percona" ]; then
PACKAGES=${PACKAGES:-"percona-server-server-5.6"}
VERSION="5.6"
@@ -562,7 +539,6 @@ function cmd_set_datastore() {
PACKAGES=${PACKAGES:-"percona-xtradb-cluster-server-5.6"}
VERSION="5.6"
elif [ "$DATASTORE_TYPE" == "mariadb" ]; then
- PACKAGES=${PACKAGES:-"mariadb-server"}
VERSION="10.4"
elif [ "$DATASTORE_TYPE" == "mongodb" ]; then
PACKAGES=${PACKAGES:-"mongodb-org"}
@@ -577,7 +553,6 @@ function cmd_set_datastore() {
PACKAGES=${PACKAGES:-"couchbase-server"}
VERSION="2.2.0"
elif [ "$DATASTORE_TYPE" == "postgresql" ]; then
- PACKAGES=${PACKAGES:-"postgresql-9.6"}
VERSION="9.6"
elif [ "$DATASTORE_TYPE" == "couchdb" ]; then
PACKAGES=${PACKAGES:-"couchdb"}
@@ -593,28 +568,14 @@ function cmd_set_datastore() {
exit 1
fi
- sed -i "s/%datastore_type%/$DATASTORE_TYPE/g" $TEST_CONF
- sed -i "s/%datastore_version%/$VERSION/g" $TEST_CONF
-
- #rd_manage datastore_version_update <datastore_name> <version_name> <datastore_manager> <image_id> <packages> <active>
+ # trove-manage datastore_version_update <datastore_name> <version_name> <datastore_manager> <image_id> <packages> <active>
rd_manage datastore_version_update "$DATASTORE_TYPE" "$VERSION" "$DATASTORE_TYPE" $IMAGEID "$PACKAGES" 1
- rd_manage datastore_version_update "$DATASTORE_TYPE" "inactive_version" "manager1" $IMAGEID "" 0
rd_manage datastore_update "$DATASTORE_TYPE" "$VERSION"
- rd_manage datastore_update Test_Datastore_1 ""
if [ -f "$PATH_TROVE"/trove/templates/$DATASTORE_TYPE/validation-rules.json ]; then
# add the configuration parameters to the database for the kick-start datastore
rd_manage db_load_datastore_config_parameters "$DATASTORE_TYPE" "$VERSION" "$PATH_TROVE"/trove/templates/$DATASTORE_TYPE/validation-rules.json
fi
-
- if [[ "${RESTART_TROVE}" == true ]]; then
- cmd_stop
- fi
- iniset $TROVE_CONF DEFAULT default_datastore "$DATASTORE_TYPE"
- sleep 1.5
- if [[ "${RESTART_TROVE}" == true ]]; then
- cmd_start
- fi
}
###############################################################################
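
With the inactive test datastore and the restart logic gone, cmd_set_datastore boils down to the trove-manage calls the remaining comment documents (rd_manage wraps trove-manage). A sketch for the mysql case; the image name matches what cmd_build_image now produces, and /opt/stack/trove stands in for PATH_TROVE:

    IMAGE_ID=$(openstack image show trove-datastore-ubuntu-xenial-mysql -f value -c id)
    trove-manage datastore_update mysql ""
    # datastore_version_update <datastore> <version> <manager> <image_id> <packages> <active>
    trove-manage datastore_version_update mysql 5.7 mysql $IMAGE_ID "" 1
    trove-manage datastore_update mysql 5.7
    trove-manage db_load_datastore_config_parameters mysql 5.7 \
        /opt/stack/trove/trove/templates/mysql/validation-rules.json
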
@@ -681,7 +642,8 @@ function install_test_packages() {
}
function mod_confs() {
- DATASTORE_TYPE=$1
+ local DATASTORE_TYPE=$1
+ local DATASTORE_VERSION=$2
exclaim "Running mod_confs ..."
sudo install -b --mode 0664 $TROVESTACK_SCRIPTS/conf/test_begin.conf $TEST_CONF
@@ -701,20 +663,9 @@ function mod_confs() {
cat $DATASTORE_CONF | sudo tee -a $TEST_CONF > /dev/null
cat $TROVESTACK_SCRIPTS/conf/test_end.conf | sudo tee -a $TEST_CONF > /dev/null
- #When running in the gate, don't start services
- if [ "${DEVSTACK_GATE_TROVE}" == "1" ]; then
- sed -i "s,%startservices%,false,g" ${TEST_CONF}
- else
- sed -i "s,%startservices%,true,g" ${TEST_CONF}
- fi
#Add the paths to the test conf
sed -i "s,%report_directory%,$TROVE_REPORT_DIR,g" $TEST_CONF
- sed -i "s,%keystone_path%,$PATH_KEYSTONE,g" $TEST_CONF
- sed -i "s,%nova_path%,$PATH_NOVA,g" $TEST_CONF
- sed -i "s,%glance_path%,$PATH_GLANCE,g" $TEST_CONF
- sed -i "s,%trove_path%,$PATH_TROVE,g" $TEST_CONF
sed -i "s,%service_host%,$SERVICE_HOST,g" $TEST_CONF
- sed -i "s,%swifth_path%,$PATH_SWIFT,g" $TEST_CONF
# Add the region name into test.conf
sed -i "s/%region_name%/${REGION_NAME}/g" $TEST_CONF
@@ -759,8 +710,10 @@ function mod_confs() {
iniset $TROVE_CONF $DATASTORE_TYPE num_config_servers_per_cluster 1
fi
- set_bin_path
+ sed -i "s/%datastore_type%/$DATASTORE_TYPE/g" $TEST_CONF
+ sed -i "s/%datastore_version%/${DATASTORE_VERSION}/g" $TEST_CONF
+ set_bin_path
}
function setup_cluster_configs() {
@@ -794,6 +747,7 @@ function add_test_flavors() {
function cmd_test_init() {
local DATASTORE_TYPE=$1
+ local DATASTORE_VERSION=$2
if [ -z "${DATASTORE_TYPE}" ]; then
exclaim "${COLOR_RED}Datastore argument was not specified.${COLOR_NONE}"
@@ -806,7 +760,7 @@ function cmd_test_init() {
install_test_packages "${DATASTORE_TYPE}"
exclaim "Modifying test.conf and guest.conf with appropriate values."
- mod_confs "${DATASTORE_TYPE}"
+ mod_confs "${DATASTORE_TYPE}" "${DATASTORE_VERSION}"
exclaim "Creating Test Flavors."
add_test_flavors
@@ -823,7 +777,7 @@ function cmd_build_image() {
local output=$6
if [[ -z "$output" ]]; then
- image_name="trove-${IMAGE_GUEST_OS}-${IMAGE_GUEST_RELEASE}-${IMAGE_DATASTORE_TYPE}"
+ image_name="trove-datastore-${IMAGE_GUEST_OS}-${IMAGE_GUEST_RELEASE}-${IMAGE_DATASTORE_TYPE}"
image_folder=$HOME/images
output="${image_folder}/${image_name}"
fi
@@ -840,13 +794,17 @@ function cmd_build_image() {
build_guest_image $IMAGE_DATASTORE_TYPE $IMAGE_GUEST_OS $IMAGE_GUEST_RELEASE $DEV_MODE ${guest_username} $output
}
+# Build the guest image, upload it to Glance, then register the datastore and
+# its configuration parameters. The image build and upload are skipped if:
+# 1. MYSQL_IMAGE_ID is passed, or
+# 2. Glance already contains an image whose name includes the datastore name.
function cmd_build_and_upload_image() {
local datastore_type=$1
- local restart_trove=${2:-$(get_bool RESTART_TROVE "true")}
- local guest_os=${3:-"ubuntu"}
- local guest_release=${4:-"xenial"}
- local dev_mode=${5:-"true"}
- local guest_username=${6:-"ubuntu"}
+ local guest_os=${2:-"ubuntu"}
+ local guest_release=${3:-"xenial"}
+ local dev_mode=${4:-"true"}
+ local guest_username=${5:-"ubuntu"}
+ local output_dir=${6:-"$HOME/images"}
if [ -z "${datastore_type}" ]; then
exclaim "${COLOR_RED}Datastore argument was not specified.${COLOR_NONE}"
@@ -861,21 +819,20 @@ function cmd_build_and_upload_image() {
glance_imageid=$(openstack $CLOUD_ADMIN_ARG image list | grep "$datastore_type" | awk 'NR==1 {print}' | awk '{print $2}')
if [[ -z $glance_imageid ]]; then
- cmd_build_image ${datastore_type} ${guest_os} ${guest_release} ${dev_mode} ${guest_username}
+ mkdir -p ${output_dir}
+ name=trove-datastore-${guest_os}-${guest_release}-${datastore_type}
+ output=${output_dir}/$name.qcow2
+ cmd_build_image ${datastore_type} ${guest_os} ${guest_release} ${dev_mode} ${guest_username} $output
- image_folder=$HOME/images
- qcow_image=`find $image_folder -name '*.qcow2'`
- image_url="file://$qcow_image"
- glance_imageid=`get_glance_id upload_image $image_url`
+ glance_imageid=$(openstack ${CLOUD_ADMIN_ARG} image create $name --public --disk-format qcow2 --container-format bare --file $output --property hw_rng_model='virtio' -c id -f value)
[[ -z "$glance_imageid" ]] && echo "Glance upload failed!" && exit 1
- echo "IMAGE ID: $glance_imageid"
fi
fi
- echo "IMAGEID: $glance_imageid"
+ exclaim "Using Glance image ID: $glance_imageid"
exclaim "Updating Datastores"
- cmd_set_datastore "${glance_imageid}" "${datastore_type}" "${restart_trove}"
+ cmd_set_datastore "${glance_imageid}" "${datastore_type}"
}
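
Condensing the hunk: the function now reuses any Glance image whose name matches the datastore, and otherwise builds one and uploads it directly instead of going through the removed get_glance_id helper. A sketch of that flow for mysql, assuming admin credentials are already loaded in the environment:

    datastore_type=mysql
    glance_imageid=$(openstack image list | grep "$datastore_type" | awk 'NR==1 {print $2}')
    if [[ -z $glance_imageid ]]; then
        name=trove-datastore-ubuntu-xenial-${datastore_type}
        output=$HOME/images/$name.qcow2          # produced by cmd_build_image
        glance_imageid=$(openstack image create $name --public --disk-format qcow2 \
            --container-format bare --file $output --property hw_rng_model='virtio' \
            -c id -f value)
    fi
    echo "Using Glance image ID: $glance_imageid"
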
@@ -1037,7 +994,7 @@ function cmd_int_tests() {
fi
cd $TROVESTACK_SCRIPTS
if [ $# -lt 1 ]; then
- args="--group=blackbox"
+ args="--group=mysql"
else
args="$@"
fi
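
With blackbox gone as the default group, running the integration tests with no arguments is now the same as asking for the mysql group explicitly:

    ./trovestack int-tests
    ./trovestack int-tests --group=mysql   # equivalent to the line above
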
@@ -1055,23 +1012,6 @@ function cmd_int_tests() {
python $args
}
-function cmd_int_tests_simple() {
- exclaim "Running Trove Simple Integration Tests..."
- cd $TROVESTACK_SCRIPTS
- if [ $# -lt 1 ]; then
- args="--group=simple_blackbox"
- else
- args="$@"
- fi
-
- # -- verbose makes it prettier.
- # -- logging-clear-handlers keeps the novaclient and other things from
- # spewing logs to stdout.
- args="$INT_TEST_OPTIONS -B $TROVESTACK_TESTS/integration/int_tests.py --verbose --logging-clear-handlers $args"
- echo "python $args"
- python $args
-}
-
function cmd_int_tests_white_box() {
export PYTHONPATH=$PYTHONPATH:$PATH_TROVE
export PYTHONPATH=$PYTHONPATH:$PATH_NOVA
@@ -1259,49 +1199,45 @@ function cmd_clean() {
function cmd_kick_start() {
local DATASTORE_TYPE=$1
- local RESTART_TROVE=${2:-$(get_bool RESTART_TROVE "true")}
+ local DATASTORE_VERSION=$2
if [ -z "${DATASTORE_TYPE}" ]; then
exclaim "${COLOR_RED}Datastore argument was not specified.${COLOR_NONE}"
exit 1
fi
- exclaim "Running kick-start for $DATASTORE_TYPE (restart trove: $RESTART_TROVE)"
+ exclaim "Running kick-start for $DATASTORE_TYPE"
dump_env
- cmd_test_init "${DATASTORE_TYPE}"
-
- export GUEST_OS=${GUEST_OS:-"ubuntu"}
- export GUEST_OS_RELEASE=${GUEST_OS_RELEASE:-"xenial"}
- export GUEST_OS_USERNAME=${GUEST_OS_USERNAME:-"ubuntu"}
- export DEV_MOEE=${DEV_MODE:-"true"}
- cmd_build_and_upload_image "${DATASTORE_TYPE}" "${RESTART_TROVE}" "${GUEST_OS}" "${GUEST_OS_RELEASE}" "${DEV_MOEE}" "${GUEST_OS_USERNAME}"
+ cmd_test_init "${DATASTORE_TYPE}" "${DATASTORE_VERSION}"
}
+# Start the functional tests. Before they run, the guest image must be built and
+# registered with the appropriate datastore, and the configuration parameters
+# must be loaded; DevStack takes care of all of that.
function cmd_gate_tests() {
local DATASTORE_TYPE=${1:-'mysql'}
local TEST_GROUP=${2:-${DATASTORE_TYPE}}
- local HOST_SCP_USERNAME=${3:-$(whoami)}
- local GUEST_USERNAME=${4:-'ubuntu'}
- # We're not using devstack-gate in Zuul v3 job
- if [[ $GATE_JOB_VER == "v2" ]]; then
- local ESCAPED_PATH_TROVE=${5:-'\/opt\/stack\/new\/trove'}
- fi
+ local DATASTORE_VERSION=${3:-'5.7'}
+ local HOST_SCP_USERNAME=${4:-$(whoami)}
+ local GUEST_USERNAME=${5:-'ubuntu'}
exclaim "Running cmd_gate_tests ..."
export REPORT_DIRECTORY=${REPORT_DIRECTORY:=$HOME/gate-tests-report/}
export TROVE_REPORT_DIR=$HOME/gate-tests-report/
- TROVESTACK_DUMP_ENV=true
-
+ export TROVESTACK_DUMP_ENV=true
export SSH_DIR=${SSH_DIR:-"$HOME/.ssh"}
- # The user used to connect the db instance.
+    # The user used to connect to the db instance during testing.
export TROVE_TEST_SSH_USER=${TROVE_TEST_SSH_USER:-"ubuntu"}
- # This var is used to ssh into the db instance during the test.
+ # This var is used to ssh into the db instance during testing.
export TROVE_TEST_SSH_KEY_FILE=${SSH_DIR}/id_rsa
cd $TROVESTACK_SCRIPTS
- local RESTART_TROVE=false
- cmd_kick_start "${DATASTORE_TYPE}" "${RESTART_TROVE}"
+
+ # Build and upload guest image, register datastore version.
+ cmd_build_and_upload_image ${DATASTORE_TYPE}
+
+ cmd_kick_start "${DATASTORE_TYPE}" "${DATASTORE_VERSION}"
cmd_int_tests --group=$TEST_GROUP
}
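
The positional arguments of cmd_gate_tests are now datastore type, test group and datastore version (defaulting to mysql, the datastore type, and 5.7), followed by the host and guest usernames. For example:

    ./trovestack gate-tests mysql mysql 5.7
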
@@ -1441,42 +1377,13 @@ function run_command() {
fi
case "$1" in
- "install" ) cmd_install;;
- "test-init" ) shift; cmd_test_init $@;;
"build-image" ) shift; cmd_build_image $@;;
- "initialize" ) cmd_initialize;;
- "unit-tests" ) cmd_unit_tests;;
- "start-deps" ) cmd_start_deps;;
- "stop-deps" ) cmd_stop_deps;;
- "start" ) cmd_start;;
+ "upload-image" ) shift; cmd_build_and_upload_image $@;;
"int-tests" ) shift; cmd_int_tests $@;;
- "int-tests-wb" ) shift; cmd_int_tests_white_box $@;;
- "simple-tests") shift; cmd_int_tests_simple $@;;
- "stop" ) cmd_stop;;
- "restart" ) cmd_stop; cmd_start;;
- "wipe-logs" ) cmd_wipe_logs;;
- "rd-sql" ) shift; cmd_rd_sql $@;;
- "fake-sql" ) shift; cmd_fake_sql $@;;
- "run-ci" ) shift; cmd_run_ci $@;;
- "vagrant-ssh" ) shift; cmd_vagrant_ssh $@;;
"debug" ) shift; echo "Enabling debugging."; \
set -o xtrace; TROVESTACK_DUMP_ENV=true; run_command $@;;
- "clear" ) shift; cmd_clear $@;;
- "clean" ) shift; cmd_clean $@;;
- "run" ) shift; cmd_run $@;;
- "kick-start" ) shift; cmd_kick_start $@;;
- "dsvm-gate-tests" ) shift; export GATE_JOB_VER=v2; \
- cmd_gate_tests $@;;
"gate-tests" ) shift; cmd_gate_tests $@;;
- "run-fake" ) shift; cmd_run_fake $@;;
- "start-fake" ) shift; cmd_start_fake $@;;
- "update-projects" ) cmd_clone_projects force_update \
- $TROVESTACK_SCRIPTS/projects-list \
- $TROVESTACK_SCRIPTS/image-projects-list;;
- "reset-task" ) shift; cmd_reset_task $@;;
"wipe-queues" ) shift; cmd_wipe_queues $@;;
- "repl" ) shift; cmd_repl $@;;
- "help" ) print_usage;;
* )
echo "'$1' not a valid command"
exit 1
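
After the cleanup, the dispatcher keeps only the image and test entry points (build-image, upload-image, int-tests, gate-tests, debug, wipe-queues). Typical local usage, with the defaults spelled out (guest OS, release, dev mode and guest user default to ubuntu, xenial, true and ubuntu):

    ./trovestack build-image mysql ubuntu xenial true ubuntu
    ./trovestack upload-image mysql
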
diff --git a/integration/tests/integration/core.test.conf b/integration/tests/integration/core.test.conf
index 2ba05bef..828a2b63 100644
--- a/integration/tests/integration/core.test.conf
+++ b/integration/tests/integration/core.test.conf
@@ -1,7 +1,5 @@
{
"report_directory":"rdli-test-report",
- "start_services": false,
-
"white_box":false,
"test_mgmt":false,
@@ -18,7 +16,6 @@
"nova_conf":"/home/vagrant/nova.conf",
"keystone_code_root":"/opt/stack/keystone",
"keystone_conf":"/etc/keystone/keystone.conf",
- "keystone_use_combined":true,
"trove_code_root":"/opt/stack/trove",
"trove_conf":"/tmp/trove.conf",
"trove_version":"v1.0",
@@ -29,9 +26,8 @@
"trove_max_accepted_volume_size": 1000,
"trove_max_instances_per_user": 55,
"trove_max_volumes_per_user": 100,
- "use_nova_volume": false,
"use_reaper":false,
-"root_removed_from_instance_api": true,
+ "root_removed_from_instance_api": true,
"root_timestamp_disabled": false,
"openvz_disabled": false,
"management_api_disabled": true,
@@ -43,6 +39,6 @@
"users_page_size": 20,
"rabbit_runs_locally":false,
-"dns_instance_entry_factory":"trove.dns.rsdns.driver.RsDnsInstanceEntryFactory",
+ "dns_instance_entry_factory":"trove.dns.rsdns.driver.RsDnsInstanceEntryFactory",
"sentinel": null
}
diff --git a/integration/tests/integration/int_tests.py b/integration/tests/integration/int_tests.py
index d988b723..594688ad 100644
--- a/integration/tests/integration/int_tests.py
+++ b/integration/tests/integration/int_tests.py
@@ -112,44 +112,33 @@ def _clean_up():
def import_tests():
-
- # TODO(tim.simpson): Import these again once white box test functionality
- # is restored.
- # from tests.dns import check_domain
- # from tests.dns import concurrency
- # from tests.dns import conversion
-
# The DNS stuff is problematic. Not loading the other tests allow us to
# run its functional tests only.
ADD_DOMAINS = os.environ.get("ADD_DOMAINS", "False") == 'True'
if not ADD_DOMAINS:
- from tests.api import delete_all
- from tests.api import instances_pagination
- from tests.api import instances_states
- from tests.dns import dns
- from tests import initialize
- from tests.smoke import instance
- from tests.volumes import driver
+ # F401 unused imports needed for tox tests
+ from trove.tests.api import backups # noqa
+ from trove.tests.api import configurations # noqa
+ from trove.tests.api import databases # noqa
+ from trove.tests.api import datastores # noqa
+ from trove.tests.api import instances as rd_instances # noqa
+ from trove.tests.api import instances_actions as acts # noqa
+ from trove.tests.api import instances_delete # noqa
+ from trove.tests.api import instances_resize # noqa
+ from trove.tests.api import limits # noqa
+ from trove.tests.api.mgmt import datastore_versions # noqa
+ from trove.tests.api.mgmt import instances_actions as mgmt_acts # noqa
+ from trove.tests.api import replication # noqa
+ from trove.tests.api import root # noqa
+ from trove.tests.api import user_access # noqa
+ from trove.tests.api import users # noqa
+ from trove.tests.api import versions # noqa
+ from trove.tests.db import migrations # noqa
# Groups that exist as core int-tests are registered from the
# trove.tests.int_tests module
from trove.tests import int_tests
- # Groups defined in trove/integration, or any other externally
- # defined groups can be registered here
- heavy_black_box_groups = [
- "dbaas.api.instances.pagination",
- "dbaas.api.instances.delete",
- "dbaas.api.instances.status",
- "dbaas.api.instances.down",
- "dbaas.api.mgmt.hosts.update",
- "fake.dbaas.api.mgmt.instances",
- "fake.dbaas.api.mgmt.accounts.broken",
- "fake.dbaas.api.mgmt.allaccounts"
- ]
- proboscis.register(groups=["heavy_blackbox"],
- depends_on_groups=heavy_black_box_groups)
-
def run_main(test_importer):
diff --git a/integration/tests/integration/run_local.sh b/integration/tests/integration/run_local.sh
deleted file mode 100755
index 83d313dc..00000000
--- a/integration/tests/integration/run_local.sh
+++ /dev/null
@@ -1,57 +0,0 @@
-#!/usr/bin/env bash
-# Specify the path to the Trove repo as argument one.
-# This script will create a .pid file and report in the current directory.
-
-set -e
-if [ $# -lt 1 ]; then
- echo "Please give the path to the Trove repo as argument one."
- exit 5
-else
- TROVE_PATH=$1
-fi
-if [ $# -lt 2 ]; then
- echo "Please give the path to the Trove Client as argument two."
- exit 5
-else
- TROVECLIENT_PATH=$2
-fi
-shift;
-shift;
-
-
-PID_FILE="`pwd`.pid"
-
-function start_server() {
- pushd $TROVE_PATH
- bin/start_server.sh --pid_file=$PID_FILE
- popd
-}
-
-function stop_server() {
- if [ -f $PID_FILE ];
- then
- pushd $TROVE_PATH
- bin/stop_server.sh $PID_FILE
- popd
- else
- echo "The pid file did not exist, so not stopping server."
- fi
-}
-function on_error() {
- echo "Something went wrong!"
- stop_server
-}
-
-trap on_error EXIT # Proceed to trap - END in event of failure.
-
-TROVE_CLIENT_PATH=$TROVECLIENT_PATH tox -e py26
-start_server
-.tox/py26/bin/pip install -U $TROVECLIENT_PATH
-PYTHONPATH=$PYTHONPATH:$TROVECLIENT_PATH .tox/py26/bin/python int_tests.py \
- --conf=localhost.test.conf -- $@
-stop_server
-
-
-trap - EXIT
-echo "Ran tests successfully. :)"
-exit 0
diff --git a/integration/tests/integration/setup.py b/integration/tests/integration/setup.py
deleted file mode 100644
index 52216f61..00000000
--- a/integration/tests/integration/setup.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import os
-from setuptools import setup
-
-
-def read(fname):
- return open(os.path.join(os.path.dirname(__file__), fname)).read()
-
-
-setup(
- name="Trove Integration Tests",
- version="0.0.9.9",
- author='OpenStack',
- description="Runs integration tests on Ridley.",
- license='Apache',
- py_modules=[],
- packages=['tests'],
- scripts=[]
-)
diff --git a/integration/tests/integration/tests/README b/integration/tests/integration/tests/README
deleted file mode 100644
index 05e1db67..00000000
--- a/integration/tests/integration/tests/README
+++ /dev/null
@@ -1 +0,0 @@
-Integration tests.
diff --git a/integration/tests/integration/tests/api/__init__.py b/integration/tests/integration/tests/api/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/integration/tests/integration/tests/api/__init__.py
+++ /dev/null
diff --git a/integration/tests/integration/tests/api/delete_all.py b/integration/tests/integration/tests/api/delete_all.py
deleted file mode 100644
index 98c67aba..00000000
--- a/integration/tests/integration/tests/api/delete_all.py
+++ /dev/null
@@ -1,32 +0,0 @@
-# Copyright 2011 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from proboscis import test
-
-from trove.tests.config import CONFIG
-from trove.tests.util import create_dbaas_client
-from trove.tests.util.users import Requirements
-
-GROUP = "dbaas.api.instances.delete"
-
-
-@test(groups=[GROUP])
-def delete_all():
- """Delete every single one."""
- user = CONFIG.users.find_user(Requirements(is_admin=False))
- dbaas = create_dbaas_client(user)
- instances = dbaas.instances.list()
- for instance in instances:
- instance.delete()
diff --git a/integration/tests/integration/tests/api/instances_pagination.py b/integration/tests/integration/tests/api/instances_pagination.py
deleted file mode 100644
index a21aadd7..00000000
--- a/integration/tests/integration/tests/api/instances_pagination.py
+++ /dev/null
@@ -1,219 +0,0 @@
-
-from proboscis import after_class
-from proboscis import before_class
-from proboscis import test
-from proboscis.asserts import assert_equal
-from proboscis.asserts import assert_is_not
-from proboscis.asserts import assert_is_none
-from proboscis.asserts import assert_true
-
-
-from troveclient.compat import exceptions
-from trove.tests.config import CONFIG
-from trove.tests.util import create_dbaas_client
-from trove.tests.util.users import Requirements
-
-
-class TestBase(object):
-
- def set_up(self):
- """Create a ton of instances."""
- reqs = Requirements(is_admin=False)
- self.user = CONFIG.users.find_user(reqs)
- self.dbaas = create_dbaas_client(self.user)
-
- def delete_instances(self):
- chunk = 0
- while True:
- chunk += 1
- attempts = 0
- instances = self.dbaas.instances.list()
- if len(instances) == 0:
- break
- # Sit around and try to delete this chunk.
- while True:
- instance_results = []
- attempts += 1
- deleted_count = 0
- for instance in instances:
- try:
- instance.delete()
- result = "[w]"
- except exceptions.UnprocessableEntity:
- result = "[W]"
- except exceptions.NotFound:
- result = "[O]"
- deleted_count += 1
- except Exception:
- result = "[X]"
- instance_results.append(result)
- print("Chunk %d, attempt %d : %s"
- % (chunk, attempts, ",".join(instance_results)))
- if deleted_count == len(instances):
- break
-
- def create_instances(self):
- self.ids = []
- for index in range(self.max):
- name = "multi-%03d" % index
- result = self.dbaas.instances.create(name, 1,
- {'size': 1}, [], [])
- self.ids.append(result.id)
- # Sort the list of IDs in order, so we can confirm the lists pagination
- # returns is also sorted correctly.
- self.ids.sort()
-
- @staticmethod
- def assert_instances_sorted_by_ids(instances):
- # Assert that the strings are always increasing.
- last_id = ""
- for instance in instances:
- assert_true(last_id < instance.id)
-
- def print_list(self, instances):
- print("Length = %d" % len(instances))
- print(",".join([instance.id for instance in instances]))
-
- def test_pagination(self, requested_limit, requested_marker,
- expected_length, expected_marker, expected_last_item):
- instances = self.dbaas.instances.list(limit=requested_limit,
- marker=requested_marker)
- marker = instances.next
-
- self.print_list(instances)
-
- # Better get as many as we asked for.
- assert_equal(len(instances), expected_length)
- # The last one should be roughly this one in the list.
- assert_equal(instances[-1].id, expected_last_item)
- # Because limit < count, the marker must be something.
- if expected_marker:
- assert_is_not(marker, None)
- assert_equal(marker, expected_marker)
- else:
- assert_is_none(marker)
- self.assert_instances_sorted_by_ids(instances)
-
-
-@test(runs_after_groups=["dbaas.guest.shutdown"],
- groups=['dbaas.api.instances.pagination'])
-class SimpleCreateAndDestroy(TestBase):
- """
- It turns out a big part of guaranteeing pagination works is to make sure
- we can create a big batch of instances and delete them without problems.
- Even in fake mode though its worth it to check this is the case.
- """
-
- max = 5
-
- @before_class
- def set_up(self):
- """Create a ton of instances."""
- super(SimpleCreateAndDestroy, self).set_up()
- self.delete_instances()
-
- @test
- def spin_up(self):
- self.create_instances()
-
- @after_class(always_run=True)
- def tear_down(self):
- self.delete_instances()
-
-
-@test(runs_after_groups=["dbaas.guest.shutdown"],
- groups=['dbaas.api.instances.pagination'])
-class InstancePagination50(TestBase):
-
- max = 50
-
- @before_class
- def set_up(self):
- """Create a ton of instances."""
- super(InstancePagination50, self).set_up()
- self.delete_instances()
- self.create_instances()
-
- @after_class(always_run=True)
- def tear_down(self):
- """Tear down all instances."""
- self.delete_instances()
-
- @test
- def pagination_short(self):
- self.test_pagination(requested_limit=10, requested_marker=None,
- expected_length=10, expected_marker=self.ids[9],
- expected_last_item=self.ids[9])
-
- @test
- def pagination_default(self):
- self.test_pagination(requested_limit=None, requested_marker=None,
- expected_length=20, expected_marker=self.ids[19],
- expected_last_item=self.ids[19])
-
- @test
- def pagination_full(self):
- self.test_pagination(requested_limit=50, requested_marker=None,
- expected_length=20, expected_marker=self.ids[19],
- expected_last_item=self.ids[19])
-
-
-@test(runs_after_groups=["dbaas.guest.shutdown"],
- groups=['dbaas.api.instances.pagination'])
-class InstancePagination20(TestBase):
-
- max = 20
-
- @before_class
- def set_up(self):
- """Create a ton of instances."""
- super(InstancePagination20, self).set_up()
- self.delete_instances()
- self.create_instances()
-
- @after_class(always_run=True)
- def tear_down(self):
- """Tear down all instances."""
- self.delete_instances()
-
- @test
- def pagination_short(self):
- self.test_pagination(requested_limit=10, requested_marker=None,
- expected_length=10, expected_marker=self.ids[9],
- expected_last_item=self.ids[9])
-
- @test
- def pagination_default(self):
- self.test_pagination(requested_limit=None, requested_marker=None,
- expected_length=20, expected_marker=None,
- expected_last_item=self.ids[19])
-
- @test
- def pagination_full(self):
- self.test_pagination(requested_limit=20, requested_marker=None,
- expected_length=20, expected_marker=None,
- expected_last_item=self.ids[19])
-
- @test
- def pagination_overkill(self):
- self.test_pagination(requested_limit=30, requested_marker=None,
- expected_length=20, expected_marker=None,
- expected_last_item=self.ids[19])
-
- @test
- def pagination_last_half(self):
- self.test_pagination(requested_limit=10, requested_marker=self.ids[9],
- expected_length=10, expected_marker=None,
- expected_last_item=self.ids[19])
-
- @test
- def pagination_third_quarter(self):
- self.test_pagination(requested_limit=5, requested_marker=self.ids[9],
- expected_length=5, expected_marker=self.ids[14],
- expected_last_item=self.ids[14])
-
- @test
- def pagination_fourth_quarter(self):
- self.test_pagination(requested_limit=20, requested_marker=self.ids[14],
- expected_length=5, expected_marker=None,
- expected_last_item=self.ids[19])
diff --git a/integration/tests/integration/tests/api/instances_states.py b/integration/tests/integration/tests/api/instances_states.py
deleted file mode 100644
index ed625a27..00000000
--- a/integration/tests/integration/tests/api/instances_states.py
+++ /dev/null
@@ -1,76 +0,0 @@
-# Copyright 2012 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-GROUP = "dbaas.api.instances.status"
-
-from proboscis import before_class
-from proboscis import test
-from proboscis.asserts import assert_equal
-
-from trove.tests.config import CONFIG
-from trove.tests.util import create_dbaas_client
-from trove.tests.util.users import Requirements
-from trove.common.utils import poll_until
-
-
-@test(groups=[GROUP])
-class InstanceStatusTests(object):
-
- @before_class
- def set_up(self):
- reqs = Requirements(is_admin=False)
- self.user = CONFIG.users.find_user(reqs)
- self.dbaas = create_dbaas_client(self.user)
-
- @test
- def test_create_failure_on_volume_prov_failure(self):
- # Fake nova will fail a volume of size 9.
- response = self.dbaas.instances.create('volume_fail', 1,
- {'size': 9}, [])
- poll_until(lambda: self.dbaas.instances.get(response.id),
- lambda instance: instance.status == 'ERROR',
- time_out=10)
- instance = self.dbaas.instances.get(response.id)
- print("Status: %s" % instance.status)
- assert_equal(instance.status, "ERROR",
- "Instance did not drop to error after volume prov failure.")
-
- @test
- def test_create_failure_on_server_failure(self):
- # Fake nova will fail a server ending with 'SERVER_ERROR'."
- response = self.dbaas.instances.create('test_SERVER_ERROR', 1,
- {'size': 1}, [])
- poll_until(lambda: self.dbaas.instances.get(response.id),
- lambda instance: instance.status == 'ERROR',
- time_out=10)
- instance = self.dbaas.instances.get(response.id)
- print("Status: %s" % instance.status)
- assert_equal(instance.status, "ERROR",
- "Instance did not drop to error after server prov failure.")
-
- ###TODO(ed-): We don't at present have a way to test DNS in FAKE_MODE.
- @test(enabled=False)
- def test_create_failure_on_dns_failure(self):
- #TODO(ed-): Throw DNS-specific monkeywrench into works
- response = self.dbaas.instances.create('test_DNS_ERROR', 1,
- {'size': 1}, [])
- poll_until(lambda: self.dbaas.instances.get(response.id),
- lambda instance: instance.status == 'ERROR',
- time_out=10)
- instance = self.dbaas.instances.get(response.id)
- print("Status: %s" % instance.status)
- assert_equal(instance.status, "ERROR",
- "Instance did not drop to error after DNS prov failure.")
diff --git a/integration/tests/integration/tests/dns/__init__.py b/integration/tests/integration/tests/dns/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/integration/tests/integration/tests/dns/__init__.py
+++ /dev/null
diff --git a/integration/tests/integration/tests/dns/check_domain.py b/integration/tests/integration/tests/dns/check_domain.py
deleted file mode 100644
index 82acbbb1..00000000
--- a/integration/tests/integration/tests/dns/check_domain.py
+++ /dev/null
@@ -1,171 +0,0 @@
-# Copyright 2011 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Checks that the domain specified in the flag file exists and is valid.
-
-If you define the environment variable ADD_DOMAINS=True when running the tests,
-they will create the domain if its not found (see below for details).
-
-"""
-import time
-from proboscis import test
-from proboscis import before_class
-from proboscis.asserts import assert_equal
-from proboscis.asserts import assert_not_equal
-from proboscis.decorators import time_out
-
-from trove.tests.config import CONFIG
-
-WHITE_BOX = CONFIG.white_box
-RUN_DNS = CONFIG.values.get("trove_dns_support", False)
-
-if WHITE_BOX:
- from nova import utils
- from nova import flags
- import rsdns
- from trove.dns.rsdns.driver import create_client_with_flag_values
- from trove.dns.driver import DnsEntry
- from trove.dns.rsdns.driver import RsDnsInstanceEntryFactory
- from trove.dns.rsdns.driver import RsDnsDriver
- from trove.dns.rsdns.driver import RsDnsZone
- from trove.utils import poll_until
- FLAGS = flags.FLAGS
- TEST_CONTENT = "126.1.1.1"
- TEST_NAME = "hiwassup.%s" % FLAGS.dns_domain_name
- DNS_DOMAIN_ID = None
-
-
-@test(groups=["rsdns.domains", "rsdns.show_entries"],
- enabled=WHITE_BOX and RUN_DNS)
-class ClientTests(object):
-
- @before_class
- def increase_logging(self):
- import httplib2
- httplib2.debuglevel = 1
-
- @test
- def can_auth(self):
- self.client = create_client_with_flag_values()
- self.client.authenticate()
-
- @test(depends_on=[can_auth])
- def list_domains(self):
- domains = self.client.domains.list()
- print(domains)
-
-
-@test(groups=["rsdns.domains"], depends_on=[ClientTests],
- enabled=WHITE_BOX and RUN_DNS)
-class RsDnsDriverTests(object):
- """Tests the RS DNS Driver."""
-
- def create_domain_if_needed(self):
- """Adds the domain specified in the flags."""
- print("Creating domain %s" % self.driver.default_dns_zone.name)
- future = self.driver.dns_client.domains.create(
- self.driver.default_dns_zone.name)
- while not future.ready:
- time.sleep(2)
- print("Got something: %s" % future.resource)
- with open('/home/vagrant/dns_resource.txt', 'w') as f:
- f.write('%r\n' % future.result[0].id)
- global DNS_DOMAIN_ID
- DNS_DOMAIN_ID = future.result[0].id
- print("The domain should have been created with id=%s" % DNS_DOMAIN_ID)
-
- @test
- @time_out(2 * 60)
- def ensure_domain_specified_in_flags_exists(self):
- """Make sure the domain in the FLAGS exists."""
- self.driver = RsDnsDriver(raise_if_zone_missing=False)
- assert_not_equal(None, self.driver.default_dns_zone)
-
- def zone_found():
- zones = self.driver.get_dns_zones()
- print("Retrieving zones.")
- for zone in zones:
- print("zone %s" % zone)
- if zone.name == self.driver.default_dns_zone.name:
- self.driver.default_dns_zone.id = zone.id
- global DNS_DOMAIN_ID
- DNS_DOMAIN_ID = zone.id
- return True
- return False
- if zone_found():
- return
- self.create_domain_if_needed()
- for i in range(5):
- if zone_found():
- return
- self.fail("""Could not find default dns zone.
- This happens when they clear the staging DNS service of data.
- To fix it, manually run the tests as follows:
- $ ADD_DOMAINS=True python int_tests.py
- and if all goes well the tests will create a new domain
- record.""")
-
- @test(depends_on=[ensure_domain_specified_in_flags_exists],
- enabled=WHITE_BOX and FLAGS.dns_domain_name != "dbaas.rackspace.com")
- def delete_all_entries(self):
- """Deletes all entries under the default domain."""
- list = self.driver.get_entries()
- for entry in list:
- if entry.type == "A":
- self.driver.delete_entry(name=entry.name, type=entry.type,
- dns_zone=entry.dns_zone)
- # It takes awhile for them to be deleted.
- poll_until(lambda: self.driver.get_entries_by_name(TEST_NAME),
- lambda list: len(list) == 0,
- sleep_time=4, time_out=60)
-
- @test(depends_on=[delete_all_entries])
- def create_test_entry(self):
- fullname = TEST_NAME
- entry = DnsEntry(name=fullname, content=TEST_CONTENT, type="A",
- ttl=3600)
- self.driver.create_entry(entry)
- list = None
- for i in range(500):
- list = self.driver.get_entries_by_name(name=fullname)
- if len(list) > 0:
- break
- time.sleep(1)
- print("This is the list: %r" % list)
- assert_equal(1, len(list))
- list2 = self.driver.get_entries_by_content(content=TEST_CONTENT)
- assert_equal(1, len(list2))
-
- @test(depends_on=[delete_all_entries])
- def create_test_rsdns_entry(self):
- """Create an entry using the RsDnsInstanceEntryFactory."""
- instance = {'uuid': '000136c0-effa-4711-a747-a5b9fbfcb3bd', 'id': '10'}
- ip = "10.100.2.7"
- factory = RsDnsInstanceEntryFactory(dns_domain_id=DNS_DOMAIN_ID)
- entry = factory.create_entry(instance)
- entry.content = ip
- self.driver.create_entry(entry)
- entries = self.driver.get_entries_by_name(name=entry.name)
- assert_equal(1, len(entries))
- assert_equal(ip, entries[0].content)
- assert_equal(FLAGS.dns_ttl, entries[0].ttl)
-
- @test(depends_on=[create_test_entry])
- def delete_test_entry(self):
- fullname = TEST_NAME
- self.driver.delete_entry(fullname, "A")
- # It takes awhile for them to be deleted.
- poll_until(lambda: self.driver.get_entries_by_name(TEST_NAME),
- lambda list: len(list) == 0,
- sleep_time=2, time_out=60)
diff --git a/integration/tests/integration/tests/dns/concurrency.py b/integration/tests/integration/tests/dns/concurrency.py
deleted file mode 100644
index 4fe460b0..00000000
--- a/integration/tests/integration/tests/dns/concurrency.py
+++ /dev/null
@@ -1,111 +0,0 @@
-# Copyright 2011 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""
-This test recreates an issue we had with eventlet. In the logs, we'd see that
-the JSON response was malformed; instead of JSON, it contained the following
-string:
-Second simultaneous read on fileno 5 detected. Unless you really know what
-you're doing, make sure that only one greenthread can read any particular
-socket. Consider using a pools.Pool. If you do know what you're doing and want
-to disable this error, call
-eventlet.debug.hub_multiple_reader_prevention(False)
-
-It is perhaps the most helpful error message ever created.
-
-The root issue was that a subclass of httplib2.Http was created at program
-started and used in all threads.
-
-Using the old (broken) RsDNS client code this test recreates the greatest error
-message ever.
-"""
-
-try:
- import eventlet
- CAN_USE_EVENTLET = True
-except ImportError:
- CAN_USE_EVENTLET = False
-import uuid
-
-from proboscis import before_class
-from proboscis import test
-from proboscis.asserts import assert_true
-
-from trove.tests.config import CONFIG
-
-WHITE_BOX = CONFIG.white_box
-RUN_DNS = CONFIG.values.get("trove_dns_support", False)
-
-
-if CONFIG.white_box:
- from trove.dns.rsdns.driver import RsDnsInstanceEntryFactory
- from nova import flags
- from nova import utils
- FLAGS = flags.FLAGS
-
-
-@test(groups=["rsdns.eventlet"], enabled=CAN_USE_EVENTLET)
-class RsdnsEventletTests(object):
- """Makes sure the RSDNS client can be used from multiple green threads."""
-
- def assert_record_created(self, index):
- msg = "Record %d wasn't created!" % index
- assert_true(index in self.new_records, msg)
-
- @before_class(enabled=WHITE_BOX and RUN_DNS)
- def create_driver(self):
- """Creates the DNS Driver used in subsequent tests."""
- self.driver = utils.import_object(FLAGS.dns_driver)
- self.entry_factory = RsDnsInstanceEntryFactory()
- self.test_uuid = uuid.uuid4().hex
- self.new_records = {}
-
- def make_record(self, index):
- """Creates a record with the form 'eventlet-%s-%d'."""
- uuid = "eventlet-%s-%d" % (self.test_uuid, index)
- instance = {'uuid': uuid}
- entry = self.entry_factory.create_entry(instance)
- entry.name = uuid + "." + self.entry_factory.default_dns_zone.name
- entry.content = "123.123.123.123"
- self.driver.create_entry(entry)
- self.new_records[index] = True
-
- @test(enabled=WHITE_BOX and RUN_DNS)
- def use_dns_from_a_single_thread(self):
- """Add DNS records one at a time."""
- self.new_records = {}
- for index in range(-1, -5, -1):
- self.make_record(index)
- self.assert_record_created(index)
-
- @test(enabled=WHITE_BOX and RUN_DNS)
- def use_dns_from_multiple_greenthreads(self):
- """Add multiple DNS records at once."""
- self.new_records = {}
-
- def make_record(index):
- def __cb():
- self.make_record(index)
- self.assert_record_created(index)
- return index
- return __cb
-
- pile = eventlet.GreenPile()
- indices = range(1, 4)
- for index in indices:
- pile.spawn(make_record(index))
-
- list(pile) # Wait for them to finish
- for index in indices:
- self.assert_record_created(index)
diff --git a/integration/tests/integration/tests/dns/conversion.py b/integration/tests/integration/tests/dns/conversion.py
deleted file mode 100644
index 3e1b5fcb..00000000
--- a/integration/tests/integration/tests/dns/conversion.py
+++ /dev/null
@@ -1,103 +0,0 @@
-# Copyright 2011 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Tests classes which convert RS style-entries to Nova DNS entries."""
-
-import hashlib
-import unittest
-from proboscis import test
-
-from trove.tests.config import CONFIG
-
-
-if CONFIG.white_box:
- from nova import flags
- from rsdns.client.records import Record
- from trove.dns.rsdns.driver import EntryToRecordConverter
- from trove.dns.rsdns.driver import RsDnsInstanceEntryFactory
- from trove.dns.rsdns.driver import RsDnsZone
- FLAGS = flags.FLAGS
- driver = None
- DEFAULT_ZONE = RsDnsZone(1, "dbaas.rackspace.org")
- TEST_CONTENT = "126.1.1.1"
- TEST_NAME = "hiwassup.dbaas.rackspace.org"
-
-
-@test(groups=["unit", "rsdns.conversion"],
- enabled=CONFIG.white_box)
-class ConvertingNovaEntryNamesToRecordNames(unittest.TestCase):
-
- def setUp(self):
- self.converter = EntryToRecordConverter(DEFAULT_ZONE)
- self.fake_zone = RsDnsZone(id=5, name="blah.org")
-
- def test_normal_name(self):
- long_name = self.converter.name_to_long_name("hi", self.fake_zone)
- self.assertEqual("hi.blah.org", long_name)
-
- def test_short_name(self):
- long_name = self.converter.name_to_long_name("", self.fake_zone)
- self.assertEqual("", long_name)
-
- def test_long_name(self):
- long_name = self.converter.name_to_long_name("blah.org.",
- self.fake_zone)
- self.assertEqual("blah.org..blah.org", long_name)
-
-
-@test(groups=["unit", "rsdns.conversion"],
- enabled=CONFIG.white_box)
-class ConvertingRecordsToEntries(unittest.TestCase):
-
- def setUp(self):
- self.converter = EntryToRecordConverter(DEFAULT_ZONE)
- self.fake_zone = RsDnsZone(id=5, name="blah.org")
-
- def test_normal_name(self):
- record = Record(None, {"id": 5, "name": "hi.blah.org",
- "data": "stacker.com blah@blah 13452378",
- "ttl": 5,
- "type": "SOA"})
- entry = self.converter.record_to_entry(record=record,
- dns_zone=self.fake_zone)
- self.assertEqual("stacker.com blah@blah 13452378", entry.content)
- self.assertEqual("hi.blah.org", entry.name)
- self.assertEqual("5", str(entry.ttl))
- self.assertEqual("SOA", entry.type)
-
-
-@test(groups=["rsdns.conversion"],
- enabled=CONFIG.white_box)
-class WhenCreatingAnEntryForAnInstance(unittest.TestCase):
- # This isn't a unit test because RsDnsInstanceEntryFactory connects to the
- # service.
-
- def setUp(self):
- self.creator = RsDnsInstanceEntryFactory()
-
- def test_should_concatanate_strings(self):
- instance = {'id': '56',
- 'uuid': '000136c0-effa-4711-a747-a5b9fbfcb3bd'}
- entry = self.creator.create_entry(instance)
- expected_name = "%s.%s" % (hashlib.sha1(instance['uuid']).hexdigest(),
- FLAGS.dns_domain_name)
- self.assertEqual(expected_name, entry.name,
- msg="Entry name should match - %s" % entry.name)
- self.assertIsNone(entry.content)
- self.assertEqual("A", entry.type)
- self.assertEqual(FLAGS.dns_ttl, entry.ttl)
- self.assertIsNone(entry.priority)
- self.assertEqual(FLAGS.dns_domain_name, entry.dns_zone.name)
- if not entry.dns_zone.id:
- self.fail(msg="DNS Zone Id should not be empty")
diff --git a/integration/tests/integration/tests/dns/dns.py b/integration/tests/integration/tests/dns/dns.py
deleted file mode 100644
index 3734ad54..00000000
--- a/integration/tests/integration/tests/dns/dns.py
+++ /dev/null
@@ -1,104 +0,0 @@
-
-import unittest
-
-from proboscis import test
-
-from trove.tests.api.instances import instance_info
-from trove.tests.api.instances import GROUP_START as INSTANCE_START
-from trove.tests.api.instances import GROUP_TEST
-from trove.tests.api.instances import GROUP_STOP as INSTANCE_STOP
-from trove.tests.config import CONFIG
-from trove.common.utils import import_object
-from trove.common.utils import poll_until
-
-WHITE_BOX = CONFIG.white_box
-
-if WHITE_BOX:
- # TODO(tim.simpson): Restore this once white box functionality can be
- # added back to this test module.
- pass
- # import rsdns
- # from nova import flags
- # from nova import utils
-
- # from trove import exception
- # from trove.utils import poll_until
-
- # FLAGS = flags.FLAGS
-
-dns_driver = None
-
-GROUP = "dbaas.guest.dns"
-
-
-@test(groups=[GROUP, GROUP_TEST])
-class Setup(unittest.TestCase):
- """Creates the DNS Driver and entry factory used in subsequent tests."""
-
- def test_create_rs_dns_driver(self):
- global dns_driver
- dns_driver = import_object(FLAGS.dns_driver)
-
-
-def expected_dns_entry():
- """Returns expected DNS entry for this instance.
-
- :rtype: Instance of :class:`DnsEntry`.
-
- """
- return create_dns_entry(instance_info.local_id, instance_info.id)
-
-
-@test(depends_on_classes=[Setup],
- depends_on_groups=[INSTANCE_START],
- groups=[GROUP, GROUP_TEST])
-class WhenInstanceIsCreated(unittest.TestCase):
- """Make sure the DNS name was provisioned.
-
- This class actually calls the DNS driver to confirm the entry that should
- exist for the given instance does exist.
-
- """
-
- def test_dns_entry_should_exist(self):
- entry = expected_dns_entry()
- if entry:
- def get_entries():
- return dns_driver.get_entries_by_name(entry.name)
- try:
- poll_until(get_entries, lambda entries: len(entries) > 0,
- sleep_time=2, time_out=60)
- except exception.PollTimeOut:
- self.fail("Did not find name " + entry.name + \
- " in the entries, which were as follows:"
- + str(dns_driver.get_entries()))
-
-
-@test(depends_on_classes=[Setup, WhenInstanceIsCreated],
- depends_on_groups=[INSTANCE_STOP],
- groups=[GROUP])
-class AfterInstanceIsDestroyed(unittest.TestCase):
- """Make sure the DNS name is removed along with an instance.
-
- Because the compute manager calls the DNS manager with an RPC cast, it can
- take a while, so we wait up to 60 seconds for the entry to disappear.
-
- """
-
- def test_dns_entry_exist_should_be_removed_shortly_thereafter(self):
- entry = expected_dns_entry()
-
- if not entry:
- return
-
- def get_entries():
- return dns_driver.get_entries_by_name(entry.name)
-
- try:
- poll_until(get_entries, lambda entries: len(entries) == 0,
- sleep_time=2, time_out=60)
- except exception.PollTimeOut:
- # Manually delete the rogue item
- dns_driver.delete_entry(entry.name, entry.type, entry.dns_zone)
- self.fail("The DNS entry was never deleted when the instance "
- "was destroyed.")
diff --git a/integration/tests/integration/tests/initialize.py b/integration/tests/integration/tests/initialize.py
index 25e687a2..a9ce6ab8 100644
--- a/integration/tests/integration/tests/initialize.py
+++ b/integration/tests/integration/tests/initialize.py
@@ -21,27 +21,14 @@ from tests.util.services import Service
from trove.tests.config import CONFIG
-FAKE = CONFIG.fake_mode
-START_SERVICES = (not FAKE) and CONFIG.values.get('start_services', False)
-START_NOVA_NETWORK = (START_SERVICES and
- not CONFIG.values.get('neutron_enabled',
- False))
-KEYSTONE_ALL = CONFIG.values.get('keystone_use_combined', True)
-USE_NOVA_VOLUME = CONFIG.values.get('use_nova_volume', False)
-
-dbaas_image = None
-instance_name = None
-success_statuses = ["build", "active"]
-
-
def dbaas_url():
return str(CONFIG.values.get("dbaas_url"))
+
def nova_url():
return str(CONFIG.values.get("nova_client")['url'])
-
class Daemon(object):
"""Starts a daemon."""
@@ -74,95 +61,3 @@ class Daemon(object):
self.service = Service(cmds)
if not self.service.is_service_alive():
self.service.start()
-
-@test(groups=["services.initialize"],
- enabled=START_SERVICES and (not KEYSTONE_ALL))
-def start_keystone_all():
- """Starts the Keystone API."""
- Daemon(service_path_root="usr_bin_dir",
- service_path="%s/keystone-all",
- extra_cmds=['--config-file'],
- conf_file_name="keystone_conf").run()
-
-
-@test(groups=["services.initialize", "services.initialize.glance"],
- enabled=START_SERVICES)
-def start_glance_registry():
- """Starts the Glance Registry."""
- Daemon(alternate_path="/usr/bin/glance-registry",
- conf_file_name="glance_reg_conf",
- service_path_root="usr_bin_dir",
- service_path="%s/glance-registry").run()
-
-
-@test(groups=["services.initialize", "services.initialize.glance"],
- depends_on=[start_glance_registry], enabled=START_SERVICES)
-def start_glance_api():
- """Starts the Glance API."""
- Daemon(alternate_path="/usr/bin/glance-api",
- conf_file_name="glance_reg_conf",
- service_path_root="usr_bin_dir",
- service_path="%s/glance-api").run()
-
-
-@test(groups=["services.initialize"], depends_on_classes=[start_glance_api],
- enabled=START_NOVA_NETWORK)
-def start_nova_network():
- """Starts the Nova Network Service."""
- Daemon(service_path_root="usr_bin_dir",
- service_path="%s/nova-network",
- extra_cmds=['--config-file='],
- conf_file_name="nova_conf").run()
-
-
-@test(groups=["services.initialize"], enabled=START_SERVICES)
-def start_scheduler():
- """Starts the Scheduler Service."""
- Daemon(service_path_root="usr_bin_dir",
- service_path="%s/nova-scheduler",
- extra_cmds=['--config-file='],
- conf_file_name="nova_conf").run()
-
-
-@test(groups=["services.initialize"],
- depends_on_classes=[start_glance_api],
- enabled=START_SERVICES)
-def start_compute():
- """Starts the Nova Compute Service."""
- Daemon(service_path_root="usr_bin_dir",
- service_path="%s/nova-compute",
- extra_cmds=['--config-file='],
- conf_file_name="nova_conf").run()
-
-
-@test(groups=["services.initialize"], depends_on_classes=[start_scheduler],
- enabled=START_SERVICES and USE_NOVA_VOLUME)
-def start_volume():
- """Starts the Nova Compute Service."""
- Daemon(service_path_root="usr_bin_dir",
- service_path="%s/nova-volume",
- extra_cmds=['--config-file='],
- conf_file_name="nova_conf").run()
-
-
-@test(groups=["services.initialize"],
- depends_on_classes=[start_glance_api, start_nova_network, start_compute,
- start_volume],
- enabled=START_SERVICES)
-def start_nova_api():
- """Starts the Nova Compute Service."""
- Daemon(service_path_root="usr_bin_dir",
- service_path="%s/nova-api",
- extra_cmds=['--config-file='],
- conf_file_name="nova_conf").run()
-
-
-@test(groups=["services.initialize"],
- depends_on_classes=[start_nova_api],
- enabled=START_SERVICES)
-def start_trove_api():
- """Starts the Trove Service."""
- Daemon(service_path_root="usr_bin_dir",
- service_path="%s/trove-api",
- extra_cmds=['--config-file='],
- conf_file_name="trove_conf").run()
diff --git a/integration/tests/integration/tests/smoke/__init__.py b/integration/tests/integration/tests/smoke/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/integration/tests/integration/tests/smoke/__init__.py
+++ /dev/null
diff --git a/integration/tests/integration/tests/smoke/instance.py b/integration/tests/integration/tests/smoke/instance.py
deleted file mode 100644
index 3488e5f2..00000000
--- a/integration/tests/integration/tests/smoke/instance.py
+++ /dev/null
@@ -1,103 +0,0 @@
-
-from proboscis.asserts import assert_equal
-from proboscis import test
-from proboscis import before_class
-
-from trove.common.utils import poll_until
-from trove.tests.util import create_client
-
-
-class InstanceGenerator(object):
-
- def __init__(self, client, status=None, name=None, flavor=None,
- account_id=None, created_at=None, databases=None, users=None,
- volume_size=None):
- self.client = client
- self.status = status
- self.name = name
- self.flavor = flavor
- self.account_id = account_id
- self.databases = databases
- self.users = users
- self.volume_size = volume_size
- self.id = None
-
- def create_instance(self):
- #make the call to create the instance
- instance = self.client.instances.create(self.name, self.flavor,
- self.volume_size, self.databases, self.users)
- self.client.assert_http_code(200)
-
- #verify we are in a build state
- assert_equal(instance.status, "BUILD")
- #pull out the ID
- self.id = instance.id
-
- return instance
-
- def wait_for_build_to_finish(self):
- poll_until(lambda: self.client.instances.get(self.id),
- lambda instance: instance.status != "BUILD",
- time_out=600)
-
- def get_active_instance(self):
- instance = self.client.instances.get(self.id)
- self.client.assert_http_code(200)
-
- #check the container name
- assert_equal(instance.name, self.name)
-
- #pull out volume info and verify
- assert_equal(str(instance.volume_size), str(self.volume_size))
-
- #pull out the flavor and verify
- assert_equal(str(instance.flavor), str(self.flavor))
-
- return instance
-
-
-@test(groups=['smoke', 'positive'])
-class CreateInstance(object):
-
- @before_class
- def set_up(self):
- client = create_client(is_admin=False)
- name = 'test_createInstance_container'
- flavor = 1
- volume_size = 1
- db_name = 'test_db'
- databases = [
- {
- "name": db_name
- }
- ]
- users = [
- {
- "name": "lite",
- "password": "litepass",
- "databases": [{"name": db_name}]
- }
- ]
-
- #create the Instance
- instance = InstanceGenerator(client, name=name,
- flavor=flavor,
- volume_size=volume_size,
- databases=databases, users=users)
- instance.create_instance()
-
- #wait for the instance
- instance.wait_for_build_to_finish()
-
- #get the active instance
- inst = instance.get_active_instance()
-
- #list out the databases for our instance and verify the db name
- dbs = client.databases.list(inst.id)
- client.assert_http_code(200)
-
- assert_equal(len(dbs), 1)
- assert_equal(dbs[0].name, db_name)
-
- client.instances.delete(inst.id)
- client.assert_http_code(202)
diff --git a/integration/tests/integration/tests/volumes/__init__.py b/integration/tests/integration/tests/volumes/__init__.py
deleted file mode 100644
index 09c4cfed..00000000
--- a/integration/tests/integration/tests/volumes/__init__.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# Copyright (c) 2011 OpenStack, LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-:mod:`volumes` -- Tests for volumes.
-===================================
-"""
-
-""""Tests for Volumes."""
-
-# This is a set of tests written directly against the VolumeManager and
-# VolumeClient classes; they don't require standing up Nova daemons or
-# anything else.
-VOLUMES_DRIVER = "trove.volumes.driver"
diff --git a/integration/tests/integration/tests/volumes/driver.py b/integration/tests/integration/tests/volumes/driver.py
deleted file mode 100644
index 3e084597..00000000
--- a/integration/tests/integration/tests/volumes/driver.py
+++ /dev/null
@@ -1,547 +0,0 @@
-# Copyright (c) 2012 OpenStack, LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from numbers import Number
-import os
-import re
-import shutil
-import six
-import socket
-import time
-import unittest
-
-import pexpect
-
-from proboscis import test
-from proboscis.asserts import assert_raises
-from proboscis.decorators import expect_exception
-from proboscis.decorators import time_out
-
-from trove.tests.config import CONFIG
-from trove.common.utils import poll_until
-from trove.tests.util import process
-from trove.common.utils import import_class
-from tests import initialize
-
-
-WHITE_BOX = CONFIG.white_box
-VOLUMES_DRIVER = "trove.volumes.driver"
-
-if WHITE_BOX:
- # TODO(tim.simpson): Restore this once white box functionality can be
- # added back to this test module.
- pass
- # from nova import context
- # from nova import exception
- # from nova import flags
- # from nova import utils
- # from trove import exception as trove_exception
- # from trove.utils import poll_until
- # from trove import volume
- # from trove.tests.volume import driver as test_driver
-
- # FLAGS = flags.FLAGS
-
-
-UUID_PATTERN = re.compile('^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-'
- '[0-9a-f]{4}-[0-9a-f]{12}$')
-
-HUGE_VOLUME = 5000
-
-
-def is_uuid(text):
- return UUID_PATTERN.search(text) is not None
-
-
-class StoryDetails(object):
-
- def __init__(self):
- self.api = volume.API()
- self.client = volume.Client()
- self.context = context.get_admin_context()
- self.device_path = None
- self.volume_desc = None
- self.volume_id = None
- self.volume_name = None
- self.volume = None
- self.host = socket.gethostname()
- self.original_uuid = None
- self.original_device_info = None
- self.resize_volume_size = 2
-
- def get_volume(self):
- return self.api.get(self.context, self.volume_id)
-
- @property
- def mount_point(self):
- return "%s/%s" % (LOCAL_MOUNT_PATH, self.volume_id)
-
- @property
- def test_mount_file_path(self):
- return "%s/test.txt" % self.mount_point
-
-
-story = None
-storyFail = None
-
-LOCAL_MOUNT_PATH = "/testsmnt"
-
-
-class VolumeTest(unittest.TestCase):
- """This test tells the story of a volume, from cradle to grave."""
-
- def __init__(self, *args, **kwargs):
- unittest.TestCase.__init__(self, *args, **kwargs)
-
- def setUp(self):
- global story, storyFail
- self.story = story
- self.storyFail = storyFail
-
- def assert_volume_as_expected(self, volume):
- self.assertIsInstance(volume["id"], Number)
- self.assertEqual(self.story.volume_name, volume["display_name"])
- self.assertEqual(self.story.volume_desc, volume["display_description"])
- self.assertEqual(1, volume["size"])
- self.assertEqual(self.story.context.user_id, volume["user_id"])
- self.assertEqual(self.story.context.project_id, volume["project_id"])
-
-
-@test(groups=[VOLUMES_DRIVER], depends_on_classes=[initialize.start_volume])
-class SetUp(VolumeTest):
-
- def test_05_create_story(self):
- """Creating 'story' vars used by the rest of these tests."""
- global story, storyFail
- story = StoryDetails()
- storyFail = StoryDetails()
-
- @time_out(60)
- def test_10_wait_for_topics(self):
- """Wait until the volume topic is up before proceeding."""
- topics = ["volume"]
- from tests.util.topics import hosts_up
- while not all(hosts_up(topic) for topic in topics):
- pass
-
- def test_20_refresh_local_folders(self):
- """Delete the local folders used as mount locations if they exist."""
- if os.path.exists(LOCAL_MOUNT_PATH):
- #TODO(rnirmal): Also need to remove any existing mounts.
- shutil.rmtree(LOCAL_MOUNT_PATH)
- os.mkdir(LOCAL_MOUNT_PATH)
- # Give some time for the services to startup
- time.sleep(10)
-
- @time_out(60)
- def test_30_mgmt_volume_check(self):
- """Get the volume information from the mgmt API"""
- story_context = self.story.context
- device_info = self.story.api.get_storage_device_info(story_context)
- print("device_info : %r" % device_info)
- self.assertNotEqual(device_info, None,
- "the storage device information should exist")
- self.story.original_device_info = device_info
-
- @time_out(60)
- def test_31_mgmt_volume_info(self):
- """Check the available space against the mgmt API info."""
- story_context = self.story.context
- device_info = self.story.api.get_storage_device_info(story_context)
- print("device_info : %r" % device_info)
- info = {'spaceTotal': device_info['raw_total'],
- 'spaceAvail': device_info['raw_avail']}
- self._assert_available_space(info)
-
- def _assert_available_space(self, device_info, fail=False):
- """
- Given the SAN device_info (fake or not), run the free-space assertions.
- """
- print("DEVICE_INFO on SAN : %r" % device_info)
- # Calculate the GBs; Divide by 2 for the FLAGS.san_network_raid_factor
- gbs = 1.0 / 1024 / 1024 / 1024 / 2
- total = int(device_info['spaceTotal']) * gbs
- free = int(device_info['spaceAvail']) * gbs
- used = total - free
- usable = total * (FLAGS.san_max_provision_percent * 0.01)
- real_free = float(int(usable - used))
-
- print("total : %r" % total)
- print("free : %r" % free)
- print("used : %r" % used)
- print("usable : %r" % usable)
- print("real_free : %r" % real_free)
-
- check_space = self.story.api.check_for_available_space
- self.assertFalse(check_space(self.story.context, HUGE_VOLUME))
- self.assertFalse(check_space(self.story.context, real_free + 1))
-
- if fail:
- self.assertFalse(check_space(self.story.context, real_free))
- self.assertFalse(check_space(self.story.context, real_free - 1))
- self.assertFalse(check_space(self.story.context, 1))
- else:
- self.assertTrue(check_space(self.story.context, real_free))
- self.assertTrue(check_space(self.story.context, real_free - 1))
- self.assertTrue(check_space(self.story.context, 1))
-
-
-@test(groups=[VOLUMES_DRIVER], depends_on_classes=[SetUp])
-class AddVolumeFailure(VolumeTest):
-
- @time_out(60)
- def test_add(self):
- """
- Make a call to provision a volume that is expected to FAIL and assert
- the initial return values.
- """
- self.assertIsNone(self.storyFail.volume_id)
- name = "TestVolume"
- desc = "A volume that was created for testing."
- self.storyFail.volume_name = name
- self.storyFail.volume_desc = desc
- volume = self.storyFail.api.create(self.storyFail.context,
- size=HUGE_VOLUME,
- snapshot_id=None, name=name,
- description=desc)
- self.assertEqual(HUGE_VOLUME, volume["size"])
- self.assertEqual("creating", volume["status"])
- self.assertEqual("detached", volume["attach_status"])
- self.storyFail.volume = volume
- self.storyFail.volume_id = volume["id"]
-
-
-@test(groups=[VOLUMES_DRIVER], depends_on_classes=[AddVolumeFailure])
-class AfterVolumeFailureIsAdded(VolumeTest):
- """Check that the volume can be retrieved via the API, and setup.
-
- All we want to see returned is a list-like with an initial string.
-
- """
-
- @time_out(120)
- def test_api_get(self):
- """Wait until the volume is a FAILURE."""
- volume = poll_until(lambda: self.storyFail.get_volume(),
- lambda volume: volume["status"] != "creating")
- self.assertEqual("error", volume["status"])
- self.assertEqual("detached", volume["attach_status"])
-
- @time_out(60)
- def test_mgmt_volume_check(self):
- """Get the volume information from the mgmt API"""
- info = self.story.api.get_storage_device_info(self.story.context)
- print("device_info : %r" % info)
- self.assertNotEqual(info, None,
- "the storage device information should exist")
- self.assertEqual(self.story.original_device_info['raw_total'],
- info['raw_total'])
- self.assertEqual(self.story.original_device_info['raw_avail'],
- info['raw_avail'])
-
-
-@test(groups=[VOLUMES_DRIVER], depends_on_classes=[SetUp])
-class AddVolume(VolumeTest):
-
- @time_out(60)
- def test_add(self):
- """Make call to prov. a volume and assert the return value is OK."""
- self.assertIsNone(self.story.volume_id)
- name = "TestVolume"
- desc = "A volume that was created for testing."
- self.story.volume_name = name
- self.story.volume_desc = desc
- volume = self.story.api.create(self.story.context, size=1,
- snapshot_id=None, name=name,
- description=desc)
- self.assert_volume_as_expected(volume)
- self.assertEqual("creating", volume["status"])
- self.assertEqual("detached", volume["attach_status"])
- self.story.volume = volume
- self.story.volume_id = volume["id"]
-
-
-@test(groups=[VOLUMES_DRIVER], depends_on_classes=[AddVolume])
-class AfterVolumeIsAdded(VolumeTest):
- """Check that the volume can be retrieved via the API, and setup.
-
- All we want to see returned is a list-like with an initial string.
-
- """
-
- @time_out(120)
- def test_api_get(self):
- """Wait until the volume is finished provisioning."""
- volume = poll_until(lambda: self.story.get_volume(),
- lambda volume: volume["status"] != "creating")
- self.assertEqual("available", volume["status"])
- self.assert_volume_as_expected(volume)
- self.assertEqual("detached", volume["attach_status"])
-
- @time_out(60)
- def test_mgmt_volume_check(self):
- """Get the volume information from the mgmt API"""
- print("self.story.original_device_info : %r" %
- self.story.original_device_info)
- info = self.story.api.get_storage_device_info(self.story.context)
- print("device_info : %r" % info)
- self.assertNotEqual(info, None,
- "the storage device information should exist")
- self.assertEqual(self.story.original_device_info['raw_total'],
- info['raw_total'])
- volume_size = int(self.story.volume['size']) * (1024 ** 3) * 2
- print("volume_size: %r" % volume_size)
- print("self.story.volume['size']: %r" % self.story.volume['size'])
- avail = int(self.story.original_device_info['raw_avail']) - volume_size
- print("avail space: %r" % avail)
- self.assertEqual(int(info['raw_avail']), avail)
-
-
-@test(groups=[VOLUMES_DRIVER], depends_on_classes=[AfterVolumeIsAdded])
-class SetupVolume(VolumeTest):
-
- @time_out(60)
- def test_assign_volume(self):
- """Tell the volume it belongs to this host node."""
- #TODO(tim.simpson) If this is important, could we add a test to
- # make sure some kind of exception is thrown if it
- # isn't added to certain drivers?
- self.assertNotEqual(None, self.story.volume_id)
- self.story.api.assign_to_compute(self.story.context,
- self.story.volume_id,
- self.story.host)
-
- @time_out(60)
- def test_setup_volume(self):
- """Set up the volume on this host. AKA discovery."""
- self.assertNotEqual(None, self.story.volume_id)
- device = self.story.client._setup_volume(self.story.context,
- self.story.volume_id,
- self.story.host)
- if not isinstance(device, six.string_types):
- self.fail("Expected device to be a string, but instead it was " +
- str(type(device)) + ".")
- self.story.device_path = device
-
-
-@test(groups=[VOLUMES_DRIVER], depends_on_classes=[SetupVolume])
-class FormatVolume(VolumeTest):
-
- @expect_exception(IOError)
- @time_out(60)
- def test_10_should_raise_IOError_if_format_fails(self):
- """
-
- Tests that if the driver's _format method fails, its
- public format method will perform an assertion properly, discover
- it failed, and raise an exception.
-
- """
-
- volume_driver_cls = import_class(FLAGS.volume_driver)
-
- class BadFormatter(volume_driver_cls):
-
- def _format(self, device_path):
- pass
-
- bad_client = volume.Client(volume_driver=BadFormatter())
- bad_client._format(self.story.device_path)
-
- @time_out(60)
- def test_20_format(self):
- self.assertNotEqual(None, self.story.device_path)
- self.story.client._format(self.story.device_path)
-
- def test_30_check_options(self):
- cmd = ("sudo dumpe2fs -h %s 2> /dev/null | "
- "awk -F ':' '{ if($1 == \"Reserved block count\") "
- "{ rescnt=$2 } } { if($1 == \"Block count\") "
- "{ blkcnt=$2 } } END { print (rescnt/blkcnt)*100 }'")
- cmd = cmd % self.story.device_path
- out, err = process(cmd)
- self.assertEqual(float(5), round(float(out)), msg=out)
-
-
-@test(groups=[VOLUMES_DRIVER], depends_on_classes=[FormatVolume])
-class MountVolume(VolumeTest):
-
- @time_out(60)
- def test_mount(self):
- self.story.client._mount(self.story.device_path,
- self.story.mount_point)
- with open(self.story.test_mount_file_path, 'w') as file:
- file.write("Yep, it's mounted alright.")
- self.assertTrue(os.path.exists(self.story.test_mount_file_path))
-
- def test_mount_options(self):
- cmd = "mount -l | awk '/%s.*noatime/ { print $1 }'"
- cmd %= LOCAL_MOUNT_PATH.replace('/', '')
- out, err = process(cmd)
- self.assertEqual(os.path.realpath(self.story.device_path), out.strip(),
- msg=out)
-
-
-@test(groups=[VOLUMES_DRIVER], depends_on_classes=[MountVolume])
-class ResizeVolume(VolumeTest):
-
- @time_out(300)
- def test_resize(self):
- self.story.api.resize(self.story.context, self.story.volume_id,
- self.story.resize_volume_size)
-
- volume = poll_until(lambda: self.story.get_volume(),
- lambda volume: volume["status"] == "resized")
- self.assertEqual("resized", volume["status"])
- self.assertEqual("attached", volume["attach_status"])
- self.assertEqual(self.story.resize_volume_size, volume['size'])
-
- @time_out(300)
- def test_resizefs_rescan(self):
- self.story.client.resize_fs(self.story.context,
- self.story.volume_id)
- expected = "trove.tests.volume.driver.ISCSITestDriver"
- if FLAGS.volume_driver == expected:
- size = self.story.resize_volume_size * \
- test_driver.TESTS_VOLUME_SIZE_MULTIPLIER * 1024 * 1024
- else:
- size = self.story.resize_volume_size * 1024 * 1024
- out, err = process('sudo blockdev --getsize64 %s' %
- os.path.realpath(self.story.device_path))
- if int(out) < (size * 0.8):
- self.fail("Size %s is not more or less %s" % (out, size))
-
- # Reset the volume status to available
- self.story.api.update(self.story.context, self.story.volume_id,
- {'status': 'available'})
-
-
-@test(groups=[VOLUMES_DRIVER], depends_on_classes=[MountVolume])
-class UnmountVolume(VolumeTest):
-
- @time_out(60)
- def test_unmount(self):
- self.story.client._unmount(self.story.mount_point)
- child = pexpect.spawn("sudo mount %s" % self.story.mount_point)
- child.expect("mount: can't find %s in" % self.story.mount_point)
-
-
-@test(groups=[VOLUMES_DRIVER], depends_on_classes=[UnmountVolume])
-class GrabUuid(VolumeTest):
-
- @time_out(60)
- def test_uuid_must_match_pattern(self):
- """UUID must be hex chars in the form 8-4-4-4-12."""
- client = self.story.client # volume.Client()
- device_path = self.story.device_path # '/dev/sda5'
- uuid = client.get_uuid(device_path)
- self.story.original_uuid = uuid
- self.assertTrue(is_uuid(uuid), "uuid must match regex")
-
- @time_out(60)
- def test_get_invalid_uuid(self):
- """DevicePathInvalidForUuid is raised if device_path is wrong."""
- client = self.story.client
- device_path = "gdfjghsfjkhggrsyiyerreygghdsghsdfjhf"
- self.assertRaises(trove_exception.DevicePathInvalidForUuid,
- client.get_uuid, device_path)
-
-
-@test(groups=[VOLUMES_DRIVER], depends_on_classes=[GrabUuid])
-class RemoveVolume(VolumeTest):
-
- @time_out(60)
- def test_remove(self):
- self.story.client.remove_volume(self.story.context,
- self.story.volume_id,
- self.story.host)
- self.assertRaises(Exception,
- self.story.client._format, self.story.device_path)
-
-
-@test(groups=[VOLUMES_DRIVER], depends_on_classes=[GrabUuid])
-class Initialize(VolumeTest):
-
- @time_out(300)
- def test_10_initialize_will_format(self):
- """initialize will setup, format, and store the UUID of a volume"""
- self.assertTrue(self.story.get_volume()['uuid'] is None)
- self.story.client.initialize(self.story.context, self.story.volume_id,
- self.story.host)
- volume = self.story.get_volume()
- self.assertTrue(is_uuid(volume['uuid']), "uuid must match regex")
- self.assertNotEqual(self.story.original_uuid, volume['uuid'],
- "Validate our assumption that the volume UUID "
- "will change when the volume is formatted.")
- self.story.client.remove_volume(self.story.context,
- self.story.volume_id,
- self.story.host)
-
- @time_out(60)
- def test_20_initialize_the_second_time_will_not_format(self):
- """If initialize is called but a UUID exists, it should not format."""
- old_uuid = self.story.get_volume()['uuid']
- self.assertTrue(old_uuid is not None)
-
- class VolumeClientNoFmt(volume.Client):
-
- def _format(self, device_path):
- raise RuntimeError("_format should not be called!")
-
- no_fmt_client = VolumeClientNoFmt()
- no_fmt_client.initialize(self.story.context, self.story.volume_id,
- self.story.host)
- self.assertEqual(old_uuid, self.story.get_volume()['uuid'],
- "UUID should be the same as no formatting occurred.")
- self.story.client.remove_volume(self.story.context,
- self.story.volume_id,
- self.story.host)
-
- def test_30_check_device_exists(self):
- assert_raises(exception.InvalidDevicePath, self.story.client._format,
- self.story.device_path)
-
-
-@test(groups=[VOLUMES_DRIVER], depends_on_classes=[Initialize])
-class DeleteVolume(VolumeTest):
-
- @time_out(60)
- def test_delete(self):
- self.story.api.delete(self.story.context, self.story.volume_id)
-
-
-@test(groups=[VOLUMES_DRIVER], depends_on_classes=[DeleteVolume])
-class ConfirmMissing(VolumeTest):
-
- @time_out(60)
- def test_discover_should_fail(self):
- try:
- self.story.client.driver.discover_volume(self.story.context,
- self.story.volume)
- self.fail("Expecting an error but did not get one.")
- except exception.Error:
- pass
- except trove_exception.ISCSITargetNotDiscoverable:
- pass
-
- @time_out(60)
- def test_get_missing_volume(self):
- try:
- volume = poll_until(lambda: self.story.api.get(self.story.context,
- self.story.volume_id),
- lambda volume: volume["status"] != "deleted")
- self.assertEqual(volume["deleted"], False)
- except exception.VolumeNotFound:
- pass
diff --git a/integration/tests/integration/tox.ini b/integration/tests/integration/tox.ini
deleted file mode 100644
index 81051493..00000000
--- a/integration/tests/integration/tox.ini
+++ /dev/null
@@ -1,28 +0,0 @@
-# Examples:
-# Run tests against Trove running locally in fake mode:
-# TROVE_CLIENT_PATH=../some_path tox -e local -- --group=blackbox
-[tox]
-envlist = py26
-
-[testenv]
-deps =
- coverage
- nose
- pexpect
- proboscis
- sqlalchemy
- {env:TROVE_PATH}
- {env:TROVE_CLIENT_PATH}
-
-[testenv:py26]
-
-[testenv:local]
-deps =
- nose
- pexpect
- proboscis
- sqlalchemy
- {env:TROVE_PATH}
- {env:TROVE_CLIENT_PATH}
-commands =
- {envpython} int_tests.py --conf=localhost.test.conf {posargs:DEFAULTS}
diff --git a/roles/trove-devstack/defaults/main.yml b/roles/trove-devstack/defaults/main.yml
index e525b714..5280e125 100644
--- a/roles/trove-devstack/defaults/main.yml
+++ b/roles/trove-devstack/defaults/main.yml
@@ -1,4 +1,5 @@
devstack_base_dir: /opt/stack
trove_test_datastore: 'mysql'
-trove_test_group: ''
+trove_test_group: 'mysql'
+trove_test_datastore_version: '5.7'
trove_resize_time_out: ''
diff --git a/roles/trove-devstack/tasks/main.yml b/roles/trove-devstack/tasks/main.yml
index 3a1dc575..1dbbbbce 100644
--- a/roles/trove-devstack/tasks/main.yml
+++ b/roles/trove-devstack/tasks/main.yml
@@ -8,4 +8,4 @@
export TROVE_RESIZE_TIME_OUT={{trove_resize_time_out}}
cd $DEST/trove
- tox -etrovestack -vv -- gate-tests {{trove_test_datastore}} {{trove_test_group}}
+ tox -etrovestack -vv -- gate-tests {{trove_test_datastore}} {{trove_test_group}} {{trove_test_datastore_version}}
diff --git a/run_tests.py b/run_tests.py
index 8c0dd95f..b7219ddb 100644
--- a/run_tests.py
+++ b/run_tests.py
@@ -206,20 +206,14 @@ def import_tests():
from trove.tests.api import configurations # noqa
from trove.tests.api import databases # noqa
from trove.tests.api import datastores # noqa
- from trove.tests.api import header # noqa
from trove.tests.api import instances as rd_instances # noqa
from trove.tests.api import instances_actions as rd_actions # noqa
from trove.tests.api import instances_delete # noqa
- from trove.tests.api import instances_mysql_down # noqa
from trove.tests.api import instances_resize # noqa
from trove.tests.api import limits # noqa
- from trove.tests.api.mgmt import admin_required # noqa
- from trove.tests.api.mgmt import instances as mgmt_instances # noqa
from trove.tests.api.mgmt import instances_actions as mgmt_actions # noqa
- from trove.tests.api.mgmt import malformed_json # noqa
from trove.tests.api import replication # noqa
from trove.tests.api import root # noqa
- from trove.tests.api import root_on_create # noqa
from trove.tests.api import user_access # noqa
from trove.tests.api import users # noqa
from trove.tests.api import versions # noqa
diff --git a/trove/cmd/guest.py b/trove/cmd/guest.py
index d45ebf0d..6e12db57 100644
--- a/trove/cmd/guest.py
+++ b/trove/cmd/guest.py
@@ -50,9 +50,6 @@ def main():
"was not injected into the guest or not read by guestagent"))
raise RuntimeError(msg)
- # BUG(1650518): Cleanup in the Pike release
- # make it fatal if CONF.instance_rpc_encr_key is None
-
# rpc module must be loaded after decision about thread monkeypatching
# because if thread module is not monkeypatched we can't use eventlet
# executor from oslo_messaging library.
diff --git a/trove/common/cfg.py b/trove/common/cfg.py
index 37027bd9..f2e3eae3 100644
--- a/trove/common/cfg.py
+++ b/trove/common/cfg.py
@@ -175,7 +175,7 @@ common_opts = [
help="Maximum time (in seconds) to wait for Guest Agent "
"'quick' requests (such as retrieving a list of "
"users or databases)."),
- cfg.IntOpt('agent_call_high_timeout', default=60 * 10,
+ cfg.IntOpt('agent_call_high_timeout', default=60 * 5,
help="Maximum time (in seconds) to wait for Guest Agent 'slow' "
"requests (such as restarting the database)."),
cfg.IntOpt('agent_replication_snapshot_timeout', default=36000,
diff --git a/trove/common/utils.py b/trove/common/utils.py
index c6a730ac..a5e1a1d4 100644
--- a/trove/common/utils.py
+++ b/trove/common/utils.py
@@ -196,7 +196,7 @@ def build_polling_task(retriever, condition=lambda value: value,
raise loopingcall.LoopingCallDone(retvalue=obj)
call = loopingcall.BackOffLoopingCall(f=poll_and_check)
- return call.start(initial_delay=False, starting_interval=sleep_time,
+ return call.start(initial_delay=0, starting_interval=sleep_time,
max_interval=30, timeout=time_out)
@@ -209,7 +209,7 @@ def wait_for_task(polling_task):
def poll_until(retriever, condition=lambda value: value,
- sleep_time=1, time_out=0):
+ sleep_time=3, time_out=0):
"""Retrieves object until it passes condition, then returns it.
If time_out_limit is passed in, PollTimeOut will be raised once that
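The hunk above lowers the polling frequency (initial_delay=0 and a default sleep_time of 3). A simplified stand-in, not the oslo.service BackOffLoopingCall implementation used here, showing only the retriever/condition/sleep_time/time_out contract that callers rely on:

    import time

    class PollTimeOut(Exception):
        pass

    def poll_until(retriever, condition=lambda value: value,
                   sleep_time=3, time_out=0):
        # Call retriever until condition(value) is true; raise PollTimeOut
        # once time_out seconds have elapsed (0 means no limit).
        start = time.time()
        while True:
            value = retriever()
            if condition(value):
                return value
            if time_out and time.time() - start > time_out:
                raise PollTimeOut()
            time.sleep(sleep_time)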
diff --git a/trove/guestagent/datastore/mysql_common/manager.py b/trove/guestagent/datastore/mysql_common/manager.py
index e4e27c03..6d61c5dd 100644
--- a/trove/guestagent/datastore/mysql_common/manager.py
+++ b/trove/guestagent/datastore/mysql_common/manager.py
@@ -431,7 +431,7 @@ class MySqlManager(manager.Manager):
}))
def attach_replica(self, context, replica_info, slave_config):
- LOG.debug("Attaching replica.")
+ LOG.info("Attaching replica.")
app = self.mysql_app(self.mysql_app_status.get())
try:
if 'replication_strategy' in replica_info:
diff --git a/trove/guestagent/datastore/mysql_common/service.py b/trove/guestagent/datastore/mysql_common/service.py
index de5f1a2b..c562775e 100644
--- a/trove/guestagent/datastore/mysql_common/service.py
+++ b/trove/guestagent/datastore/mysql_common/service.py
@@ -50,7 +50,8 @@ from trove.guestagent.datastore import service
from trove.guestagent import pkg
ADMIN_USER_NAME = "os_admin"
-CONNECTION_STR_FORMAT = "mysql+pymysql://%s:%s@127.0.0.1:3306"
+CONNECTION_STR_FORMAT = ("mysql+pymysql://%s:%s@localhost/?"
+ "unix_socket=/var/run/mysqld/mysqld.sock")
LOG = logging.getLogger(__name__)
FLUSH = text(sql_query.FLUSH)
ENGINE = None
@@ -157,26 +158,14 @@ class BaseMySqlAppStatus(service.BaseDbStatus):
The checks which don't need service app can be put here.
"""
try:
- utils.execute_with_timeout(
- "/usr/bin/mysqladmin",
- "ping", run_as_root=True, root_helper="sudo",
- log_output_on_error=True)
-
- LOG.debug("Database service check: mysqld is alive")
- return rd_instance.ServiceStatuses.RUNNING
- except exception.ProcessExecutionError:
- LOG.warning("Database service check: Failed to get database "
- "service status by mysqladmin, fall back to use ps.")
-
- try:
out, _ = utils.execute_with_timeout(
"/bin/ps", "-C", "mysqld", "h",
log_output_on_error=True
)
pid = out.split()[0]
- LOG.debug('Database service check: service PID exists', pid)
- return rd_instance.ServiceStatuses.BLOCKED
+ LOG.debug('Database service check: service PID exists: %s', pid)
+ return rd_instance.ServiceStatuses.RUNNING
except exception.ProcessExecutionError:
LOG.warning("Database service check: Failed to get database "
"service status by ps, fall back to check PID file.")
@@ -629,20 +618,28 @@ class BaseMySqlApp(object):
override_strategy=ImportOverrideStrategy(CNF_INCLUDE_DIR, CNF_EXT))
def get_engine(self):
- """Create the default engine with the updated admin user."""
- # TODO(rnirmal):Based on permission issues being resolved we may revert
- # url = URL(drivername='mysql', host='localhost',
- # query={'read_default_file': '/etc/mysql/my.cnf'})
+ """Create the default engine with the updated admin user.
+
+ If the admin user has not been created yet, use root instead.
+ """
global ENGINE
if ENGINE:
return ENGINE
- pwd = self.get_auth_password()
+ user = ADMIN_USER_NAME
+ password = ""
+ try:
+ password = self.get_auth_password()
+ except exception.UnprocessableEntity:
+ # os_admin user not created yet
+ user = 'root'
+
ENGINE = sqlalchemy.create_engine(
- CONNECTION_STR_FORMAT % (ADMIN_USER_NAME,
- urllib.parse.quote(pwd.strip())),
+ CONNECTION_STR_FORMAT % (user,
+ urllib.parse.quote(password.strip())),
pool_recycle=120, echo=CONF.sql_query_logging,
listeners=[self.keep_alive_connection_cls()])
+
return ENGINE
@classmethod
@@ -678,7 +675,7 @@ class BaseMySqlApp(object):
with all privileges similar to the root user.
"""
LOG.debug("Creating Trove admin user '%s'.", ADMIN_USER_NAME)
- host = "127.0.0.1"
+ host = "localhost"
try:
cu = sql_query.CreateUser(ADMIN_USER_NAME, host=host,
clear=password)
@@ -742,6 +739,9 @@ class BaseMySqlApp(object):
LOG.debug("Generating admin password.")
admin_password = utils.generate_random_password()
+
+ # By default, MySQL does not require a password at all for connecting
+ # as root
engine = sqlalchemy.create_engine(
CONNECTION_STR_FORMAT % ('root', ''), echo=True)
with self.local_sql_client(engine, use_flush=False) as client:
@@ -771,9 +771,11 @@ class BaseMySqlApp(object):
self.wipe_ib_logfiles()
def _save_authentication_properties(self, admin_password):
+ # Use localhost so the client connects to MySQL over the unix socket
+ # instead of over IP and port.
client_sect = {'client': {'user': ADMIN_USER_NAME,
'password': admin_password,
- 'host': '127.0.0.1'}}
+ 'host': 'localhost'}}
operating_system.write_file(self.get_client_auth_file(),
client_sect, codec=self.CFG_CODEC)
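The service changes above move the guest's admin connection from TCP on 127.0.0.1 to the local unix socket. A minimal sketch of how the new CONNECTION_STR_FORMAT expands into an engine (it assumes SQLAlchemy and PyMySQL are installed; the password is a placeholder and the socket path is the Debian/Ubuntu default used in the diff):

    import urllib.parse

    import sqlalchemy

    CONNECTION_STR_FORMAT = ("mysql+pymysql://%s:%s@localhost/?"
                             "unix_socket=/var/run/mysqld/mysqld.sock")

    def build_engine(user, password):
        # Quote the password so special characters survive the URL.
        url = CONNECTION_STR_FORMAT % (user, urllib.parse.quote(password))
        return sqlalchemy.create_engine(url, pool_recycle=120)

    engine = build_engine("os_admin", "example-password")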
diff --git a/trove/guestagent/strategies/backup/experimental/mariadb_impl.py b/trove/guestagent/strategies/backup/experimental/mariadb_impl.py
index 2ff310ca..b3644aeb 100644
--- a/trove/guestagent/strategies/backup/experimental/mariadb_impl.py
+++ b/trove/guestagent/strategies/backup/experimental/mariadb_impl.py
@@ -30,9 +30,11 @@ class MariaBackup(base.BackupRunner):
@property
def user_and_pass(self):
- return ('--user=%(user)s --password=%(password)s --host=127.0.0.1' %
+ return ('--user=%(user)s --password=%(password)s --host=localhost '
+ '--socket=%(socket_file)s' %
{'user': common_service.ADMIN_USER_NAME,
- 'password': mysql_service.MySqlApp.get_auth_password()})
+ 'password': mysql_service.MySqlApp.get_auth_password(),
+ 'socket_file': '/var/run/mysqld/mysqld.sock'})
@property
def cmd(self):
diff --git a/trove/guestagent/strategies/backup/mysql_impl.py b/trove/guestagent/strategies/backup/mysql_impl.py
index 8d5b844a..25bd5c52 100644
--- a/trove/guestagent/strategies/backup/mysql_impl.py
+++ b/trove/guestagent/strategies/backup/mysql_impl.py
@@ -67,16 +67,18 @@ class InnoBackupEx(base.BackupRunner):
@property
def user_and_pass(self):
- return (' --user=%(user)s --password=%(password)s --host=127.0.0.1 ' %
+ return ('--user=%(user)s --password=%(password)s --host=localhost '
+ '--socket=%(socket_file)s' %
{'user': ADMIN_USER_NAME,
- 'password': MySqlApp.get_auth_password()})
+ 'password': MySqlApp.get_auth_password(),
+ 'socket_file': '/var/run/mysqld/mysqld.sock'})
@property
def cmd(self):
cmd = ('sudo innobackupex'
' --stream=xbstream'
' %(extra_opts)s ' +
- self.user_and_pass +
+ self.user_and_pass + ' ' +
MySqlApp.get_data_dir() +
' 2>/tmp/innobackupex.log'
)
@@ -134,7 +136,7 @@ class InnoBackupExIncremental(InnoBackupEx):
' --incremental'
' --incremental-lsn=%(lsn)s'
' %(extra_opts)s ' +
- self.user_and_pass +
+ self.user_and_pass + ' ' +
MySqlApp.get_data_dir() +
' 2>/tmp/innobackupex.log')
return cmd + self.zip_cmd + self.encrypt_cmd
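For clarity, a rough illustration of the backup command string after the socket arguments are appended (the user, password, and data directory are placeholders, not values generated by Trove; the explicit ' ' added in the diff is what keeps the credentials and the data directory from running together):

    user_and_pass = ('--user=os_admin --password=example-password '
                     '--host=localhost '
                     '--socket=/var/run/mysqld/mysqld.sock')
    data_dir = '/var/lib/mysql/data'  # placeholder for MySqlApp.get_data_dir()
    cmd = ('sudo innobackupex'
           ' --stream=xbstream'
           ' %(extra_opts)s ' +
           user_and_pass + ' ' +
           data_dir +
           ' 2>/tmp/innobackupex.log')
    print(cmd % {'extra_opts': ''})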
diff --git a/trove/taskmanager/models.py b/trove/taskmanager/models.py
index db4997a3..16ffb178 100755
--- a/trove/taskmanager/models.py
+++ b/trove/taskmanager/models.py
@@ -478,9 +478,8 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
access=None):
"""Prepare the networks for the trove instance.
- the params are all passed from trove-taskmanager.
-
- Exception is raised if any error happens.
+ 'nics' contains the networks to attach; the management network always
+ comes last.
"""
LOG.info("Preparing networks for the instance %s", self.id)
security_group = None
@@ -1243,34 +1242,8 @@ class BuiltInstanceTasks(BuiltInstance, NotifyMixin, ConfigurationMixin):
# Also we check guest state before issuing reboot
LOG.debug(str(e))
- # Wait for the mysql stopped.
- def _datastore_is_offline():
- self._refresh_datastore_status()
- return (
- self.datastore_status_matches(
- rd_instance.ServiceStatuses.SHUTDOWN) or
- self.datastore_status_matches(
- rd_instance.ServiceStatuses.CRASHED)
- )
-
- try:
- utils.poll_until(
- _datastore_is_offline,
- sleep_time=3,
- time_out=CONF.reboot_time_out
- )
- except exception.PollTimeOut:
- LOG.error("Cannot reboot instance, DB status is %s",
- self.datastore_status.status)
- return
-
- LOG.debug("The guest service status is %s.",
- self.datastore_status.status)
-
LOG.info("Rebooting instance %s.", self.id)
self.server.reboot()
- # Poll nova until instance is active
- reboot_time_out = CONF.reboot_time_out
def update_server_info():
self.refresh_compute_server_info()
@@ -1279,7 +1252,7 @@ class BuiltInstanceTasks(BuiltInstance, NotifyMixin, ConfigurationMixin):
utils.poll_until(
update_server_info,
sleep_time=3,
- time_out=reboot_time_out)
+ time_out=CONF.reboot_time_out)
# Set the status to PAUSED. The guest agent will reset the status
# when the reboot completes and MySQL is running.
diff --git a/trove/tests/__init__.py b/trove/tests/__init__.py
index 25302464..5f3d5fea 100644
--- a/trove/tests/__init__.py
+++ b/trove/tests/__init__.py
@@ -14,10 +14,21 @@
import os
+# Groups
DBAAS_API = "dbaas.api"
-PRE_INSTANCES = "dbaas.api.pre_instances"
-INSTANCES = "dbaas.api.instances"
-POST_INSTANCES = "dbaas.api.post_instances"
+DBAAS_API_INSTANCES = "dbaas.api.instances"
+DBAAS_API_INSTANCES_DELETE = "dbaas.api.instances.delete"
+DBAAS_API_USERS = "dbaas.api.users"
+DBAAS_API_USERS_ACCESS = "dbaas.api.users.access"
+DBAAS_API_USERS_ROOT = "dbaas.api.users.root"
+DBAAS_API_DATABASES = "dbaas.api.databases"
+DBAAS_API_VERSIONS = "dbaas.api.versions"
+DBAAS_API_DATASTORES = "dbaas.api.datastores"
+DBAAS_API_MGMT_DATASTORES = "dbaas.api.mgmt.datastores"
+DBAAS_API_INSTANCE_ACTIONS = "dbaas.api.instances.actions"
+DBAAS_API_BACKUPS = "dbaas.api.backups"
+DBAAS_API_CONFIGURATIONS = "dbaas.api.configurations"
+DBAAS_API_REPLICATION = "dbaas.api.replication"
# Use '-t' to avoid the warning message 'mesg: ttyname failed: Inappropriate
# ioctl for device'
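The new group constants are consumed by the proboscis decorators, as the backups.py and configurations.py hunks below show. A standalone sketch (not part of the Trove suite; it assumes proboscis is installed and is driven by proboscis's test runner) of how groups and depends_on_groups chain test classes:

    from proboscis import test
    from proboscis.asserts import assert_true

    DBAAS_API_INSTANCE_ACTIONS = "dbaas.api.instances.actions"
    DBAAS_API_BACKUPS = "dbaas.api.backups"

    @test(groups=[DBAAS_API_INSTANCE_ACTIONS])
    class InstanceActions(object):
        @test
        def resize(self):
            assert_true(True)

    # Scheduled only after every test in the instance-actions group passes.
    @test(depends_on_groups=[DBAAS_API_INSTANCE_ACTIONS],
          groups=[DBAAS_API_BACKUPS])
    class CreateBackups(object):
        @test
        def create(self):
            assert_true(True)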
diff --git a/trove/tests/api/backups.py b/trove/tests/api/backups.py
index c00280cb..caa39331 100644
--- a/trove/tests/api/backups.py
+++ b/trove/tests/api/backups.py
@@ -31,13 +31,11 @@ from trove.tests.api.instances import instance_info
from trove.tests.api.instances import TIMEOUT_INSTANCE_CREATE
from trove.tests.api.instances import TIMEOUT_INSTANCE_DELETE
from trove.tests.api.instances import WaitForGuestInstallationToFinish
-from trove.tests.api import instances_actions
from trove.tests.config import CONFIG
from trove.tests.util import create_dbaas_client
from trove.tests.util.users import Requirements
-BACKUP_GROUP = "dbaas.api.backups"
BACKUP_NAME = 'backup_test'
BACKUP_DESC = 'test description'
@@ -53,8 +51,8 @@ backup_count_prior_to_create = 0
backup_count_for_instance_prior_to_create = 0
-@test(depends_on_groups=[instances_actions.GROUP_RESIZE],
- groups=[BACKUP_GROUP, tests.INSTANCES],
+@test(depends_on_groups=[tests.DBAAS_API_INSTANCE_ACTIONS],
+ groups=[tests.DBAAS_API_BACKUPS],
enabled=CONFIG.swift_enabled)
class CreateBackups(object):
@@ -145,7 +143,7 @@ class BackupRestoreMixin(object):
@test(depends_on_classes=[CreateBackups],
- groups=[BACKUP_GROUP, tests.INSTANCES],
+ groups=[tests.DBAAS_API_BACKUPS],
enabled=CONFIG.swift_enabled)
class WaitForBackupCreateToFinish(BackupRestoreMixin):
"""Wait until the backup creation is finished."""
@@ -153,12 +151,12 @@ class WaitForBackupCreateToFinish(BackupRestoreMixin):
@test
@time_out(TIMEOUT_BACKUP_CREATE)
def test_backup_created(self):
- # This version just checks the REST API status.
+ """Wait for the backup to be finished."""
self.verify_backup(backup_info.id)
@test(depends_on=[WaitForBackupCreateToFinish],
- groups=[BACKUP_GROUP, tests.INSTANCES],
+ groups=[tests.DBAAS_API_BACKUPS],
enabled=CONFIG.swift_enabled)
class ListBackups(object):
@@ -191,7 +189,7 @@ class ListBackups(object):
def test_backup_list_filter_different_datastore(self):
"""Test list backups and filter by datastore."""
result = instance_info.dbaas.backups.list(
- datastore='Test_Datastore_1')
+ datastore=CONFIG.dbaas_datastore_name_no_versions)
# There should not be any backups for this datastore
assert_equal(0, len(result))
@@ -247,7 +245,7 @@ class ListBackups(object):
@test(runs_after=[ListBackups],
depends_on=[WaitForBackupCreateToFinish],
- groups=[BACKUP_GROUP, tests.INSTANCES],
+ groups=[tests.DBAAS_API_BACKUPS],
enabled=CONFIG.swift_enabled)
class IncrementalBackups(BackupRestoreMixin):
@@ -275,9 +273,10 @@ class IncrementalBackups(BackupRestoreMixin):
assert_equal(backup_info.id, incremental_info.parent_id)
-@test(groups=[BACKUP_GROUP, tests.INSTANCES], enabled=CONFIG.swift_enabled)
+@test(groups=[tests.DBAAS_API_BACKUPS],
+ depends_on_classes=[IncrementalBackups],
+ enabled=CONFIG.swift_enabled)
class RestoreUsingBackup(object):
-
@classmethod
def _restore(cls, backup_ref):
restorePoint = {"backupRef": backup_ref}
@@ -295,15 +294,15 @@ class RestoreUsingBackup(object):
@test(depends_on=[IncrementalBackups])
def test_restore_incremental(self):
+ """Restore from incremental backup."""
global incremental_restore_instance_id
incremental_restore_instance_id = self._restore(incremental_info.id)
@test(depends_on_classes=[RestoreUsingBackup],
- groups=[BACKUP_GROUP, tests.INSTANCES],
+ groups=[tests.DBAAS_API_BACKUPS],
enabled=CONFIG.swift_enabled)
class WaitForRestoreToFinish(object):
-
@classmethod
def _poll(cls, instance_id_to_poll):
"""Shared "instance restored" test logic."""
@@ -324,7 +323,7 @@ class WaitForRestoreToFinish(object):
poll_until(result_is_active, time_out=TIMEOUT_INSTANCE_CREATE,
sleep_time=10)
- @test(depends_on=[RestoreUsingBackup.test_restore_incremental])
+ @test
def test_instance_restored_incremental(self):
try:
self._poll(incremental_restore_instance_id)
@@ -333,8 +332,8 @@ class WaitForRestoreToFinish(object):
@test(enabled=(not CONFIG.fake_mode and CONFIG.swift_enabled),
- depends_on=[WaitForRestoreToFinish],
- groups=[BACKUP_GROUP, tests.INSTANCES])
+ depends_on_classes=[WaitForRestoreToFinish],
+ groups=[tests.DBAAS_API_BACKUPS])
class VerifyRestore(object):
@classmethod
@@ -348,8 +347,7 @@ class VerifyRestore(object):
poll_until(db_is_found, time_out=60 * 10, sleep_time=10)
- @test(depends_on=[WaitForRestoreToFinish.
- test_instance_restored_incremental])
+ @test
def test_database_restored_incremental(self):
try:
self._poll(incremental_restore_instance_id, incremental_db)
@@ -359,8 +357,8 @@ class VerifyRestore(object):
fail('Timed out')
-@test(groups=[BACKUP_GROUP, tests.INSTANCES], enabled=CONFIG.swift_enabled,
- depends_on=[VerifyRestore])
+@test(groups=[tests.DBAAS_API_BACKUPS], enabled=CONFIG.swift_enabled,
+ depends_on_classes=[VerifyRestore])
class DeleteRestoreInstance(object):
@classmethod
@@ -380,7 +378,7 @@ class DeleteRestoreInstance(object):
assert_raises(exceptions.NotFound, instance_info.dbaas.instances.get,
instance_id)
- @test(depends_on=[VerifyRestore.test_database_restored_incremental])
+ @test
def test_delete_restored_instance_incremental(self):
try:
self._delete(incremental_restore_instance_id)
@@ -388,11 +386,10 @@ class DeleteRestoreInstance(object):
fail('Timed out')
-@test(depends_on=[DeleteRestoreInstance],
- groups=[BACKUP_GROUP, tests.INSTANCES],
+@test(depends_on_classes=[DeleteRestoreInstance],
+ groups=[tests.DBAAS_API_BACKUPS],
enabled=CONFIG.swift_enabled)
class DeleteBackups(object):
-
@test
def test_backup_delete_not_found(self):
"""Test delete unknown backup."""
diff --git a/trove/tests/api/configurations.py b/trove/tests/api/configurations.py
index 5044911f..78352139 100644
--- a/trove/tests/api/configurations.py
+++ b/trove/tests/api/configurations.py
@@ -35,7 +35,6 @@ from troveclient.compat import exceptions
from trove.common.utils import poll_until
from trove import tests
-from trove.tests.api import backups
from trove.tests.api.instances import assert_unprocessable
from trove.tests.api.instances import instance_info
from trove.tests.api.instances import InstanceTestInfo
@@ -49,10 +48,6 @@ from trove.tests.util import create_dbaas_client
from trove.tests.util.mysql import create_mysql_connection
from trove.tests.util.users import Requirements
-
-CONFIGURATION_GROUP = "dbaas.api.configurations"
-GROUP_CONFIG_DEFINE = "dbaas.api.configurations.define"
-CONFIG_NEW_INSTANCE_GROUP = "dbaas.api.configurations.newinstance"
CONFIG_NAME = "test_configuration"
CONFIG_DESC = "configuration description"
@@ -180,8 +175,8 @@ class ConfigurationsTestBase(object):
return datastore_test_configs.get("configurations", {})
-@test(depends_on_groups=[backups.BACKUP_GROUP],
- groups=[CONFIGURATION_GROUP, GROUP_CONFIG_DEFINE, tests.INSTANCES])
+@test(depends_on_groups=[tests.DBAAS_API_BACKUPS],
+ groups=[tests.DBAAS_API_CONFIGURATIONS])
class CreateConfigurations(ConfigurationsTestBase):
@test
@@ -315,13 +310,13 @@ class CreateConfigurations(ConfigurationsTestBase):
assert_equal(resp.status, 200)
-@test(depends_on=[CreateConfigurations],
- groups=[CONFIGURATION_GROUP, GROUP_CONFIG_DEFINE, tests.INSTANCES])
+@test(depends_on_classes=[CreateConfigurations],
+ groups=[tests.DBAAS_API_CONFIGURATIONS])
class AfterConfigurationsCreation(ConfigurationsTestBase):
@test
def test_assign_configuration_to_invalid_instance(self):
- # test assigning to an instance that does not exist
+ """test assigning to an instance that does not exist"""
invalid_id = "invalid-inst-id"
try:
instance_info.dbaas.instances.modify(invalid_id,
@@ -332,7 +327,7 @@ class AfterConfigurationsCreation(ConfigurationsTestBase):
@test
def test_assign_configuration_to_valid_instance(self):
- # test assigning a configuration to an instance
+ """test assigning a configuration to an instance"""
print("instance_info.id: %s" % instance_info.id)
print("configuration_info: %s" % configuration_info)
print("configuration_info.id: %s" % configuration_info.id)
@@ -344,8 +339,7 @@ class AfterConfigurationsCreation(ConfigurationsTestBase):
@test(depends_on=[test_assign_configuration_to_valid_instance])
def test_assign_configuration_to_instance_with_config(self):
- # test assigning a configuration to an instance that
- # already has an assigned configuration
+ """test assigning a configuration to an instance conflicts"""
config_id = configuration_info.id
assert_raises(exceptions.BadRequest,
instance_info.dbaas.instances.modify, instance_info.id,
@@ -354,7 +348,7 @@ class AfterConfigurationsCreation(ConfigurationsTestBase):
@test(depends_on=[test_assign_configuration_to_valid_instance])
@time_out(30)
def test_get_configuration_details_from_instance_validation(self):
- # validate that the configuration was applied correctly to the instance
+ """validate the configuration after attaching"""
print("instance_info.id: %s" % instance_info.id)
inst = instance_info.dbaas.instances.get(instance_info.id)
configuration_id = inst.configuration['id']
@@ -363,8 +357,9 @@ class AfterConfigurationsCreation(ConfigurationsTestBase):
_test_configuration_is_applied_to_instance(instance_info,
configuration_id)
+ @test(depends_on=[test_get_configuration_details_from_instance_validation])
def test_configurations_get(self):
- # test that the instance shows up on the assigned configuration
+ """test that the instance shows up on the assigned configuration"""
result = instance_info.dbaas.configurations.get(configuration_info.id)
assert_equal(configuration_info.id, result.id)
assert_equal(configuration_info.name, result.name)
@@ -428,8 +423,8 @@ class AfterConfigurationsCreation(ConfigurationsTestBase):
configuration_info.id)
-@test(depends_on=[AfterConfigurationsCreation],
- groups=[CONFIGURATION_GROUP, GROUP_CONFIG_DEFINE, tests.INSTANCES])
+@test(depends_on_classes=[AfterConfigurationsCreation],
+ groups=[tests.DBAAS_API_CONFIGURATIONS])
class ListConfigurations(ConfigurationsTestBase):
@test
@@ -545,13 +540,13 @@ class ListConfigurations(ConfigurationsTestBase):
assert_equal(list_config[0].updated, details_config.updated)
-@test(depends_on=[ListConfigurations],
- groups=[CONFIGURATION_GROUP, CONFIG_NEW_INSTANCE_GROUP, tests.INSTANCES])
+@test(depends_on_classes=[ListConfigurations],
+ groups=[tests.DBAAS_API_CONFIGURATIONS])
class StartInstanceWithConfiguration(ConfigurationsTestBase):
@test
def test_start_instance_with_configuration(self):
- # test that a new instance will apply the configuration on create
+ """test that a new instance will apply the configuration on create"""
global configuration_instance
databases = []
databases.append({"name": "firstdbconfig", "character_set": "latin2",
@@ -588,13 +583,13 @@ class StartInstanceWithConfiguration(ConfigurationsTestBase):
@test(depends_on_classes=[StartInstanceWithConfiguration],
- groups=[CONFIGURATION_GROUP, CONFIG_NEW_INSTANCE_GROUP, tests.INSTANCES])
+ groups=[tests.DBAAS_API_CONFIGURATIONS])
class WaitForConfigurationInstanceToFinish(ConfigurationsTestBase):
@test
@time_out(TIMEOUT_INSTANCE_CREATE)
def test_instance_with_configuration_active(self):
- # wait for the instance to become active
+ """wait for the instance created with configuration"""
def result_is_active():
instance = instance_info.dbaas.instances.get(
@@ -619,7 +614,7 @@ class WaitForConfigurationInstanceToFinish(ConfigurationsTestBase):
@test(depends_on=[WaitForConfigurationInstanceToFinish],
- groups=[CONFIGURATION_GROUP, tests.INSTANCES])
+ groups=[tests.DBAAS_API_CONFIGURATIONS])
class DeleteConfigurations(ConfigurationsTestBase):
@before_class
@@ -689,7 +684,7 @@ class DeleteConfigurations(ConfigurationsTestBase):
@test(depends_on=[test_unable_delete_instance_configurations])
@time_out(30)
def test_unassign_configuration_from_instances(self):
- # test to unassign configuration from instance
+ """test to unassign configuration from instance"""
instance_info.dbaas.instances.modify(configuration_instance.id,
configuration="")
resp, body = instance_info.dbaas.client.last_response
@@ -725,7 +720,7 @@ class DeleteConfigurations(ConfigurationsTestBase):
@test(depends_on=[test_assign_in_wrong_state])
def test_no_instances_on_configuration(self):
- # test there is no configuration on the instance after unassigning
+ """test_no_instances_on_configuration"""
result = instance_info.dbaas.configurations.get(configuration_info.id)
assert_equal(configuration_info.id, result.id)
assert_equal(configuration_info.name, result.name)
@@ -737,6 +732,7 @@ class DeleteConfigurations(ConfigurationsTestBase):
@test(depends_on=[test_unassign_configuration_from_instances])
@time_out(120)
def test_restart_service_after_unassign_return_active(self):
+ """test_restart_service_after_unassign_return_active"""
def result_is_not_active():
instance = instance_info.dbaas.instances.get(
instance_info.id)
@@ -756,7 +752,7 @@ class DeleteConfigurations(ConfigurationsTestBase):
@test(depends_on=[test_restart_service_after_unassign_return_active])
@time_out(120)
def test_restart_service_should_return_active(self):
- # test that after restarting the instance it becomes active
+ """test that after restarting the instance it becomes active"""
instance_info.dbaas.instances.restart(instance_info.id)
resp, body = instance_info.dbaas.client.last_response
assert_equal(resp.status, 202)
@@ -773,7 +769,7 @@ class DeleteConfigurations(ConfigurationsTestBase):
@test(depends_on=[test_restart_service_should_return_active])
def test_assign_config_and_name_to_instance_using_patch(self):
- # test assigning a configuration and name to an instance
+ """test_assign_config_and_name_to_instance_using_patch"""
new_name = 'new_name'
report = CONFIG.get_report()
report.log("instance_info.id: %s" % instance_info.id)
@@ -858,8 +854,7 @@ class DeleteConfigurations(ConfigurationsTestBase):
@test(runs_after=[test_unassign_configuration_after_patch])
def test_delete_unassigned_configuration(self):
- # test that we can delete the configuration after no instances are
- # assigned to it any longer
+ """test_delete_unassigned_configuration"""
instance_info.dbaas.configurations.delete(configuration_info.id)
resp, body = instance_info.dbaas.client.last_response
assert_equal(resp.status, 202)
@@ -867,8 +862,7 @@ class DeleteConfigurations(ConfigurationsTestBase):
@test(depends_on=[test_delete_unassigned_configuration])
@time_out(TIMEOUT_INSTANCE_DELETE)
def test_delete_configuration_instance(self):
- # test that we can delete the instance even though there is a
- # configuration applied to the instance
+ """test_delete_configuration_instance"""
instance_info.dbaas.instances.delete(configuration_instance.id)
assert_equal(202, instance_info.dbaas.last_http_code)
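
The edits above replace the per-module group strings with the shared constants in trove.tests, chain classes through depends_on_classes instead of depends_on, and turn leading comments into docstrings so each test carries a description the runner can report. A minimal sketch of the resulting idiom, assuming the proboscis decorators and group constants used throughout this patch (class and test names here are illustrative only):

from proboscis import test

from trove import tests


@test(groups=[tests.DBAAS_API_CONFIGURATIONS])
class FirstStage(object):
    @test
    def test_prepare(self):
        """Docstrings, rather than comments, describe the test to the runner."""
        pass


@test(depends_on_classes=[FirstStage],
      groups=[tests.DBAAS_API_CONFIGURATIONS])
class SecondStage(object):
    @test
    def test_follow_up(self):
        """Runs only after every test in FirstStage has finished."""
        pass
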
diff --git a/trove/tests/api/databases.py b/trove/tests/api/databases.py
index ece7d0e6..a1512ce5 100644
--- a/trove/tests/api/databases.py
+++ b/trove/tests/api/databases.py
@@ -19,49 +19,21 @@ from proboscis.asserts import assert_false
from proboscis.asserts import assert_raises
from proboscis.asserts import assert_true
from proboscis import before_class
-from proboscis.decorators import time_out
from proboscis import test
from troveclient.compat import exceptions
from trove import tests
-from trove.tests.api.instances import GROUP_START
from trove.tests.api.instances import instance_info
from trove.tests import util
from trove.tests.util import test_config
-GROUP = "dbaas.api.databases"
FAKE = test_config.values['fake_mode']
-@test(depends_on_groups=[GROUP_START],
- groups=[tests.INSTANCES, "dbaas.guest.mysql"],
- enabled=not test_config.values['fake_mode'])
-class TestMysqlAccess(object):
- """
- Make sure that MySQL server was secured.
- """
-
- @time_out(60 * 2)
- @test
- def test_mysql_admin(self):
- """Ensure we aren't allowed access with os_admin and wrong password."""
- util.mysql_connection().assert_fails(
- instance_info.get_address(), "os_admin", "asdfd-asdf234")
-
- @test
- def test_mysql_root(self):
- """Ensure we aren't allowed access with root and wrong password."""
- util.mysql_connection().assert_fails(
- instance_info.get_address(), "root", "dsfgnear")
-
-
-@test(depends_on_groups=[GROUP_START],
- depends_on_classes=[TestMysqlAccess],
- groups=[tests.DBAAS_API, GROUP, tests.INSTANCES])
+@test(depends_on_groups=[tests.DBAAS_API_USERS_ACCESS],
+ groups=[tests.DBAAS_API_DATABASES])
class TestDatabases(object):
- """
- Test the creation and deletion of additional MySQL databases
- """
+ """Test the creation and deletion of additional MySQL databases"""
dbname = "third #?@some_-"
dbname_urlencoded = "third%20%23%3F%40some_-"
diff --git a/trove/tests/api/datastores.py b/trove/tests/api/datastores.py
index e916148d..5c0be738 100644
--- a/trove/tests/api/datastores.py
+++ b/trove/tests/api/datastores.py
@@ -28,15 +28,12 @@ from trove.tests.util import create_dbaas_client
from trove.tests.util import test_config
from trove.tests.util.users import Requirements
-
-GROUP = "dbaas.api.datastores"
NAME = "nonexistent"
-@test(groups=[tests.DBAAS_API, GROUP, tests.PRE_INSTANCES],
- depends_on_groups=["services.initialize"])
+@test(groups=[tests.DBAAS_API_DATASTORES],
+ depends_on_groups=[tests.DBAAS_API_VERSIONS])
class Datastores(object):
-
@before_class
def setUp(self):
rd_user = test_config.users.find_user(
@@ -87,24 +84,38 @@ class Datastores(object):
"Datastore '%s' cannot be found." % NAME)
@test
+ def test_create_inactive_datastore_by_admin(self):
+ datastore = self.rd_client.datastores.get(test_config.dbaas_datastore)
+ ds_version = self.rd_client.datastore_versions.list(datastore.id)[0]
+ ds_version_info = self.rd_admin.datastore_versions.get_by_uuid(
+ ds_version.id)
+
+ # Create datastore version for testing
+ # 'Test_Datastore_1' is also used in other test cases.
+ # Will be deleted in test_delete_datastore_version
+ self.rd_admin.mgmt_datastore_versions.create(
+ "inactive_version", test_config.dbaas_datastore_name_no_versions,
+ "test_manager", ds_version_info.image,
+ active='false', default='false'
+ )
+
+ @test(depends_on=[test_create_inactive_datastore_by_admin])
def test_datastore_with_no_active_versions_is_hidden(self):
datastores = self.rd_client.datastores.list()
name_list = [datastore.name for datastore in datastores]
- name_no_versions = test_config.dbaas_datastore_name_no_versions
- assert_true(name_no_versions not in name_list)
- @test
+ assert_true(
+ test_config.dbaas_datastore_name_no_versions not in name_list)
+
+ @test(depends_on=[test_create_inactive_datastore_by_admin])
def test_datastore_with_no_active_versions_is_visible_for_admin(self):
datastores = self.rd_admin.datastores.list()
name_list = [datastore.name for datastore in datastores]
- name_no_versions = test_config.dbaas_datastore_name_no_versions
- assert_true(name_no_versions in name_list)
+ assert_true(test_config.dbaas_datastore_name_no_versions in name_list)
-@test(groups=[tests.DBAAS_API, GROUP, tests.PRE_INSTANCES],
- depends_on_groups=["services.initialize"])
+@test(groups=[tests.DBAAS_API_DATASTORES])
class DatastoreVersions(object):
-
@before_class
def setUp(self):
rd_user = test_config.users.find_user(
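
The new admin-only datastore handling above is one half of a create/verify/clean-up flow that finishes in instances_delete.py. A condensed sketch of that flow, assuming rd_client/rd_admin are the regular and admin clients built in setUp, and ds_version_info is the admin view of an existing version fetched as in test_create_inactive_datastore_by_admin:

from trove.tests.util import test_config

# Admin registers an inactive version under the datastore that has no
# active versions; regular users must not see that datastore at all.
rd_admin.mgmt_datastore_versions.create(
    "inactive_version", test_config.dbaas_datastore_name_no_versions,
    "test_manager", ds_version_info.image,
    active='false', default='false')

names = [ds.name for ds in rd_client.datastores.list()]
assert test_config.dbaas_datastore_name_no_versions not in names

admin_names = [ds.name for ds in rd_admin.datastores.list()]
assert test_config.dbaas_datastore_name_no_versions in admin_names

# Clean-up happens later (see TestDeleteInstance.test_delete_datastore):
# remove every version, then the datastore itself.
datastore = rd_admin.datastores.get(
    test_config.dbaas_datastore_name_no_versions)
for version in rd_admin.datastore_versions.list(datastore.id):
    rd_admin.mgmt_datastore_versions.delete(version.id)
rd_admin.datastores.delete(datastore.id)
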
diff --git a/trove/tests/api/header.py b/trove/tests/api/header.py
deleted file mode 100644
index 86719b51..00000000
--- a/trove/tests/api/header.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# Copyright 2013 OpenStack Foundation
-# Copyright 2013 Rackspace Hosting
-# Copyright 2013 Hewlett-Packard Development Company, L.P.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-from functools import wraps
-
-from proboscis import SkipTest
-from proboscis import test
-from troveclient.compat.client import TroveHTTPClient
-
-from trove.tests.api.versions import Versions
-
-
-@test(groups=['dbaas.api.headers'])
-def must_work_with_blank_accept_headers():
- """Test to make sure that trove works without the headers."""
- versions = Versions()
- versions.setUp()
- client = versions.client
-
- if type(client.client).morph_request != TroveHTTPClient.morph_request:
- raise SkipTest("Not using the JSON client so can't execute this test.")
-
- original_morph_request = client.client.morph_request
-
- def morph_content_type_to(content_type):
- @wraps(original_morph_request)
- def _morph_request(kwargs):
- original_morph_request(kwargs)
- kwargs['headers']['Accept'] = content_type
- kwargs['headers']['Content-Type'] = content_type
-
- client.client.morph_request = _morph_request
-
- try:
- morph_content_type_to('')
- # run versions to make sure the API still returns JSON even though the
- # header type is blank
- versions.test_list_versions_index()
- finally:
- client.client.morph_request = original_morph_request
diff --git a/trove/tests/api/instances.py b/trove/tests/api/instances.py
index ce6aff39..d6be7bdd 100644
--- a/trove/tests/api/instances.py
+++ b/trove/tests/api/instances.py
@@ -21,7 +21,6 @@ import uuid
from proboscis import asserts
from proboscis.asserts import assert_equal
-from proboscis.asserts import assert_is_not_none
from proboscis.asserts import assert_not_equal
from proboscis.asserts import assert_raises
from proboscis.asserts import assert_true
@@ -39,8 +38,6 @@ from trove import tests
from trove.tests.config import CONFIG
from trove.tests.util.check import AttrCheck
from trove.tests.util import create_dbaas_client
-from trove.tests.util import dns_checker
-from trove.tests.util import iso_time
from trove.tests.util import test_config
from trove.tests.util.usage import create_usage_verifier
from trove.tests.util.users import Requirements
@@ -49,19 +46,6 @@ CONF = cfg.CONF
FAKE = test_config.values['fake_mode']
-GROUP = "dbaas.guest"
-GROUP_NEUTRON = "dbaas.neutron"
-GROUP_START = "dbaas.guest.initialize"
-GROUP_START_SIMPLE = "dbaas.guest.initialize.simple"
-GROUP_TEST = "dbaas.guest.test"
-GROUP_STOP = "dbaas.guest.shutdown"
-GROUP_USERS = "dbaas.api.users"
-GROUP_ROOT = "dbaas.api.root"
-GROUP_GUEST = "dbaas.guest.start.test"
-GROUP_DATABASES = "dbaas.api.databases"
-GROUP_CREATE_INSTANCE_FAILURE = "dbaas.api.failures"
-GROUP_QUOTAS = "dbaas.quotas"
-
TIMEOUT_INSTANCE_CREATE = 60 * 32
TIMEOUT_INSTANCE_DELETE = 120
@@ -76,7 +60,6 @@ class InstanceTestInfo(object):
self.dbaas_flavor_href = None # The flavor of the instance.
self.dbaas_datastore = None # The datastore id
self.dbaas_datastore_version = None # The datastore version id
- self.dbaas_inactive_datastore_version = None # The DS inactive id
self.id = None # The ID of the instance in the database.
self.local_id = None
@@ -163,7 +146,6 @@ class InstanceTestInfo(object):
# existing.
instance_info = InstanceTestInfo()
dbaas = None # Rich client used throughout this test.
-
dbaas_admin = None # Same as above, with admin privs.
ROOT_ON_CREATE = CONFIG.get('root_on_create', False)
VOLUME_SUPPORT = CONFIG.get('trove_volume_support', False)
@@ -179,27 +161,137 @@ def existing_instance():
return os.environ.get("TESTS_USE_INSTANCE_ID", None)
-def do_not_delete_instance():
- return os.environ.get("TESTS_DO_NOT_DELETE_INSTANCE", None) is not None
-
-
def create_new_instance():
return existing_instance() is None
-@test(groups=['dbaas.usage', 'dbaas.usage.init'])
-def clear_messages_off_queue():
- instance_info.consumer.clear_events()
+def assert_unprocessable(func, *args):
+ try:
+ func(*args)
+ # If the exception didn't get raised, but the instance is still in
+ # the BUILDING state, that's a bug.
+ result = dbaas.instances.get(instance_info.id)
+ if result.status == "BUILD":
+ fail("When an instance is being built, this function should "
+ "always raise UnprocessableEntity.")
+ except exceptions.UnprocessableEntity:
+ assert_equal(422, dbaas.last_http_code)
+ pass # Good
-@test(groups=[GROUP, GROUP_START, GROUP_START_SIMPLE, 'dbaas.setup'],
- depends_on_groups=["services.initialize"])
-class InstanceSetup(object):
- """Makes sure the client can hit the ReST service.
+class CheckInstance(AttrCheck):
+ """Class to check various attributes of Instance details."""
- This test also uses the API to find the flavor to use.
+ def __init__(self, instance):
+ super(CheckInstance, self).__init__()
+ self.instance = instance
- """
+ def flavor(self):
+ if 'flavor' not in self.instance:
+ self.fail("'flavor' not found in instance.")
+ else:
+ allowed_attrs = ['id', 'links']
+ self.contains_allowed_attrs(
+ self.instance['flavor'], allowed_attrs,
+ msg="Flavor")
+ self.links(self.instance['flavor']['links'])
+
+ def datastore(self):
+ if 'datastore' not in self.instance:
+ self.fail("'datastore' not found in instance.")
+ else:
+ allowed_attrs = ['type', 'version']
+ self.contains_allowed_attrs(
+ self.instance['datastore'], allowed_attrs,
+ msg="datastore")
+
+ def volume_key_exists(self):
+ if 'volume' not in self.instance:
+ self.fail("'volume' not found in instance.")
+ return False
+ return True
+
+ def volume(self):
+ if not VOLUME_SUPPORT:
+ return
+ if self.volume_key_exists():
+ allowed_attrs = ['size']
+ if not create_new_instance():
+ allowed_attrs.append('used')
+ self.contains_allowed_attrs(
+ self.instance['volume'], allowed_attrs,
+ msg="Volumes")
+
+ def used_volume(self):
+ if not VOLUME_SUPPORT:
+ return
+ if self.volume_key_exists():
+ allowed_attrs = ['size', 'used']
+ print(self.instance)
+ self.contains_allowed_attrs(
+ self.instance['volume'], allowed_attrs,
+ msg="Volumes")
+
+ def volume_mgmt(self):
+ if not VOLUME_SUPPORT:
+ return
+ if self.volume_key_exists():
+ allowed_attrs = ['description', 'id', 'name', 'size']
+ self.contains_allowed_attrs(
+ self.instance['volume'], allowed_attrs,
+ msg="Volumes")
+
+ def addresses(self):
+ allowed_attrs = ['addr', 'version']
+ print(self.instance)
+ networks = ['usernet']
+ for network in networks:
+ for address in self.instance['addresses'][network]:
+ self.contains_allowed_attrs(
+ address, allowed_attrs,
+ msg="Address")
+
+ def guest_status(self):
+ allowed_attrs = ['created_at', 'deleted', 'deleted_at', 'instance_id',
+ 'state', 'state_description', 'updated_at']
+ self.contains_allowed_attrs(
+ self.instance['guest_status'], allowed_attrs,
+ msg="Guest status")
+
+ def mgmt_volume(self):
+ if not VOLUME_SUPPORT:
+ return
+ allowed_attrs = ['description', 'id', 'name', 'size']
+ self.contains_allowed_attrs(
+ self.instance['volume'], allowed_attrs,
+ msg="Volume")
+
+ def replica_of(self):
+ if 'replica_of' not in self.instance:
+ self.fail("'replica_of' not found in instance.")
+ else:
+ allowed_attrs = ['id', 'links']
+ self.contains_allowed_attrs(
+ self.instance['replica_of'], allowed_attrs,
+ msg="Replica-of links not found")
+ self.links(self.instance['replica_of']['links'])
+
+ def slaves(self):
+ if 'replicas' not in self.instance:
+ self.fail("'replicas' not found in instance.")
+ else:
+ allowed_attrs = ['id', 'links']
+ for slave in self.instance['replicas']:
+ self.contains_allowed_attrs(
+ slave, allowed_attrs,
+ msg="Replica links not found")
+ self.links(slave['links'])
+
+
+@test(groups=[tests.DBAAS_API_INSTANCES],
+ depends_on_groups=[tests.DBAAS_API_MGMT_DATASTORES])
+class TestInstanceSetup(object):
+ """Prepare the instance creation parameters."""
@before_class
def setUp(self):
@@ -241,18 +333,9 @@ class InstanceSetup(object):
instance_info.name = dbaas.instances.get(id).name
-@test(depends_on_classes=[InstanceSetup], groups=[GROUP])
-def test_delete_instance_not_found():
- """Deletes an instance that does not exist."""
- # Looks for a random UUID that (most probably) does not exist.
- assert_raises(exceptions.NotFound, dbaas.instances.delete,
- "7016efb6-c02c-403e-9628-f6f57d0920d0")
-
-
-@test(depends_on_classes=[InstanceSetup],
- groups=[GROUP, GROUP_QUOTAS],
- runs_after_groups=[tests.PRE_INSTANCES])
-class CreateInstanceQuotaTest(unittest.TestCase):
+@test(groups=[tests.DBAAS_API_INSTANCES],
+ depends_on_classes=[TestInstanceSetup])
+class TestCreateInstanceQuota(unittest.TestCase):
def tearDown(self):
quota_dict = {'instances': CONFIG.trove_max_instances_per_tenant,
'volumes': CONFIG.trove_max_volumes_per_tenant}
@@ -329,11 +412,10 @@ class CreateInstanceQuotaTest(unittest.TestCase):
assert_equal(413, dbaas.last_http_code)
-@test(depends_on_classes=[InstanceSetup],
- groups=[GROUP, GROUP_CREATE_INSTANCE_FAILURE],
- runs_after_groups=[tests.PRE_INSTANCES, GROUP_QUOTAS])
+@test(groups=[tests.DBAAS_API_INSTANCES],
+ depends_on_classes=[TestCreateInstanceQuota])
class CreateInstanceFail(object):
-
+ """Negative instance creation tests."""
def instance_in_error(self, instance_id):
def check_if_error():
instance = dbaas.instances.get(instance_id)
@@ -355,9 +437,8 @@ class CreateInstanceFail(object):
time.sleep(1)
@test
- @time_out(30)
def test_create_with_bad_availability_zone(self):
- instance_name = "instance-failure-with-bad-ephemeral"
+ instance_name = "instance-failure-with-bad-az"
if VOLUME_SUPPORT:
volume = {'size': CONFIG.get('trove_volume_size', 1)}
else:
@@ -369,7 +450,8 @@ class CreateInstanceFail(object):
availability_zone="BAD_ZONE",
nics=instance_info.nics)
- poll_until(self.instance_in_error(result.id))
+ poll_until(self.instance_in_error(result.id), sleep_time=5,
+ time_out=30)
instance = dbaas.instances.get(result.id)
assert_equal("ERROR", instance.status)
@@ -571,7 +653,8 @@ class CreateInstanceFail(object):
instance_name = "datastore_default_version_notfound"
databases = []
users = []
- datastore = "Test_Datastore_1"
+ datastore = CONFIG.dbaas_datastore_name_no_versions
+
try:
assert_raises(exceptions.NotFound,
dbaas.instances.create, instance_name,
@@ -630,58 +713,11 @@ class CreateInstanceFail(object):
"Datastore version '%s' cannot be found." %
datastore_version)
- @test
- def test_create_failure_with_datastore_version_inactive(self):
- if VOLUME_SUPPORT:
- volume = {'size': CONFIG.get('trove_volume_size', 1)}
- else:
- volume = None
- instance_name = "datastore_version_inactive"
- databases = []
- users = []
- datastore = CONFIG.dbaas_datastore
- datastore_version = CONFIG.dbaas_inactive_datastore_version
- try:
- assert_raises(exceptions.NotFound,
- dbaas.instances.create, instance_name,
- instance_info.dbaas_flavor_href,
- volume, databases, users,
- datastore=datastore,
- datastore_version=datastore_version,
- nics=instance_info.nics)
- except exceptions.BadRequest as e:
- assert_equal(e.message,
- "Datastore version '%s' is not active." %
- datastore_version)
-
-def assert_unprocessable(func, *args):
- try:
- func(*args)
- # If the exception didn't get raised, but the instance is still in
- # the BUILDING state, that's a bug.
- result = dbaas.instances.get(instance_info.id)
- if result.status == "BUILD":
- fail("When an instance is being built, this function should "
- "always raise UnprocessableEntity.")
- except exceptions.UnprocessableEntity:
- assert_equal(422, dbaas.last_http_code)
- pass # Good
-
- @test
- def test_deep_list_security_group_with_rules(self):
- securityGroupList = dbaas.security_groups.list()
- assert_is_not_none(securityGroupList)
- securityGroup = [x for x in securityGroupList
- if x.name in self.secGroupName]
- assert_is_not_none(securityGroup[0])
- assert_not_equal(len(securityGroup[0].rules), 0)
-
-
-@test(depends_on_classes=[InstanceSetup],
- run_after_class=[CreateInstanceFail],
- groups=[GROUP, GROUP_START, GROUP_START_SIMPLE, tests.INSTANCES],
- runs_after_groups=[tests.PRE_INSTANCES, GROUP_QUOTAS])
+@test(
+ groups=[tests.DBAAS_API_INSTANCES],
+ depends_on_classes=[CreateInstanceFail],
+)
class CreateInstance(object):
"""Test to create a Database Instance
@@ -761,62 +797,11 @@ class CreateInstance(object):
check.volume()
-@test(depends_on_classes=[InstanceSetup],
- groups=[GROUP, tests.INSTANCES],
- runs_after_groups=[tests.PRE_INSTANCES])
-class CreateInstanceFlavors(object):
-
- def _result_is_active(self):
- instance = dbaas.instances.get(self.result.id)
- if instance.status in CONFIG.running_status:
- return True
- else:
- # If its not ACTIVE, anything but BUILD must be
- # an error.
- assert_equal("BUILD", instance.status)
- if instance_info.volume is not None:
- assert_equal(instance.volume.get('used', None), None)
- return False
-
- def _delete_async(self, instance_id):
- dbaas.instances.delete(instance_id)
- while True:
- try:
- dbaas.instances.get(instance_id)
- except exceptions.NotFound:
- return True
- time.sleep(1)
-
- def _create_with_flavor(self, flavor_id):
- if not FAKE:
- raise SkipTest("This test only for fake mode.")
- instance_name = "instance-with-flavor-%s" % flavor_id
- databases = []
- if VOLUME_SUPPORT:
- volume = {'size': CONFIG.get('trove_volume_size', 1)}
- else:
- volume = None
- self.result = dbaas.instances.create(instance_name, flavor_id, volume,
- databases,
- nics=instance_info.nics)
- poll_until(self._result_is_active)
- self._delete_async(self.result.id)
-
- @test
- def test_create_with_int_flavor(self):
- self._create_with_flavor(1)
-
- @test
- def test_create_with_str_flavor(self):
- self._create_with_flavor('custom')
-
-
-@test(depends_on_classes=[CreateInstance],
- groups=[GROUP,
- GROUP_START,
- GROUP_START_SIMPLE,
- 'dbaas.mgmt.hosts_post_install'],
- enabled=create_new_instance())
+@test(
+ groups=[tests.DBAAS_API_INSTANCES],
+ depends_on_classes=[CreateInstance],
+ enabled=create_new_instance()
+)
class AfterInstanceCreation(unittest.TestCase):
# instance calls
@@ -865,19 +850,16 @@ class AfterInstanceCreation(unittest.TestCase):
instance_info.id, 2)
-@test(depends_on_classes=[CreateInstance],
- runs_after=[AfterInstanceCreation],
- groups=[GROUP, GROUP_START, GROUP_START_SIMPLE],
- enabled=create_new_instance())
+@test(
+ depends_on_classes=[AfterInstanceCreation],
+ groups=[tests.DBAAS_API_INSTANCES],
+ enabled=create_new_instance()
+)
class WaitForGuestInstallationToFinish(object):
- """
- Wait until the Guest is finished installing. It takes quite a while...
- """
-
@test
@time_out(TIMEOUT_INSTANCE_CREATE)
def test_instance_created(self):
- # This version just checks the REST API status.
+ """Wait for normal instance to be created."""
def result_is_active():
instance = dbaas.instances.get(instance_info.id)
if instance.status in CONFIG.running_status:
@@ -890,7 +872,7 @@ class WaitForGuestInstallationToFinish(object):
assert_equal(instance.volume.get('used', None), None)
return False
- poll_until(result_is_active)
+ poll_until(result_is_active, sleep_time=5)
dbaas.instances.get(instance_info.id)
report = CONFIG.get_report()
@@ -903,51 +885,11 @@ class WaitForGuestInstallationToFinish(object):
@test(depends_on_classes=[WaitForGuestInstallationToFinish],
- groups=[GROUP, GROUP_START], enabled=create_new_instance())
-class TestGuestProcess(object):
- """
- Test that the guest process is started with all the right parameters
- """
-
- @test
- def check_hwinfo_before_tests(self):
- if CONFIG.test_mgmt:
- hwinfo = dbaas_admin.hwinfo.get(instance_info.id)
- print("hwinfo : %r" % hwinfo._info)
- allowed_attrs = ['hwinfo']
- CheckInstance(None).contains_allowed_attrs(
- hwinfo._info, allowed_attrs,
- msg="Hardware information")
- # TODO(pdmars): instead of just checking that these are int's, get
- # the instance flavor and verify that the values are correct for
- # the flavor
- assert_true(isinstance(hwinfo.hwinfo['mem_total'], int))
- assert_true(isinstance(hwinfo.hwinfo['num_cpus'], int))
-
-
-@test(depends_on_classes=[WaitForGuestInstallationToFinish],
- groups=[GROUP, GROUP_TEST, "dbaas.dns"])
-class DnsTests(object):
-
- @test
- def test_dns_entries_are_found(self):
- """Talk to DNS system to ensure entries were created."""
- print("Instance name=%s" % instance_info.name)
- client = instance_info.dbaas_admin
- mgmt_instance = client.mgmt.instances.show(instance_info.id)
- dns_checker(mgmt_instance)
-
-
-@test(depends_on_classes=[WaitForGuestInstallationToFinish],
- groups=[GROUP, GROUP_TEST, GROUP_GUEST])
-class TestAfterInstanceCreatedGuestData(object):
- """
- Test the optional parameters (databases and users) passed in to create
- instance call were created.
- """
-
+ groups=[tests.DBAAS_API_INSTANCES])
+class TestDBandUserAfterInstanceCreated(object):
@test
def test_databases(self):
+ """Get databases after instance creation."""
databases = dbaas.databases.list(instance_info.id)
dbs = [database.name for database in databases]
for db in instance_info.databases:
@@ -955,6 +897,7 @@ class TestAfterInstanceCreatedGuestData(object):
@test
def test_users(self):
+ """Get users after instance creation."""
users = dbaas.users.list(instance_info.id)
usernames = [user.name for user in users]
for user in instance_info.users:
@@ -962,10 +905,8 @@ class TestAfterInstanceCreatedGuestData(object):
@test(depends_on_classes=[WaitForGuestInstallationToFinish],
- groups=[GROUP, GROUP_START, GROUP_START_SIMPLE, "dbaas.listing"])
-class TestInstanceListing(object):
- """Test the listing of the instance information."""
-
+ groups=[tests.DBAAS_API_INSTANCES])
+class TestGetInstances(object):
@before_class
def setUp(self):
reqs = Requirements(is_admin=False)
@@ -1059,16 +1000,6 @@ class TestInstanceListing(object):
instance = dbaas.instances.get(instance_info.id)
if create_new_instance():
assert_equal(instance_info.volume['size'], instance.volume['size'])
- else:
- # FIXME(peterstac): Sometimes this returns as an int - is that ok?
- assert_true(type(instance_info.volume['size']) in [int, float])
- if create_new_instance():
- # FIXME(pmalik): Keeps failing because 'used' > 'size'.
- # It seems like the reported 'used' space is from the root volume
- # instead of the attached Trove volume.
- # assert_true(0.0 < instance.volume['used'] <
- # instance.volume['size'])
- pass
@test(enabled=EPHEMERAL_SUPPORT)
def test_ephemeral_mount(self):
@@ -1080,7 +1011,7 @@ class TestInstanceListing(object):
instance = dbaas.instances.get(instance_info.id)
assert_true(isinstance(instance.local_storage['used'], float))
- @test(enabled=do_not_delete_instance())
+ @test
def test_instance_not_shown_to_other_user(self):
daffy_ids = [instance.id for instance in
self.other_client.instances.list()]
@@ -1095,7 +1026,7 @@ class TestInstanceListing(object):
for id in admin_ids:
assert_equal(daffy_ids.count(id), 0)
- @test(enabled=do_not_delete_instance())
+ @test
def test_instance_not_deleted_by_other_user(self):
assert_raises(exceptions.NotFound,
self.other_client.instances.get, instance_info.id)
@@ -1121,175 +1052,24 @@ class TestInstanceListing(object):
check.volume_mgmt()
-@test(depends_on_classes=[WaitForGuestInstallationToFinish],
- groups=[GROUP, GROUP_START, GROUP_START_SIMPLE, "dbaas.update"])
-class TestInstanceUpdate(object):
- """Test the updation of the instance information."""
-
- @before_class
- def setUp(self):
- reqs = Requirements(is_admin=False)
- self.other_user = CONFIG.users.find_user(
- reqs,
- black_list=[instance_info.user.auth_user])
- self.other_client = create_dbaas_client(self.other_user)
-
- @test
- def test_update_name(self):
- new_name = 'new-name'
- result = dbaas.instances.edit(instance_info.id, name=new_name)
- assert_equal(202, dbaas.last_http_code)
- result = dbaas.instances.get(instance_info.id)
- assert_equal(200, dbaas.last_http_code)
- assert_equal(new_name, result.name)
- # Restore instance name because other tests depend on it
- dbaas.instances.edit(instance_info.id, name=instance_info.name)
- assert_equal(202, dbaas.last_http_code)
-
- @test
- def test_update_name_to_invalid_instance(self):
- # test assigning to an instance that does not exist
- invalid_id = "invalid-inst-id"
- assert_raises(exceptions.NotFound, instance_info.dbaas.instances.edit,
- invalid_id, name='name')
- assert_equal(404, instance_info.dbaas.last_http_code)
-
-
-@test(depends_on_classes=[WaitForGuestInstallationToFinish],
- groups=[GROUP, 'dbaas.usage'])
-class TestCreateNotification(object):
- """
- Test that the create notification has been sent correctly.
- """
-
- @test
- def test_create_notification(self):
- expected = {
- 'instance_size': instance_info.dbaas_flavor.ram,
- 'tenant_id': instance_info.user.tenant_id,
- 'instance_id': instance_info.id,
- 'instance_name': instance_info.name,
- 'created_at': iso_time(instance_info.initial_result.created),
- 'launched_at': iso_time(instance_info.initial_result.created),
- 'region': 'LOCAL_DEV',
- 'availability_zone': 'nova',
- }
- instance_info.consumer.check_message(instance_info.id,
- 'trove.instance.create',
- **expected)
-
-
-@test(depends_on=[WaitForGuestInstallationToFinish],
- depends_on_groups=[GROUP_USERS, GROUP_DATABASES, GROUP_ROOT],
- groups=[GROUP, GROUP_STOP],
- runs_after_groups=[GROUP_START,
- GROUP_START_SIMPLE, GROUP_TEST, tests.INSTANCES],
- enabled=not do_not_delete_instance())
-class DeleteInstance(object):
- """Delete the created instance."""
-
- @time_out(3 * 60)
- @test
- def test_delete(self):
- global dbaas
- if not hasattr(instance_info, "initial_result"):
- raise SkipTest("Instance was never created, skipping test...")
- # Update the report so the logs inside the instance will be saved.
- CONFIG.get_report().update()
- dbaas.instances.delete(instance_info.id)
-
- attempts = 0
- try:
- time.sleep(1)
- result = True
- while result is not None:
- attempts += 1
- result = dbaas.instances.get(instance_info.id)
- assert_equal(200, dbaas.last_http_code)
- assert_equal("SHUTDOWN", result.status)
- time.sleep(1)
- except exceptions.NotFound:
- pass
- except Exception as ex:
- fail("A failure occurred when trying to GET instance %s for the %d"
- " time: %s" % (str(instance_info.id), attempts, str(ex)))
-
-
-@test(depends_on=[DeleteInstance],
- groups=[GROUP, GROUP_STOP, 'dbaas.usage'],
- enabled=not do_not_delete_instance())
-class AfterDeleteChecks(object):
-
- @test
- def test_instance_delete_event_sent(self):
- deleted_at = None
- mgmt_details = dbaas_admin.management.index(deleted=True)
- for instance in mgmt_details:
- if instance.id == instance_info.id:
- deleted_at = instance.deleted_at
- expected = {
- 'instance_size': instance_info.dbaas_flavor.ram,
- 'tenant_id': instance_info.user.tenant_id,
- 'instance_id': instance_info.id,
- 'instance_name': instance_info.name,
- 'created_at': iso_time(instance_info.initial_result.created),
- 'launched_at': iso_time(instance_info.initial_result.created),
- 'deleted_at': iso_time(deleted_at),
- }
- instance_info.consumer.check_message(instance_info.id,
- 'trove.instance.delete',
- **expected)
-
- @test
- def test_instance_status_deleted_in_db(self):
- mgmt_details = dbaas_admin.management.index(deleted=True)
- for instance in mgmt_details:
- if instance.id == instance_info.id:
- assert_equal(instance.service_status, 'DELETED')
- break
- else:
- fail("Could not find instance %s" % instance_info.id)
-
-
-@test(depends_on_classes=[CreateInstance,
- WaitForGuestInstallationToFinish],
- groups=[GROUP, GROUP_START, GROUP_START_SIMPLE],
+@test(depends_on_classes=[TestGetInstances],
+ groups=[tests.DBAAS_API_INSTANCES],
enabled=CONFIG.test_mgmt)
-class VerifyInstanceMgmtInfo(object):
-
+class TestInstanceMgmtInfo(object):
@before_class
def set_up(self):
self.mgmt_details = dbaas_admin.management.show(instance_info.id)
- def _assert_key(self, k, expected):
- v = getattr(self.mgmt_details, k)
- err = "Key %r does not match expected value of %r (was %r)." \
- % (k, expected, v)
- assert_equal(str(v), str(expected), err)
-
- @test
- def test_id_matches(self):
- self._assert_key('id', instance_info.id)
-
- @test
- def test_bogus_instance_mgmt_data(self):
- # Make sure that a management call to a bogus API 500s.
- # The client reshapes the exception into just an OpenStackException.
- assert_raises(exceptions.NotFound,
- dbaas_admin.management.show, -1)
-
@test
def test_mgmt_ips_associated(self):
- # Test that the management index properly associates an instances with
- # ONLY its IPs.
+ """Every instances has exactly one address"""
mgmt_index = dbaas_admin.management.index()
- # Every instances has exactly one address.
for instance in mgmt_index:
assert_equal(1, len(instance.ips))
@test
def test_mgmt_data(self):
- # Test that the management API returns all the values we expect it to.
+ """Test management API returns all the values expected."""
info = instance_info
ir = info.initial_result
cid = ir.id
@@ -1297,8 +1077,6 @@ class VerifyInstanceMgmtInfo(object):
'id': cid,
'name': ir.name,
'account_id': info.user.auth_user,
- # TODO(hub-cap): fix this since its a flavor object now
- # 'flavorRef': info.dbaas_flavor_href,
'databases': [
{
'name': 'db2',
@@ -1329,110 +1107,26 @@ class VerifyInstanceMgmtInfo(object):
assert_true('name' in user, "'name' not in users element.")
-class CheckInstance(AttrCheck):
- """Class to check various attributes of Instance details."""
-
- def __init__(self, instance):
- super(CheckInstance, self).__init__()
- self.instance = instance
-
- def flavor(self):
- if 'flavor' not in self.instance:
- self.fail("'flavor' not found in instance.")
- else:
- allowed_attrs = ['id', 'links']
- self.contains_allowed_attrs(
- self.instance['flavor'], allowed_attrs,
- msg="Flavor")
- self.links(self.instance['flavor']['links'])
-
- def datastore(self):
- if 'datastore' not in self.instance:
- self.fail("'datastore' not found in instance.")
- else:
- allowed_attrs = ['type', 'version']
- self.contains_allowed_attrs(
- self.instance['datastore'], allowed_attrs,
- msg="datastore")
-
- def volume_key_exists(self):
- if 'volume' not in self.instance:
- self.fail("'volume' not found in instance.")
- return False
- return True
-
- def volume(self):
- if not VOLUME_SUPPORT:
- return
- if self.volume_key_exists():
- allowed_attrs = ['size']
- if not create_new_instance():
- allowed_attrs.append('used')
- self.contains_allowed_attrs(
- self.instance['volume'], allowed_attrs,
- msg="Volumes")
-
- def used_volume(self):
- if not VOLUME_SUPPORT:
- return
- if self.volume_key_exists():
- allowed_attrs = ['size', 'used']
- print(self.instance)
- self.contains_allowed_attrs(
- self.instance['volume'], allowed_attrs,
- msg="Volumes")
-
- def volume_mgmt(self):
- if not VOLUME_SUPPORT:
- return
- if self.volume_key_exists():
- allowed_attrs = ['description', 'id', 'name', 'size']
- self.contains_allowed_attrs(
- self.instance['volume'], allowed_attrs,
- msg="Volumes")
-
- def addresses(self):
- allowed_attrs = ['addr', 'version']
- print(self.instance)
- networks = ['usernet']
- for network in networks:
- for address in self.instance['addresses'][network]:
- self.contains_allowed_attrs(
- address, allowed_attrs,
- msg="Address")
-
- def guest_status(self):
- allowed_attrs = ['created_at', 'deleted', 'deleted_at', 'instance_id',
- 'state', 'state_description', 'updated_at']
- self.contains_allowed_attrs(
- self.instance['guest_status'], allowed_attrs,
- msg="Guest status")
-
- def mgmt_volume(self):
- if not VOLUME_SUPPORT:
- return
- allowed_attrs = ['description', 'id', 'name', 'size']
- self.contains_allowed_attrs(
- self.instance['volume'], allowed_attrs,
- msg="Volume")
-
- def replica_of(self):
- if 'replica_of' not in self.instance:
- self.fail("'replica_of' not found in instance.")
- else:
- allowed_attrs = ['id', 'links']
- self.contains_allowed_attrs(
- self.instance['replica_of'], allowed_attrs,
- msg="Replica-of links not found")
- self.links(self.instance['replica_of']['links'])
+@test(depends_on_classes=[TestInstanceMgmtInfo],
+ groups=[tests.DBAAS_API_INSTANCES])
+class TestUpdateInstance(object):
+ """Test updating instance."""
+ @test
+ def test_update_name(self):
+ new_name = 'new-name'
+ result = dbaas.instances.edit(instance_info.id, name=new_name)
+ assert_equal(202, dbaas.last_http_code)
+ result = dbaas.instances.get(instance_info.id)
+ assert_equal(200, dbaas.last_http_code)
+ assert_equal(new_name, result.name)
+ # Restore instance name because other tests depend on it
+ dbaas.instances.edit(instance_info.id, name=instance_info.name)
+ assert_equal(202, dbaas.last_http_code)
- def slaves(self):
- if 'replicas' not in self.instance:
- self.fail("'replicas' not found in instance.")
- else:
- allowed_attrs = ['id', 'links']
- for slave in self.instance['replicas']:
- self.contains_allowed_attrs(
- slave, allowed_attrs,
- msg="Replica links not found")
- self.links(slave['links'])
+ @test
+ def test_update_name_to_invalid_instance(self):
+ # test assigning to an instance that does not exist
+ invalid_id = "invalid-inst-id"
+ assert_raises(exceptions.NotFound, instance_info.dbaas.instances.edit,
+ invalid_id, name='name')
+ assert_equal(404, instance_info.dbaas.last_http_code)
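
Two helpers the rewritten instances tests lean on are assert_unprocessable, moved up to sit before the classes that use it, and poll_until with an explicit sleep_time. A minimal usage sketch under the same imports and fixtures as these tests (the resize_volume call is only an example of an action that must be rejected while the instance is still building):

from trove.common.utils import poll_until
from trove.tests.api.instances import assert_unprocessable
from trove.tests.api.instances import instance_info
from trove.tests.api.instances import TIMEOUT_INSTANCE_CREATE
from trove.tests.config import CONFIG

dbaas = instance_info.dbaas

# assert_unprocessable expects UnprocessableEntity (HTTP 422); it also
# fails the test if the call goes through while the instance is in BUILD.
assert_unprocessable(dbaas.instances.resize_volume, instance_info.id, 2)


def result_is_active():
    instance = dbaas.instances.get(instance_info.id)
    if instance.status in CONFIG.running_status:
        return True
    # Anything other than BUILD at this point is an error.
    assert instance.status == "BUILD"
    return False


# Poll every 5 seconds rather than relying on the default interval.
poll_until(result_is_active, sleep_time=5, time_out=TIMEOUT_INSTANCE_CREATE)
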
diff --git a/trove/tests/api/instances_actions.py b/trove/tests/api/instances_actions.py
index fd088fc0..456de722 100644
--- a/trove/tests/api/instances_actions.py
+++ b/trove/tests/api/instances_actions.py
@@ -30,8 +30,6 @@ from trove.common.utils import poll_until
from trove import tests
from trove.tests.api.instances import assert_unprocessable
from trove.tests.api.instances import EPHEMERAL_SUPPORT
-from trove.tests.api.instances import GROUP as INSTANCE_GROUP
-from trove.tests.api.instances import GROUP_START
from trove.tests.api.instances import instance_info
from trove.tests.api.instances import VOLUME_SUPPORT
from trove.tests.config import CONFIG
@@ -40,21 +38,11 @@ from trove.tests.util.check import TypeCheck
from trove.tests.util import LocalSqlClient
from trove.tests.util.server_connection import create_server_connection
-GROUP = "dbaas.api.instances.actions"
-GROUP_REBOOT = "dbaas.api.instances.actions.reboot"
-GROUP_RESTART = "dbaas.api.instances.actions.restart"
-GROUP_RESIZE = "dbaas.api.instances.actions.resize"
-GROUP_STOP_MYSQL = "dbaas.api.instances.actions.stop"
-GROUP_UPDATE_GUEST = "dbaas.api.instances.actions.update_guest"
MYSQL_USERNAME = "test_user"
MYSQL_PASSWORD = "abcde"
-# stored in test conf
-SERVICE_ID = '123'
FAKE_MODE = CONFIG.fake_mode
# If true, then we will actually log into the database.
USE_IP = not FAKE_MODE
-# If true, then we will actually search for the process
-USE_LOCAL_OVZ = CONFIG.use_local_ovz
class MySqlConnection(object):
@@ -202,7 +190,7 @@ class ActionTestBase(object):
return expected
-@test(depends_on_groups=[GROUP_START])
+@test(depends_on_groups=[tests.DBAAS_API_INSTANCES])
def create_user():
"""Create a test user so that subsequent tests can log in."""
helper = ActionTestBase()
@@ -274,10 +262,11 @@ class RebootTestBase(ActionTestBase):
poll_until(is_finished_rebooting, time_out=TIME_OUT_TIME)
-@test(groups=[tests.INSTANCES, INSTANCE_GROUP, GROUP, GROUP_RESTART],
- depends_on_groups=[GROUP_START], depends_on=[create_user])
+@test(groups=[tests.DBAAS_API_INSTANCE_ACTIONS],
+ depends_on_groups=[tests.DBAAS_API_DATABASES],
+ depends_on=[create_user])
class RestartTests(RebootTestBase):
- """Tests restarting MySQL."""
+ """Test restarting MySQL."""
def call_reboot(self):
self.instance.restart()
@@ -298,10 +287,10 @@ class RestartTests(RebootTestBase):
self.successful_restart()
-@test(groups=[tests.INSTANCES, INSTANCE_GROUP, GROUP, GROUP_STOP_MYSQL],
- depends_on_groups=[GROUP_RESTART], depends_on=[create_user])
+@test(groups=[tests.DBAAS_API_INSTANCE_ACTIONS],
+ depends_on_classes=[RestartTests])
class StopTests(RebootTestBase):
- """Tests which involve stopping MySQL."""
+ """Test stopping MySQL."""
def call_reboot(self):
self.instance.restart()
@@ -343,10 +332,10 @@ class StopTests(RebootTestBase):
self.successful_restart()
-@test(groups=[tests.INSTANCES, INSTANCE_GROUP, GROUP, GROUP_REBOOT],
- depends_on_groups=[GROUP_STOP_MYSQL])
+@test(groups=[tests.DBAAS_API_INSTANCE_ACTIONS],
+ depends_on_classes=[StopTests])
class RebootTests(RebootTestBase):
- """Tests restarting instance."""
+ """Test restarting instance."""
def call_reboot(self):
instance_info.dbaas_admin.management.reboot(self.instance_id)
@@ -359,24 +348,21 @@ class RebootTests(RebootTestBase):
@test
def test_ensure_mysql_is_running(self):
- """Make sure MySQL is accessible before restarting."""
+ """Make sure MySQL is accessible before rebooting."""
self.ensure_mysql_is_running()
@after_class(depends_on=[test_ensure_mysql_is_running])
- def test_successful_restart(self):
- """Restart MySQL via the REST API successfully."""
+ def test_successful_reboot(self):
+ """MySQL process is different after rebooting."""
if FAKE_MODE:
raise SkipTest("Cannot run this in fake mode.")
self.successful_restart()
-@test(groups=[tests.INSTANCES, INSTANCE_GROUP, GROUP, GROUP_RESIZE],
- depends_on_groups=[GROUP_REBOOT])
+@test(groups=[tests.DBAAS_API_INSTANCE_ACTIONS],
+ depends_on_classes=[RebootTests])
class ResizeInstanceTest(ActionTestBase):
-
- """
- Integration Test cases for resize instance
- """
+ """Test resizing instance."""
@property
def flavor_id(self):
return instance_info.dbaas_flavor_href
@@ -456,6 +442,7 @@ class ResizeInstanceTest(ActionTestBase):
@test(depends_on=[test_instance_resize_same_size_should_fail])
def test_status_changed_to_resize(self):
+ """test_status_changed_to_resize"""
self.log_current_users()
self.obtain_flavor_ids()
self.dbaas.instances.resize_instance(
@@ -472,6 +459,7 @@ class ResizeInstanceTest(ActionTestBase):
@test(depends_on=[test_status_changed_to_resize])
@time_out(TIME_OUT_TIME)
def test_instance_returns_to_active_after_resize(self):
+ """test_instance_returns_to_active_after_resize"""
self.wait_for_resize()
@test(depends_on=[test_instance_returns_to_active_after_resize,
@@ -510,12 +498,11 @@ class ResizeInstanceTest(ActionTestBase):
asserts.assert_equal(actual, expected)
-@test(depends_on=[ResizeInstanceTest],
- groups=[GROUP, tests.INSTANCES, INSTANCE_GROUP, GROUP_RESIZE],
+@test(depends_on_classes=[ResizeInstanceTest],
+ groups=[tests.DBAAS_API_INSTANCE_ACTIONS],
enabled=VOLUME_SUPPORT)
-class ResizeInstanceVolume(ActionTestBase):
+class ResizeInstanceVolumeTest(ActionTestBase):
"""Resize the volume of the instance."""
-
@before_class
def setUp(self):
self.set_up()
@@ -589,55 +576,3 @@ class ResizeInstanceVolume(ActionTestBase):
asserts.fail(
"Database %s was not found after the volume resize. "
"Returned list: %s" % (name, databases))
-
-
-# This tests the ability of the guest to upgrade itself.
-# It is necessarily tricky because we need to be able to upload a new copy of
-# the guest into an apt-repo in the middle of the test.
-# "guest-update-test" is where the knowledge of how to do this is set in the
-# test conf. If it is not specified this test never runs.
-UPDATE_GUEST_CONF = CONFIG.values.get("guest-update-test", None)
-
-
-@test(groups=[tests.INSTANCES, INSTANCE_GROUP, GROUP, GROUP_UPDATE_GUEST],
- depends_on_groups=[GROUP_RESIZE])
-class UpdateGuest(object):
-
- def get_version(self):
- info = instance_info.dbaas_admin.diagnostics.get(instance_info.id)
- return info.version
-
- @before_class(enabled=UPDATE_GUEST_CONF is not None)
- def check_version_is_old(self):
- """Make sure we have the old version before proceeding."""
- self.old_version = self.get_version()
- self.next_version = UPDATE_GUEST_CONF["next-version"]
- asserts.assert_not_equal(self.old_version, self.next_version)
-
- @test(enabled=UPDATE_GUEST_CONF is not None)
- def upload_update_to_repo(self):
- cmds = UPDATE_GUEST_CONF["install-repo-cmd"]
- testsutil.execute(*cmds, run_as_root=True, root_helper="sudo")
-
- @test(enabled=UPDATE_GUEST_CONF is not None,
- depends_on=[upload_update_to_repo])
- def update_and_wait_to_finish(self):
- instance_info.dbaas_admin.management.update(instance_info.id)
-
- def finished():
- current_version = self.get_version()
- if current_version == self.next_version:
- return True
- # The only valid thing for it to be aside from next_version is
- # old version.
- asserts.assert_equal(current_version, self.old_version)
- poll_until(finished, sleep_time=1, time_out=3 * 60)
-
- @test(enabled=UPDATE_GUEST_CONF is not None,
- depends_on=[upload_update_to_repo])
- @time_out(30)
- def update_again(self):
- """Test the wait time of a pointless update."""
- instance_info.dbaas_admin.management.update(instance_info.id)
- # Make sure this isn't taking too long.
- instance_info.dbaas_admin.diagnostics.get(instance_info.id)
diff --git a/trove/tests/api/instances_delete.py b/trove/tests/api/instances_delete.py
index 706a1fba..fcd57687 100644
--- a/trove/tests/api/instances_delete.py
+++ b/trove/tests/api/instances_delete.py
@@ -15,155 +15,78 @@
# License for the specific language governing permissions and limitations
# under the License.
#
-
+import os
import time
-from proboscis import after_class
from proboscis import asserts
-from proboscis import before_class
from proboscis.decorators import time_out
+from proboscis import SkipTest
from proboscis import test
from troveclient.compat import exceptions
-from trove.common import cfg
-from trove.common.utils import poll_until
+from trove import tests
from trove.tests.api.instances import instance_info
-from trove.tests.api.instances import VOLUME_SUPPORT
-from trove.tests.util import create_dbaas_client
-from trove.tests.util import test_config
-from trove.tests.util.users import Requirements
-
-
-CONF = cfg.CONF
-
-
-class TestBase(object):
-
- def set_up(self):
- reqs = Requirements(is_admin=True)
- self.user = test_config.users.find_user(reqs)
- self.dbaas = create_dbaas_client(self.user)
-
- def create_instance(self, name, size=1):
- volume = None
- if VOLUME_SUPPORT:
- volume = {'size': size}
- result = self.dbaas.instances.create(name,
- instance_info.dbaas_flavor_href,
- volume, [], [],
- nics=instance_info.nics)
- return result.id
-
- def wait_for_instance_status(self, instance_id, status="HEALTHY",
- acceptable_states=None):
- if acceptable_states:
- acceptable_states.append(status)
-
- def assert_state(instance):
- if acceptable_states:
- assert_true(instance.status in acceptable_states,
- "Invalid status: %s" % instance.status)
- return instance
-
- poll_until(lambda: self.dbaas.instances.get(instance_id),
- lambda instance: assert_state(instance).status == status,
- time_out=30, sleep_time=1)
+from trove.tests.config import CONFIG
- def wait_for_instance_task_status(self, instance_id, description):
- poll_until(lambda: self.dbaas.management.show(instance_id),
- lambda instance: instance.task_description == description,
- time_out=30, sleep_time=1)
- def is_instance_deleted(self, instance_id):
- while True:
- try:
- self.dbaas.instances.get(instance_id)
- except exceptions.NotFound:
- return True
- time.sleep(.5)
+def do_not_delete_instance():
+ return os.environ.get("TESTS_DO_NOT_DELETE_INSTANCE", None) is not None
- def get_task_info(self, instance_id):
- instance = self.dbaas.management.show(instance_id)
- return instance.status, instance.task_description
-
- def delete_instance(self, instance_id, assert_deleted=True):
- instance = self.dbaas.instances.get(instance_id)
- instance.delete()
- if assert_deleted:
- asserts.assert_true(self.is_instance_deleted(instance_id))
-
- def delete_errored_instance(self, instance_id):
- self.wait_for_instance_status(instance_id, 'ERROR')
- status, desc = self.get_task_info(instance_id)
- asserts.assert_equal(status, "ERROR")
- self.delete_instance(instance_id)
-
-
-@test(runs_after_groups=["services.initialize", "dbaas.guest.shutdown"],
- groups=['dbaas.api.instances.delete'])
-class ErroredInstanceDelete(TestBase):
- """
- Test that an instance in an ERROR state is actually deleted when delete
- is called.
- """
-
- @before_class
- def set_up_err(self):
- """Create some flawed instances."""
- from trove.taskmanager.models import CONF
- self.old_dns_support = CONF.trove_dns_support
- CONF.trove_dns_support = False
-
- super(ErroredInstanceDelete, self).set_up()
- # Create an instance that fails during server prov.
- self.server_error = self.create_instance('test_SERVER_ERROR')
- if VOLUME_SUPPORT:
- # Create an instance that fails during volume prov.
- self.volume_error = self.create_instance('test_VOLUME_ERROR',
- size=9)
- else:
- self.volume_error = None
- # Create an instance that fails during DNS prov.
- # self.dns_error = self.create_instance('test_DNS_ERROR')
- # Create an instance that fails while it's been deleted the first time.
- self.delete_error = self.create_instance('test_ERROR_ON_DELETE')
-
- @after_class(always_run=True)
- def clean_up(self):
- from trove.taskmanager.models import CONF
- CONF.trove_dns_support = self.old_dns_support
+@test(depends_on_groups=[tests.DBAAS_API_REPLICATION],
+ groups=[tests.DBAAS_API_INSTANCES_DELETE],
+ enabled=not do_not_delete_instance())
+class TestDeleteInstance(object):
+ @time_out(3 * 60)
@test
- @time_out(30)
- def delete_server_error(self):
- self.delete_errored_instance(self.server_error)
+ def test_delete(self):
+ """Delete instance for clean up."""
+ if not hasattr(instance_info, "initial_result"):
+ raise SkipTest("Instance was never created, skipping test...")
+ # Update the report so the logs inside the instance will be saved.
+ CONFIG.get_report().update()
+
+ dbaas = instance_info.dbaas
+ dbaas.instances.delete(instance_info.id)
+
+ attempts = 0
+ try:
+ time.sleep(1)
+ result = True
+ while result is not None:
+ attempts += 1
+ result = dbaas.instances.get(instance_info.id)
+ asserts.assert_equal(200, dbaas.last_http_code)
+ asserts.assert_equal("SHUTDOWN", result.status)
+ time.sleep(1)
+ except exceptions.NotFound:
+ pass
+ except Exception as ex:
+ asserts.fail("A failure occurred when trying to GET instance %s "
+ "for the %d time: %s" %
+ (str(instance_info.id), attempts, str(ex)))
+
+ @test(depends_on=[test_delete])
+ def test_instance_status_deleted_in_db(self):
+ """test_instance_status_deleted_in_db"""
+ dbaas_admin = instance_info.dbaas_admin
+ mgmt_details = dbaas_admin.management.index(deleted=True)
+ for instance in mgmt_details:
+ if instance.id == instance_info.id:
+ asserts.assert_equal(instance.service_status, 'DELETED')
+ break
+ else:
+ asserts.fail("Could not find instance %s" % instance_info.id)
- @test(enabled=VOLUME_SUPPORT)
- @time_out(30)
- def delete_volume_error(self):
- self.delete_errored_instance(self.volume_error)
+ @test(depends_on=[test_instance_status_deleted_in_db])
+ def test_delete_datastore(self):
+ dbaas_admin = instance_info.dbaas_admin
- @test(enabled=False)
- @time_out(30)
- def delete_dns_error(self):
- self.delete_errored_instance(self.dns_error)
+ datastore = dbaas_admin.datastores.get(
+ CONFIG.dbaas_datastore_name_no_versions)
+ versions = dbaas_admin.datastore_versions.list(datastore.id)
+ for version in versions:
+ dbaas_admin.mgmt_datastore_versions.delete(version.id)
- @test
- @time_out(30)
- def delete_error_on_delete_instance(self):
- id = self.delete_error
- self.wait_for_instance_status(id, 'HEALTHY')
- self.wait_for_instance_task_status(id, 'No tasks for the instance.')
- instance = self.dbaas.management.show(id)
- asserts.assert_equal(instance.status, "HEALTHY")
- asserts.assert_equal(instance.task_description,
- 'No tasks for the instance.')
- # Try to delete the instance. This fails the first time due to how
- # the test fake is setup.
- self.delete_instance(id, assert_deleted=False)
- instance = self.dbaas.management.show(id)
- asserts.assert_equal(instance.status, "SHUTDOWN")
- asserts.assert_equal(instance.task_description,
- "Deleting the instance.")
- # Try a second time. This will succeed.
- self.delete_instance(id)
+ # Delete the datastore
+ dbaas_admin.datastores.delete(datastore.id)
diff --git a/trove/tests/api/instances_mysql_down.py b/trove/tests/api/instances_mysql_down.py
deleted file mode 100644
index 6d615870..00000000
--- a/trove/tests/api/instances_mysql_down.py
+++ /dev/null
@@ -1,129 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""
-Extra tests to create an instance, shut down MySQL, and delete it.
-"""
-
-import time
-import uuid
-
-from proboscis import asserts
-from proboscis import before_class
-from proboscis.decorators import time_out
-from proboscis import test
-from troveclient.compat import exceptions
-
-from trove.common.utils import poll_until
-from trove.tests.api.instances import EPHEMERAL_SUPPORT
-from trove.tests.api.instances import VOLUME_SUPPORT
-from trove.tests.config import CONFIG
-from trove.tests.util import create_client
-from trove.tests.util import test_config
-
-
-@test(groups=["dbaas.api.instances.down"])
-class TestBase(object):
- """Base class for instance-down tests."""
-
- @before_class
- def set_up(self):
- self.client = create_client(is_admin=False)
- self.mgmt_client = create_client(is_admin=True)
-
- if EPHEMERAL_SUPPORT:
- flavor_name = test_config.values.get('instance_eph_flavor_name',
- 'eph.rd-tiny')
- flavor2_name = test_config.values.get(
- 'instance_bigger_eph_flavor_name', 'eph.rd-smaller')
- else:
- flavor_name = test_config.values.get('instance_flavor_name',
- 'm1.tiny')
- flavor2_name = test_config.values.get(
- 'instance_bigger_flavor_name', 'm1.small')
-
- flavors = self.client.find_flavors_by_name(flavor_name)
- self.flavor_id = flavors[0].id
- self.name = "TEST_" + str(uuid.uuid4())
- # Get the resize to flavor.
- flavors2 = self.client.find_flavors_by_name(flavor2_name)
- self.new_flavor_id = flavors2[0].id
- asserts.assert_not_equal(self.flavor_id, self.new_flavor_id)
-
- def _wait_for_active(self):
- poll_until(lambda: self.client.instances.get(self.id),
- lambda instance: instance.status in CONFIG.running_status,
- time_out=(60 * 8))
-
- @test
- def create_instance(self):
- volume = None
- if VOLUME_SUPPORT:
- volume = {'size': 1}
- nics = None
- shared_network = CONFIG.get('shared_network', None)
- if shared_network:
- nics = [{'net-id': shared_network}]
- initial = self.client.instances.create(self.name, self.flavor_id,
- volume, [], [],
- nics=nics)
- self.id = initial.id
- self._wait_for_active()
-
- def _shutdown_instance(self):
- self.client.instances.get(self.id)
- self.mgmt_client.management.stop(self.id)
-
- @test(depends_on=[create_instance])
- def put_into_shutdown_state(self):
- self._shutdown_instance()
-
- @test(depends_on=[put_into_shutdown_state])
- @time_out(60 * 5)
- def resize_instance_in_shutdown_state(self):
- self.client.instances.resize_instance(self.id, self.new_flavor_id)
- self._wait_for_active()
-
- @test(depends_on=[create_instance],
- runs_after=[resize_instance_in_shutdown_state])
- def put_into_shutdown_state_2(self):
- self._shutdown_instance()
-
- @test(depends_on=[put_into_shutdown_state_2],
- enabled=VOLUME_SUPPORT)
- @time_out(60 * 5)
- def resize_volume_in_shutdown_state(self):
- self.client.instances.resize_volume(self.id, 2)
- poll_until(lambda: self.client.instances.get(self.id),
- lambda instance: instance.volume['size'] == 2,
- time_out=(60 * 8))
-
- @test(depends_on=[create_instance],
- runs_after=[resize_volume_in_shutdown_state])
- def put_into_shutdown_state_3(self):
- self._shutdown_instance()
-
- @test(depends_on=[create_instance],
- runs_after=[put_into_shutdown_state_3])
- @time_out(2 * 60)
- def delete_instances(self):
- instance = self.client.instances.get(self.id)
- instance.delete()
- while True:
- try:
- instance = self.client.instances.get(self.id)
- asserts.assert_equal("SHUTDOWN", instance.status)
- except exceptions.NotFound:
- break
- time.sleep(0.25)
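# Illustrative sketch (not from the original file): the manual delete-wait
# loop above could also be expressed with poll_until, which this module
# already imports; a minimal version assuming the same client object and
# NotFound semantics.
from troveclient.compat import exceptions
from trove.common.utils import poll_until

def wait_for_delete(client, instance_id, timeout=2 * 60):
    def _instance_is_gone():
        try:
            client.instances.get(instance_id)
            return False
        except exceptions.NotFound:
            return True
    # poll_until treats the callable's truthy return value as the success test.
    poll_until(_instance_is_gone, time_out=timeout)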
diff --git a/trove/tests/api/mgmt/admin_required.py b/trove/tests/api/mgmt/admin_required.py
deleted file mode 100644
index 72b771d8..00000000
--- a/trove/tests/api/mgmt/admin_required.py
+++ /dev/null
@@ -1,77 +0,0 @@
-# Copyright 2011 OpenStack Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from proboscis.asserts import assert_raises
-from proboscis import before_class
-from proboscis import test
-from troveclient.compat.exceptions import Unauthorized
-
-from trove import tests
-from trove.tests.util import create_dbaas_client
-from trove.tests.util import test_config
-from trove.tests.util.users import Requirements
-
-GROUP = "dbaas.api.mgmt.admin"
-
-
-@test(groups=[tests.DBAAS_API, GROUP, tests.PRE_INSTANCES],
- depends_on_groups=["services.initialize"])
-class TestAdminRequired(object):
- """
- These tests verify that admin privileges are checked
- when calling management level functions.
- """
-
- @before_class
- def setUp(self):
- """Create the user and client for use in the subsequent tests."""
- self.user = test_config.users.find_user(Requirements(is_admin=False))
- self.dbaas = create_dbaas_client(self.user)
-
- @test
- def test_mgmt_show(self):
- """
- A regular user may not view the management details
- of any instance.
- """
- assert_raises(Unauthorized, self.dbaas.management.show, 0)
-
- @test
- def test_mgmt_root_history(self):
- """
- A regular user may not view the root access history of
- any instance.
- """
- assert_raises(Unauthorized,
- self.dbaas.management.root_enabled_history, 0)
-
- @test
- def test_mgmt_instance_reboot(self):
- """A regular user may not perform an instance reboot."""
- assert_raises(Unauthorized, self.dbaas.management.reboot, 0)
-
- @test
- def test_mgmt_instance_reset_task_status(self):
- """A regular user may not perform an instance task status reset."""
- assert_raises(Unauthorized, self.dbaas.management.reset_task_status, 0)
-
- @test
- def test_diagnostics_get(self):
- """A regular user may not view the diagnostics."""
- assert_raises(Unauthorized, self.dbaas.diagnostics.get, 0)
-
- @test
- def test_hwinfo_get(self):
- """A regular user may not view the hardware info."""
- assert_raises(Unauthorized, self.dbaas.hwinfo.get, 0)
diff --git a/trove/tests/api/mgmt/configurations.py b/trove/tests/api/mgmt/configurations.py
index 700280df..50b968fe 100644
--- a/trove/tests/api/mgmt/configurations.py
+++ b/trove/tests/api/mgmt/configurations.py
@@ -26,8 +26,7 @@ from trove.tests.util.users import Requirements
GROUP = "dbaas.api.mgmt.configurations"
-@test(groups=[GROUP, tests.DBAAS_API, tests.PRE_INSTANCES],
- depends_on_groups=["services.initialize"])
+@test(groups=[GROUP, tests.DBAAS_API, tests.PRE_INSTANCES])
class ConfigGroupsSetupBeforeInstanceCreation(object):
@before_class
diff --git a/trove/tests/api/mgmt/datastore_versions.py b/trove/tests/api/mgmt/datastore_versions.py
index d26b3226..33dc77e3 100644
--- a/trove/tests/api/mgmt/datastore_versions.py
+++ b/trove/tests/api/mgmt/datastore_versions.py
@@ -22,6 +22,7 @@ from proboscis.check import Check
from proboscis import test
from troveclient.compat import exceptions
+from trove import tests
from trove.tests.config import CONFIG
from trove.tests.util import create_client
from trove.tests.util import create_dbaas_client
@@ -29,10 +30,9 @@ from trove.tests.util import create_glance_client
from trove.tests.util import test_config
from trove.tests.util.users import Requirements
-GROUP = "dbaas.api.mgmt.ds_versions"
-
-@test(groups=[GROUP])
+@test(groups=[tests.DBAAS_API_MGMT_DATASTORES],
+ depends_on_groups=[tests.DBAAS_API_DATASTORES])
class MgmtDataStoreVersion(object):
"""Tests the mgmt datastore version methods."""
diff --git a/trove/tests/api/mgmt/instances.py b/trove/tests/api/mgmt/instances.py
deleted file mode 100644
index 05bdbb18..00000000
--- a/trove/tests/api/mgmt/instances.py
+++ /dev/null
@@ -1,282 +0,0 @@
-# Copyright 2011 OpenStack Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from proboscis.asserts import assert_equal
-from proboscis.asserts import assert_raises
-from proboscis import before_class
-from proboscis.check import Check
-from proboscis import SkipTest
-from proboscis import test
-import six
-from troveclient.compat import exceptions
-
-from trove.common.utils import poll_until
-from trove.tests.api.instances import CreateInstance
-from trove.tests.api.instances import GROUP_START
-from trove.tests.api.instances import GROUP_TEST
-from trove.tests.api.instances import instance_info
-from trove.tests.config import CONFIG
-from trove.tests.util.check import CollectionCheck
-from trove.tests.util.check import TypeCheck
-from trove.tests.util import create_client
-from trove.tests.util import create_dbaas_client
-from trove.tests.util.users import Requirements
-
-
-GROUP = "dbaas.api.mgmt.instances"
-
-
-@test(groups=[GROUP])
-def mgmt_index_requires_admin_account():
- """Verify that an admin context is required to call this function."""
- client = create_client(is_admin=False)
- assert_raises(exceptions.Unauthorized, client.management.index)
-
-
-# These functions check some dictionaries in the returned response.
-def flavor_check(flavor):
- with CollectionCheck("flavor", flavor) as check:
- check.has_element("id", six.string_types)
- check.has_element("links", list)
-
-
-def datastore_check(datastore):
- with CollectionCheck("datastore", datastore) as check:
- check.has_element("type", six.string_types)
- check.has_element("version", six.string_types)
-
-
-def guest_status_check(guest_status):
- with CollectionCheck("guest_status", guest_status) as check:
- check.has_element("state_description", six.string_types)
-
-
-def volume_check(volume):
- with CollectionCheck("volume", volume) as check:
- check.has_element("id", six.string_types)
- check.has_element("size", int)
- check.has_element("used", float)
- check.has_element("total", float)
-
-
-@test(depends_on_groups=[GROUP_START], groups=[GROUP, GROUP_TEST])
-def mgmt_instance_get():
- """Tests the mgmt instances index method."""
- reqs = Requirements(is_admin=True)
- user = CONFIG.users.find_user(reqs)
- client = create_dbaas_client(user)
- mgmt = client.management
- # Grab the info.id created by the main instance test which is stored in
- # a global.
- id = instance_info.id
- api_instance = mgmt.show(id)
- datastore = getattr(api_instance, 'datastore')
- datastore_type = datastore.get('type')
-
- # Print out all fields for extra info if the test fails.
- for name in dir(api_instance):
- print(str(name) + "=" + str(getattr(api_instance, name)))
- with TypeCheck("instance", api_instance) as instance:
- instance.has_field('created', six.string_types)
- instance.has_field('deleted', bool)
- # If the instance hasn't been deleted, this should be false... but
-        # let's avoid creating more ordering work.
- instance.has_field('deleted_at', (six.string_types, None))
- instance.has_field('flavor', dict, flavor_check)
- instance.has_field('datastore', dict, datastore_check)
- instance.has_field('guest_status', dict, guest_status_check)
- instance.has_field('id', six.string_types)
- instance.has_field('links', list)
- instance.has_field('name', six.string_types)
- # instance.has_field('server_status', six.string_types)
- instance.has_field('status', six.string_types)
- instance.has_field('tenant_id', six.string_types)
- instance.has_field('updated', six.string_types)
- # Can be None if no volume is given on this instance.
- volume_support = CONFIG.get(datastore_type, 'mysql')['volume_support']
- if volume_support:
- instance.has_field('volume', dict, volume_check)
- else:
- instance.has_field('volume', None)
- # TODO(tim-simpson): Validate additional fields, assert
- # no extra fields exist.
- if api_instance.server is not None:
- print("the real content of server: %s" % dir(api_instance.server))
- print("the type of server: %s" % type(api_instance.server))
- print("the real content of api_instance: %s" % dir(api_instance))
- print("the type of api_instance: %s" % type(api_instance))
- print(hasattr(api_instance, "server"))
-
- with CollectionCheck("server", api_instance.server) as server:
- server.has_element("addresses", dict)
- server.has_element("deleted", bool)
- server.has_element("deleted_at", (six.string_types, None))
- server.has_element("host", six.string_types)
- server.has_element("id", six.string_types)
- server.has_element("local_id", int)
- server.has_element("name", six.string_types)
- server.has_element("status", six.string_types)
- server.has_element("tenant_id", six.string_types)
-
- if (volume_support and
- CONFIG.trove_main_instance_has_volume):
- with CollectionCheck("volume", api_instance.volume) as volume:
- volume.has_element("attachments", list)
- volume.has_element("availability_zone", six.string_types)
- volume.has_element("created_at", (six.string_types, None))
- volume.has_element("id", six.string_types)
- volume.has_element("size", int)
- volume.has_element("status", six.string_types)
-
-
-@test(groups=["fake." + GROUP])
-class WhenMgmtInstanceGetIsCalledButServerIsNotReady(object):
-
- @before_class
- def set_up(self):
- """Create client for mgmt instance test (2)."""
- if not CONFIG.fake_mode:
- raise SkipTest("This test only works in fake mode.")
- self.client = create_client(is_admin=True)
- self.mgmt = self.client.management
-        # Fake nova will fail a server whose name ends with 'test_SERVER_ERROR'.
- # Fake volume will fail if the size is 13.
- # TODO(tim.simpson): This would be a lot nicer looking if we used a
- # traditional mock framework.
- datastore = {'type': 'mysql', 'version': '5.5'}
- body = {'datastore': datastore}
- vol_support = CONFIG.get(datastore['type'], 'mysql')['volume_support']
- if vol_support:
- body.update({'size': 13})
-
- shared_network = CONFIG.get('shared_network', None)
- if shared_network:
- nics = [{'net-id': shared_network}]
-
- response = self.client.instances.create(
- 'test_SERVER_ERROR',
- instance_info.dbaas_flavor_href,
- body,
- [], [],
- nics=nics
- )
-
- poll_until(lambda: self.client.instances.get(response.id),
- lambda instance: instance.status == 'ERROR',
- time_out=10)
- self.id = response.id
-
- @test
- def mgmt_instance_get(self):
- """Tests the mgmt get call works when the Nova server isn't ready."""
- api_instance = self.mgmt.show(self.id)
- # Print out all fields for extra info if the test fails.
- for name in dir(api_instance):
- print(str(name) + "=" + str(getattr(api_instance, name)))
- # Print out all fields for extra info if the test fails.
- for name in dir(api_instance):
- print(str(name) + "=" + str(getattr(api_instance, name)))
- with TypeCheck("instance", api_instance) as instance:
- instance.has_field('created', six.string_types)
- instance.has_field('deleted', bool)
- # If the instance hasn't been deleted, this should be false... but
-            # let's avoid creating more ordering work.
- instance.has_field('deleted_at', (six.string_types, None))
- instance.has_field('flavor', dict, flavor_check)
- instance.has_field('datastore', dict, datastore_check)
- instance.has_field('guest_status', dict, guest_status_check)
- instance.has_field('id', six.string_types)
- instance.has_field('links', list)
- instance.has_field('name', six.string_types)
- # instance.has_field('server_status', six.string_types)
- instance.has_field('status', six.string_types)
- instance.has_field('tenant_id', six.string_types)
- instance.has_field('updated', six.string_types)
- # Can be None if no volume is given on this instance.
- instance.has_field('server', None)
- instance.has_field('volume', None)
- # TODO(tim-simpson): Validate additional fields,
- # assert no extra fields exist.
-
-
-@test(depends_on_classes=[CreateInstance], groups=[GROUP])
-class MgmtInstancesIndex(object):
- """Tests the mgmt instances index method."""
-
- @before_class
- def setUp(self):
- """Create client for mgmt instance test."""
- reqs = Requirements(is_admin=True)
- self.user = CONFIG.users.find_user(reqs)
- self.client = create_dbaas_client(self.user)
-
- @test
- def test_mgmt_instance_index_fields_present(self):
- """
- Verify that all the expected fields are returned by the index method.
- """
- expected_fields = [
- 'created',
- 'deleted',
- 'deleted_at',
- 'flavor',
- 'datastore',
- 'id',
- 'links',
- 'name',
- 'server',
- 'status',
- 'task_description',
- 'tenant_id',
- 'updated',
- 'region'
- ]
-
- if CONFIG.trove_volume_support:
- expected_fields.append('volume')
-
- index = self.client.management.index()
-
- if not hasattr(index, "deleted"):
- raise SkipTest("instance index must have a "
- "deleted label for this test")
-
- for instance in index:
- with Check() as check:
- for field in expected_fields:
- check.true(hasattr(instance, field),
- "Index lacks field %s" % field)
-
- @test
- def test_mgmt_instance_index_check_filter(self):
- """
- Make sure that the deleted= filter works as expected, and no instances
- are excluded.
- """
-
- if not hasattr(self.client.management.index, 'deleted'):
- raise SkipTest("instance index must have a deleted "
- "label for this test")
- instance_counts = []
- for deleted_filter in (True, False):
- filtered_index = self.client.management.index(
- deleted=deleted_filter)
- instance_counts.append(len(filtered_index))
- for instance in filtered_index:
- # Every instance listed here should have the proper value
- # for 'deleted'.
- assert_equal(deleted_filter, instance.deleted)
- full_index = self.client.management.index()
-        # Every instance is either deleted or not deleted, so the two
-        # filtered counts should sum to the size of the full index.
- assert_equal(len(full_index), sum(instance_counts))
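# Illustrative sketch (not from the original file): the filter check above
# relies on the deleted=True and deleted=False counts summing to the size of
# the unfiltered index; a small helper expressing that, assuming the same
# management client API as in the test.
def deleted_filter_counts(client):
    counts = {}
    for deleted in (True, False):
        counts[deleted] = len(client.management.index(deleted=deleted))
    counts['total'] = len(client.management.index())
    return counts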
diff --git a/trove/tests/api/mgmt/malformed_json.py b/trove/tests/api/mgmt/malformed_json.py
deleted file mode 100644
index d1431b1a..00000000
--- a/trove/tests/api/mgmt/malformed_json.py
+++ /dev/null
@@ -1,345 +0,0 @@
-# Copyright 2013 OpenStack Foundation
-# Copyright 2013 Rackspace Hosting
-# Copyright 2013 Hewlett-Packard Development Company, L.P.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-from collections import deque
-import six
-
-from proboscis import after_class
-from proboscis import asserts
-from proboscis import before_class
-from proboscis import test
-
-from trove.common.utils import poll_until
-from trove.tests.api.instances import instance_info
-from trove.tests.api.instances import VOLUME_SUPPORT
-from trove.tests.config import CONFIG
-from trove.tests.util import assert_contains
-from trove.tests.util import create_dbaas_client
-from trove.tests.util.users import Requirements
-
-
-@test(groups=["dbaas.api.mgmt.malformed_json"])
-class MalformedJson(object):
- @before_class
- def setUp(self):
- self.reqs = Requirements(is_admin=False)
- self.user = CONFIG.users.find_user(self.reqs)
- self.dbaas = create_dbaas_client(self.user)
- volume = None
- if VOLUME_SUPPORT:
- volume = {"size": 1}
- shared_network = CONFIG.get('shared_network', None)
- if shared_network:
- nics = [{'net-id': shared_network}]
-
- self.instance = self.dbaas.instances.create(
- name="qe_instance",
- flavor_id=instance_info.dbaas_flavor_href,
- datastore=instance_info.dbaas_datastore,
- datastore_version=instance_info.dbaas_datastore_version,
- volume=volume,
- databases=[{"name": "firstdb", "character_set": "latin2",
- "collate": "latin2_general_ci"}],
- nics=nics
- )
-
- @after_class
- def tearDown(self):
- self.dbaas.instances.delete(self.instance)
-
- @test
- def test_bad_instance_data(self):
- databases = "foo"
- users = "bar"
- try:
- self.dbaas.instances.create("bad_instance", 3, 3,
- databases=databases, users=users)
- except Exception as e:
- resp, body = self.dbaas.client.last_response
- httpCode = resp.status
- asserts.assert_equal(httpCode, 400,
- "Create instance failed with code %s,"
- " exception %s" % (httpCode, e))
- if six.PY3:
- databases = "'%s'" % databases
- users = "'%s'" % users
- else:
- databases = "u'%s'" % databases
- users = "u'%s'" % users
- assert_contains(
- str(e),
- ["Validation error:",
- "instance['databases'] %s is not of type 'array'" % databases,
- "instance['users'] %s is not of type 'array'" % users,
- "instance['volume'] 3 is not of type 'object'"])
-
- @test
- def test_bad_database_data(self):
- _bad_db_data = "{foo}"
- try:
- self.dbaas.databases.create(self.instance.id, _bad_db_data)
- except Exception as e:
- resp, body = self.dbaas.client.last_response
- httpCode = resp.status
- asserts.assert_equal(httpCode, 400,
- "Create database failed with code %s, "
- "exception %s" % (httpCode, e))
- if six.PY3:
- _bad_db_data = "'%s'" % _bad_db_data
- else:
- _bad_db_data = "u'%s'" % _bad_db_data
- asserts.assert_equal(
- str(e),
- "Validation error: "
- "databases %s is not of type 'array' (HTTP 400)" %
- _bad_db_data)
-
- @test
- def test_bad_user_data(self):
-
- def format_path(values):
- values = list(values)
- msg = "%s%s" % (values[0],
- ''.join(['[%r]' % i for i in values[1:]]))
- return msg
-
- _user = []
- _user_name = "F343jasdf"
- _user.append({"name12": _user_name,
- "password12": "password"})
- try:
- self.dbaas.users.create(self.instance.id, _user)
- except Exception as e:
- resp, body = self.dbaas.client.last_response
- httpCode = resp.status
- asserts.assert_equal(httpCode, 400,
- "Create user failed with code %s, "
- "exception %s" % (httpCode, e))
- err_1 = format_path(deque(('users', 0)))
- assert_contains(
- str(e),
- ["Validation error:",
- "%(err_1)s 'name' is a required property" % {'err_1': err_1},
- "%(err_1)s 'password' is a required property"
- % {'err_1': err_1}])
-
- @test
- def test_bad_resize_instance_data(self):
- def _check_instance_status():
- inst = self.dbaas.instances.get(self.instance)
- if inst.status in CONFIG.running_status:
- return True
- else:
- return False
-
- poll_until(_check_instance_status)
- try:
- self.dbaas.instances.resize_instance(self.instance.id, "")
- except Exception as e:
- resp, body = self.dbaas.client.last_response
- httpCode = resp.status
- asserts.assert_equal(httpCode, 400,
- "Resize instance failed with code %s, "
- "exception %s" % (httpCode, e))
-
- @test
- def test_bad_resize_vol_data(self):
- def _check_instance_status():
- inst = self.dbaas.instances.get(self.instance)
- if inst.status in CONFIG.running_status:
- return True
- else:
- return False
-
- poll_until(_check_instance_status)
- data = "bad data"
- try:
- self.dbaas.instances.resize_volume(self.instance.id, data)
- except Exception as e:
- resp, body = self.dbaas.client.last_response
- httpCode = resp.status
- asserts.assert_equal(httpCode, 400,
- "Resize instance failed with code %s, "
- "exception %s" % (httpCode, e))
- if six.PY3:
- data = "'bad data'"
- else:
- data = "u'bad data'"
- assert_contains(
- str(e),
- ["Validation error:",
- "resize['volume']['size'] %s is not valid under "
- "any of the given schemas" % data,
- "%s is not of type 'integer'" % data,
- "%s does not match '^0*[1-9]+[0-9]*$'" % data])
-
- @test
- def test_bad_change_user_password(self):
- password = ""
- users = [{"name": password}]
-
- def _check_instance_status():
- inst = self.dbaas.instances.get(self.instance)
- if inst.status in CONFIG.running_status:
- return True
- else:
- return False
-
- poll_until(_check_instance_status)
- try:
- self.dbaas.users.change_passwords(self.instance, users)
- except Exception as e:
- resp, body = self.dbaas.client.last_response
- httpCode = resp.status
- asserts.assert_equal(httpCode, 400,
- "Change usr/passwd failed with code %s, "
- "exception %s" % (httpCode, e))
- if six.PY3:
- password = "'%s'" % password
- else:
- password = "u'%s'" % password
- assert_contains(
- str(e),
- ["Validation error: users[0] 'password' "
- "is a required property",
- "users[0]['name'] %s is too short" % password,
- "users[0]['name'] %s does not match "
- "'^.*[0-9a-zA-Z]+.*$'" % password])
-
- @test
- def test_bad_grant_user_access(self):
- dbs = []
-
- def _check_instance_status():
- inst = self.dbaas.instances.get(self.instance)
- if inst.status in CONFIG.running_status:
- return True
- else:
- return False
-
- poll_until(_check_instance_status)
- try:
- self.dbaas.users.grant(self.instance, self.user, dbs)
- except Exception as e:
- resp, body = self.dbaas.client.last_response
- httpCode = resp.status
- asserts.assert_equal(httpCode, 400,
- "Grant user access failed with code %s, "
- "exception %s" % (httpCode, e))
-
- @test
- def test_bad_revoke_user_access(self):
- db = ""
-
- def _check_instance_status():
- inst = self.dbaas.instances.get(self.instance)
- if inst.status in CONFIG.running_status:
- return True
- else:
- return False
-
- poll_until(_check_instance_status)
- try:
- self.dbaas.users.revoke(self.instance, self.user, db)
- except Exception as e:
- resp, body = self.dbaas.client.last_response
- httpCode = resp.status
- asserts.assert_equal(httpCode, 404,
- "Revoke user access failed w/code %s, "
- "exception %s" % (httpCode, e))
- asserts.assert_equal(str(e), "The resource could not be found."
- " (HTTP 404)")
-
- @test
- def test_bad_body_flavorid_create_instance(self):
-
- flavorId = ["?"]
- try:
- self.dbaas.instances.create("test_instance",
- flavorId,
- 2)
- except Exception as e:
- resp, body = self.dbaas.client.last_response
- httpCode = resp.status
- asserts.assert_equal(httpCode, 400,
- "Create instance failed with code %s, "
- "exception %s" % (httpCode, e))
- flavorId = [u'?']
- assert_contains(
- str(e),
- ["Validation error:",
- "instance['flavorRef'] %s is not valid "
- "under any of the given schemas" % flavorId,
- "%s is not of type 'string'" % flavorId,
- "%s is not of type 'string'" % flavorId,
- "%s is not of type 'integer'" % flavorId,
- "instance['volume'] 2 is not of type 'object'"])
-
- @test
- def test_bad_body_datastore_create_instance(self):
-
- datastore = "*"
- datastore_version = "*"
- try:
- self.dbaas.instances.create("test_instance",
- 3, {"size": 2},
- datastore=datastore,
- datastore_version=datastore_version)
- except Exception as e:
- resp, body = self.dbaas.client.last_response
- httpCode = resp.status
- asserts.assert_equal(httpCode, 400,
- "Create instance failed with code %s, "
- "exception %s" % (httpCode, e))
- if six.PY3:
- datastore = "'%s'" % datastore
- datastore_version = "'%s'" % datastore_version
- else:
- datastore = "u'%s'" % datastore
- datastore_version = "u'%s'" % datastore_version
- assert_contains(
- str(e),
- ["Validation error:",
- "instance['datastore']['type']"
- " %s does not match"
- " '^.*[0-9a-zA-Z]+.*$'" % datastore,
- "instance['datastore']['version'] %s "
- "does not match '^.*[0-9a-zA-Z]+.*$'" % datastore_version])
-
- @test
- def test_bad_body_volsize_create_instance(self):
- volsize = "h3ll0"
- try:
- self.dbaas.instances.create("test_instance",
- "1",
- volsize)
- except Exception as e:
- resp, body = self.dbaas.client.last_response
- httpCode = resp.status
- asserts.assert_equal(httpCode, 400,
- "Create instance failed with code %s, "
- "exception %s" % (httpCode, e))
- if six.PY3:
- volsize = "'%s'" % volsize
- else:
- volsize = "u'%s'" % volsize
- print("DEBUG DEV: %s" % str(e))
- asserts.assert_equal(str(e),
- "Validation error: "
- "instance['volume'] %s is not of "
- "type 'object' (HTTP 400)" % volsize)
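# Illustrative sketch (not from the original file): each test above repeats
# the same _check_instance_status closure; a single shared helper would look
# roughly like this, assuming the same dbaas client and CONFIG.running_status.
from trove.common.utils import poll_until
from trove.tests.config import CONFIG

def wait_until_running(dbaas, instance):
    def _is_running():
        return dbaas.instances.get(instance).status in CONFIG.running_status
    poll_until(_is_running)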
diff --git a/trove/tests/api/replication.py b/trove/tests/api/replication.py
index 149fb4db..02fc5b66 100644
--- a/trove/tests/api/replication.py
+++ b/trove/tests/api/replication.py
@@ -26,7 +26,6 @@ from troveclient.compat import exceptions
from trove.common.utils import generate_uuid
from trove.common.utils import poll_until
from trove import tests
-from trove.tests.api import configurations
from trove.tests.api.instances import CheckInstance
from trove.tests.api.instances import instance_info
from trove.tests.api.instances import TIMEOUT_INSTANCE_CREATE
@@ -44,7 +43,6 @@ class SlaveInstanceTestInfo(object):
self.replicated_db = generate_uuid()
-REPLICATION_GROUP = "dbaas.api.replication"
slave_instance = SlaveInstanceTestInfo()
existing_db_on_master = generate_uuid()
backup_count = None
@@ -131,8 +129,8 @@ def validate_master(master, slaves):
assert_true(asserted_ids.issubset(master_ids))
-@test(depends_on_groups=[configurations.CONFIGURATION_GROUP],
- groups=[REPLICATION_GROUP, tests.INSTANCES],
+@test(depends_on_groups=[tests.DBAAS_API_CONFIGURATIONS],
+ groups=[tests.DBAAS_API_REPLICATION],
enabled=CONFIG.swift_enabled)
class CreateReplicationSlave(object):
@@ -151,6 +149,7 @@ class CreateReplicationSlave(object):
@test
def test_create_db_on_master(self):
+ """test_create_db_on_master"""
databases = [{'name': existing_db_on_master}]
# Ensure that the auth_token in the dbaas client is not stale
instance_info.dbaas.authenticate()
@@ -159,27 +158,29 @@ class CreateReplicationSlave(object):
@test(runs_after=['test_create_db_on_master'])
def test_create_slave(self):
+ """test_create_slave"""
global backup_count
backup_count = len(
instance_info.dbaas.instances.backups(instance_info.id))
slave_instance.id = create_slave()
-@test(groups=[REPLICATION_GROUP, tests.INSTANCES],
+@test(groups=[tests.DBAAS_API_REPLICATION],
enabled=CONFIG.swift_enabled,
- depends_on=[CreateReplicationSlave])
+ depends_on_classes=[CreateReplicationSlave])
class WaitForCreateSlaveToFinish(object):
"""Wait until the instance is created and set up as slave."""
@test
@time_out(TIMEOUT_INSTANCE_CREATE)
def test_slave_created(self):
+ """Wait for replica to be created."""
poll_until(lambda: instance_is_active(slave_instance.id))
@test(enabled=(not CONFIG.fake_mode and CONFIG.swift_enabled),
- depends_on=[WaitForCreateSlaveToFinish],
- groups=[REPLICATION_GROUP, tests.INSTANCES])
+ depends_on_classes=[WaitForCreateSlaveToFinish],
+ groups=[tests.DBAAS_API_REPLICATION])
class VerifySlave(object):
def db_is_found(self, database_to_find):
@@ -194,15 +195,18 @@ class VerifySlave(object):
@test
@time_out(20 * 60)
def test_correctly_started_replication(self):
+ """test_correctly_started_replication"""
poll_until(slave_is_running())
@test(runs_after=[test_correctly_started_replication])
@time_out(60)
def test_backup_deleted(self):
+ """test_backup_deleted"""
poll_until(backup_count_matches(backup_count))
@test(depends_on=[test_correctly_started_replication])
def test_slave_is_read_only(self):
+ """test_slave_is_read_only"""
cmd = "mysql -BNq -e \\\'select @@read_only\\\'"
server = create_server_connection(slave_instance.id)
@@ -217,6 +221,7 @@ class VerifySlave(object):
@test(depends_on=[test_slave_is_read_only])
def test_create_db_on_master(self):
+ """test_create_db_on_master"""
databases = [{'name': slave_instance.replicated_db}]
instance_info.dbaas.databases.create(instance_info.id, databases)
assert_equal(202, instance_info.dbaas.last_http_code)
@@ -224,38 +229,41 @@ class VerifySlave(object):
@test(depends_on=[test_create_db_on_master])
@time_out(5 * 60)
def test_database_replicated_on_slave(self):
+ """test_database_replicated_on_slave"""
poll_until(self.db_is_found(slave_instance.replicated_db))
@test(runs_after=[test_database_replicated_on_slave])
@time_out(5 * 60)
def test_existing_db_exists_on_slave(self):
+ """test_existing_db_exists_on_slave"""
poll_until(self.db_is_found(existing_db_on_master))
@test(depends_on=[test_existing_db_exists_on_slave])
def test_slave_user_exists(self):
+ """test_slave_user_exists"""
assert_equal(_get_user_count(slave_instance), 1)
assert_equal(_get_user_count(instance_info), 1)
-@test(groups=[REPLICATION_GROUP, tests.INSTANCES],
- depends_on=[WaitForCreateSlaveToFinish],
- runs_after=[VerifySlave],
+@test(groups=[tests.DBAAS_API_REPLICATION],
+ depends_on_classes=[VerifySlave],
enabled=CONFIG.swift_enabled)
class TestInstanceListing(object):
"""Test replication information in instance listing."""
@test
def test_get_slave_instance(self):
+ """test_get_slave_instance"""
validate_slave(instance_info, slave_instance)
@test
def test_get_master_instance(self):
+ """test_get_master_instance"""
validate_master(instance_info, [slave_instance])
-@test(groups=[REPLICATION_GROUP, tests.INSTANCES],
- depends_on=[WaitForCreateSlaveToFinish],
- runs_after=[TestInstanceListing],
+@test(groups=[tests.DBAAS_API_REPLICATION],
+ depends_on_classes=[TestInstanceListing],
enabled=CONFIG.swift_enabled)
class TestReplicationFailover(object):
"""Test replication failover functionality."""
@@ -303,14 +311,17 @@ class TestReplicationFailover(object):
@test(depends_on=[test_promote_master, test_eject_slave,
test_eject_valid_master])
def test_promote_to_replica_source(self):
+ """test_promote_to_replica_source"""
TestReplicationFailover.promote(instance_info, slave_instance)
@test(depends_on=[test_promote_to_replica_source])
def test_promote_back_to_replica_source(self):
+ """test_promote_back_to_replica_source"""
TestReplicationFailover.promote(slave_instance, instance_info)
@test(depends_on=[test_promote_back_to_replica_source], enabled=False)
def add_second_slave(self):
+ """add_second_slave"""
if CONFIG.fake_mode:
raise SkipTest("three site promote not supported in fake mode")
@@ -324,6 +335,7 @@ class TestReplicationFailover(object):
@test(depends_on=[add_second_slave], enabled=False)
def test_three_site_promote(self):
+ """Promote the second slave"""
if CONFIG.fake_mode:
raise SkipTest("three site promote not supported in fake mode")
@@ -333,6 +345,7 @@ class TestReplicationFailover(object):
@test(depends_on=[test_three_site_promote], enabled=False)
def disable_master(self):
+ """Stop trove-guestagent on master"""
if CONFIG.fake_mode:
raise SkipTest("eject_replica_source not supported in fake mode")
@@ -361,9 +374,8 @@ class TestReplicationFailover(object):
validate_slave(instance_info, slave_instance)
-@test(groups=[REPLICATION_GROUP, tests.INSTANCES],
- depends_on=[WaitForCreateSlaveToFinish],
- runs_after=[TestReplicationFailover],
+@test(groups=[tests.DBAAS_API_REPLICATION],
+ depends_on=[TestReplicationFailover],
enabled=CONFIG.swift_enabled)
class DetachReplica(object):
@@ -376,6 +388,7 @@ class DetachReplica(object):
@test
@time_out(5 * 60)
def test_detach_replica(self):
+ """test_detach_replica"""
if CONFIG.fake_mode:
raise SkipTest("Detach replica not supported in fake mode")
@@ -388,6 +401,7 @@ class DetachReplica(object):
@test(depends_on=[test_detach_replica])
@time_out(5 * 60)
def test_slave_is_not_read_only(self):
+ """test_slave_is_not_read_only"""
if CONFIG.fake_mode:
raise SkipTest("Test not_read_only not supported in fake mode")
@@ -407,15 +421,15 @@ class DetachReplica(object):
poll_until(check_not_read_only)
-@test(groups=[REPLICATION_GROUP, tests.INSTANCES],
- depends_on=[WaitForCreateSlaveToFinish],
- runs_after=[DetachReplica],
+@test(groups=[tests.DBAAS_API_REPLICATION],
+ depends_on=[DetachReplica],
enabled=CONFIG.swift_enabled)
class DeleteSlaveInstance(object):
@test
@time_out(TIMEOUT_INSTANCE_DELETE)
def test_delete_slave_instance(self):
+ """test_delete_slave_instance"""
instance_info.dbaas.instances.delete(slave_instance.id)
assert_equal(202, instance_info.dbaas.last_http_code)
diff --git a/trove/tests/api/root.py b/trove/tests/api/root.py
index 5f4a5626..89b521ef 100644
--- a/trove/tests/api/root.py
+++ b/trove/tests/api/root.py
@@ -22,99 +22,22 @@ from proboscis.asserts import assert_true
from proboscis import test
from troveclient.compat import exceptions
-from trove.common import utils
from trove import tests
from trove.tests.api import instances
-from trove.tests.config import CONFIG
-from trove.tests import util
from trove.tests.util import test_config
-from trove.tests.util import users as users_util
-GROUP = "dbaas.api.root"
-
-@test(groups=[tests.DBAAS_API, GROUP, tests.INSTANCES])
+@test(groups=[tests.DBAAS_API_USERS_ROOT],
+ depends_on_groups=[tests.DBAAS_API_INSTANCES])
class TestRoot(object):
- """
- Test the root operations
- """
-
root_enabled_timestamp = 'Never'
- system_users = ['root', 'debian_sys_maint']
@proboscis.before_class
def setUp(self):
- self.info = instances.InstanceTestInfo()
-
- reqs = users_util.Requirements(is_admin=True)
- self.info.admin_user = CONFIG.users.find_user(reqs)
- self.info.dbaas_admin = self.dbaas_admin = util.create_dbaas_client(
- self.info.admin_user
- )
- reqs = users_util.Requirements(is_admin=False)
- self.info.user = CONFIG.users.find_user(reqs)
- self.info.dbaas = self.dbaas = util.create_dbaas_client(self.info.user)
-
- self.info.name = "TEST_%s" % self.__class__.__name__
-
- flavor, flavor_href = self.info.find_default_flavor()
- self.info.dbaas_flavor = flavor
- self.info.dbaas_flavor_href = flavor_href
-
- databases = []
- databases.append({"name": "firstdb", "character_set": "latin2",
- "collate": "latin2_general_ci"})
- databases.append({"name": "db2"})
- self.info.databases = databases
-
- users = []
- users.append({"name": "lite", "password": "litepass",
- "databases": [{"name": "firstdb"}]})
- self.info.users = users
-
- self.info.dbaas_datastore = CONFIG.dbaas_datastore
- self.info.dbaas_datastore_version = CONFIG.dbaas_datastore_version
- self.info.volume = {'size': CONFIG.get('trove_volume_size', 2)}
-
- self.info.initial_result = self.dbaas.instances.create(
- self.info.name,
- self.info.dbaas_flavor_href,
- self.info.volume,
- databases,
- users,
- nics=self.info.nics,
- availability_zone="nova",
- datastore=self.info.dbaas_datastore,
- datastore_version=self.info.dbaas_datastore_version
- )
-
- assert_equal(200, self.dbaas.last_http_code)
-
- self.id = self.info.initial_result.id
-
- def result_is_active():
- instance = self.dbaas.instances.get(self.id)
- if instance.status in CONFIG.running_status:
- return True
- else:
- # If its not ACTIVE, anything but BUILD must be
-            # If it's not ACTIVE, anything but BUILD must be
- assert_equal("BUILD", instance.status)
- return False
-
- utils.poll_until(result_is_active)
-
- @proboscis.after_class
- def tearDown(self):
- self.dbaas.instances.delete(self.id)
-
- def _is_delete():
- try:
- self.dbaas.instances.get(self.id)
- except exceptions.NotFound:
- return True
-
- utils.poll_until(_is_delete)
+ # Reuse the instance created previously.
+ self.id = instances.instance_info.id
+ self.dbaas = instances.instance_info.dbaas
+ self.dbaas_admin = instances.instance_info.dbaas_admin
def _verify_root_timestamp(self, id):
reh = self.dbaas_admin.management.root_enabled_history(id)
diff --git a/trove/tests/api/root_on_create.py b/trove/tests/api/root_on_create.py
deleted file mode 100644
index 6011cf66..00000000
--- a/trove/tests/api/root_on_create.py
+++ /dev/null
@@ -1,132 +0,0 @@
-# Copyright 2013 OpenStack Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from proboscis import after_class
-from proboscis.asserts import assert_equal
-from proboscis.asserts import assert_not_equal
-from proboscis.asserts import assert_true
-from proboscis import before_class
-from proboscis import test
-
-from trove.common import cfg
-from trove.common.utils import poll_until
-from trove import tests
-from trove.tests.api.databases import TestMysqlAccess
-from trove.tests.api.instances import instance_info
-from trove.tests.api.users import TestUsers
-from trove.tests.config import CONFIG
-from trove.tests import util
-
-CONF = cfg.CONF
-GROUP = "dbaas.api.root.oncreate"
-
-
-@test(depends_on_classes=[TestMysqlAccess],
- runs_after=[TestUsers],
- groups=[tests.DBAAS_API, GROUP, tests.INSTANCES])
-class TestRootOnCreate(object):
- """
- Test 'CONF.root_on_create', which if True, creates the root user upon
- database instance initialization.
- """
-
- root_enabled_timestamp = 'Never'
- instance_id = None
-
- def create_instance(self):
- result = self.dbaas.instances.create(
- instance_info.name,
- instance_info.dbaas_flavor_href,
- instance_info.volume,
- instance_info.databases,
- instance_info.users,
- nics=instance_info.nics,
- availability_zone="nova",
- datastore=instance_info.dbaas_datastore,
- datastore_version=instance_info.dbaas_datastore_version)
- assert_equal(200, self.dbaas.last_http_code)
- new_id = result.id
-
- def result_is_active():
- instance = self.dbaas.instances.get(new_id)
- if instance.status in CONFIG.running_status:
- return True
- else:
- assert_equal("BUILD", instance.status)
- poll_until(result_is_active)
- if 'password' in result._info:
- self.dbaas.root.create(new_id)
- return new_id
-
- @before_class
- def setUp(self):
- self.orig_conf_value = CONF.get(
- instance_info.dbaas_datastore).root_on_create
- CONF.get(instance_info.dbaas_datastore).root_on_create = True
- self.dbaas = util.create_dbaas_client(instance_info.user)
- self.dbaas_admin = util.create_dbaas_client(instance_info.admin_user)
- self.history = self.dbaas_admin.management.root_enabled_history
- self.enabled = self.dbaas.root.is_root_enabled
- self.instance_id = self.create_instance()
-
- @after_class
- def tearDown(self):
- CONF.get(instance_info.
- dbaas_datastore).root_on_create = self.orig_conf_value
- instance = self.dbaas.instances.get(self.instance_id)
- instance.delete()
-
- @test
- def test_root_on_create(self):
- """Test that root is enabled after instance creation."""
- enabled = self.enabled(self.instance_id).rootEnabled
- assert_equal(200, self.dbaas.last_http_code)
- assert_true(enabled)
-
- @test(depends_on=[test_root_on_create])
- def test_history_after_root_on_create(self):
- """Test that the timestamp in the root enabled history is set."""
- self.root_enabled_timestamp = self.history(self.instance_id).enabled
- assert_equal(200, self.dbaas.last_http_code)
- assert_not_equal(self.root_enabled_timestamp, 'Never')
-
- @test(depends_on=[test_history_after_root_on_create])
- def test_reset_root(self):
- """Test that root reset does not alter the timestamp."""
- orig_timestamp = self.root_enabled_timestamp
- self.dbaas.root.create(self.instance_id)
- assert_equal(200, self.dbaas.last_http_code)
- self.root_enabled_timestamp = self.history(self.instance_id).enabled
- assert_equal(200, self.dbaas.last_http_code)
- assert_equal(orig_timestamp, self.root_enabled_timestamp)
-
- @test(depends_on=[test_reset_root])
- def test_root_still_enabled(self):
- """Test that after root was reset, it's still enabled."""
- enabled = self.enabled(self.instance_id).rootEnabled
- assert_equal(200, self.dbaas.last_http_code)
- assert_true(enabled)
-
- @test(depends_on=[test_root_still_enabled])
- def test_root_disable(self):
- """
-        After disabling root, ensure the history enabled flag
-        is still enabled.
- """
- self.dbaas.root.delete(self.instance_id)
- assert_equal(204, self.dbaas.last_http_code)
-
- enabled = self.enabled(self.instance_id).rootEnabled
- assert_equal(200, self.dbaas.last_http_code)
- assert_true(enabled)
diff --git a/trove/tests/api/user_access.py b/trove/tests/api/user_access.py
index 3f26b31e..1d8f55ab 100644
--- a/trove/tests/api/user_access.py
+++ b/trove/tests/api/user_access.py
@@ -22,14 +22,9 @@ from troveclient.compat import exceptions
from trove import tests
from trove.tests.api.instances import instance_info
-from trove.tests.api.users import TestUsers
from trove.tests import util
from trove.tests.util import test_config
-GROUP = "dbaas.api.useraccess"
-GROUP_POSITIVE = GROUP + ".positive"
-GROUP_NEGATIVE = GROUP + ".negative"
-
FAKE = test_config.values['fake_mode']
@@ -123,14 +118,10 @@ class UserAccessBase(object):
self._test_access(self.users, [])
-@test(depends_on_classes=[TestUsers],
- groups=[tests.DBAAS_API, GROUP, tests.INSTANCES],
- runs_after=[TestUsers])
+@test(depends_on_groups=[tests.DBAAS_API_USERS],
+ groups=[tests.DBAAS_API_USERS_ACCESS])
class TestUserAccessPasswordChange(UserAccessBase):
- """
- Test that change_password works.
- """
-
+ """Test that change_password works."""
@before_class
def setUp(self):
super(TestUserAccessPasswordChange, self).set_up()
@@ -227,13 +218,10 @@ class TestUserAccessPasswordChange(UserAccessBase):
self.dbaas.users.delete(instance_info.id, username)
-@test(depends_on_classes=[TestUsers],
- groups=[tests.DBAAS_API, GROUP, GROUP_POSITIVE, tests.INSTANCES],
- runs_after=[TestUsers])
+@test(depends_on_classes=[TestUserAccessPasswordChange],
+ groups=[tests.DBAAS_API_USERS_ACCESS])
class TestUserAccessPositive(UserAccessBase):
- """
- Test the creation and deletion of user grants.
- """
+ """Test the creation and deletion of user grants."""
@before_class
def setUp(self):
@@ -365,13 +353,9 @@ class TestUserAccessPositive(UserAccessBase):
@test(depends_on_classes=[TestUserAccessPositive],
- groups=[tests.DBAAS_API, GROUP, GROUP_NEGATIVE, tests.INSTANCES],
- depends_on=[TestUserAccessPositive])
+ groups=[tests.DBAAS_API_USERS_ACCESS])
class TestUserAccessNegative(UserAccessBase):
- """
- Negative tests for the creation and deletion of user grants.
- """
-
+ """Negative tests for the creation and deletion of user grants."""
@before_class
def setUp(self):
super(TestUserAccessNegative, self).set_up()
diff --git a/trove/tests/api/users.py b/trove/tests/api/users.py
index a141d6a5..13c4835f 100644
--- a/trove/tests/api/users.py
+++ b/trove/tests/api/users.py
@@ -27,20 +27,33 @@ from proboscis import test
from troveclient.compat import exceptions
from trove import tests
-from trove.tests.api.databases import TestDatabases
-from trove.tests.api.databases import TestMysqlAccess
from trove.tests.api.instances import instance_info
from trove.tests import util
from trove.tests.util import test_config
-
-GROUP = "dbaas.api.users"
FAKE = test_config.values['fake_mode']
-@test(depends_on_classes=[TestMysqlAccess],
- groups=[tests.DBAAS_API, GROUP, tests.INSTANCES],
- runs_after=[TestDatabases])
+@test(depends_on_groups=[tests.DBAAS_API_USERS_ROOT],
+ groups=[tests.DBAAS_API_USERS],
+ enabled=not test_config.values['fake_mode'])
+class TestMysqlAccessNegative(object):
+    """Make sure that the MySQL server was secured."""
+ @test
+ def test_mysql_admin(self):
+ """Ensure we aren't allowed access with os_admin and wrong password."""
+ util.mysql_connection().assert_fails(
+ instance_info.get_address(), "os_admin", "asdfd-asdf234")
+
+ @test
+ def test_mysql_root(self):
+ """Ensure we aren't allowed access with root and wrong password."""
+ util.mysql_connection().assert_fails(
+ instance_info.get_address(), "root", "dsfgnear")
+
+
+@test(depends_on_classes=[TestMysqlAccessNegative],
+ groups=[tests.DBAAS_API_USERS])
class TestUsers(object):
"""
Test the creation and deletion of users
@@ -367,25 +380,6 @@ class TestUsers(object):
instance_info.id, users)
assert_equal(400, self.dbaas.last_http_code)
- @test(enabled=False)
- # TODO(hub_cap): Make this test work once python-routes is updated,
- # if ever.
- def test_delete_user_with_period_in_name(self):
- """Attempt to create/destroy a user with a period in its name."""
- users = []
- username_with_period = "user.name"
- users.append({"name": username_with_period, "password": self.password,
- "databases": [{"name": self.db1}]})
- self.dbaas.users.create(instance_info.id, users)
- assert_equal(202, self.dbaas.last_http_code)
- if not FAKE:
- time.sleep(5)
-
- self.check_database_for_user(username_with_period, self.password,
- [self.db1])
- self.dbaas.users.delete(instance_info.id, username_with_period)
- assert_equal(202, self.dbaas.last_http_code)
-
@test
def test_invalid_password(self):
users = [{"name": "anouser", "password": "sdf,;",
diff --git a/trove/tests/api/versions.py b/trove/tests/api/versions.py
index 0d1a2d1d..6ef0107b 100644
--- a/trove/tests/api/versions.py
+++ b/trove/tests/api/versions.py
@@ -23,11 +23,8 @@ from trove.tests.util import create_dbaas_client
from trove.tests.util import test_config
from trove.tests.util.users import Requirements
-GROUP = "dbaas.api.versions"
-
-@test(groups=[tests.DBAAS_API, GROUP, tests.PRE_INSTANCES, 'DBAAS_VERSIONS'],
- depends_on_groups=["services.initialize"])
+@test(groups=[tests.DBAAS_API_VERSIONS])
class Versions(object):
"""Test listing all versions and verify the current version."""
@@ -39,6 +36,7 @@ class Versions(object):
@test
def test_list_versions_index(self):
+ """test_list_versions_index"""
versions = self.client.versions.index(test_config.version_url)
assert_equal(1, len(versions))
assert_equal("CURRENT", versions[0].status,
diff --git a/trove/tests/config.py b/trove/tests/config.py
index 9ff1895f..85181075 100644
--- a/trove/tests/config.py
+++ b/trove/tests/config.py
@@ -78,7 +78,6 @@ class TestConfig(object):
'dbaas_datastore_version': "5.5",
'dbaas_datastore_version_id': "b00000b0-00b0-0b00-00b0-"
"000b000000bb",
- 'dbaas_inactive_datastore_version': "mysql_inactive_version",
'instance_create_time': 16 * 60,
'mysql_connection_method': {"type": "direct"},
'typical_nova_image_name': None,
diff --git a/trove/tests/int_tests.py b/trove/tests/int_tests.py
index c386fafd..848cdd4f 100644
--- a/trove/tests/int_tests.py
+++ b/trove/tests/int_tests.py
@@ -14,20 +14,8 @@
# under the License.
import proboscis
-from trove.tests.api import backups
-from trove.tests.api import configurations
-from trove.tests.api import databases
-from trove.tests.api import datastores
-from trove.tests.api import instances
-from trove.tests.api import instances_actions
-from trove.tests.api.mgmt import admin_required
-from trove.tests.api.mgmt import datastore_versions
-from trove.tests.api.mgmt import instances as mgmt_instances
-from trove.tests.api import replication
-from trove.tests.api import root
-from trove.tests.api import user_access
-from trove.tests.api import users
-from trove.tests.api import versions
+
+from trove import tests
from trove.tests.scenario import groups
from trove.tests.scenario.groups import backup_group
from trove.tests.scenario.groups import cluster_group
@@ -41,16 +29,11 @@ from trove.tests.scenario.groups import instance_error_create_group
from trove.tests.scenario.groups import instance_force_delete_group
from trove.tests.scenario.groups import instance_upgrade_group
from trove.tests.scenario.groups import module_group
-from trove.tests.scenario.groups import negative_cluster_actions_group
from trove.tests.scenario.groups import replication_group
from trove.tests.scenario.groups import root_actions_group
from trove.tests.scenario.groups import user_actions_group
-GROUP_SERVICES_INITIALIZE = "services.initialize"
-GROUP_SETUP = 'dbaas.setup'
-
-
def build_group(*groups):
def merge(collection, *items):
for item in items:
@@ -77,63 +60,14 @@ def register(group_names, *test_groups, **kwargs):
proboscis.register(groups=build_group(group_names),
depends_on_groups=build_group(*test_groups))
# Now register the same groups with '-' instead of '_'
- proboscis.register(groups=build_group(
- [name.replace('_', '-') for name in group_names]),
- depends_on_groups=build_group(*test_groups))
+ proboscis.register(
+ groups=build_group([name.replace('_', '-') for name in group_names]),
+ depends_on_groups=build_group(*test_groups))
-black_box_groups = [
- users.GROUP,
- user_access.GROUP,
- databases.GROUP,
- root.GROUP,
- GROUP_SERVICES_INITIALIZE,
- instances.GROUP_START,
- instances.GROUP_QUOTAS,
- backups.BACKUP_GROUP,
- replication.REPLICATION_GROUP,
- configurations.CONFIGURATION_GROUP,
- instances_actions.GROUP_RESIZE,
- instances_actions.GROUP_STOP_MYSQL,
- instances.GROUP_STOP,
- instances.GROUP_GUEST,
- versions.GROUP,
- datastores.GROUP,
- datastore_versions.GROUP,
- # TODO(SlickNik): The restart tests fail intermittently so pulling
- # them out of the blackbox group temporarily. Refer to Trove bug:
- # https://bugs.launchpad.net/trove/+bug/1204233
- # instances_actions.GROUP_RESTART,
-]
-proboscis.register(groups=["blackbox", "mysql"],
- depends_on_groups=black_box_groups)
-
-simple_black_box_groups = [
- GROUP_SERVICES_INITIALIZE,
- versions.GROUP,
- instances.GROUP_START_SIMPLE,
- admin_required.GROUP,
- datastore_versions.GROUP,
-]
-proboscis.register(groups=["simple_blackbox"],
- depends_on_groups=simple_black_box_groups)
-
-black_box_mgmt_groups = [
- instances_actions.GROUP_REBOOT,
- admin_required.GROUP,
- mgmt_instances.GROUP,
- datastore_versions.GROUP,
-]
-proboscis.register(groups=["blackbox_mgmt"],
- depends_on_groups=black_box_mgmt_groups)
-#
-# Group designations for datastore agnostic int-tests
-#
# Base groups for all other groups
base_groups = [
- GROUP_SERVICES_INITIALIZE,
- versions.GROUP,
- GROUP_SETUP
+ tests.DBAAS_API_VERSIONS,
]
# Cluster-based groups
@@ -143,8 +77,6 @@ cluster_create_groups.extend([groups.CLUSTER_DELETE_WAIT])
cluster_actions_groups = list(cluster_create_groups)
cluster_actions_groups.extend([groups.CLUSTER_ACTIONS_SHRINK_WAIT])
-cluster_negative_actions_groups = list(negative_cluster_actions_group.GROUP)
-
cluster_root_groups = list(cluster_create_groups)
cluster_root_groups.extend([groups.CLUSTER_ACTIONS_ROOT_ENABLE])
@@ -239,42 +171,60 @@ common_groups = list(instance_create_groups)
# of no use case.
common_groups.extend([guest_log_groups, instance_init_groups])
-# Register: Component based groups
-register(["backup"], backup_groups)
-register(["backup_incremental"], backup_incremental_groups)
-register(["backup_negative"], backup_negative_groups)
-register(["cluster"], cluster_actions_groups)
-register(["cluster_actions"], cluster_actions_groups)
-register(["cluster_create"], cluster_create_groups)
-register(["cluster_negative_actions"], cluster_negative_actions_groups)
-register(["cluster_restart"], cluster_restart_groups)
-register(["cluster_root"], cluster_root_groups)
-register(["cluster_root_actions"], cluster_root_actions_groups)
-register(["cluster_upgrade"], cluster_upgrade_groups)
-register(["cluster_config"], cluster_config_groups)
-register(["cluster_config_actions"], cluster_config_actions_groups)
-register(["common"], common_groups)
-register(["configuration"], configuration_groups)
-register(["configuration_create"], configuration_create_groups)
-register(["database"], database_actions_groups)
-register(["guest_log"], guest_log_groups)
-register(["instance"], instance_groups)
-register(["instance_actions"], instance_actions_groups)
-register(["instance_create"], instance_create_groups)
-register(["instance_error"], instance_error_create_groups)
-register(["instance_force_delete"], instance_force_delete_groups)
-register(["instance_init"], instance_init_groups)
-register(["instance_upgrade"], instance_upgrade_groups)
-register(["module"], module_groups)
-register(["module_create"], module_create_groups)
-register(["replication"], replication_groups)
-register(["replication_promote"], replication_promote_groups)
-register(["root"], root_actions_groups)
-register(["user"], user_actions_groups)
-
-# Register: Datastore based groups
-# These should contain all functionality currently supported by the datastore.
-# Keeping them in alphabetical order may reduce the number of merge conflicts.
+integration_groups = [
+ tests.DBAAS_API_VERSIONS,
+ tests.DBAAS_API_DATASTORES,
+ tests.DBAAS_API_MGMT_DATASTORES,
+ tests.DBAAS_API_INSTANCES,
+ tests.DBAAS_API_USERS_ROOT,
+ tests.DBAAS_API_USERS,
+ tests.DBAAS_API_USERS_ACCESS,
+ tests.DBAAS_API_DATABASES,
+ tests.DBAAS_API_INSTANCE_ACTIONS,
+ tests.DBAAS_API_BACKUPS,
+ tests.DBAAS_API_CONFIGURATIONS,
+ tests.DBAAS_API_REPLICATION,
+ tests.DBAAS_API_INSTANCES_DELETE
+]
+# We intentionally make the functional tests run in series and depend on
+# each other, so that a single test case failure stops the whole run.
+proboscis.register(groups=["mysql"],
+ depends_on_groups=integration_groups)
+
+register(
+ ["mysql_supported"],
+ single=[instance_create_group.GROUP,
+ backup_group.GROUP,
+ configuration_group.GROUP,
+ database_actions_group.GROUP,
+ guest_log_group.GROUP,
+ instance_actions_group.GROUP,
+ instance_error_create_group.GROUP,
+ instance_force_delete_group.GROUP,
+ root_actions_group.GROUP,
+ user_actions_group.GROUP,
+ instance_delete_group.GROUP],
+ multi=[replication_group.GROUP,
+ instance_delete_group.GROUP]
+)
+
+register(
+ ["mariadb_supported"],
+ single=[instance_create_group.GROUP,
+ backup_group.GROUP,
+ configuration_group.GROUP,
+ database_actions_group.GROUP,
+ guest_log_group.GROUP,
+ instance_actions_group.GROUP,
+ instance_error_create_group.GROUP,
+ instance_force_delete_group.GROUP,
+ root_actions_group.GROUP,
+ user_actions_group.GROUP,
+ instance_delete_group.GROUP],
+ multi=[replication_group.GROUP,
+ instance_delete_group.GROUP]
+)
+
register(
["db2_supported"],
single=[common_groups,
@@ -292,7 +242,6 @@ register(
configuration_groups,
user_actions_groups, ],
multi=[cluster_actions_groups,
- cluster_negative_actions_groups,
cluster_root_actions_groups,
cluster_config_actions_groups, ]
)
@@ -316,22 +265,6 @@ register(
)
register(
- ["mariadb_supported"],
- single=[common_groups,
- backup_groups,
- backup_incremental_groups,
- configuration_groups,
- database_actions_groups,
- root_actions_groups,
- user_actions_groups, ],
- multi=[replication_promote_groups, ]
- # multi=[cluster_actions_groups,
- # cluster_negative_actions_groups,
- # cluster_root_actions_groups,
- # replication_promote_groups, ]
-)
-
-register(
["mongodb_supported"],
single=[common_groups,
backup_groups,
@@ -343,19 +276,6 @@ register(
)
register(
- ["mysql_supported"],
- single=[common_groups,
- backup_incremental_groups,
- configuration_groups,
- database_actions_groups,
- instance_groups,
- instance_upgrade_groups,
- root_actions_groups,
- user_actions_groups, ],
- multi=[replication_promote_groups, ]
-)
-
-register(
["percona_supported"],
single=[common_groups,
backup_incremental_groups,
@@ -388,7 +308,6 @@ register(
user_actions_groups, ],
multi=[]
# multi=[cluster_actions_groups,
- # cluster_negative_actions_groups,
# cluster_root_actions_groups, ]
)
@@ -406,7 +325,6 @@ register(
redis_root_actions_groups, ],
multi=[replication_promote_groups, ]
# multi=[cluster_actions_groups,
- # cluster_negative_actions_groups,
# replication_promote_groups, ]
)
@@ -416,6 +334,5 @@ register(
configuration_groups,
root_actions_groups, ],
multi=[cluster_actions_groups,
- cluster_negative_actions_groups,
cluster_root_actions_groups, ]
)
diff --git a/trove/tests/scenario/groups/__init__.py b/trove/tests/scenario/groups/__init__.py
index e6d1a822..a5c2db06 100644
--- a/trove/tests/scenario/groups/__init__.py
+++ b/trove/tests/scenario/groups/__init__.py
@@ -169,3 +169,7 @@ USER_ACTION_INST_CREATE = "scenario.user_action_inst_create_grp"
USER_ACTION_INST_CREATE_WAIT = "scenario.user_action_inst_create_wait_grp"
USER_ACTION_INST_DELETE = "scenario.user_action_inst_delete_grp"
USER_ACTION_INST_DELETE_WAIT = "scenario.user_action_inst_delete_wait_grp"
+
+
+# Instance Log Group
+INST_LOG = "scenario.inst_log_grp"
diff --git a/trove/tests/scenario/groups/backup_group.py b/trove/tests/scenario/groups/backup_group.py
index 16d93def..4cd7f2ea 100644
--- a/trove/tests/scenario/groups/backup_group.py
+++ b/trove/tests/scenario/groups/backup_group.py
@@ -29,10 +29,8 @@ class BackupRunnerFactory(test_runners.RunnerFactory):
_runner_cls = 'BackupRunner'
-@test(depends_on_groups=[groups.INST_CREATE_WAIT],
- groups=[GROUP, groups.BACKUP, groups.BACKUP_CREATE],
- runs_after_groups=[groups.MODULE_INST_DELETE,
- groups.CFGGRP_INST_DELETE])
+@test(depends_on_groups=[groups.INST_CREATE],
+ groups=[GROUP, groups.BACKUP, groups.BACKUP_CREATE])
class BackupCreateGroup(TestGroup):
"""Test Backup Create functionality."""
@@ -61,8 +59,8 @@ class BackupCreateGroup(TestGroup):
self.test_runner.run_backup_create()
-@test(depends_on_groups=[groups.BACKUP_CREATE],
- groups=[groups.BACKUP_CREATE_NEGATIVE])
+@test(depends_on_classes=[BackupCreateGroup],
+ groups=[GROUP, groups.BACKUP_CREATE_NEGATIVE])
class BackupCreateNegativeGroup(TestGroup):
"""Test Backup Create Negative functionality."""
@@ -106,9 +104,8 @@ class BackupCreateNegativeGroup(TestGroup):
self.test_runner.run_backup_create_instance_not_found()
-@test(depends_on_groups=[groups.BACKUP_CREATE],
- groups=[GROUP, groups.BACKUP, groups.BACKUP_CREATE_WAIT],
- runs_after_groups=[groups.BACKUP_CREATE_NEGATIVE])
+@test(depends_on_classes=[BackupCreateNegativeGroup],
+ groups=[GROUP, groups.BACKUP, groups.BACKUP_CREATE_WAIT])
class BackupCreateWaitGroup(TestGroup):
"""Wait for Backup Create to Complete."""
@@ -137,11 +134,6 @@ class BackupCreateWaitGroup(TestGroup):
self.test_runner.run_backup_list_filter_datastore()
@test(depends_on=[backup_create_completed])
- def backup_list_filter_different_datastore(self):
- """Test list backups and filter by different datastore."""
- self.test_runner.run_backup_list_filter_different_datastore()
-
- @test(depends_on=[backup_create_completed])
def backup_list_filter_datastore_not_found(self):
"""Test list backups and filter by unknown datastore."""
self.test_runner.run_backup_list_filter_datastore_not_found()
@@ -162,7 +154,7 @@ class BackupCreateWaitGroup(TestGroup):
self.test_runner.run_backup_get_unauthorized_user()
-@test(depends_on_groups=[groups.BACKUP_CREATE],
+@test(depends_on_classes=[BackupCreateWaitGroup],
groups=[GROUP, groups.BACKUP_INC, groups.BACKUP_INC_CREATE])
class BackupIncCreateGroup(TestGroup):
"""Test Backup Incremental Create functionality."""
@@ -224,7 +216,7 @@ class BackupIncCreateGroup(TestGroup):
self.test_runner.run_instance_goes_active()
-@test(depends_on_groups=[groups.BACKUP_CREATE],
+@test(depends_on_classes=[BackupIncCreateGroup],
groups=[GROUP, groups.BACKUP_INST, groups.BACKUP_INST_CREATE])
class BackupInstCreateGroup(TestGroup):
"""Test Backup Instance Create functionality."""
@@ -239,10 +231,9 @@ class BackupInstCreateGroup(TestGroup):
self.test_runner.run_restore_from_backup()
-@test(depends_on_groups=[groups.BACKUP_INC_CREATE],
+@test(depends_on_classes=[BackupInstCreateGroup],
groups=[GROUP, groups.BACKUP_INC_INST,
- groups.BACKUP_INC_INST_CREATE],
- runs_after_groups=[groups.BACKUP_INST_CREATE])
+ groups.BACKUP_INC_INST_CREATE])
class BackupIncInstCreateGroup(TestGroup):
"""Test Backup Incremental Instance Create functionality."""
@@ -256,11 +247,8 @@ class BackupIncInstCreateGroup(TestGroup):
self.test_runner.run_restore_from_inc_1_backup()
-@test(depends_on_groups=[groups.BACKUP_INST_CREATE],
- groups=[GROUP, groups.BACKUP_INST, groups.BACKUP_INST_CREATE_WAIT],
- runs_after_groups=[groups.BACKUP_INC_INST_CREATE,
- groups.DB_ACTION_INST_CREATE,
- groups.INST_ACTIONS_RESIZE])
+@test(depends_on_classes=[BackupIncInstCreateGroup],
+ groups=[GROUP, groups.BACKUP_INST, groups.BACKUP_INST_CREATE_WAIT])
class BackupInstCreateWaitGroup(TestGroup):
"""Test Backup Instance Create completes."""
@@ -284,10 +272,9 @@ class BackupInstCreateWaitGroup(TestGroup):
self.test_runner.run_verify_databases_in_restored_instance()
-@test(depends_on_groups=[groups.BACKUP_INC_INST_CREATE],
+@test(depends_on_classes=[BackupInstCreateWaitGroup],
groups=[GROUP, groups.BACKUP_INC_INST,
- groups.BACKUP_INC_INST_CREATE_WAIT],
- runs_after_groups=[groups.BACKUP_INST_CREATE_WAIT])
+ groups.BACKUP_INC_INST_CREATE_WAIT])
class BackupIncInstCreateWaitGroup(TestGroup):
"""Test Backup Incremental Instance Create completes."""
@@ -311,9 +298,8 @@ class BackupIncInstCreateWaitGroup(TestGroup):
self.test_runner.run_verify_databases_in_restored_inc_1_instance()
-@test(depends_on_groups=[groups.BACKUP_INST_CREATE_WAIT],
- groups=[GROUP, groups.BACKUP_INST, groups.BACKUP_INST_DELETE],
- runs_after_groups=[groups.BACKUP_INC_INST_CREATE_WAIT])
+@test(depends_on_classes=[BackupIncInstCreateWaitGroup],
+ groups=[GROUP, groups.BACKUP_INST, groups.BACKUP_INST_DELETE])
class BackupInstDeleteGroup(TestGroup):
"""Test Backup Instance Delete functionality."""
@@ -327,10 +313,9 @@ class BackupInstDeleteGroup(TestGroup):
self.test_runner.run_delete_restored_instance()
-@test(depends_on_groups=[groups.BACKUP_INC_INST_CREATE_WAIT],
+@test(depends_on_classes=[BackupInstDeleteGroup],
groups=[GROUP, groups.BACKUP_INC_INST,
- groups.BACKUP_INC_INST_DELETE],
- runs_after_groups=[groups.BACKUP_INST_DELETE])
+ groups.BACKUP_INC_INST_DELETE])
class BackupIncInstDeleteGroup(TestGroup):
"""Test Backup Incremental Instance Delete functionality."""
@@ -344,9 +329,8 @@ class BackupIncInstDeleteGroup(TestGroup):
self.test_runner.run_delete_restored_inc_1_instance()
-@test(depends_on_groups=[groups.BACKUP_INST_DELETE],
- groups=[GROUP, groups.BACKUP_INST, groups.BACKUP_INST_DELETE_WAIT],
- runs_after_groups=[groups.INST_DELETE])
+@test(depends_on_classes=[BackupIncInstDeleteGroup],
+ groups=[GROUP, groups.BACKUP_INST, groups.BACKUP_INST_DELETE_WAIT])
class BackupInstDeleteWaitGroup(TestGroup):
"""Test Backup Instance Delete completes."""
@@ -360,10 +344,9 @@ class BackupInstDeleteWaitGroup(TestGroup):
self.test_runner.run_wait_for_restored_instance_delete()
-@test(depends_on_groups=[groups.BACKUP_INC_INST_DELETE],
+@test(depends_on_classes=[BackupInstDeleteWaitGroup],
groups=[GROUP, groups.BACKUP_INC_INST,
- groups.BACKUP_INC_INST_DELETE_WAIT],
- runs_after_groups=[groups.INST_DELETE])
+ groups.BACKUP_INC_INST_DELETE_WAIT])
class BackupIncInstDeleteWaitGroup(TestGroup):
"""Test Backup Incremental Instance Delete completes."""
@@ -377,9 +360,8 @@ class BackupIncInstDeleteWaitGroup(TestGroup):
self.test_runner.run_wait_for_restored_inc_1_instance_delete()
-@test(depends_on_groups=[groups.BACKUP_INC_CREATE],
- groups=[GROUP, groups.BACKUP_INC, groups.BACKUP_INC_DELETE],
- runs_after_groups=[groups.BACKUP_INC_INST_CREATE_WAIT])
+@test(depends_on_classes=[BackupIncInstDeleteWaitGroup],
+ groups=[GROUP, groups.BACKUP_INC, groups.BACKUP_INC_DELETE])
class BackupIncDeleteGroup(TestGroup):
"""Test Backup Incremental Delete functionality."""
@@ -395,11 +377,8 @@ class BackupIncDeleteGroup(TestGroup):
self.test_runner.run_delete_inc_2_backup()
-@test(depends_on_groups=[groups.BACKUP_CREATE],
- groups=[GROUP, groups.BACKUP, groups.BACKUP_DELETE],
- runs_after_groups=[groups.BACKUP_INST_CREATE_WAIT,
- groups.BACKUP_INC_DELETE,
- groups.INST_ACTIONS_RESIZE_WAIT])
+@test(depends_on_classes=[BackupIncDeleteGroup],
+ groups=[GROUP, groups.BACKUP, groups.BACKUP_DELETE])
class BackupDeleteGroup(TestGroup):
"""Test Backup Delete functionality."""
diff --git a/trove/tests/scenario/groups/configuration_group.py b/trove/tests/scenario/groups/configuration_group.py
index 4c366c35..fc84f7fc 100644
--- a/trove/tests/scenario/groups/configuration_group.py
+++ b/trove/tests/scenario/groups/configuration_group.py
@@ -30,7 +30,7 @@ class ConfigurationRunnerFactory(test_runners.RunnerFactory):
@test(groups=[GROUP, groups.CFGGRP_CREATE],
- runs_after_groups=[groups.MODULE_CREATE])
+ depends_on_groups=[groups.BACKUP_DELETE])
class ConfigurationCreateGroup(TestGroup):
"""Test Configuration Group functionality."""
@@ -94,11 +94,9 @@ class ConfigurationCreateGroup(TestGroup):
self.test_runner.run_non_dynamic_conf_get_unauthorized_user()
-@test(depends_on_groups=[groups.INST_CREATE_WAIT,
- groups.CFGGRP_CREATE],
+@test(depends_on_classes=[ConfigurationCreateGroup],
groups=[GROUP, groups.CFGGRP_INST,
- groups.CFGGRP_INST_CREATE],
- runs_after_groups=[groups.MODULE_INST_CREATE])
+ groups.CFGGRP_INST_CREATE])
class ConfigurationInstCreateGroup(TestGroup):
"""Test Instance Configuration Group Create functionality."""
@@ -230,12 +228,9 @@ class ConfigurationInstCreateGroup(TestGroup):
self.test_runner.run_create_instance_with_conf()
-@test(depends_on_groups=[groups.CFGGRP_INST_CREATE],
+@test(depends_on_classes=[ConfigurationInstCreateGroup],
groups=[GROUP, groups.CFGGRP_INST,
- groups.CFGGRP_INST_CREATE_WAIT],
- runs_after_groups=[groups.INST_ACTIONS,
- groups.INST_UPGRADE,
- groups.MODULE_INST_CREATE_WAIT])
+ groups.CFGGRP_INST_CREATE_WAIT])
class ConfigurationInstCreateWaitGroup(TestGroup):
"""Test that Instance Configuration Group Create Completes."""
@@ -254,10 +249,9 @@ class ConfigurationInstCreateWaitGroup(TestGroup):
self.test_runner.run_verify_instance_values()
-@test(depends_on_groups=[groups.CFGGRP_INST_CREATE_WAIT],
+@test(depends_on_classes=[ConfigurationInstCreateWaitGroup],
groups=[GROUP, groups.CFGGRP_INST,
- groups.CFGGRP_INST_DELETE],
- runs_after_groups=[groups.MODULE_INST_DELETE])
+ groups.CFGGRP_INST_DELETE])
class ConfigurationInstDeleteGroup(TestGroup):
"""Test Instance Configuration Group Delete functionality."""
@@ -271,10 +265,9 @@ class ConfigurationInstDeleteGroup(TestGroup):
self.test_runner.run_delete_conf_instance()
-@test(depends_on_groups=[groups.CFGGRP_INST_DELETE],
+@test(depends_on_classes=[ConfigurationInstDeleteGroup],
groups=[GROUP, groups.CFGGRP_INST,
- groups.CFGGRP_INST_DELETE_WAIT],
- runs_after_groups=[groups.INST_DELETE])
+ groups.CFGGRP_INST_DELETE_WAIT])
class ConfigurationInstDeleteWaitGroup(TestGroup):
"""Test that Instance Configuration Group Delete Completes."""
@@ -288,8 +281,7 @@ class ConfigurationInstDeleteWaitGroup(TestGroup):
self.test_runner.run_wait_for_delete_conf_instance()
-@test(depends_on_groups=[groups.CFGGRP_CREATE],
- runs_after_groups=[groups.CFGGRP_INST_DELETE_WAIT],
+@test(depends_on_classes=[ConfigurationInstDeleteWaitGroup],
groups=[GROUP, groups.CFGGRP_DELETE])
class ConfigurationDeleteGroup(TestGroup):
"""Test Configuration Group Delete functionality."""
diff --git a/trove/tests/scenario/groups/database_actions_group.py b/trove/tests/scenario/groups/database_actions_group.py
index b3b77c4a..b431fe39 100644
--- a/trove/tests/scenario/groups/database_actions_group.py
+++ b/trove/tests/scenario/groups/database_actions_group.py
@@ -35,7 +35,7 @@ class InstanceCreateRunnerFactory(test_runners.RunnerFactory):
_runner_cls = 'InstanceCreateRunner'
-@test(depends_on_groups=[groups.INST_CREATE_WAIT],
+@test(depends_on_groups=[groups.CFGGRP_DELETE],
groups=[GROUP, groups.DB_ACTION_CREATE])
class DatabaseActionsCreateGroup(TestGroup):
"""Test Database Actions Create functionality."""
@@ -73,7 +73,7 @@ class DatabaseActionsCreateGroup(TestGroup):
self.test_runner.run_existing_database_create()
-@test(depends_on_groups=[groups.DB_ACTION_CREATE],
+@test(depends_on_classes=[DatabaseActionsCreateGroup],
groups=[GROUP, groups.DB_ACTION_DELETE])
class DatabaseActionsDeleteGroup(TestGroup):
"""Test Database Actions Delete functionality."""
@@ -103,8 +103,8 @@ class DatabaseActionsDeleteGroup(TestGroup):
self.test_runner.run_system_database_delete()
-@test(groups=[GROUP, groups.DB_ACTION_INST, groups.DB_ACTION_INST_CREATE],
- runs_after_groups=[groups.INST_ACTIONS_RESIZE])
+@test(depends_on_classes=[DatabaseActionsDeleteGroup],
+ groups=[GROUP, groups.DB_ACTION_INST, groups.DB_ACTION_INST_CREATE])
class DatabaseActionsInstCreateGroup(TestGroup):
"""Test Database Actions Instance Create functionality."""
@@ -121,11 +121,8 @@ class DatabaseActionsInstCreateGroup(TestGroup):
name_suffix='_db')
-@test(depends_on_groups=[groups.DB_ACTION_INST_CREATE],
- groups=[GROUP, groups.DB_ACTION_INST, groups.DB_ACTION_INST_CREATE_WAIT],
- runs_after_groups=[groups.BACKUP_INST_CREATE,
- groups.BACKUP_INC_INST_CREATE,
- groups.INST_ACTIONS_RESIZE])
+@test(depends_on_classes=[DatabaseActionsInstCreateGroup],
+ groups=[GROUP, groups.DB_ACTION_INST, groups.DB_ACTION_INST_CREATE_WAIT])
class DatabaseActionsInstCreateWaitGroup(TestGroup):
"""Wait for Database Actions Instance Create to complete."""
@@ -150,7 +147,7 @@ class DatabaseActionsInstCreateWaitGroup(TestGroup):
self.instance_create_runner.run_validate_initialized_instance()
-@test(depends_on_groups=[groups.DB_ACTION_INST_CREATE_WAIT],
+@test(depends_on_classes=[DatabaseActionsInstCreateWaitGroup],
groups=[GROUP, groups.DB_ACTION_INST, groups.DB_ACTION_INST_DELETE])
class DatabaseActionsInstDeleteGroup(TestGroup):
"""Test Database Actions Instance Delete functionality."""
@@ -166,9 +163,8 @@ class DatabaseActionsInstDeleteGroup(TestGroup):
self.instance_create_runner.run_initialized_instance_delete()
-@test(depends_on_groups=[groups.DB_ACTION_INST_DELETE],
- groups=[GROUP, groups.DB_ACTION_INST, groups.DB_ACTION_INST_DELETE_WAIT],
- runs_after_groups=[groups.INST_DELETE])
+@test(depends_on_classes=[DatabaseActionsInstDeleteGroup],
+ groups=[GROUP, groups.DB_ACTION_INST, groups.DB_ACTION_INST_DELETE_WAIT])
class DatabaseActionsInstDeleteWaitGroup(TestGroup):
"""Wait for Database Actions Instance Delete to complete."""
diff --git a/trove/tests/scenario/groups/guest_log_group.py b/trove/tests/scenario/groups/guest_log_group.py
index 1ba5e287..c57c0657 100644
--- a/trove/tests/scenario/groups/guest_log_group.py
+++ b/trove/tests/scenario/groups/guest_log_group.py
@@ -28,10 +28,8 @@ class GuestLogRunnerFactory(test_runners.RunnerFactory):
_runner_cls = 'GuestLogRunner'
-@test(depends_on_groups=[groups.INST_CREATE_WAIT],
- groups=[GROUP],
- runs_after_groups=[groups.USER_ACTION_INST_CREATE,
- groups.ROOT_ACTION_INST_CREATE])
+@test(depends_on_groups=[groups.DB_ACTION_INST_DELETE_WAIT],
+ groups=[GROUP, groups.INST_LOG])
class GuestLogGroup(TestGroup):
"""Test Guest Log functionality."""
diff --git a/trove/tests/scenario/groups/instance_actions_group.py b/trove/tests/scenario/groups/instance_actions_group.py
index 4fe5e5e9..c38e1556 100644
--- a/trove/tests/scenario/groups/instance_actions_group.py
+++ b/trove/tests/scenario/groups/instance_actions_group.py
@@ -29,10 +29,8 @@ class InstanceActionsRunnerFactory(test_runners.RunnerFactory):
_runner_cls = 'InstanceActionsRunner'
-@test(depends_on_groups=[groups.INST_CREATE_WAIT],
- groups=[GROUP, groups.INST_ACTIONS],
- runs_after_groups=[groups.MODULE_INST_CREATE,
- groups.CFGGRP_INST_CREATE])
+@test(depends_on_groups=[groups.INST_LOG],
+ groups=[GROUP, groups.INST_ACTIONS])
class InstanceActionsGroup(TestGroup):
"""Test Instance Actions functionality."""
@@ -78,14 +76,8 @@ class InstanceActionsGroup(TestGroup):
self.test_runner.run_remove_test_data()
-@test(depends_on_groups=[groups.INST_CREATE_WAIT],
- groups=[GROUP, groups.INST_ACTIONS_RESIZE],
- runs_after_groups=[groups.INST_ACTIONS,
- groups.INST_UPGRADE,
- groups.MODULE_INST_CREATE_WAIT,
- groups.CFGGRP_INST_CREATE_WAIT,
- groups.BACKUP_CREATE,
- groups.BACKUP_INC_CREATE])
+@test(depends_on_classes=[InstanceActionsGroup],
+ groups=[GROUP, groups.INST_ACTIONS_RESIZE])
class InstanceActionsResizeGroup(TestGroup):
"""Test Instance Actions Resize functionality."""
@@ -109,11 +101,8 @@ class InstanceActionsResizeGroup(TestGroup):
self.test_runner.run_instance_resize_flavor()
-@test(depends_on_groups=[groups.INST_ACTIONS_RESIZE],
- groups=[GROUP, groups.INST_ACTIONS_RESIZE_WAIT],
- runs_after_groups=[groups.BACKUP_INST_CREATE,
- groups.BACKUP_INC_INST_CREATE,
- groups.DB_ACTION_INST_CREATE])
+@test(depends_on_classes=[InstanceActionsResizeGroup],
+ groups=[GROUP, groups.INST_ACTIONS_RESIZE_WAIT])
class InstanceActionsResizeWaitGroup(TestGroup):
"""Test that Instance Actions Resize Completes."""
diff --git a/trove/tests/scenario/groups/instance_create_group.py b/trove/tests/scenario/groups/instance_create_group.py
index 83a1f16b..32664ed5 100644
--- a/trove/tests/scenario/groups/instance_create_group.py
+++ b/trove/tests/scenario/groups/instance_create_group.py
@@ -15,7 +15,6 @@
from proboscis import test
-from trove.tests import PRE_INSTANCES
from trove.tests.scenario import groups
from trove.tests.scenario.groups.test_group import TestGroup
from trove.tests.scenario.runners import test_runners
@@ -30,9 +29,7 @@ class InstanceCreateRunnerFactory(test_runners.RunnerFactory):
_runner_cls = 'InstanceCreateRunner'
-@test(depends_on_groups=["services.initialize"],
- runs_after_groups=[PRE_INSTANCES],
- groups=[GROUP, groups.INST_CREATE])
+@test(groups=[GROUP, groups.INST_CREATE])
class InstanceCreateGroup(TestGroup):
"""Test Instance Create functionality."""
@@ -46,7 +43,7 @@ class InstanceCreateGroup(TestGroup):
self.test_runner.run_empty_instance_create()
-@test(depends_on_groups=[groups.INST_CREATE],
+@test(depends_on_classes=[InstanceCreateGroup],
groups=[GROUP, groups.INST_INIT_CREATE])
class InstanceInitCreateGroup(TestGroup):
"""Test Instance Init Create functionality."""
@@ -66,10 +63,8 @@ class InstanceInitCreateGroup(TestGroup):
self.test_runner.run_initialized_instance_create()
-@test(depends_on_groups=[groups.INST_CREATE],
- groups=[GROUP, groups.INST_CREATE_WAIT],
- runs_after_groups=[groups.MODULE_CREATE, groups.CFGGRP_CREATE,
- groups.INST_ERROR_DELETE])
+@test(depends_on_classes=[InstanceCreateGroup],
+ groups=[GROUP, groups.INST_CREATE])
class InstanceCreateWaitGroup(TestGroup):
"""Test that Instance Create Completes."""
@@ -83,9 +78,8 @@ class InstanceCreateWaitGroup(TestGroup):
self.test_runner.run_wait_for_instance()
-@test(depends_on_groups=[groups.INST_INIT_CREATE],
- groups=[GROUP, groups.INST_INIT_CREATE_WAIT],
- runs_after_groups=[groups.INST_CREATE_WAIT])
+@test(depends_on_classes=[InstanceCreateWaitGroup],
+ groups=[GROUP, groups.INST_INIT_CREATE_WAIT])
class InstanceInitCreateWaitGroup(TestGroup):
"""Test that Instance Init Create Completes."""
@@ -109,7 +103,7 @@ class InstanceInitCreateWaitGroup(TestGroup):
self.test_runner.run_validate_initialized_instance()
-@test(depends_on_groups=[groups.INST_INIT_CREATE_WAIT],
+@test(depends_on_classes=[InstanceInitCreateWaitGroup],
groups=[GROUP, groups.INST_INIT_DELETE])
class InstanceInitDeleteGroup(TestGroup):
"""Test Initialized Instance Delete functionality."""
@@ -124,8 +118,7 @@ class InstanceInitDeleteGroup(TestGroup):
self.test_runner.run_initialized_instance_delete()
-@test(depends_on_groups=[groups.INST_INIT_DELETE],
- runs_after_groups=[groups.INST_ERROR_DELETE],
+@test(depends_on_classes=[InstanceInitDeleteGroup],
groups=[GROUP, groups.INST_INIT_DELETE_WAIT])
class InstanceInitDeleteWaitGroup(TestGroup):
"""Test that Initialized Instance Delete Completes."""
diff --git a/trove/tests/scenario/groups/instance_delete_group.py b/trove/tests/scenario/groups/instance_delete_group.py
index 40af6e31..615507db 100644
--- a/trove/tests/scenario/groups/instance_delete_group.py
+++ b/trove/tests/scenario/groups/instance_delete_group.py
@@ -29,22 +29,10 @@ class InstanceDeleteRunnerFactory(test_runners.RunnerFactory):
_runner_cls = 'InstanceDeleteRunner'
-@test(depends_on_groups=[groups.INST_CREATE_WAIT],
+@test(depends_on_groups=[groups.INST_CREATE],
groups=[GROUP, groups.INST_DELETE],
- runs_after_groups=[groups.INST_INIT_DELETE,
- groups.INST_ACTIONS,
- groups.INST_UPGRADE,
- groups.INST_ACTIONS_RESIZE_WAIT,
- groups.BACKUP_INST_DELETE,
- groups.BACKUP_INC_INST_DELETE,
- groups.CFGGRP_INST_DELETE,
- groups.DB_ACTION_DELETE,
- groups.DB_ACTION_INST_DELETE,
- groups.MODULE_INST_DELETE,
- groups.REPL_INST_DELETE_WAIT,
- groups.ROOT_ACTION_INST_DELETE,
- groups.USER_ACTION_DELETE,
- groups.USER_ACTION_INST_DELETE])
+ runs_after_groups=[groups.USER_ACTION_INST_DELETE_WAIT,
+ groups.REPL_INST_DELETE_WAIT])
class InstanceDeleteGroup(TestGroup):
"""Test Instance Delete functionality."""
@@ -58,16 +46,8 @@ class InstanceDeleteGroup(TestGroup):
self.test_runner.run_instance_delete()
-@test(depends_on_groups=[groups.INST_DELETE],
- groups=[GROUP, groups.INST_DELETE_WAIT],
- runs_after_groups=[groups.BACKUP_INST_DELETE_WAIT,
- groups.BACKUP_INC_INST_DELETE_WAIT,
- groups.CFGGRP_INST_DELETE_WAIT,
- groups.DB_ACTION_INST_DELETE_WAIT,
- groups.MODULE_INST_DELETE_WAIT,
- groups.REPL_INST_DELETE_WAIT,
- groups.ROOT_ACTION_INST_DELETE_WAIT,
- groups.USER_ACTION_INST_DELETE_WAIT])
+@test(depends_on_classes=[InstanceDeleteGroup],
+ groups=[GROUP, groups.INST_DELETE_WAIT])
class InstanceDeleteWaitGroup(TestGroup):
"""Test that Instance Delete Completes."""
diff --git a/trove/tests/scenario/groups/instance_error_create_group.py b/trove/tests/scenario/groups/instance_error_create_group.py
index 713b2cb9..ea1e873d 100644
--- a/trove/tests/scenario/groups/instance_error_create_group.py
+++ b/trove/tests/scenario/groups/instance_error_create_group.py
@@ -15,7 +15,6 @@
from proboscis import test
-from trove.tests import PRE_INSTANCES
from trove.tests.scenario import groups
from trove.tests.scenario.groups.test_group import TestGroup
from trove.tests.scenario.runners import test_runners
@@ -30,8 +29,7 @@ class InstanceErrorCreateRunnerFactory(test_runners.RunnerFactory):
_runner_cls = 'InstanceErrorCreateRunner'
-@test(depends_on_groups=["services.initialize"],
- runs_after_groups=[PRE_INSTANCES, groups.INST_CREATE],
+@test(depends_on_groups=[groups.INST_CREATE],
groups=[GROUP, groups.INST_ERROR_CREATE])
class InstanceErrorCreateGroup(TestGroup):
"""Test Instance Error Create functionality."""
@@ -51,8 +49,7 @@ class InstanceErrorCreateGroup(TestGroup):
self.test_runner.run_create_error2_instance()
-@test(depends_on_groups=[groups.INST_ERROR_CREATE],
- runs_after_groups=[groups.MODULE_CREATE, groups.CFGGRP_CREATE],
+@test(depends_on_classes=[InstanceErrorCreateGroup],
groups=[GROUP, groups.INST_ERROR_CREATE_WAIT])
class InstanceErrorCreateWaitGroup(TestGroup):
"""Test that Instance Error Create Completes."""
@@ -78,7 +75,7 @@ class InstanceErrorCreateWaitGroup(TestGroup):
self.test_runner.run_validate_error2_instance()
-@test(depends_on_groups=[groups.INST_ERROR_CREATE_WAIT],
+@test(depends_on_classes=[InstanceErrorCreateWaitGroup],
groups=[GROUP, groups.INST_ERROR_DELETE])
class InstanceErrorDeleteGroup(TestGroup):
"""Test Instance Error Delete functionality."""
@@ -93,8 +90,7 @@ class InstanceErrorDeleteGroup(TestGroup):
self.test_runner.run_delete_error_instances()
-@test(depends_on_groups=[groups.INST_ERROR_DELETE],
- runs_after_groups=[groups.MODULE_INST_CREATE],
+@test(depends_on_classes=[InstanceErrorDeleteGroup],
groups=[GROUP, groups.INST_ERROR_DELETE_WAIT])
class InstanceErrorDeleteWaitGroup(TestGroup):
"""Test that Instance Error Delete Completes."""
diff --git a/trove/tests/scenario/groups/instance_force_delete_group.py b/trove/tests/scenario/groups/instance_force_delete_group.py
index fef58d18..c0775567 100644
--- a/trove/tests/scenario/groups/instance_force_delete_group.py
+++ b/trove/tests/scenario/groups/instance_force_delete_group.py
@@ -15,7 +15,6 @@
from proboscis import test
-from trove.tests import PRE_INSTANCES
from trove.tests.scenario import groups
from trove.tests.scenario.groups.test_group import TestGroup
from trove.tests.scenario.runners import test_runners
@@ -30,8 +29,7 @@ class InstanceForceDeleteRunnerFactory(test_runners.RunnerFactory):
_runner_cls = 'InstanceForceDeleteRunner'
-@test(depends_on_groups=["services.initialize"],
- runs_after_groups=[PRE_INSTANCES, groups.INST_ERROR_CREATE],
+@test(depends_on_groups=[groups.INST_ERROR_DELETE_WAIT],
groups=[GROUP, groups.INST_FORCE_DELETE])
class InstanceForceDeleteGroup(TestGroup):
"""Test Instance Force Delete functionality."""
@@ -51,8 +49,7 @@ class InstanceForceDeleteGroup(TestGroup):
self.test_runner.run_delete_build_instance()
-@test(depends_on_groups=[groups.INST_FORCE_DELETE],
- runs_after_groups=[groups.MODULE_INST_CREATE],
+@test(depends_on_classes=[InstanceForceDeleteGroup],
groups=[GROUP, groups.INST_FORCE_DELETE_WAIT])
class InstanceForceDeleteWaitGroup(TestGroup):
"""Make sure the Force Delete instance goes away."""
diff --git a/trove/tests/scenario/groups/negative_cluster_actions_group.py b/trove/tests/scenario/groups/negative_cluster_actions_group.py
deleted file mode 100644
index c89e1792..00000000
--- a/trove/tests/scenario/groups/negative_cluster_actions_group.py
+++ /dev/null
@@ -1,46 +0,0 @@
-# Copyright 2015 Tesora Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from proboscis import test
-
-from trove.tests.scenario.groups.test_group import TestGroup
-from trove.tests.scenario.runners import test_runners
-
-
-GROUP = "scenario.negative_cluster_actions_group"
-
-
-class NegativeClusterActionsRunnerFactory(test_runners.RunnerFactory):
-
- _runner_ns = 'negative_cluster_actions_runners'
- _runner_cls = 'NegativeClusterActionsRunner'
-
-
-@test(groups=[GROUP])
-class NegativeClusterActionsGroup(TestGroup):
-
- def __init__(self):
- super(NegativeClusterActionsGroup, self).__init__(
- NegativeClusterActionsRunnerFactory.instance())
-
- @test
- def create_constrained_size_cluster(self):
- """Ensure creating a cluster with wrong number of nodes fails."""
- self.test_runner.run_create_constrained_size_cluster()
-
- @test
- def create_heterogeneous_cluster(self):
- """Ensure creating a cluster with unequal nodes fails."""
- self.test_runner.run_create_heterogeneous_cluster()
diff --git a/trove/tests/scenario/groups/replication_group.py b/trove/tests/scenario/groups/replication_group.py
index 904b052a..6f44ad93 100644
--- a/trove/tests/scenario/groups/replication_group.py
+++ b/trove/tests/scenario/groups/replication_group.py
@@ -35,15 +35,8 @@ class BackupRunnerFactory(test_runners.RunnerFactory):
_runner_cls = 'BackupRunner'
-@test(depends_on_groups=[groups.INST_CREATE_WAIT],
- groups=[GROUP, groups.REPL_INST_CREATE],
- runs_after_groups=[groups.MODULE_INST_DELETE,
- groups.CFGGRP_INST_DELETE,
- groups.INST_ACTIONS_RESIZE_WAIT,
- groups.DB_ACTION_INST_DELETE,
- groups.USER_ACTION_DELETE,
- groups.USER_ACTION_INST_DELETE,
- groups.ROOT_ACTION_INST_DELETE])
+@test(depends_on_groups=[groups.INST_CREATE],
+ groups=[GROUP, groups.REPL_INST_CREATE])
class ReplicationInstCreateGroup(TestGroup):
"""Test Replication Instance Create functionality."""
@@ -72,9 +65,8 @@ class ReplicationInstCreateGroup(TestGroup):
self.test_runner.run_create_single_replica()
-@test(depends_on_groups=[groups.REPL_INST_CREATE],
- groups=[GROUP, groups.REPL_INST_CREATE_WAIT],
- runs_after_groups=[groups.INST_INIT_DELETE_WAIT])
+@test(depends_on_classes=[ReplicationInstCreateGroup],
+ groups=[GROUP, groups.REPL_INST_CREATE_WAIT])
class ReplicationInstCreateWaitGroup(TestGroup):
"""Wait for Replication Instance Create to complete."""
@@ -118,7 +110,7 @@ class ReplicationInstCreateWaitGroup(TestGroup):
self.test_runner.run_verify_replica_data_after_single()
-@test(depends_on_groups=[groups.REPL_INST_CREATE_WAIT],
+@test(depends_on_classes=[ReplicationInstCreateWaitGroup],
groups=[GROUP, groups.REPL_INST_MULTI_CREATE])
class ReplicationInstMultiCreateGroup(TestGroup):
"""Test Replication Instance Multi-Create functionality."""
@@ -146,10 +138,8 @@ class ReplicationInstMultiCreateGroup(TestGroup):
self.backup_runner.run_check_has_incremental()
-@test(depends_on_groups=[groups.REPL_INST_CREATE_WAIT],
- groups=[GROUP, groups.REPL_INST_DELETE_NON_AFFINITY_WAIT],
- runs_after_groups=[groups.REPL_INST_MULTI_CREATE,
- groups.USER_ACTION_DELETE])
+@test(depends_on_classes=[ReplicationInstMultiCreateGroup],
+ groups=[GROUP, groups.REPL_INST_DELETE_NON_AFFINITY_WAIT])
class ReplicationInstDeleteNonAffReplWaitGroup(TestGroup):
"""Wait for Replication Instance Non-Affinity repl to be gone."""
@@ -168,8 +158,7 @@ class ReplicationInstDeleteNonAffReplWaitGroup(TestGroup):
self.test_runner.run_delete_non_affinity_master()
-@test(depends_on_groups=[groups.REPL_INST_DELETE_NON_AFFINITY_WAIT,
- groups.REPL_INST_MULTI_CREATE],
+@test(depends_on_classes=[ReplicationInstDeleteNonAffReplWaitGroup],
groups=[GROUP, groups.REPL_INST_MULTI_CREATE_WAIT])
class ReplicationInstMultiCreateWaitGroup(TestGroup):
"""Wait for Replication Instance Multi-Create to complete."""
@@ -241,7 +230,7 @@ class ReplicationInstMultiCreateWaitGroup(TestGroup):
self.test_runner.run_delete_valid_master()
-@test(depends_on_groups=[groups.REPL_INST_MULTI_CREATE_WAIT],
+@test(depends_on_classes=[ReplicationInstMultiCreateWaitGroup],
groups=[GROUP, groups.REPL_INST_MULTI_PROMOTE])
class ReplicationInstMultiPromoteGroup(TestGroup):
"""Test Replication Instance Multi-Promote functionality."""
@@ -299,8 +288,7 @@ class ReplicationInstMultiPromoteGroup(TestGroup):
self.test_runner.run_verify_final_data_replicated()
-@test(depends_on_groups=[groups.REPL_INST_MULTI_CREATE_WAIT],
- runs_after_groups=[groups.REPL_INST_MULTI_PROMOTE],
+@test(depends_on_classes=[ReplicationInstMultiPromoteGroup],
groups=[GROUP, groups.REPL_INST_DELETE])
class ReplicationInstDeleteGroup(TestGroup):
"""Test Replication Instance Delete functionality."""
@@ -330,7 +318,7 @@ class ReplicationInstDeleteGroup(TestGroup):
self.test_runner.run_delete_all_replicas()
-@test(depends_on_groups=[groups.REPL_INST_DELETE],
+@test(depends_on_classes=[ReplicationInstDeleteGroup],
groups=[GROUP, groups.REPL_INST_DELETE_WAIT])
class ReplicationInstDeleteWaitGroup(TestGroup):
"""Wait for Replication Instance Delete to complete."""
diff --git a/trove/tests/scenario/groups/root_actions_group.py b/trove/tests/scenario/groups/root_actions_group.py
index 8f74207a..5e0d3011 100644
--- a/trove/tests/scenario/groups/root_actions_group.py
+++ b/trove/tests/scenario/groups/root_actions_group.py
@@ -16,7 +16,6 @@
from proboscis import test
from trove.tests.scenario import groups
-from trove.tests.scenario.groups import guest_log_group
from trove.tests.scenario.groups.test_group import TestGroup
from trove.tests.scenario.runners import test_runners
@@ -42,7 +41,7 @@ class BackupRunnerFactory2(test_runners.RunnerFactory):
_runner_cls = 'BackupRunner'
-@test(depends_on_groups=[groups.INST_CREATE_WAIT],
+@test(depends_on_groups=[groups.INST_FORCE_DELETE_WAIT],
groups=[GROUP, groups.ROOT_ACTION_ENABLE])
class RootActionsEnableGroup(TestGroup):
"""Test Root Actions Enable functionality."""
@@ -100,7 +99,7 @@ class RootActionsEnableGroup(TestGroup):
self.test_runner.run_check_root_enabled()
-@test(depends_on_groups=[groups.ROOT_ACTION_ENABLE],
+@test(depends_on_classes=[RootActionsEnableGroup],
groups=[GROUP, groups.ROOT_ACTION_DISABLE])
class RootActionsDisableGroup(TestGroup):
"""Test Root Actions Disable functionality."""
@@ -132,9 +131,8 @@ class RootActionsDisableGroup(TestGroup):
self.backup_runner2.run_backup_create_completed()
-@test(depends_on_groups=[groups.ROOT_ACTION_DISABLE],
- groups=[GROUP, groups.ROOT_ACTION_INST, groups.ROOT_ACTION_INST_CREATE],
- runs_after_groups=[groups.INST_ACTIONS_RESIZE_WAIT])
+@test(depends_on_classes=[RootActionsDisableGroup],
+ groups=[GROUP, groups.ROOT_ACTION_INST, groups.ROOT_ACTION_INST_CREATE])
class RootActionsInstCreateGroup(TestGroup):
"""Test Root Actions Instance Create functionality."""
@@ -156,10 +154,9 @@ class RootActionsInstCreateGroup(TestGroup):
self.backup_runner2.run_restore_from_backup(suffix='_root_disable')
-@test(depends_on_groups=[groups.ROOT_ACTION_INST_CREATE],
+@test(depends_on_classes=[RootActionsInstCreateGroup],
groups=[GROUP, groups.ROOT_ACTION_INST,
- groups.ROOT_ACTION_INST_CREATE_WAIT],
- runs_after_groups=[guest_log_group.GROUP])
+ groups.ROOT_ACTION_INST_CREATE_WAIT])
class RootActionsInstCreateWaitGroup(TestGroup):
"""Wait for Root Actions Instance Create to complete."""
@@ -197,7 +194,7 @@ class RootActionsInstCreateWaitGroup(TestGroup):
instance_id, root_creds)
-@test(depends_on_groups=[groups.ROOT_ACTION_INST_CREATE_WAIT],
+@test(depends_on_classes=[RootActionsInstCreateWaitGroup],
groups=[GROUP, groups.ROOT_ACTION_INST, groups.ROOT_ACTION_INST_DELETE])
class RootActionsInstDeleteGroup(TestGroup):
"""Test Root Actions Instance Delete functionality."""
@@ -231,10 +228,9 @@ class RootActionsInstDeleteGroup(TestGroup):
self.backup_runner2.run_delete_backup()
-@test(depends_on_groups=[groups.ROOT_ACTION_INST_DELETE],
+@test(depends_on_classes=[RootActionsInstDeleteGroup],
groups=[GROUP, groups.ROOT_ACTION_INST,
- groups.ROOT_ACTION_INST_DELETE_WAIT],
- runs_after_groups=[groups.INST_DELETE])
+ groups.ROOT_ACTION_INST_DELETE_WAIT])
class RootActionsInstDeleteWaitGroup(TestGroup):
"""Wait for Root Actions Instance Delete to complete."""
diff --git a/trove/tests/scenario/groups/user_actions_group.py b/trove/tests/scenario/groups/user_actions_group.py
index b3ae309d..d4690f90 100644
--- a/trove/tests/scenario/groups/user_actions_group.py
+++ b/trove/tests/scenario/groups/user_actions_group.py
@@ -16,7 +16,6 @@
from proboscis import test
from trove.tests.scenario import groups
-from trove.tests.scenario.groups import guest_log_group
from trove.tests.scenario.groups.test_group import TestGroup
from trove.tests.scenario.runners import test_runners
@@ -42,7 +41,7 @@ class DatabaseActionsRunnerFactory(test_runners.RunnerFactory):
_runner_cls = 'DatabaseActionsRunner'
-@test(depends_on_groups=[groups.INST_CREATE_WAIT],
+@test(depends_on_groups=[groups.ROOT_ACTION_INST_DELETE_WAIT],
groups=[GROUP, groups.USER_ACTION_CREATE])
class UserActionsCreateGroup(TestGroup):
"""Test User Actions Create functionality."""
@@ -171,7 +170,7 @@ class UserActionsCreateGroup(TestGroup):
self.test_runner.run_system_user_attribute_update()
-@test(depends_on_groups=[groups.USER_ACTION_CREATE],
+@test(depends_on_classes=[UserActionsCreateGroup],
groups=[GROUP, groups.USER_ACTION_DELETE])
class UserActionsDeleteGroup(TestGroup):
"""Test User Actions Delete functionality."""
@@ -198,7 +197,7 @@ class UserActionsDeleteGroup(TestGroup):
@test(groups=[GROUP, groups.USER_ACTION_INST, groups.USER_ACTION_INST_CREATE],
- runs_after_groups=[groups.INST_ACTIONS_RESIZE_WAIT])
+ depends_on_classes=[UserActionsDeleteGroup])
class UserActionsInstCreateGroup(TestGroup):
"""Test User Actions Instance Create functionality."""
@@ -215,10 +214,9 @@ class UserActionsInstCreateGroup(TestGroup):
create_helper_user=False, name_suffix='_user')
-@test(depends_on_groups=[groups.USER_ACTION_INST_CREATE],
+@test(depends_on_classes=[UserActionsInstCreateGroup],
groups=[GROUP, groups.USER_ACTION_INST,
- groups.USER_ACTION_INST_CREATE_WAIT],
- runs_after_groups=[guest_log_group.GROUP])
+ groups.USER_ACTION_INST_CREATE_WAIT])
class UserActionsInstCreateWaitGroup(TestGroup):
"""Wait for User Actions Instance Create to complete."""
@@ -238,7 +236,7 @@ class UserActionsInstCreateWaitGroup(TestGroup):
self.instance_create_runner.run_validate_initialized_instance()
-@test(depends_on_groups=[groups.USER_ACTION_INST_CREATE_WAIT],
+@test(depends_on_classes=[UserActionsInstCreateWaitGroup],
groups=[GROUP, groups.USER_ACTION_INST, groups.USER_ACTION_INST_DELETE])
class UserActionsInstDeleteGroup(TestGroup):
"""Test User Actions Instance Delete functionality."""
@@ -254,10 +252,9 @@ class UserActionsInstDeleteGroup(TestGroup):
self.instance_create_runner.run_initialized_instance_delete()
-@test(depends_on_groups=[groups.USER_ACTION_INST_DELETE],
+@test(depends_on_classes=[UserActionsInstDeleteGroup],
groups=[GROUP, groups.USER_ACTION_INST,
- groups.USER_ACTION_INST_DELETE_WAIT],
- runs_after_groups=[groups.INST_DELETE])
+ groups.USER_ACTION_INST_DELETE_WAIT])
class UserActionsInstDeleteWaitGroup(TestGroup):
"""Wait for User Actions Instance Delete to complete."""
diff --git a/trove/tests/scenario/runners/backup_runners.py b/trove/tests/scenario/runners/backup_runners.py
index 99dc2df9..c3810411 100644
--- a/trove/tests/scenario/runners/backup_runners.py
+++ b/trove/tests/scenario/runners/backup_runners.py
@@ -225,11 +225,6 @@ class BackupRunner(TestRunner):
self.assert_backup_list(
backup_list, self.backup_count_for_ds_prior_to_create + 1)
- def run_backup_list_filter_different_datastore(self):
- backup_list = self.auth_client.backups.list(
- datastore='Test_Datastore_1')
- self.assert_backup_list(backup_list, 0)
-
def run_backup_list_filter_datastore_not_found(
self, expected_exception=exceptions.NotFound,
expected_http_code=404):
diff --git a/trove/tests/unittests/backup/test_backupagent.py b/trove/tests/unittests/backup/test_backupagent.py
index db8bf7bf..3802caca 100644
--- a/trove/tests/unittests/backup/test_backupagent.py
+++ b/trove/tests/unittests/backup/test_backupagent.py
@@ -231,9 +231,10 @@ class BackupAgentTest(trove_testtools.TestCase):
self.assertIsNotNone(inno_backup_ex.cmd)
str_innobackup_cmd = ('sudo innobackupex'
' --stream=xbstream'
- ' %(extra_opts)s '
+ ' %(extra_opts)s'
' --user=os_admin --password=123'
- ' --host=127.0.0.1'
+ ' --host=localhost'
+ ' --socket=/var/run/mysqld/mysqld.sock'
' /var/lib/mysql/data 2>/tmp/innobackupex.log'
' | gzip |'
' openssl enc -aes-256-cbc -salt '
diff --git a/trove/tests/unittests/guestagent/test_backups.py b/trove/tests/unittests/guestagent/test_backups.py
index d354cbd4..88daf026 100644
--- a/trove/tests/unittests/guestagent/test_backups.py
+++ b/trove/tests/unittests/guestagent/test_backups.py
@@ -78,15 +78,17 @@ ZIP = "gzip"
UNZIP = "gzip -d -c"
ENCRYPT = "openssl enc -aes-256-cbc -salt -pass pass:default_aes_cbc_key"
DECRYPT = "openssl enc -d -aes-256-cbc -salt -pass pass:default_aes_cbc_key"
-XTRA_BACKUP_RAW = ("sudo innobackupex --stream=xbstream %(extra_opts)s "
- " --user=os_admin --password=password --host=127.0.0.1"
+XTRA_BACKUP_RAW = ("sudo innobackupex --stream=xbstream %(extra_opts)s"
+ " --user=os_admin --password=password --host=localhost"
+ " --socket=/var/run/mysqld/mysqld.sock"
" /var/lib/mysql/data 2>/tmp/innobackupex.log")
XTRA_BACKUP = XTRA_BACKUP_RAW % {'extra_opts': ''}
XTRA_BACKUP_EXTRA_OPTS = XTRA_BACKUP_RAW % {'extra_opts': '--no-lock'}
XTRA_BACKUP_INCR = ('sudo innobackupex --stream=xbstream'
' --incremental --incremental-lsn=%(lsn)s'
- ' %(extra_opts)s '
- ' --user=os_admin --password=password --host=127.0.0.1'
+ ' %(extra_opts)s'
+ ' --user=os_admin --password=password --host=localhost'
+ ' --socket=/var/run/mysqld/mysqld.sock'
' /var/lib/mysql/data'
' 2>/tmp/innobackupex.log')
SQLDUMP_BACKUP_RAW = ("mysqldump --all-databases %(extra_opts)s "
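The expected innobackupex command above switches from --host=127.0.0.1 to --host=localhost with an explicit --socket, and drops the trailing space after %(extra_opts)s. A standalone snippet (not part of the patch) that simply renders the template the same way the test fixtures do:

XTRA_BACKUP_RAW = ("sudo innobackupex --stream=xbstream %(extra_opts)s"
                   " --user=os_admin --password=password --host=localhost"
                   " --socket=/var/run/mysqld/mysqld.sock"
                   " /var/lib/mysql/data 2>/tmp/innobackupex.log")

# No extra options: %(extra_opts)s renders as an empty string.
print(XTRA_BACKUP_RAW % {'extra_opts': ''})
# With options, e.g. the --no-lock variant used by the test above.
print(XTRA_BACKUP_RAW % {'extra_opts': '--no-lock'})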
diff --git a/trove/tests/unittests/guestagent/test_dbaas.py b/trove/tests/unittests/guestagent/test_dbaas.py
index 2d4b88b5..ee122740 100644
--- a/trove/tests/unittests/guestagent/test_dbaas.py
+++ b/trove/tests/unittests/guestagent/test_dbaas.py
@@ -1368,7 +1368,7 @@ class MySqlAppTest(trove_testtools.TestCase):
self.mySqlApp._save_authentication_properties("some_password")
write_file_mock.assert_called_once_with(
MySqlApp.get_client_auth_file(),
- {'client': {'host': '127.0.0.1',
+ {'client': {'host': 'localhost',
'password': 'some_password',
'user': mysql_common_service.ADMIN_USER_NAME}},
codec=MySqlApp.CFG_CODEC)
@@ -2225,9 +2225,8 @@ class MySqlAppStatusTest(trove_testtools.TestCase):
super(MySqlAppStatusTest, self).tearDown()
def test_get_actual_db_status(self):
-
mysql_common_service.utils.execute_with_timeout = \
- Mock(return_value=(None, None))
+ Mock(return_value=("111", None))
self.mySqlAppStatus = MySqlAppStatus.get()
status = self.mySqlAppStatus._get_actual_db_status()
@@ -2259,19 +2258,6 @@ class MySqlAppStatusTest(trove_testtools.TestCase):
self.assertEqual(rd_instance.ServiceStatuses.SHUTDOWN, status)
- @patch('trove.guestagent.datastore.mysql_common.service.LOG')
- def test_get_actual_db_status_error_blocked(self, *args):
-
- mysql_common_service.utils.execute_with_timeout = MagicMock(
- side_effect=[ProcessExecutionError(), ("some output", None)])
- mysql_common_service.load_mysqld_options = Mock()
- mysql_common_service.os.path.exists = Mock(return_value=True)
-
- self.mySqlAppStatus = MySqlAppStatus.get()
- status = self.mySqlAppStatus._get_actual_db_status()
-
- self.assertEqual(rd_instance.ServiceStatuses.BLOCKED, status)
-
class TestRedisApp(BaseAppTest.AppTestCase):
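In test_get_actual_db_status above, execute_with_timeout is now stubbed with a non-empty stdout ("111") instead of (None, None); the helper returns an (stdout, stderr) tuple, so the stub now supplies real output for the status probe to parse. A generic, hedged sketch of that mocking pattern (run_command and service_is_up are illustrative stand-ins, not Trove code):

from unittest import mock


def service_is_up(run_command):
    # Mirrors the execute_with_timeout convention: (stdout, stderr).
    stdout, _stderr = run_command('some', 'status', 'command')
    return bool(stdout and stdout.strip())


run_command = mock.Mock(return_value=("111", None))
assert service_is_up(run_command) is True
run_command.assert_called_once_with('some', 'status', 'command')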
diff --git a/trove/tests/unittests/taskmanager/test_models.py b/trove/tests/unittests/taskmanager/test_models.py
index 8d7b1915..92c9fa5a 100644
--- a/trove/tests/unittests/taskmanager/test_models.py
+++ b/trove/tests/unittests/taskmanager/test_models.py
@@ -858,19 +858,6 @@ class BuiltInstanceTasksTest(trove_testtools.TestCase):
self.instance_task.server.reboot.assert_any_call()
self.instance_task.set_datastore_status_to_paused.assert_any_call()
- @patch.object(utils, 'poll_until')
- @patch('trove.taskmanager.models.LOG')
- def test_reboot_datastore_not_ready(self, mock_logging, mock_poll):
- mock_poll.side_effect = PollTimeOut
- self.instance_task.server.reboot = Mock()
- self.instance_task.set_datastore_status_to_paused = Mock()
-
- self.instance_task.reboot()
-
- self.instance_task._guest.stop_db.assert_any_call()
- assert not self.instance_task.server.reboot.called
- assert not self.instance_task.set_datastore_status_to_paused.called
-
@patch.object(BaseInstance, 'update_db')
def test_detach_replica(self, mock_update_db):
with patch.object(self.instance_task, 'reset_task_status') as tr_mock:
diff --git a/trove/tests/util/__init__.py b/trove/tests/util/__init__.py
index 55edb986..3ddd6f90 100644
--- a/trove/tests/util/__init__.py
+++ b/trove/tests/util/__init__.py
@@ -39,6 +39,7 @@ from proboscis.asserts import fail
from proboscis import SkipTest
from six.moves.urllib.parse import unquote
from sqlalchemy import create_engine
+from sqlalchemy.sql.expression import text
import tenacity
from troveclient.compat import Dbaas
@@ -53,6 +54,7 @@ from trove.tests.util.users import Requirements
WHITE_BOX = test_config.white_box
+FLUSH = text("FLUSH PRIVILEGES;")
CONF = cfg.CONF
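The new FLUSH constant wraps the raw statement in SQLAlchemy's text() construct, which is how textual SQL is handed to an engine or connection for execution. A minimal hedged sketch (it uses an in-memory SQLite engine and SELECT 1 purely for illustration; FLUSH PRIVILEGES itself is MySQL-specific):

from sqlalchemy import create_engine
from sqlalchemy.sql.expression import text

engine = create_engine("sqlite://")
with engine.connect() as conn:
    # Execute a text() construct, the same way FLUSH would be issued
    # against a MySQL connection in the tests.
    conn.execute(text("SELECT 1"))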
diff --git a/trove/tests/util/mysql.py b/trove/tests/util/mysql.py
index 8d8c5de3..15f7acd4 100644
--- a/trove/tests/util/mysql.py
+++ b/trove/tests/util/mysql.py
@@ -18,7 +18,7 @@
import re
-from oslo_db.sqlalchemy import session
+from oslo_db.sqlalchemy import engines
import pexpect
from sqlalchemy.exc import OperationalError
try:
@@ -114,9 +114,8 @@ class SqlAlchemyConnection(object):
@staticmethod
def _init_engine(user, password, host):
- return session.EngineFacade(
- "mysql+pymysql://%s:%s@%s:3306" % (user, password, host)
- ).get_engine()
+ return engines.create_engine(
+ "mysql+pymysql://%s:%s@%s:3306" % (user, password, host))
class PexpectMySqlConnection(object):
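_init_engine above drops the older oslo_db session.EngineFacade wrapper in favor of engines.create_engine, which hands back a plain SQLAlchemy Engine. A hedged usage sketch (credentials are placeholders and a reachable MySQL server is assumed; this is not part of the patch):

from oslo_db.sqlalchemy import engines
from sqlalchemy.sql.expression import text

# engines.create_engine returns a regular SQLAlchemy Engine, so callers
# connect and run statements exactly as before.
engine = engines.create_engine(
    "mysql+pymysql://user:password@127.0.0.1:3306")
with engine.connect() as conn:
    conn.execute(text("SELECT 1"))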