author     Lee Yarwood <lyarwood@redhat.com>    2021-03-05 11:33:29 +0000
committer  Lee Yarwood <lyarwood@redhat.com>    2021-04-29 11:05:58 +0100
commit     91e53e4c2b90ea57aeac4ec522dd7c8c54961d09 (patch)
tree       6e5db651d83a63919158ee4933696f270bee6df4 /gate
parent     eba9d596daa91d8f702b719afb88cb89f2d5bb32 (diff)
download   nova-91e53e4c2b90ea57aeac4ec522dd7c8c54961d09.tar.gz
zuul: Replace grenade and nova-grenade-multinode with grenade-multinode
If2608406776e0d5a06b726e65b55881e70562d18 dropped the single node grenade job from the integrated-gate-compute template as it duplicates the existing grenade-multinode job. However, it did not remove the remaining single node grenade job still present in the Nova project.

This change replaces the dsvm-based nova-grenade-multinode job with the zuulv3-native grenade-multinode job. Various legacy playbooks and hook scripts are also removed as they are no longer used.

Note that this results in a loss of Ceph coverage that should be restored as soon as a zuulv3-native Ceph-based multinode job is available.

Change-Id: I02b2b851a74f24816d2f782a66d94de81ee527b0
Diffstat (limited to 'gate')
-rwxr-xr-x  gate/live_migration/hooks/ceph.sh      | 208
-rwxr-xr-x  gate/live_migration/hooks/nfs.sh       |  50
-rwxr-xr-x  gate/live_migration/hooks/run_tests.sh |  72
-rwxr-xr-x  gate/live_migration/hooks/utils.sh     |  11
4 files changed, 0 insertions, 341 deletions
diff --git a/gate/live_migration/hooks/ceph.sh b/gate/live_migration/hooks/ceph.sh
deleted file mode 100755
index 3d596ff0b3..0000000000
--- a/gate/live_migration/hooks/ceph.sh
+++ /dev/null
@@ -1,208 +0,0 @@
-#!/bin/bash
-
-function prepare_ceph {
- git clone https://opendev.org/openstack/devstack-plugin-ceph /tmp/devstack-plugin-ceph
- source /tmp/devstack-plugin-ceph/devstack/settings
- source /tmp/devstack-plugin-ceph/devstack/lib/ceph
- install_ceph
- configure_ceph
- #install ceph-common package and additional python3 ceph libraries on compute nodes
- $ANSIBLE subnodes --become -f 5 -i "$WORKSPACE/inventory" -m raw -a "executable=/bin/bash
- USE_PYTHON3=${USE_PYTHON3:-True}
- source $BASE/new/devstack/functions
- source $BASE/new/devstack/functions-common
- git clone https://opendev.org/openstack/devstack-plugin-ceph /tmp/devstack-plugin-ceph
- source /tmp/devstack-plugin-ceph/devstack/lib/ceph
- install_ceph_remote
- "
-
- #copy ceph admin keyring to compute nodes
- sudo cp /etc/ceph/ceph.client.admin.keyring /tmp/ceph.client.admin.keyring
- sudo chown ${STACK_USER}:${STACK_USER} /tmp/ceph.client.admin.keyring
- sudo chmod 644 /tmp/ceph.client.admin.keyring
- $ANSIBLE subnodes --become -f 5 -i "$WORKSPACE/inventory" -m copy -a "src=/tmp/ceph.client.admin.keyring dest=/etc/ceph/ceph.client.admin.keyring owner=ceph group=ceph"
- sudo rm -f /tmp/ceph.client.admin.keyring
- #copy ceph.conf to compute nodes
- $ANSIBLE subnodes --become -f 5 -i "$WORKSPACE/inventory" -m copy -a "src=/etc/ceph/ceph.conf dest=/etc/ceph/ceph.conf owner=root group=root"
-
- start_ceph
-}
-
-function _ceph_configure_glance {
- GLANCE_API_CONF=${GLANCE_API_CONF:-/etc/glance/glance-api.conf}
- sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${GLANCE_CEPH_POOL} ${GLANCE_CEPH_POOL_PG} ${GLANCE_CEPH_POOL_PGP}
- sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${GLANCE_CEPH_USER} \
- mon "allow r" \
- osd "allow class-read object_prefix rbd_children, allow rwx pool=${GLANCE_CEPH_POOL}" | \
- sudo tee ${CEPH_CONF_DIR}/ceph.client.${GLANCE_CEPH_USER}.keyring
- sudo chown ${STACK_USER}:$(id -g -n $whoami) ${CEPH_CONF_DIR}/ceph.client.${GLANCE_CEPH_USER}.keyring
-
- $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=${GLANCE_API_CONF} section=DEFAULT option=show_image_direct_url value=True"
- $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=${GLANCE_API_CONF} section=glance_store option=default_store value=rbd"
- $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=${GLANCE_API_CONF} section=glance_store option=stores value='file, http, rbd'"
- $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=${GLANCE_API_CONF} section=glance_store option=rbd_store_ceph_conf value=$CEPH_CONF_FILE"
- $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=${GLANCE_API_CONF} section=glance_store option=rbd_store_user value=$GLANCE_CEPH_USER"
- $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=${GLANCE_API_CONF} section=glance_store option=rbd_store_pool value=$GLANCE_CEPH_POOL"
-
- sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${GLANCE_CEPH_POOL} size ${CEPH_REPLICAS}
- if [[ $CEPH_REPLICAS -ne 1 ]]; then
- sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${GLANCE_CEPH_POOL} crush_ruleset ${RULE_ID}
- fi
-
- #copy glance keyring to compute only node
- sudo cp /etc/ceph/ceph.client.glance.keyring /tmp/ceph.client.glance.keyring
- sudo chown $STACK_USER:$STACK_USER /tmp/ceph.client.glance.keyring
- $ANSIBLE subnodes --become -f 5 -i "$WORKSPACE/inventory" -m copy -a "src=/tmp/ceph.client.glance.keyring dest=/etc/ceph/ceph.client.glance.keyring"
- sudo rm -f /tmp/ceph.client.glance.keyring
-}
-
-function configure_and_start_glance {
- _ceph_configure_glance
- echo 'check processes before glance-api stop'
- $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m shell -a "ps aux | grep glance-api"
-
- # restart glance
- $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m shell -a "systemctl restart devstack@g-api"
-
- echo 'check processes after glance-api stop'
- $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m shell -a "ps aux | grep glance-api"
-}
-
-function _ceph_configure_nova {
- #setup ceph for nova, we don't reuse configure_ceph_nova - as we need to emulate case where cinder is not configured for ceph
- sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${NOVA_CEPH_POOL} ${NOVA_CEPH_POOL_PG} ${NOVA_CEPH_POOL_PGP}
- NOVA_CONF=${NOVA_CPU_CONF:-/etc/nova/nova.conf}
- $ANSIBLE all --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=${NOVA_CONF} section=libvirt option=rbd_user value=${CINDER_CEPH_USER}"
- $ANSIBLE all --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=${NOVA_CONF} section=libvirt option=rbd_secret_uuid value=${CINDER_CEPH_UUID}"
- $ANSIBLE all --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=${NOVA_CONF} section=libvirt option=inject_key value=false"
- $ANSIBLE all --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=${NOVA_CONF} section=libvirt option=inject_partition value=-2"
- $ANSIBLE all --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=${NOVA_CONF} section=libvirt option=disk_cachemodes value='network=writeback'"
- $ANSIBLE all --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=${NOVA_CONF} section=libvirt option=images_type value=rbd"
- $ANSIBLE all --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=${NOVA_CONF} section=libvirt option=images_rbd_pool value=${NOVA_CEPH_POOL}"
- $ANSIBLE all --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=${NOVA_CONF} section=libvirt option=images_rbd_ceph_conf value=${CEPH_CONF_FILE}"
-
- sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_CEPH_USER} \
- mon "allow r" \
- osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_CEPH_POOL}, allow rwx pool=${NOVA_CEPH_POOL},allow rwx pool=${GLANCE_CEPH_POOL}" | \
- sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring > /dev/null
- sudo chown ${STACK_USER}:$(id -g -n $whoami) ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring
-
- #copy cinder keyring to compute only node
- sudo cp /etc/ceph/ceph.client.cinder.keyring /tmp/ceph.client.cinder.keyring
- sudo chown stack:stack /tmp/ceph.client.cinder.keyring
- $ANSIBLE subnodes --become -f 5 -i "$WORKSPACE/inventory" -m copy -a "src=/tmp/ceph.client.cinder.keyring dest=/etc/ceph/ceph.client.cinder.keyring"
- sudo rm -f /tmp/ceph.client.cinder.keyring
-
- sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${NOVA_CEPH_POOL} size ${CEPH_REPLICAS}
- if [[ $CEPH_REPLICAS -ne 1 ]]; then
- sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${NOVA_CEPH_POOL} crush_ruleset ${RULE_ID}
- fi
-}
-
-function _wait_for_nova_compute_service_state {
- source $BASE/new/devstack/openrc admin admin
- local status=$1
- local attempt=1
- local max_attempts=24
- local attempt_sleep=5
- local computes_count=$(openstack compute service list | grep -c nova-compute)
- local computes_ready=$(openstack compute service list | grep nova-compute | grep $status | wc -l)
-
- echo "Waiting for $computes_count computes to report as $status"
- while [ "$computes_ready" -ne "$computes_count" ]; do
- if [ "$attempt" -eq "$max_attempts" ]; then
- echo "Failed waiting for computes to report as ${status}, ${computes_ready}/${computes_count} ${status} after ${max_attempts} attempts"
- exit 4
- fi
- echo "Waiting ${attempt_sleep} seconds for ${computes_count} computes to report as ${status}, ${computes_ready}/${computes_count} ${status} after ${attempt}/${max_attempts} attempts"
- sleep $attempt_sleep
- attempt=$((attempt+1))
- computes_ready=$(openstack compute service list | grep nova-compute | grep $status | wc -l)
- done
- echo "All computes are now reporting as ${status} after ${attempt} attempts"
-}
-
-function configure_and_start_nova {
-
- echo "Checking all n-cpu services"
- $ANSIBLE all --become -f 5 -i "$WORKSPACE/inventory" -m shell -a "pgrep -u stack -a nova-compute"
-
- # stop nova-compute
- echo "Stopping all n-cpu services"
- $ANSIBLE all --become -f 5 -i "$WORKSPACE/inventory" -m shell -a "systemctl stop devstack@n-cpu"
-
- # Wait for the service to be marked as down
- _wait_for_nova_compute_service_state "down"
-
- _ceph_configure_nova
-
- #import secret to libvirt
- _populate_libvirt_secret
-
- # start nova-compute
- echo "Starting all n-cpu services"
- $ANSIBLE all --become -f 5 -i "$WORKSPACE/inventory" -m shell -a "systemctl start devstack@n-cpu"
-
- echo "Checking all n-cpu services"
- # test that they are all running again
- $ANSIBLE all --become -f 5 -i "$WORKSPACE/inventory" -m shell -a "pgrep -u stack -a nova-compute"
-
- # Wait for the service to be marked as up
- _wait_for_nova_compute_service_state "up"
-}
-
-function _ceph_configure_cinder {
- sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_CEPH_POOL} ${CINDER_CEPH_POOL_PG} ${CINDER_CEPH_POOL_PGP}
- sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_CEPH_POOL} size ${CEPH_REPLICAS}
- if [[ $CEPH_REPLICAS -ne 1 ]]; then
- sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_CEPH_POOL} crush_ruleset ${RULE_ID}
- fi
-
- CINDER_CONF=${CINDER_CONF:-/etc/cinder/cinder.conf}
- $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=$CINDER_CONF section=ceph option=volume_backend_name value=ceph"
- $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=$CINDER_CONF section=ceph option=volume_driver value=cinder.volume.drivers.rbd.RBDDriver"
- $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=$CINDER_CONF section=ceph option=rbd_ceph_conf value=$CEPH_CONF_FILE"
- $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=$CINDER_CONF section=ceph option=rbd_pool value=$CINDER_CEPH_POOL"
- $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=$CINDER_CONF section=ceph option=rbd_user value=$CINDER_CEPH_USER"
- $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=$CINDER_CONF section=ceph option=rbd_uuid value=$CINDER_CEPH_UUID"
- $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=$CINDER_CONF section=ceph option=rbd_flatten_volume_from_snapshot value=False"
- $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=$CINDER_CONF section=ceph option=rbd_max_clone_depth value=5"
- $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=$CINDER_CONF section=DEFAULT option=default_volume_type value=ceph"
- $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=$CINDER_CONF section=DEFAULT option=enabled_backends value=ceph"
-
-}
-
-function configure_and_start_cinder {
- _ceph_configure_cinder
-
- # restart cinder
- $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m shell -a "systemctl restart devstack@c-vol"
-
- source $BASE/new/devstack/openrc
-
- export OS_USERNAME=admin
- export OS_PROJECT_NAME=admin
- lvm_type=$(cinder type-list | awk -F "|" 'NR==4{ print $2}')
- cinder type-delete $lvm_type
- openstack volume type create --os-volume-api-version 1 --property volume_backend_name="ceph" ceph
-}
-
-function _populate_libvirt_secret {
- cat > /tmp/secret.xml <<EOF
-<secret ephemeral='no' private='no'>
- <uuid>${CINDER_CEPH_UUID}</uuid>
- <usage type='ceph'>
- <name>client.${CINDER_CEPH_USER} secret</name>
- </usage>
-</secret>
-EOF
-
- $ANSIBLE subnodes --become -f 5 -i "$WORKSPACE/inventory" -m copy -a "src=/tmp/secret.xml dest=/tmp/secret.xml"
- $ANSIBLE all --become -f 5 -i "$WORKSPACE/inventory" -m shell -a "virsh secret-define --file /tmp/secret.xml"
- local secret=$(sudo ceph -c ${CEPH_CONF_FILE} auth get-key client.${CINDER_CEPH_USER})
- # TODO(tdurakov): remove this escaping as https://github.com/ansible/ansible/issues/13862 fixed
- secret=${secret//=/'\='}
- $ANSIBLE all --become -f 5 -i "$WORKSPACE/inventory" -m shell -a "virsh secret-set-value --secret ${CINDER_CEPH_UUID} --base64 $secret"
- $ANSIBLE all --become -f 5 -i "$WORKSPACE/inventory" -m file -a "path=/tmp/secret.xml state=absent"
-
-}
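For reference, the core of what the removed ceph.sh hook did on each compute node when importing the Ceph auth key into libvirt reduces to the single-node sketch below. It assumes the same variables the hook used (CINDER_CEPH_USER, CINDER_CEPH_UUID, CEPH_CONF_FILE) are already set and that ceph and virsh are available locally; it illustrates the technique rather than replacing the deleted script.

#!/bin/bash
# Single-node sketch of the _populate_libvirt_secret flow above.
# CINDER_CEPH_USER, CINDER_CEPH_UUID and CEPH_CONF_FILE are assumed to be
# set exactly as in the removed hook.
set -euo pipefail

cat > /tmp/secret.xml <<EOF
<secret ephemeral='no' private='no'>
  <uuid>${CINDER_CEPH_UUID}</uuid>
  <usage type='ceph'>
    <name>client.${CINDER_CEPH_USER} secret</name>
  </usage>
</secret>
EOF

# Register the secret with libvirt, then attach the Ceph key to it.
sudo virsh secret-define --file /tmp/secret.xml
secret=$(sudo ceph -c "${CEPH_CONF_FILE}" auth get-key "client.${CINDER_CEPH_USER}")
sudo virsh secret-set-value --secret "${CINDER_CEPH_UUID}" --base64 "${secret}"
rm -f /tmp/secret.xml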
diff --git a/gate/live_migration/hooks/nfs.sh b/gate/live_migration/hooks/nfs.sh
deleted file mode 100755
index acadb36d6c..0000000000
--- a/gate/live_migration/hooks/nfs.sh
+++ /dev/null
@@ -1,50 +0,0 @@
-#!/bin/bash
-
-function nfs_setup {
- if uses_debs; then
- module=apt
- elif is_fedora; then
- module=yum
- fi
- $ANSIBLE all --become -f 5 -i "$WORKSPACE/inventory" -m $module \
- -a "name=nfs-common state=present"
- $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m $module \
- -a "name=nfs-kernel-server state=present"
-
- $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=/etc/idmapd.conf section=Mapping option=Nobody-User value=nova"
-
- $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=/etc/idmapd.conf section=Mapping option=Nobody-Group value=nova"
-
- for SUBNODE in $SUBNODES ; do
- $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m lineinfile -a "dest=/etc/exports line='/opt/stack/data/nova/instances $SUBNODE(rw,fsid=0,insecure,no_subtree_check,async,no_root_squash)'"
- done
-
- $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m shell -a "exportfs -a"
- $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m service -a "name=nfs-kernel-server state=restarted"
- GetDistro
- if [[ ! ${DISTRO} =~ (xenial) ]]; then
- $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m service -a "name=idmapd state=restarted"
- fi
- $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m shell -a "iptables -A INPUT -p tcp --dport 111 -j ACCEPT"
- $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m shell -a "iptables -A INPUT -p udp --dport 111 -j ACCEPT"
- $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m shell -a "iptables -A INPUT -p tcp --dport 2049 -j ACCEPT"
- $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m shell -a "iptables -A INPUT -p udp --dport 2049 -j ACCEPT"
- $ANSIBLE subnodes --become -f 5 -i "$WORKSPACE/inventory" -m shell -a "mount -t nfs4 -o proto\=tcp,port\=2049 $primary_node:/ /opt/stack/data/nova/instances/"
-}
-
-function nfs_configure_tempest {
- $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=$BASE/new/tempest/etc/tempest.conf section=compute-feature-enabled option=block_migration_for_live_migration value=False"
-}
-
-function nfs_verify_setup {
- $ANSIBLE subnodes --become -f 5 -i "$WORKSPACE/inventory" -m file -a "path=/opt/stack/data/nova/instances/test_file state=touch"
- if [ ! -e '/opt/stack/data/nova/instances/test_file' ]; then
- die $LINENO "NFS configuration failure"
- fi
-}
-
-function nfs_teardown {
- #teardown nfs shared storage
- $ANSIBLE subnodes --become -f 5 -i "$WORKSPACE/inventory" -m shell -a "umount -t nfs4 /opt/stack/data/nova/instances/"
- $ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m service -a "name=nfs-kernel-server state=stopped"
-} \ No newline at end of file
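The removed nfs.sh hook exported /opt/stack/data/nova/instances from the primary node and mounted it on every subnode; a manual round-trip check of the same idea looks roughly like the following, assuming the export is already in place and primary_node is set as in the hook above.

# Hypothetical manual verification of the shared instances directory,
# mirroring nfs_setup/nfs_verify_setup above; run on a subnode.
sudo mount -t nfs4 -o proto=tcp,port=2049 "${primary_node}:/" /opt/stack/data/nova/instances/

# A file created under the mount must be visible from the primary node
# (and vice versa) if the share is working.
touch /opt/stack/data/nova/instances/test_file
ls -l /opt/stack/data/nova/instances/test_file || echo "NFS configuration failure"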
diff --git a/gate/live_migration/hooks/run_tests.sh b/gate/live_migration/hooks/run_tests.sh
deleted file mode 100755
index 8334df633d..0000000000
--- a/gate/live_migration/hooks/run_tests.sh
+++ /dev/null
@@ -1,72 +0,0 @@
-#!/bin/bash
-# The dedicated live migration CI job is responsible for testing different
-# environments based on the underlying storage used for ephemeral disks.
-# This hook allows environment reconfiguration logic to be injected into the CI job.
-# Base scenario for this would be:
-#
-# 1. test with all local storage (use default for volumes)
-# 2. test with NFS for root + ephemeral disks
-# 3. test with Ceph for root + ephemeral disks
-# 4. test with Ceph for volumes and root + ephemeral disk
-
-set -xe
-cd $BASE/new/tempest
-
-source $BASE/new/devstack/functions
-source $BASE/new/devstack/functions-common
-source $BASE/new/devstack/lib/nova
-source $WORKSPACE/devstack-gate/functions.sh
-source $BASE/new/nova/gate/live_migration/hooks/utils.sh
-source $BASE/new/nova/gate/live_migration/hooks/nfs.sh
-source $BASE/new/nova/gate/live_migration/hooks/ceph.sh
-primary_node=$(cat /etc/nodepool/primary_node_private)
-SUBNODES=$(cat /etc/nodepool/sub_nodes_private)
-SERVICE_HOST=$primary_node
-STACK_USER=${STACK_USER:-stack}
-
-echo '1. test with all local storage (use default for volumes)'
-echo 'NOTE: test_volume_backed_live_migration is skipped due to https://bugs.launchpad.net/nova/+bug/1524898'
-echo 'NOTE: test_live_block_migration_paused is skipped due to https://bugs.launchpad.net/nova/+bug/1901739'
-run_tempest "block migration test" "^.*test_live_migration(?!.*(test_volume_backed_live_migration|test_live_block_migration_paused))"
-
-# TODO(mriedem): Run $BASE/new/nova/gate/test_evacuate.sh for local storage
-
-#all tests below this line use shared storage, need to update tempest.conf
-echo 'disabling block_migration in tempest'
-$ANSIBLE primary --become -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=$BASE/new/tempest/etc/tempest.conf section=compute-feature-enabled option=block_migration_for_live_migration value=False"
-
-echo '2. NFS testing is skipped due to setup failures with Ubuntu 16.04'
-#echo '2. test with NFS for root + ephemeral disks'
-
-#nfs_setup
-#nfs_configure_tempest
-#nfs_verify_setup
-#run_tempest "NFS shared storage test" "live_migration"
-#nfs_teardown
-
-# The nova-grenade-multinode job also runs resize and cold migration tests
-# so we check for a grenade-only variable.
-if [[ -n "$GRENADE_NEW_BRANCH" ]]; then
- echo '3. test cold migration and resize'
- run_tempest "cold migration and resize test" "test_resize_server|test_cold_migration|test_revert_cold_migration"
-else
- echo '3. cold migration and resize is skipped for non-grenade jobs'
-fi
-
-echo '4. test with Ceph for root + ephemeral disks'
-# Discover and set variables for the OS version so the devstack-plugin-ceph
-# scripts can find the correct repository to install the ceph packages.
-GetOSVersion
-USE_PYTHON3=${USE_PYTHON3:-True}
-prepare_ceph
-GLANCE_API_CONF=${GLANCE_API_CONF:-/etc/glance/glance-api.conf}
-configure_and_start_glance
-
-configure_and_start_nova
-run_tempest "Ceph nova&glance test" "^.*test_live_migration(?!.*(test_volume_backed_live_migration))"
-
-set +e
-#echo '5. test with Ceph for volumes and root + ephemeral disk'
-
-#configure_and_start_cinder
-#run_tempest "Ceph nova&glance&cinder test" "live_migration"
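The tempest filters in the removed run_tests.sh rely on Perl-style negative lookahead to exclude known-broken tests. A quick way to sanity-check such a filter before handing it to tempest is to list the matching tests with a PCRE-capable grep; this is a local convenience, assuming a tempest checkout with stestr installed and GNU grep with -P support.

# Show which tests the block migration filter above would actually select,
# excluding the volume-backed and paused block migration cases.
regex='^.*test_live_migration(?!.*(test_volume_backed_live_migration|test_live_block_migration_paused))'
stestr list | grep -P "$regex"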
diff --git a/gate/live_migration/hooks/utils.sh b/gate/live_migration/hooks/utils.sh
deleted file mode 100755
index 9f98ca2e25..0000000000
--- a/gate/live_migration/hooks/utils.sh
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/bin/bash
-
-function run_tempest {
- local message=$1
- local tempest_regex=$2
- sudo -H -u tempest tox -eall -- $tempest_regex --concurrency=$TEMPEST_CONCURRENCY
- exitcode=$?
- if [[ $exitcode -ne 0 ]]; then
- die $LINENO "$message failure"
- fi
-}
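For completeness, the run_tempest helper above simply wraps tempest's tox -eall environment; the first call in the removed run_tests.sh is therefore roughly equivalent to running the following by hand, assuming BASE and TEMPEST_CONCURRENCY are set as in the hooks.

# Hand-expanded equivalent of:
#   run_tempest "block migration test" "^.*test_live_migration(?!...)"
cd "$BASE/new/tempest"
sudo -H -u tempest tox -eall -- \
    '^.*test_live_migration(?!.*(test_volume_backed_live_migration|test_live_block_migration_paused))' \
    --concurrency="$TEMPEST_CONCURRENCY"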