-rw-r--r--  HACKING.rst                                            |  7
-rw-r--r--  tempest/api/volume/base.py                             | 31
-rw-r--r--  tempest/api/volume/test_volumes_snapshots.py           | 17
-rw-r--r--  tempest/common/waiters.py                              | 11
-rw-r--r--  tempest/config.py                                      | 12
-rw-r--r--  tempest/scenario/test_network_advanced_server_ops.py   |  2
-rwxr-xr-x  tempest/tests/common/test_waiters.py                   | 23
-rw-r--r--  zuul.d/project.yaml                                    |  5
8 files changed, 92 insertions(+), 16 deletions(-)
diff --git a/HACKING.rst b/HACKING.rst
index 17e2a4908..caf954b4f 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -194,6 +194,13 @@ functionality, like listing servers or flavors or creating volumes. The
 attribute should be sparingly applied to only the tests that sanity-check the
 most essential functionality of an OpenStack cloud.
 
+Multinode Attribute
+^^^^^^^^^^^^^^^^^^^
+The ``type='multinode'`` attribute marks a test as intended to run in a
+multinode environment. By tagging tests with this attribute we can avoid
+running tests which are of little benefit in a multinode setup, and thus
+reduce resource consumption.
+
 Test fixtures and resources
 ---------------------------
 Test level resources should be cleaned-up after the test execution. Clean-up
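
A note on usage: attribute tagging in tempest is done with the
``tempest.lib.decorators.attr`` decorator, the same mechanism used for the
existing ``slow`` attribute. A minimal sketch of how a test could opt in
(the test class and test method here are hypothetical):

    from tempest.lib import decorators
    import tempest.test


    class ExampleMultinodeTest(tempest.test.BaseTestCase):
        """Hypothetical test case illustrating the new attribute."""

        @decorators.attr(type='multinode')
        def test_cold_migration_example(self):
            # Selected by jobs whose test regex filters on
            # type='multinode'; skipped by jobs that exclude it.
            pass
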
diff --git a/tempest/api/volume/base.py b/tempest/api/volume/base.py
index 49f9e2217..9ba9949d7 100644
--- a/tempest/api/volume/base.py
+++ b/tempest/api/volume/base.py
@@ -20,6 +20,7 @@ from tempest.lib.common import api_version_utils
 from tempest.lib.common.utils import data_utils
 from tempest.lib.common.utils import test_utils
 from tempest.lib.decorators import cleanup_order
+from tempest.lib import exceptions as lib_exc
 import tempest.test
 
 CONF = config.CONF
@@ -126,12 +127,32 @@ class BaseVolumeTest(api_version_utils.BaseMicroversionTest,
 
         volume = self.volumes_client.create_volume(**kwargs)['volume']
         self.cleanup(test_utils.call_and_ignore_notfound_exc,
-                     self.delete_volume, self.volumes_client, volume['id'])
+                     self._delete_volume_for_cleanup,
+                     self.volumes_client, volume['id'])
         if wait_until:
             waiters.wait_for_volume_resource_status(self.volumes_client,
                                                     volume['id'], wait_until)
         return volume
 
+    @staticmethod
+    def _delete_volume_for_cleanup(volumes_client, volume_id):
+        """Delete a volume (only) for cleanup.
+
+        If it is attached to a server, wait for it to become available,
+        assuming we have already deleted the server and just need nova to
+        complete the delete operation before it is available to be deleted.
+        Otherwise proceed to the regular delete_volume().
+        """
+        try:
+            vol = volumes_client.show_volume(volume_id)['volume']
+            if vol['status'] == 'in-use':
+                waiters.wait_for_volume_resource_status(volumes_client,
+                                                        volume_id,
+                                                        'available')
+        except lib_exc.NotFound:
+            pass
+        BaseVolumeTest.delete_volume(volumes_client, volume_id)
+
     @cleanup_order
     def create_snapshot(self, volume_id=1, **kwargs):
         """Wrapper utility that returns a test snapshot."""
@@ -183,15 +204,17 @@ class BaseVolumeTest(api_version_utils.BaseMicroversionTest,
         snapshots_client.delete_snapshot(snapshot_id)
         snapshots_client.wait_for_resource_deletion(snapshot_id)
 
-    def attach_volume(self, server_id, volume_id):
+    def attach_volume(self, server_id, volume_id, wait_for_detach=True):
         """Attach a volume to a server"""
         self.servers_client.attach_volume(
             server_id, volumeId=volume_id,
             device='/dev/%s' % CONF.compute.volume_device_name)
         waiters.wait_for_volume_resource_status(self.volumes_client,
                                                 volume_id, 'in-use')
-        self.addCleanup(waiters.wait_for_volume_resource_status,
-                        self.volumes_client, volume_id, 'available')
+        if wait_for_detach:
+            self.addCleanup(waiters.wait_for_volume_resource_status,
+                            self.volumes_client, volume_id, 'available',
+                            server_id, self.servers_client)
         self.addCleanup(self.servers_client.detach_volume, server_id,
                         volume_id)
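
With the new keyword, a test that deletes the attached server during its own
cleanup can skip the blocking wait for the volume to detach. A sketch of the
intended call pattern (the test method and resource flow are invented for
illustration; create_volume(), create_server() and attach_volume() are the
base-class helpers shown above):

    def test_example_with_attached_volume(self):
        volume = self.create_volume()
        server = self.create_server(wait_until='SSHABLE')
        # The server is deleted first during cleanup, so there is no need
        # to block here until the volume is 'available' again; the new
        # _delete_volume_for_cleanup() copes with the in-use volume.
        self.attach_volume(server['id'], volume['id'],
                           wait_for_detach=False)
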
diff --git a/tempest/api/volume/test_volumes_snapshots.py b/tempest/api/volume/test_volumes_snapshots.py
index b3a04f8e1..95521e7e0 100644
--- a/tempest/api/volume/test_volumes_snapshots.py
+++ b/tempest/api/volume/test_volumes_snapshots.py
@@ -44,12 +44,17 @@ class VolumesSnapshotTestJSON(base.BaseVolumeTest):
     @utils.services('compute')
     def test_snapshot_create_delete_with_volume_in_use(self):
         """Test create/delete snapshot from volume attached to server"""
-        # Create a test instance
-        server = self.create_server(wait_until='SSHABLE')
         # NOTE(zhufl) Here we create volume from self.image_ref for adding
         # coverage for "creating snapshot from non-blank volume".
         volume = self.create_volume(imageRef=self.image_ref)
-        self.attach_volume(server['id'], volume['id'])
+
+        # Create a test instance
+        server = self.create_server(wait_until='SSHABLE')
+
+        # NOTE(danms): We are attaching this volume to a server, but we do
+        # not need to block on detach during cleanup because we will be
+        # deleting the server anyway.
+        self.attach_volume(server['id'], volume['id'], wait_for_detach=False)
 
         # Snapshot a volume which attached to an instance with force=False
         self.assertRaises(lib_exc.BadRequest, self.create_snapshot,
@@ -81,7 +86,11 @@ class VolumesSnapshotTestJSON(base.BaseVolumeTest):
 
         # Create a server and attach it
         server = self.create_server(wait_until='SSHABLE')
-        self.attach_volume(server['id'], self.volume_origin['id'])
+        # NOTE(danms): We are attaching this volume to a server, but we do
+        # not need to block on detach during cleanup because we will be
+        # deleting the server anyway.
+        self.attach_volume(server['id'], self.volume_origin['id'],
+                           wait_for_detach=False)
 
         # Now that the volume is attached, create other snapshots
         snapshot2 = self.create_snapshot(self.volume_origin['id'], force=True)
diff --git a/tempest/common/waiters.py b/tempest/common/waiters.py
index 45a7b8a59..c5da412d3 100644
--- a/tempest/common/waiters.py
+++ b/tempest/common/waiters.py
@@ -303,12 +303,16 @@ def wait_for_image_copied_to_stores(client, image_id):
     raise lib_exc.TimeoutException(message)
 
 
-def wait_for_volume_resource_status(client, resource_id, status):
+def wait_for_volume_resource_status(client, resource_id, status,
+                                    server_id=None, servers_client=None):
     """Waits for a volume resource to reach a given status.
 
     This function is a common function for volume, snapshot and backup
     resources. The function extracts the name of the desired resource from
     the client class name of the resource.
+
+    If server_id and servers_client are provided, dump the console for that
+    server on failure.
     """
     resource_name = re.findall(
         r'(volume|group-snapshot|snapshot|backup|group)',
@@ -330,6 +334,11 @@ def wait_for_volume_resource_status(client, resource_id, status):
             raise exceptions.VolumeExtendErrorException(volume_id=resource_id)
 
         if int(time.time()) - start >= client.build_timeout:
+            if server_id and servers_client:
+                console_output = servers_client.get_console_output(
+                    server_id)['output']
+                LOG.debug('Console output for %s\nbody=\n%s',
+                          server_id, console_output)
             message = ('%s %s failed to reach %s status (current %s) '
                        'within the required time (%s s).' %
                        (resource_name, resource_id, status, resource_status,
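
A caller that wants the console dump on timeout passes the two new arguments
alongside the existing ones, as the updated attach_volume() cleanup above
does. A hedged example (the volume/server variables and service clients are
assumed to exist on the calling test class):

    waiters.wait_for_volume_resource_status(
        self.volumes_client, volume['id'], 'available',
        server_id=server['id'], servers_client=self.servers_client)
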
diff --git a/tempest/config.py b/tempest/config.py
index 00b394eff..dfc0a8eb3 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -975,12 +975,12 @@ ValidationGroup = [
                default='ecdsa',
                help='Type of key to use for ssh connections. '
                     'Valid types are rsa, ecdsa'),
-    cfg.IntOpt('allowed_network_downtime',
-               default=5.0,
-               help="Allowed VM network connection downtime during live "
-                    "migration, in seconds. "
-                    "When the measured downtime exceeds this value, an "
-                    "exception is raised."),
+    cfg.FloatOpt('allowed_network_downtime',
+                 default=5.0,
+                 help="Allowed VM network connection downtime during live "
+                      "migration, in seconds. "
+                      "When the measured downtime exceeds this value, an "
+                      "exception is raised."),
 ]
 
 volume_group = cfg.OptGroup(name='volume',
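
The practical effect of switching to FloatOpt is that sub-second downtime
limits now parse cleanly; with IntOpt the option could not actually carry
the fractional default it declared. For instance, in tempest.conf:

    [validation]
    # Fractional values are now accepted; under IntOpt a value like this
    # would not survive integer conversion.
    allowed_network_downtime = 0.5
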
diff --git a/tempest/scenario/test_network_advanced_server_ops.py b/tempest/scenario/test_network_advanced_server_ops.py
index f4f37b087..e6c6eb69a 100644
--- a/tempest/scenario/test_network_advanced_server_ops.py
+++ b/tempest/scenario/test_network_advanced_server_ops.py
@@ -275,7 +275,7 @@ class TestNetworkAdvancedServerOps(manager.NetworkScenarioTest):
         LOG.debug("Downtime seconds measured with downtime_meter = %r",
                   downtime)
         allowed_downtime = CONF.validation.allowed_network_downtime
-        self.assertLess(
+        self.assertLessEqual(
             downtime, allowed_downtime,
             "Downtime of {} seconds is higher than expected '{}'".format(
                 downtime, allowed_downtime))
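
The assertion change only matters at the boundary: a measured downtime
exactly equal to allowed_network_downtime now passes instead of failing.
In plain unittest terms:

    self.assertLess(5.0, 5.0)       # fails: 5.0 < 5.0 is False
    self.assertLessEqual(5.0, 5.0)  # passes: hitting the limit exactly is OK
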
diff --git a/tempest/tests/common/test_waiters.py b/tempest/tests/common/test_waiters.py
index 2695048b0..93c949e64 100755
--- a/tempest/tests/common/test_waiters.py
+++ b/tempest/tests/common/test_waiters.py
@@ -386,6 +386,29 @@ class TestVolumeWaiters(base.TestCase):
         mock_sleep.assert_called_once_with(1)
 
     @mock.patch.object(time, 'sleep')
+    def test_wait_for_volume_status_timeout_console(self, mock_sleep):
+        # Tests that the wait method gets the server console log if the
+        # timeout is hit.
+        client = mock.Mock(spec=volumes_client.VolumesClient,
+                           resource_type="volume",
+                           build_interval=1,
+                           build_timeout=1)
+        servers_client = mock.Mock()
+        servers_client.get_console_output.return_value = {
+            'output': 'console log'}
+        volume = {'volume': {'status': 'detaching'}}
+        mock_show = mock.Mock(return_value=volume)
+        client.show_volume = mock_show
+        volume_id = '7532b91e-aa0a-4e06-b3e5-20c0c5ee1caa'
+        self.assertRaises(lib_exc.TimeoutException,
+                          waiters.wait_for_volume_resource_status,
+                          client, volume_id, 'available',
+                          server_id='someserver',
+                          servers_client=servers_client)
+        servers_client.get_console_output.assert_called_once_with(
+            'someserver')
+
+    @mock.patch.object(time, 'sleep')
     def test_wait_for_volume_status_error_extending(self, mock_sleep):
         # Tests that the wait method raises VolumeExtendErrorException if
         # the volume status is 'error_extending'.
diff --git a/zuul.d/project.yaml b/zuul.d/project.yaml
index d20186ece..3df61d8bc 100644
--- a/zuul.d/project.yaml
+++ b/zuul.d/project.yaml
@@ -156,6 +156,11 @@
             irrelevant-files: *tempest-irrelevant-files
         - tempest-all:
             irrelevant-files: *tempest-irrelevant-files
+        - tempest-slow-parallel
+        - tempest-full-parallel
+        - tempest-full-zed-extra-tests
+        - tempest-full-yoga-extra-tests
+        - tempest-full-xena-extra-tests
         - neutron-ovs-tempest-dvr-ha-multinode-full:
             irrelevant-files: *tempest-irrelevant-files
         - nova-tempest-v2-api: