author     whoami-rajat <rajatdhasmana@gmail.com>      2019-12-20 13:59:35 +0000
committer  Rajat Dhasmana <rajatdhasmana@gmail.com>    2020-02-21 10:20:03 +0000
commit     e3c9fa4efa8ff68058716f683b37c091eccf6036 (patch)
tree       c5fe98522b40f22005d9c14e7327f858cd3f4dc4
parent     c4deee6fa4d87875fb9c37abf31ebf5816c3a542 (diff)
download   cinder-e3c9fa4efa8ff68058716f683b37c091eccf6036.tar.gz
Fix: Create new cache entry when xtremio reaches snap limit
When we reach xtremio_volumes_per_glance_cache, the driver can no longer
create clones from the image-volume, so a new cache entry needs to be
created. Keeping in mind the case in [1], a CinderException can be raised
for various reasons by different drivers during the clone operation, so we
define a new exception for the XtremIO driver to force creation of a new
cache entry when the limit is reached, without enforcing the same
behaviour on other drivers.

[1] https://bugs.launchpad.net/cinder/+bug/1552734

Conflicts:
    cinder/exception.py
    cinder/tests/unit/volume/flows/test_create_volume_flow.py

Closes-Bug: #1858169
Change-Id: I2bf964d5a7b2048db9be1ea3eb97cd517e112c5b
(cherry picked from commit d22e54c254954bba5b91c38a266308a252c7b62a)
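In short, the fix routes the driver-specific "snapshot limit" failure
through a dedicated exception so the flow manager can evict only the
exhausted cache entry and fall back to creating a fresh one. Below is a
minimal sketch of that control flow; CacheEntry, ImageVolumeCache and
create_from_image_cache are illustrative stand-ins rather than Cinder
classes, and only SnapshotLimitReached and the evict-then-re-raise
behaviour come from this change.

class SnapshotLimitReached(Exception):
    """Stand-in for cinder.exception.SnapshotLimitReached."""


class CacheEntry(object):
    def __init__(self, limit):
        self.limit = limit
        self.num_snapshots = 0

    def clone(self):
        # Driver side: refuse to clone once the image-volume has hit
        # the configured per-volume snapshot limit
        # (xtremio_volumes_per_glance_cache on XtremIO).
        if self.limit and self.num_snapshots >= self.limit:
            raise SnapshotLimitReached(
                'Exceeded the configured limit of %d snapshots per '
                'volume.' % self.limit)
        self.num_snapshots += 1
        return 'cloned-volume'


class ImageVolumeCache(object):
    def __init__(self, entries):
        self.entries = list(entries)

    def evict(self, entry):
        # Only the cache entry is dropped; the backing image-volume
        # itself is left untouched, matching the NOTE in the change.
        self.entries.remove(entry)


def create_from_image_cache(cache, entry):
    # Flow-manager side: evict the exhausted entry and re-raise so the
    # caller's retry logic can build a fresh cache entry instead.
    try:
        return entry.clone()
    except SnapshotLimitReached:
        cache.evict(entry)
        raise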
-rw-r--r--  cinder/exception.py                                           19
-rw-r--r--  cinder/tests/unit/volume/flows/test_create_volume_flow.py     66
-rw-r--r--  cinder/volume/drivers/dell_emc/xtremio.py                      3
-rw-r--r--  cinder/volume/flows/manager/create_volume.py                   20
4 files changed, 68 insertions(+), 40 deletions(-)
diff --git a/cinder/exception.py b/cinder/exception.py
index 6a56f2b0f..90529cb02 100644
--- a/cinder/exception.py
+++ b/cinder/exception.py
@@ -1372,3 +1372,22 @@ class ServiceUserTokenNoAuth(CinderException):
message = _("The [service_user] send_service_user_token option was "
"requested, but no service auth could be loaded. Please check "
"the [service_user] configuration section.")
+
+
+class UnsupportedNVMETProtocol(Invalid):
+ message = _("An invalid 'target_protocol' "
+ "value was provided: %(protocol)s")
+
+
+# NVMET driver
+class NVMETTargetAddError(CinderException):
+ message = "Failed to add subsystem: %(subsystem)s"
+
+
+class NVMETTargetDeleteError(CinderException):
+ message = "Failed to delete subsystem: %(subsystem)s"
+
+
+class SnapshotLimitReached(CinderException):
+ message = _("Exceeded the configured limit of "
+ "%(set_limit)s snapshots per volume.")
diff --git a/cinder/tests/unit/volume/flows/test_create_volume_flow.py b/cinder/tests/unit/volume/flows/test_create_volume_flow.py
index d775433c6..72c35e2bf 100644
--- a/cinder/tests/unit/volume/flows/test_create_volume_flow.py
+++ b/cinder/tests/unit/volume/flows/test_create_volume_flow.py
@@ -1303,21 +1303,9 @@ class CreateVolumeFlowManagerImageCacheTestCase(test.TestCase):
image_location = 'someImageLocationStr'
image_id = fakes.IMAGE_ID
image_meta = mock.MagicMock()
- image_info = imageutils.QemuImgInfo()
- image_info.virtual_size = '1073741824'
- mock_qemu_info.return_value = image_info
-
volume = fake_volume.fake_volume_obj(self.ctxt, size=1,
host='foo@bar#pool')
- image_volume = fake_volume.fake_db_volume(size=2)
- self.mock_db.volume_create.return_value = image_volume
-
- if cloning_supported:
- mock_create_from_src.side_effect = exception.CinderException(
- 'Error during cloning')
- else:
- mock_create_from_src.side_effect = NotImplementedError(
- 'Driver does not support clone')
+ self.mock_driver.clone_image.return_value = (None, False)
manager = create_volume_manager.CreateVolumeFromSpecTask(
self.mock_volume_manager,
@@ -1325,29 +1313,41 @@ class CreateVolumeFlowManagerImageCacheTestCase(test.TestCase):
self.mock_driver,
image_volume_cache=self.mock_cache
)
-
- model_update = manager._create_from_image_cache_or_download(
- self.ctxt,
- volume,
- image_location,
- image_id,
- image_meta,
- self.mock_image_service,
- update_cache=False)
+ if cloning_supported:
+ mock_create_from_src.side_effect = exception.SnapshotLimitReached(
+ 'Error during cloning')
+ self.assertRaises(
+ exception.SnapshotLimitReached,
+ manager._create_from_image,
+ self.ctxt,
+ volume,
+ image_location,
+ image_id,
+ image_meta,
+ self.mock_image_service)
+ else:
+ mock_create_from_src.side_effect = NotImplementedError(
+ 'Driver does not support clone')
+ model_update = manager._create_from_image(
+ self.ctxt,
+ volume,
+ image_location,
+ image_id,
+ image_meta,
+ self.mock_image_service)
+ mock_create_from_img_dl.assert_called_once()
+ self.assertEqual(mock_create_from_img_dl.return_value,
+ model_update)
# Ensure cloning was attempted and that it failed
mock_create_from_src.assert_called_once()
- mock_create_from_img_dl.assert_called_once()
- self.assertEqual(mock_create_from_img_dl.return_value, model_update)
-
- # Ensure a new cache entry is created when cloning fails, but
- # only when the driver supports cloning.
- if cloning_supported:
- (self.mock_volume_manager.
- _create_image_cache_volume_entry.assert_called_once())
- else:
- (self.mock_volume_manager.
- _create_image_cache_volume_entry.assert_not_called())
+ with mock.patch(
+ 'cinder.volume.flows.manager.create_volume.'
+ 'CreateVolumeFromSpecTask') as volume_manager:
+ (volume_manager.CreateVolumeFromSpecTask.
+ _create_from_image_cache_or_download.called_once())
+ (volume_manager.CreateVolumeFromSpecTask.
+ _create_from_image_cache.called_once())
@mock.patch('cinder.volume.flows.manager.create_volume.'
'CreateVolumeFromSpecTask.'
diff --git a/cinder/volume/drivers/dell_emc/xtremio.py b/cinder/volume/drivers/dell_emc/xtremio.py
index c95be5602..4b12b6aa2 100644
--- a/cinder/volume/drivers/dell_emc/xtremio.py
+++ b/cinder/volume/drivers/dell_emc/xtremio.py
@@ -535,8 +535,7 @@ class XtremIOVolumeDriver(san.SanDriver):
src_vref['id'])
limit = self.configuration.safe_get('xtremio_volumes_per_glance_cache')
if cache and limit and limit > 0 and limit <= vol['num-of-dest-snaps']:
- raise exception.CinderException('Exceeded the configured limit of '
- '%d snapshots per volume' % limit)
+ raise exception.SnapshotLimitReached(set_limit=limit)
try:
self.client.create_snapshot(src_vref['id'], volume['id'])
except exception.XtremIOSnapshotsLimitExceeded as e:
diff --git a/cinder/volume/flows/manager/create_volume.py b/cinder/volume/flows/manager/create_volume.py
index 80d80e41a..010309fb5 100644
--- a/cinder/volume/flows/manager/create_volume.py
+++ b/cinder/volume/flows/manager/create_volume.py
@@ -706,6 +706,14 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask):
cache_entry['volume_id']
)
return model_update, True
+ except exception.SnapshotLimitReached:
+ # If this exception occurred when cloning the image-volume,
+ # it is because the image-volume reached its snapshot limit.
+ # Delete current cache entry and create a "fresh" entry
+ # NOTE: This will not delete the existing image-volume and
+ # only delete the cache entry
+ with excutils.save_and_reraise_exception():
+ self.image_volume_cache.evict(context, cache_entry)
except NotImplementedError:
LOG.warning('Backend does not support creating image-volume '
'clone. Image will be downloaded from Glance.')
@@ -789,17 +797,18 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask):
image_id,
image_meta
)
+ except exception.SnapshotLimitReached:
+ # This exception will be handled by the caller's
+ # (_create_from_image) retry decorator
+ with excutils.save_and_reraise_exception():
+ LOG.debug("Snapshot limit reached. Creating new "
+ "image-volume.")
except exception.CinderException as e:
LOG.warning('Failed to create volume from image-volume '
'cache, image will be downloaded from Glance. '
'Error: %(exception)s',
{'exception': e})
- # If an exception occurred when cloning the image-volume,
- # it may be the image-volume reached its snapshot limit.
- # Create another "fresh" cache entry.
- update_cache = True
-
# Don't cache unless directed.
if not cloned and update_cache:
should_create_cache_entry = True
@@ -871,6 +880,7 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask):
return model_update
+ @utils.retry(exception.SnapshotLimitReached, retries=1)
def _create_from_image(self, context, volume,
image_location, image_id, image_meta,
image_service, **kwargs):
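The @utils.retry(exception.SnapshotLimitReached, retries=1) decorator
added above is what turns the re-raised exception into a second pass
through _create_from_image. Cinder's utils.retry wraps an external retry
library; the simplified stand-in below only sketches the behaviour relied
on here, namely one extra invocation after the exhausted cache entry has
been evicted, so the retry creates a fresh entry instead of failing the
request.

import functools


def retry(exc_type, retries=1):
    # Simplified stand-in for cinder.utils.retry: re-invoke the wrapped
    # function up to `retries` extra times when it raises exc_type.
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            for attempt in range(retries + 1):
                try:
                    return func(*args, **kwargs)
                except exc_type:
                    if attempt == retries:
                        raise
        return wrapper
    return decorator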