Diffstat (limited to 'cinder/volume/drivers')
 -rw-r--r--  cinder/volume/drivers/dell_emc/powerstore/utils.py        |  4
 -rw-r--r--  cinder/volume/drivers/hpe/hpe_3par_common.py              | 63
 -rw-r--r--  cinder/volume/drivers/ibm/ibm_storage/ds8k_connection.py  |  5
 -rw-r--r--  cinder/volume/drivers/pure.py                             | 37
 -rw-r--r--  cinder/volume/drivers/rbd.py                              |  6
 5 files changed, 86 insertions, 29 deletions
diff --git a/cinder/volume/drivers/dell_emc/powerstore/utils.py b/cinder/volume/drivers/dell_emc/powerstore/utils.py
index 52c74a587..dd02fe93c 100644
--- a/cinder/volume/drivers/dell_emc/powerstore/utils.py
+++ b/cinder/volume/drivers/dell_emc/powerstore/utils.py
@@ -15,12 +15,12 @@
"""Utilities for Dell EMC PowerStore Cinder driver."""
-from distutils import version
import functools
import re
from oslo_log import log as logging
from oslo_utils import units
+from packaging import version
from cinder.common import constants
from cinder import exception
@@ -186,4 +186,4 @@ def is_group_a_cg_snapshot_type(func):
def version_gte(ver1, ver2):
- return version.LooseVersion(ver1) >= version.LooseVersion(ver2)
+ return version.parse(ver1) >= version.parse(ver2)
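For context, a minimal sketch (not part of the patch) of what the swap buys: distutils is deprecated by PEP 632 and removed from the standard library in Python 3.12, while packaging.version applies PEP 440 ordering, so release segments compare numerically rather than as strings.

    from packaging import version

    # Numeric ordering of release segments under PEP 440 (illustrative values).
    assert version.parse("3.0.0.10") >= version.parse("3.0.0.9")
    assert version.parse("6.4.10") > version.parse("6.4.2")
    assert not version.parse("2.1.0") >= version.parse("2.10.0")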
diff --git a/cinder/volume/drivers/hpe/hpe_3par_common.py b/cinder/volume/drivers/hpe/hpe_3par_common.py
index 971fac3e8..2d9534a16 100644
--- a/cinder/volume/drivers/hpe/hpe_3par_common.py
+++ b/cinder/volume/drivers/hpe/hpe_3par_common.py
@@ -81,6 +81,7 @@ FLASH_CACHE_API_VERSION = 30201200
COMPRESSION_API_VERSION = 30301215
SRSTATLD_API_VERSION = 30201200
REMOTE_COPY_API_VERSION = 30202290
+API_VERSION_2023 = 100000000
hpe3par_opts = [
cfg.StrOpt('hpe3par_api_url',
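A hedged reading of the new constant, not stated in the patch: the WSAPI build number appears to pack the dotted release as major*10**7 + minor*10**5 + revision*10**3 + build, which would make 30301215 correspond to a 3.3.1 build and 100000000 to the 10.0.0.0 (2023) WSAPI. A small sketch under that assumption:

    # Assumed encoding only; the driver itself just compares the integers
    # reported by the array.
    def wsapi_build(major, minor, revision, build):
        return major * 10**7 + minor * 10**5 + revision * 10**3 + build

    assert wsapi_build(3, 3, 1, 215) == 30301215   # COMPRESSION_API_VERSION
    assert wsapi_build(10, 0, 0, 0) == 100000000   # API_VERSION_2023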
@@ -300,11 +301,14 @@ class HPE3PARCommon(object):
4.0.16 - In multi host env, fix multi-detach operation. Bug #1958122
4.0.17 - Added get_manageable_volumes and get_manageable_snapshots.
Bug #1819903
+ 4.0.18 - During conversion of volume to base volume,
+ error out if it has child snapshot(s). Bug #1994521
+ 4.0.19 - Update code to work with new WSAPI (of 2023). Bug #2015746
"""
- VERSION = "4.0.17"
+ VERSION = "4.0.19"
stats = {}
@@ -704,9 +708,12 @@ class HPE3PARCommon(object):
compression = self.get_compression_policy(
type_info['hpe3par_keys'])
- optional = {'online': True, 'snapCPG': snapcpg,
+ optional = {'online': True,
'tpvv': tpvv, 'tdvv': tdvv}
+ if self.API_VERSION < API_VERSION_2023:
+ optional['snapCPG'] = snapcpg
+
if compression is not None:
optional['compression'] = compression
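Illustrative only (CPG name assumed): the effect of the guard above is that the online-copy request body no longer carries snapCPG once the backend reports the 2023 WSAPI.

    optional_old_wsapi = {'online': True, 'snapCPG': 'OpenStackSnapCPG',
                          'tpvv': True, 'tdvv': False}
    optional_2023_wsapi = {'online': True, 'tpvv': True, 'tdvv': False}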
@@ -1004,7 +1011,7 @@ class HPE3PARCommon(object):
'comment': json.dumps(new_comment)}
# Ensure that snapCPG is set
- if 'snapCPG' not in vol:
+ if 'snapCPG' not in vol and self.API_VERSION < API_VERSION_2023:
new_vals['snapCPG'] = vol['userCPG']
LOG.info("Virtual volume %(disp)s '%(new)s' snapCPG "
"is empty so it will be set to: %(cpg)s",
@@ -2393,9 +2400,14 @@ class HPE3PARCommon(object):
comments['qos'] = qos
extras = {'comment': json.dumps(comments),
- 'snapCPG': snap_cpg,
'tpvv': tpvv}
+ LOG.debug("self.API_VERSION: %(version)s",
+ {'version': self.API_VERSION})
+
+ if self.API_VERSION < API_VERSION_2023:
+ extras['snapCPG'] = snap_cpg
+
# Only set the dedup option if the backend supports it.
if self.API_VERSION >= DEDUP_API_VERSION:
extras['tdvv'] = tdvv
@@ -2466,7 +2478,7 @@ class HPE3PARCommon(object):
{'src': src_name, 'dest': dest_name})
optional = {'tpvv': tpvv, 'online': True}
- if snap_cpg is not None:
+ if snap_cpg is not None and self.API_VERSION < API_VERSION_2023:
optional['snapCPG'] = snap_cpg
if self.API_VERSION >= DEDUP_API_VERSION:
@@ -3139,6 +3151,21 @@ class HPE3PARCommon(object):
compression = self.get_compression_policy(
type_info['hpe3par_keys'])
+
+ # If the volume (osv-) has snapshots, they cannot be transferred
+ # to the new base volume (omv-) during conversion, i.e. they
+ # remain with the original volume (osv-). So error out for such
+ # a volume.
+ snap_list = self.client.getVolumeSnapshots(volume_name)
+ if snap_list:
+ snap_str = ",".join(snap_list)
+ msg = (_("Volume %(name)s has dependent snapshots: %(snap)s."
+ " Either flatten or remove the dependent snapshots:"
+ " %(snap)s for the conversion of volume %(name)s to"
+ " succeed." % {'name': volume_name,
+ 'snap': snap_str}))
+ raise exception.VolumeIsBusy(message=msg)
+
# Create a physical copy of the volume
task_id = self._copy_volume(volume_name, temp_vol_name,
cpg, cpg, type_info['tpvv'],
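A self-contained sketch of the new guard (volume and snapshot names are hypothetical; the real driver obtains the list from client.getVolumeSnapshots() and raises exception.VolumeIsBusy):

    def check_convertible(volume_name, snap_list):
        # Conversion would leave the snapshots on the osv- volume that is
        # about to be deleted, so refuse to proceed while any exist.
        if snap_list:
            raise RuntimeError("Volume %s has dependent snapshots: %s; flatten "
                               "or remove them first." %
                               (volume_name, ",".join(snap_list)))

    check_convertible("osv-ABC123", [])                # proceeds
    # check_convertible("osv-ABC123", ["oss-ABC123"])  # would raise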
@@ -3162,16 +3189,18 @@ class HPE3PARCommon(object):
comment = self._get_3par_vol_comment(volume_name)
if comment:
self.client.modifyVolume(temp_vol_name, {'comment': comment})
- LOG.debug('Volume rename completed: convert_to_base_volume: '
- 'id=%s.', volume['id'])
+ LOG.debug('Assigned the comment: convert_to_base_volume: '
+ 'id=%s.', volume['id'])
- # Delete source volume after the copy is complete
+ # Delete source volume (osv-) after the copy is complete
self.client.deleteVolume(volume_name)
LOG.debug('Delete src volume completed: convert_to_base_volume: '
'id=%s.', volume['id'])
- # Rename the new volume to the original name
+ # Rename the new volume (omv-) to the original name (osv-)
self.client.modifyVolume(temp_vol_name, {'newName': volume_name})
+ LOG.debug('Volume rename completed: convert_to_base_volume: '
+ 'id=%s.', volume['id'])
LOG.info('Completed: convert_to_base_volume: '
'id=%s.', volume['id'])
@@ -4358,15 +4387,17 @@ class HPE3PARCommon(object):
local_cpg)
rcg_target = {'targetName': target['backend_id'],
'mode': replication_mode_num,
- 'snapCPG': cpg,
'userCPG': cpg}
+ if self.API_VERSION < API_VERSION_2023:
+ rcg_target['snapCPG'] = cpg
rcg_targets.append(rcg_target)
sync_target = {'targetName': target['backend_id'],
'syncPeriod': replication_sync_period}
sync_targets.append(sync_target)
- optional = {'localSnapCPG': vol_settings['snap_cpg'],
- 'localUserCPG': local_cpg}
+ optional = {'localUserCPG': local_cpg}
+ if self.API_VERSION < API_VERSION_2023:
+ optional['localSnapCPG'] = vol_settings['snap_cpg']
pool = volume_utils.extract_host(volume['host'], level='pool')
domain = self.get_domain(pool)
if domain:
@@ -4381,6 +4412,8 @@ class HPE3PARCommon(object):
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
+ LOG.debug("created rcg %(name)s", {'name': rcg_name})
+
# Add volume to remote copy group.
rcg_targets = []
for target in self._replication_targets:
@@ -5300,7 +5333,11 @@ class ModifyVolumeTask(flow_utils.CinderTask):
comment_dict = self._get_new_comment(
old_comment, new_vvs, new_qos, new_type_name, new_type_id)
- if new_snap_cpg != old_snap_cpg:
+ LOG.debug("API_VERSION: %(ver_1)s, API_VERSION_2023: %(ver_2)s",
+ {'ver_1': common.API_VERSION,
+ 'ver_2': API_VERSION_2023})
+ if (new_snap_cpg != old_snap_cpg and
+ common.API_VERSION < API_VERSION_2023):
# Modify the snap_cpg. This will fail with snapshots.
LOG.info("Modifying %(volume_name)s snap_cpg from "
"%(old_snap_cpg)s to %(new_snap_cpg)s.",
diff --git a/cinder/volume/drivers/ibm/ibm_storage/ds8k_connection.py b/cinder/volume/drivers/ibm/ibm_storage/ds8k_connection.py
index 36f283b40..dfed6d49f 100644
--- a/cinder/volume/drivers/ibm/ibm_storage/ds8k_connection.py
+++ b/cinder/volume/drivers/ibm/ibm_storage/ds8k_connection.py
@@ -94,10 +94,7 @@ class DS8KHTTPSConnection(connection.VerifiedHTTPSConnection):
# Calls self._set_hostport(), so self.host is
# self._tunnel_host below.
#
- # disable pylint because pylint doesn't support importing
- # from six.moves yet. see:
- # https://bitbucket.org/logilab/pylint/issue/550/
- self._tunnel() # pylint: disable=E1101
+ self._tunnel()
# Mark this connection as not reusable
self.auto_open = 0
diff --git a/cinder/volume/drivers/pure.py b/cinder/volume/drivers/pure.py
index bf38d88f9..fcaecda08 100644
--- a/cinder/volume/drivers/pure.py
+++ b/cinder/volume/drivers/pure.py
@@ -111,9 +111,8 @@ PURE_OPTS = [
"IPv4 and IPv6 subnets. This parameter supersedes "
"pure_nvme_cidr."),
cfg.StrOpt("pure_nvme_transport", default="roce",
- choices=['roce'],
- help="The NVMe transport layer to be used by the NVMe driver. "
- "This only supports RoCE at this time."),
+ choices=['roce', 'tcp'],
+ help="The NVMe transport layer to be used by the NVMe driver."),
cfg.BoolOpt("pure_eradicate_on_delete",
default=False,
help="When enabled, all Pure volumes, snapshots, and "
@@ -159,6 +158,7 @@ ERR_MSG_NOT_CONNECTED = "is not connected"
ERR_MSG_ALREADY_BELONGS = "already belongs to"
ERR_MSG_EXISTING_CONNECTIONS = "cannot be deleted due to existing connections"
ERR_MSG_ALREADY_IN_USE = "already in use"
+ERR_MSG_ARRAY_LIMIT = "limit reached"
EXTRA_SPECS_REPL_ENABLED = "replication_enabled"
EXTRA_SPECS_REPL_TYPE = "replication_type"
@@ -406,6 +406,13 @@ class PureBaseVolumeDriver(san.SanDriver):
"unsupported. Please upgrade your backend to "
"a supported version.")
raise PureDriverException(msg)
+ if version.parse(array_info["version"]) < version.parse(
+ '6.4.2'
+ ) and self._storage_protocol == constants.NVMEOF_TCP:
+ msg = _("FlashArray Purity version less than 6.4.2 "
+ "unsupported for NVMe-TCP. Please upgrade your "
+ "backend to a supported version.")
+ raise PureDriverException(msg)
self._array.array_name = array_info["array_name"]
self._array.array_id = array_info["id"]
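A hedged sketch of the new gate (version string and protocol label are placeholders; the driver compares self._storage_protocol against constants.NVMEOF_TCP):

    from packaging import version

    purity_version = "6.3.5"        # reported by the array, assumed here
    storage_protocol = "NVMe-TCP"   # stand-in for constants.NVMEOF_TCP
    if (version.parse(purity_version) < version.parse("6.4.2")
            and storage_protocol == "NVMe-TCP"):
        raise RuntimeError("Purity < 6.4.2 does not support NVMe-TCP")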
@@ -2418,8 +2425,9 @@ class PureBaseVolumeDriver(san.SanDriver):
except purestorage.PureHTTPError as err:
with excutils.save_and_reraise_exception() as ctxt:
if err.code == 400 and (
- ERR_MSG_ALREADY_EXISTS
- in err.text):
+ ERR_MSG_ALREADY_EXISTS in err.text
+ or ERR_MSG_ARRAY_LIMIT in err.text
+ ):
ctxt.reraise = False
LOG.info("Skipping add array %(target_array)s to pod"
" %(pod_name)s since it's already added.",
@@ -3217,6 +3225,9 @@ class PureNVMEDriver(PureBaseVolumeDriver, driver.BaseVD):
if self.configuration.pure_nvme_transport == "roce":
self.transport_type = "rdma"
self._storage_protocol = constants.NVMEOF_ROCE
+ else:
+ self.transport_type = "tcp"
+ self._storage_protocol = constants.NVMEOF_TCP
def _get_nguid(self, pure_vol_name):
"""Return the NGUID based on the volume's serial number
@@ -3331,14 +3342,24 @@ class PureNVMEDriver(PureBaseVolumeDriver, driver.BaseVD):
return props
def _get_target_nvme_ports(self, array):
- """Return list of nvme-enabled port descriptions."""
+ """Return list of correct nvme-enabled port descriptions."""
ports = array.list_ports()
+ valid_nvme_ports = []
nvme_ports = [port for port in ports if port["nqn"]]
+ for port in range(0, len(nvme_ports)):
+ if "ETH" in nvme_ports[port]["name"]:
+ port_detail = array.get_network_interface(
+ interface=nvme_ports[port]["name"]
+ )
+ if port_detail["services"][0] == "nvme-" + \
+ self.configuration.pure_nvme_transport:
+ valid_nvme_ports.append(nvme_ports[port])
if not nvme_ports:
raise PureDriverException(
- reason=_("No nvme-enabled ports on target array.")
+ reason=_("No %(type)s enabled ports on target array.") %
+ {"type": self._storage_protocol}
)
- return nvme_ports
+ return valid_nvme_ports
@utils.retry(PureRetryableException, retries=HOST_CREATE_MAX_RETRIES)
def _connect(self, array, vol_name, connector):
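A hedged illustration of the filtering above (interface names and payload shapes are assumed): only ETH interfaces whose first listed service matches "nvme-<transport>" are handed to the connector.

    nvme_ports = [{"name": "CT0.ETH4", "nqn": "nqn.2010-06.com.purestorage:x"},
                  {"name": "CT0.ETH5", "nqn": "nqn.2010-06.com.purestorage:x"}]
    services_by_interface = {"CT0.ETH4": ["nvme-tcp"], "CT0.ETH5": ["nvme-roce"]}
    transport = "tcp"   # pure_nvme_transport

    valid_nvme_ports = [
        p for p in nvme_ports
        if "ETH" in p["name"]
        and services_by_interface[p["name"]][0] == "nvme-" + transport
    ]
    # -> [{'name': 'CT0.ETH4', ...}] when pure_nvme_transport = tcp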
diff --git a/cinder/volume/drivers/rbd.py b/cinder/volume/drivers/rbd.py
index 6cc86c2c5..e710fd356 100644
--- a/cinder/volume/drivers/rbd.py
+++ b/cinder/volume/drivers/rbd.py
@@ -968,7 +968,8 @@ class RBDDriver(driver.CloneableImageVD, driver.MigrateVD,
with RBDVolumeProxy(self, vol_name) as image:
image_features = image.features()
change_features = self.MULTIATTACH_EXCLUSIONS & image_features
- image.update_features(change_features, False)
+ if change_features != 0:
+ image.update_features(change_features, False)
return {'provider_location':
self._dumps({'saved_features': image_features})}
@@ -980,7 +981,8 @@ class RBDDriver(driver.CloneableImageVD, driver.MigrateVD,
provider_location = json.loads(volume.provider_location)
image_features = provider_location['saved_features']
change_features = self.MULTIATTACH_EXCLUSIONS & image_features
- image.update_features(change_features, True)
+ if change_features != 0:
+ image.update_features(change_features, True)
except IndexError:
msg = "Could not find saved image features."
raise RBDDriverException(reason=msg)
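A hedged sketch of why the zero check matters (feature bit values are librbd's; the exclusion mask is assumed to mirror RBDDriver.MULTIATTACH_EXCLUSIONS): when an image carries none of the excluded features the intersection is 0, and update_features() is now skipped rather than called with an empty feature set.

    RBD_FEATURE_LAYERING = 1
    RBD_FEATURE_EXCLUSIVE_LOCK = 4
    RBD_FEATURE_OBJECT_MAP = 8
    RBD_FEATURE_FAST_DIFF = 16
    RBD_FEATURE_JOURNALING = 64
    MULTIATTACH_EXCLUSIONS = (RBD_FEATURE_EXCLUSIVE_LOCK | RBD_FEATURE_OBJECT_MAP
                              | RBD_FEATURE_FAST_DIFF | RBD_FEATURE_JOURNALING)

    image_features = RBD_FEATURE_LAYERING          # e.g. a plain cloned image
    change_features = MULTIATTACH_EXCLUSIONS & image_features
    if change_features != 0:                       # 0 here, so no librbd call
        pass  # image.update_features(change_features, False)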