author     raghavendrat <raghavendra.tilay@hpe.com>          2022-01-17 10:17:47 +0000
committer  Raghavendra Tilay <raghavendra.tilay@hpe.com>     2022-05-24 09:00:04 +0000
commit     f6d8353fd13fed3d2aecfc813fd9b66cb1dfdc78 (patch)
tree       b5d2ea81b5ad37834056377dfe4ac5aefb932fa5
parent     c47a4f27ac5967ef63c2b9e4cdd88b0913075359 (diff)
download   cinder-f6d8353fd13fed3d2aecfc813fd9b66cb1dfdc78.tar.gz
HPE 3PAR: In multi host env, fix multi-detach operation
In a multi host environment, if a volume is attached to instances residing
on different hosts, the detach operation works only partially. As a result,
the volume cannot be deleted later. Details are explained in the launchpad
bug.

When detaching a volume from an instance, this patch considers both
possibilities: the instances can reside
[1] either on the same host,
[2] or on different hosts.

case [1]: Behaviour is the same as earlier, i.e. the VLUN is not deleted
upon each detach operation and the remainder of terminate volume connection
is skipped. The VLUNs are deleted only on the last detach operation.

case [2]: The VLUN of that host on the 3PAR array is deleted separately
upon each detach operation.

Closes-Bug: #1958122
Change-Id: I47e8e86a495802a21570e23ecf7428fccc8b3d60
(cherry picked from commit 3ed2f38e54b84073f03ce1148c6dfcbbd37fc032)
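
The decision described above can be summarised as a small helper. The sketch
below is illustrative only: the helper name should_skip_vlun_delete is
hypothetical and not part of the patch; it assumes an attachment list whose
entries expose an attached_host field (as in the driver code) and the name of
the host issuing the detach.

# Illustrative sketch only; assumes attachment entries carry an
# ``attached_host`` field (as in the patch) and that ``hostname`` is the
# host issuing the detach. The helper name is hypothetical.
def should_skip_vlun_delete(attachment_list, hostname):
    """Return True for case [1] (keep the VLUN for now),
    False for case [2] (delete this host's VLUN immediately)."""
    if attachment_list is None or len(attachment_list) <= 1:
        # Single (or no) attachment: normal terminate path applies.
        return False

    # Count attachments that belong to the host issuing the detach.
    on_this_host = sum(
        1 for att in attachment_list
        if str(att.attached_host) == hostname)

    # case [1]: more than one instance on this host still uses the VLUN,
    # so only the last detach should remove it -> skip deletion now.
    # case [2]: the remaining attachments live on different hosts, so this
    # host's VLUN can be removed right away, avoiding a stale entry on the
    # 3PAR array that would later block volume deletion.
    return on_this_host > 1

In the actual patch the equivalent check is performed inline in
terminate_connection() of hpe_3par_common.py, as shown in the diff below.
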
-rw-r--r--cinder/tests/unit/volume/drivers/hpe/test_hpe3par.py69
-rw-r--r--cinder/volume/drivers/hpe/hpe_3par_common.py57
-rw-r--r--releasenotes/notes/hpe-3par-fix-multi-detach-in-multi-host-env-3f2211f29a336b6e.yaml6
3 files changed, 121 insertions, 11 deletions
diff --git a/cinder/tests/unit/volume/drivers/hpe/test_hpe3par.py b/cinder/tests/unit/volume/drivers/hpe/test_hpe3par.py
index 695f262f6..bf48ce037 100644
--- a/cinder/tests/unit/volume/drivers/hpe/test_hpe3par.py
+++ b/cinder/tests/unit/volume/drivers/hpe/test_hpe3par.py
@@ -10384,13 +10384,13 @@ class TestHPE3PARISCSIDriver(HPE3PARBaseDriver):
def test_migrate_volume_attached(self):
self.migrate_volume_attached()
- def test_terminate_connection_multiattach(self):
+ def test_terminate_connection_multiattach_same_host(self):
ctx = context.get_admin_context()
mock_client = self.setup_driver()
att_1 = fake_volume.volume_attachment_ovo(
- ctx, id=uuidutils.generate_uuid())
+ ctx, id=uuidutils.generate_uuid(), attached_host='same_host')
att_2 = fake_volume.volume_attachment_ovo(
- ctx, id=uuidutils.generate_uuid())
+ ctx, id=uuidutils.generate_uuid(), attached_host='same_host')
volume = fake_volume.fake_volume_obj(
ctx, multiattach=True, host=self.FAKE_CINDER_HOST)
volume.volume_attachment.objects = [att_1, att_2]
@@ -10399,7 +10399,7 @@ class TestHPE3PARISCSIDriver(HPE3PARBaseDriver):
mock_create_client.return_value = mock_client
self.driver.terminate_connection(volume, self.connector)
- # When volume is having mulitple instances attached, there
+ # When volume is attached to multiple instances on same host, there
# should be no call to delete the VLUN(s) or the host. We
# can assert these methods were not called to make sure the
# proper code execution is followed.
@@ -10407,6 +10407,67 @@ class TestHPE3PARISCSIDriver(HPE3PARBaseDriver):
self.assertEqual(0, mock_client.deleteVLUN.call_count)
self.assertEqual(0, mock_client.deleteHost.call_count)
+ def test_terminate_connection_multiattach_different_host(self):
+ ctx = context.get_admin_context()
+ att_1 = fake_volume.volume_attachment_ovo(
+ ctx, id=uuidutils.generate_uuid(), attached_host='host_one')
+ att_2 = fake_volume.volume_attachment_ovo(
+ ctx, id=uuidutils.generate_uuid(), attached_host='host_two')
+ volume = fake_volume.fake_volume_obj(
+ ctx, multiattach=True, host=self.FAKE_CINDER_HOST)
+ volume.volume_attachment.objects = [att_1, att_2]
+
+ vol_name = 'osv-HlF355XlSg.xcORfS0afag'
+
+ # When volume is attached to instances on different hosts,
+ # VLUN(s) of that host should be deleted. We can assert
+ # appropriate methods were called.
+
+ mock_client = self.setup_driver()
+ mock_client.getStorageSystemInfo.return_value = (
+ {'id': self.CLIENT_ID})
+
+ mock_client.getHostVLUNs.return_value = [
+ {'active': False,
+ 'volumeName': vol_name,
+ 'lun': None, 'type': 0}]
+
+ mock_client.queryHost.return_value = {
+ 'members': [{
+ 'name': self.FAKE_HOST
+ }]
+ }
+
+ with mock.patch.object(hpecommon.HPE3PARCommon,
+ '_create_client') as mock_create_client:
+ mock_create_client.return_value = mock_client
+ self.driver.terminate_connection(
+ volume,
+ self.connector,
+ force=True)
+
+ expected = [
+ mock.call.queryHost(iqns=[self.connector['initiator']]),
+ mock.call.getHostVLUNs(self.FAKE_HOST),
+ mock.call.deleteVLUN(
+ vol_name,
+ None,
+ hostname=self.FAKE_HOST),
+ mock.call.getHostVLUNs(self.FAKE_HOST),
+ mock.call.modifyHost(
+ 'fakehost',
+ {'pathOperation': 2,
+ 'iSCSINames': ['iqn.1993-08.org.debian:01:222']}),
+ mock.call.removeVolumeMetaData(vol_name, CHAP_USER_KEY),
+ mock.call.removeVolumeMetaData(vol_name, CHAP_PASS_KEY)]
+
+ mock_client.assert_has_calls(
+ self.get_id_login +
+ self.standard_logout +
+ self.standard_login +
+ expected +
+ self.standard_logout)
+
@ddt.data('volume', 'volume_name_id')
def test_terminate_connection(self, volume_attr):
volume = getattr(self, volume_attr)
diff --git a/cinder/volume/drivers/hpe/hpe_3par_common.py b/cinder/volume/drivers/hpe/hpe_3par_common.py
index c60b8838b..002f10bb8 100644
--- a/cinder/volume/drivers/hpe/hpe_3par_common.py
+++ b/cinder/volume/drivers/hpe/hpe_3par_common.py
@@ -297,11 +297,12 @@ class HPE3PARCommon(object):
enabled. bug #1834660
4.0.14 - Added Peer Persistence feature
4.0.15 - Support duplicated FQDN in network. Bug #1834695
+ 4.0.16 - In multi host env, fix multi-detach operation. Bug #1958122
"""
- VERSION = "4.0.15"
+ VERSION = "4.0.16"
stats = {}
@@ -3198,18 +3199,60 @@ class HPE3PARCommon(object):
attachment_list = volume.volume_attachment
LOG.debug("Volume attachment list: %(atl)s",
{'atl': attachment_list})
+
try:
attachment_list = attachment_list.objects
except AttributeError:
pass
if attachment_list is not None and len(attachment_list) > 1:
- LOG.info("Volume %(volume)s is attached to multiple "
- "instances on host %(host_name)s, "
- "skip terminate volume connection",
- {'volume': volume.name,
- 'host_name': volume.host.split('@')[0]})
- return
+ # There are two possibilities: the instances can reside:
+ # [1] either on same host.
+ # [2] or on different hosts.
+ #
+ # case [1]:
+ # In such case, behaviour is same as earlier i.e vlun is
+ # not deleted now i.e skip remainder of terminate volume
+ # connection.
+ #
+ # case [2]:
+ # In such case, vlun of that host on 3par array should
+ # be deleted now. Otherwise, it remains as stale entry on
+ # 3par array; which later leads to error during volume
+ # deletion.
+
+ same_host = False
+ num_hosts = len(attachment_list)
+ all_hostnames = []
+ all_hostnames.append(hostname)
+
+ count = 0
+ for i in range(num_hosts):
+ hostname_i = str(attachment_list[i].attached_host)
+ if hostname == hostname_i:
+ # current host
+ count = count + 1
+ if count > 1:
+ # volume attached to multiple instances on
+ # current host
+ same_host = True
+ else:
+ # different host
+ all_hostnames.append(hostname_i)
+
+ if same_host:
+ LOG.info("Volume %(volume)s is attached to multiple "
+ "instances on same host %(host_name)s, "
+ "skip terminate volume connection",
+ {'volume': volume.name,
+ 'host_name': volume.host.split('@')[0]})
+ return
+ else:
+ hostnames = ",".join(all_hostnames)
+ LOG.info("Volume %(volume)s is attached to instances "
+ "on multiple hosts %(hostnames)s. Proceed with "
+ "deletion of vlun on this host.",
+ {'volume': volume.name, 'hostnames': hostnames})
# does 3par know this host by a different name?
hosts = None
diff --git a/releasenotes/notes/hpe-3par-fix-multi-detach-in-multi-host-env-3f2211f29a336b6e.yaml b/releasenotes/notes/hpe-3par-fix-multi-detach-in-multi-host-env-3f2211f29a336b6e.yaml
new file mode 100644
index 000000000..e63ff4c89
--- /dev/null
+++ b/releasenotes/notes/hpe-3par-fix-multi-detach-in-multi-host-env-3f2211f29a336b6e.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - |
+ HPE 3PAR driver `Bug #1958122 <https://bugs.launchpad.net/cinder/+bug/1958122>`_:
+    Fixed the multi-detach operation in a multi-host environment.
+