author    Zuul <zuul@review.opendev.org>    2020-09-30 03:26:40 +0000
committer Gerrit Code Review <review@openstack.org>    2020-09-30 03:26:41 +0000
commit    7fd85e9292795042c05db6b58f36df95d16fd016 (patch)
tree      3ee268fc4f800dbd0389e0cb059b5540bd19483a
parent    58f0e737973f36cd759ee054bcdf8f4f644273b3 (diff)
parent    5dd1fa9a7873052e3fd87e41b960ae4b3de2136a (diff)
download  cinder-7fd85e9292795042c05db6b58f36df95d16fd016.tar.gz
Merge "PowerMax Driver - Legacy volumes fail to live migrate" into stable/train
-rw-r--r--  cinder/tests/unit/volume/drivers/dell_emc/powermax/powermax_data.py  11
-rw-r--r--  cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_migrate.py  512
-rw-r--r--  cinder/volume/drivers/dell_emc/powermax/common.py  23
-rw-r--r--  cinder/volume/drivers/dell_emc/powermax/fc.py  3
-rw-r--r--  cinder/volume/drivers/dell_emc/powermax/iscsi.py  3
-rw-r--r--  cinder/volume/drivers/dell_emc/powermax/masking.py  9
-rw-r--r--  cinder/volume/drivers/dell_emc/powermax/migrate.py  423
-rw-r--r--  cinder/volume/drivers/dell_emc/powermax/utils.py  25
-rw-r--r--  releasenotes/notes/powermax-auto-migration-5cc57773c23fef02.yaml  13
9 files changed, 1017 insertions, 5 deletions
diff --git a/cinder/tests/unit/volume/drivers/dell_emc/powermax/powermax_data.py b/cinder/tests/unit/volume/drivers/dell_emc/powermax/powermax_data.py
index 92965da30..3842b12bc 100644
--- a/cinder/tests/unit/volume/drivers/dell_emc/powermax/powermax_data.py
+++ b/cinder/tests/unit/volume/drivers/dell_emc/powermax/powermax_data.py
@@ -1218,3 +1218,14 @@ class PowerMaxData(object):
volume_metadata = {
'DeviceID': device_id, 'ArrayID': array, 'ArrayModel': array_model}
+
+ staging_sg = 'STG-myhostB-4732de9b-98a4-4b6d-ae4b-3cafb3d34220-SG'
+ staging_mv1 = 'STG-myhostA-4732de9b-98a4-4b6d-ae4b-3cafb3d34220-MV'
+ staging_mv2 = 'STG-myhostB-4732de9b-98a4-4b6d-ae4b-3cafb3d34220-MV'
+ staging_mvs = [staging_mv1, staging_mv2]
+ legacy_mv1 = 'OS-myhostA-No_SLO-e14f48b8-MV'
+ legacy_mv2 = 'OS-myhostB-No_SLO-e14f48b8-MV'
+ legacy_shared_sg = 'OS-myhostA-No_SLO-SG'
+ legacy_mvs = [legacy_mv1, legacy_mv2]
+ legacy_not_shared_mv = 'OS-myhostA-SRP_1-Diamond-NONE-MV'
+ legacy_not_shared_sg = 'OS-myhostA-SRP_1-Diamond-NONE-SG'
diff --git a/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_migrate.py b/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_migrate.py
new file mode 100644
index 000000000..e8f2d3c4c
--- /dev/null
+++ b/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_migrate.py
@@ -0,0 +1,512 @@
+# Copyright (c) 2020 Dell Inc. or its subsidiaries.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from copy import deepcopy
+import mock
+
+from cinder import exception
+from cinder import test
+from cinder.tests.unit.volume.drivers.dell_emc.powermax import (
+ powermax_data as tpd)
+from cinder.tests.unit.volume.drivers.dell_emc.powermax import (
+ powermax_fake_objects as tpfo)
+from cinder.volume.drivers.dell_emc.powermax import iscsi
+from cinder.volume.drivers.dell_emc.powermax import migrate
+from cinder.volume.drivers.dell_emc.powermax import provision
+from cinder.volume.drivers.dell_emc.powermax import rest
+from cinder.volume import volume_utils
+
+
+class PowerMaxMigrateTest(test.TestCase):
+ def setUp(self):
+ self.data = tpd.PowerMaxData()
+ volume_utils.get_max_over_subscription_ratio = mock.Mock()
+ super(PowerMaxMigrateTest, self).setUp()
+ configuration = tpfo.FakeConfiguration(
+ None, 'MaskingTests', 1, 1, san_ip='1.1.1.1',
+ san_login='smc', vmax_array=self.data.array, vmax_srp='SRP_1',
+ san_password='smc', san_api_port=8443,
+ vmax_port_groups=[self.data.port_group_name_f])
+ rest.PowerMaxRest._establish_rest_session = mock.Mock(
+ return_value=tpfo.FakeRequestsSession())
+ driver = iscsi.PowerMaxISCSIDriver(configuration=configuration)
+ self.driver = driver
+ self.common = self.driver.common
+ self.migrate = self.common.migrate
+
+ def test_get_masking_view_component_dict_shared_format_1(self):
+ """Test for get_masking_view_component_dict, legacy case 1."""
+ component_dict = self.migrate.get_masking_view_component_dict(
+ 'OS-myhost-No_SLO-8970da0c-MV', 'SRP_1')
+ self.assertEqual('OS', component_dict['prefix'])
+ self.assertEqual('myhost', component_dict['host'])
+ self.assertEqual('No_SLO', component_dict['no_slo'])
+ self.assertEqual('-8970da0c', component_dict['uuid'])
+ self.assertEqual('MV', component_dict['postfix'])
+
+ def test_get_masking_view_component_dict_shared_format_2(self):
+ """Test for get_masking_view_component_dict, legacy case 2."""
+ component_dict = self.migrate.get_masking_view_component_dict(
+ 'OS-myhost-No_SLO-F-8970da0c-MV', 'SRP_1')
+ self.assertEqual('OS', component_dict['prefix'])
+ self.assertEqual('myhost', component_dict['host'])
+ self.assertEqual('-F', component_dict['protocol'])
+ self.assertEqual('No_SLO', component_dict['no_slo'])
+ self.assertEqual('-8970da0c', component_dict['uuid'])
+ self.assertEqual('MV', component_dict['postfix'])
+
+ def test_get_masking_view_component_dict_shared_format_3(self):
+ """Test for get_masking_view_component_dict, legacy case 3."""
+ component_dict = self.migrate.get_masking_view_component_dict(
+ 'OS-myhost-SRP_1-Silver-NONE-74346a64-MV', 'SRP_1')
+ self.assertEqual('OS', component_dict['prefix'])
+ self.assertEqual('myhost', component_dict['host'])
+ self.assertEqual('SRP_1', component_dict['srp'])
+ self.assertEqual('Silver', component_dict['slo'])
+ self.assertEqual('NONE', component_dict['workload'])
+ self.assertEqual('-74346a64', component_dict['uuid'])
+ self.assertEqual('MV', component_dict['postfix'])
+
+ def test_get_masking_view_component_dict_shared_format_4(self):
+ """Test for get_masking_view_component_dict, legacy case 4."""
+ component_dict = self.migrate.get_masking_view_component_dict(
+ 'OS-myhost-SRP_1-Bronze-DSS-I-1b454e9f-MV', 'SRP_1')
+ self.assertEqual('OS', component_dict['prefix'])
+ self.assertEqual('myhost', component_dict['host'])
+ self.assertEqual('SRP_1', component_dict['srp'])
+ self.assertEqual('Bronze', component_dict['slo'])
+ self.assertEqual('DSS', component_dict['workload'])
+ self.assertEqual('-I', component_dict['protocol'])
+ self.assertEqual('-1b454e9f', component_dict['uuid'])
+ self.assertEqual('MV', component_dict['postfix'])
+
+ def test_get_masking_view_component_dict_non_shared_format_5(self):
+ """Test for get_masking_view_component_dict, legacy case 5."""
+ component_dict = self.migrate.get_masking_view_component_dict(
+ 'OS-myhost-No_SLO-MV', 'SRP_1')
+ self.assertEqual('OS', component_dict['prefix'])
+ self.assertEqual('myhost', component_dict['host'])
+ self.assertEqual('No_SLO', component_dict['no_slo'])
+ self.assertEqual('MV', component_dict['postfix'])
+
+ def test_get_masking_view_component_dict_non_shared_format_6(self):
+ """Test for get_masking_view_component_dict, legacy case 6."""
+ component_dict = self.migrate.get_masking_view_component_dict(
+ 'OS-myhost-No_SLO-F-MV', 'SRP_1')
+ self.assertEqual('OS', component_dict['prefix'])
+ self.assertEqual('myhost', component_dict['host'])
+ self.assertEqual('No_SLO', component_dict['no_slo'])
+ self.assertEqual('-F', component_dict['protocol'])
+ self.assertEqual('MV', component_dict['postfix'])
+
+ def test_get_masking_view_component_dict_non_shared_format_7(self):
+ """Test for get_masking_view_component_dict, legacy case 7."""
+ component_dict = self.migrate.get_masking_view_component_dict(
+ 'OS-myhost-SRP_1-Diamond-OLTP-MV', 'SRP_1')
+ self.assertEqual('OS', component_dict['prefix'])
+ self.assertEqual('myhost', component_dict['host'])
+ self.assertEqual('SRP_1', component_dict['srp'])
+ self.assertEqual('Diamond', component_dict['slo'])
+ self.assertEqual('OLTP', component_dict['workload'])
+ self.assertEqual('MV', component_dict['postfix'])
+
+ def test_get_masking_view_component_dict_non_shared_format_8(self):
+ """Test for get_masking_view_component_dict, legacy case 8."""
+ component_dict = self.migrate.get_masking_view_component_dict(
+ 'OS-myhost-SRP_1-Gold-NONE-F-MV', 'SRP_1')
+ self.assertEqual('OS', component_dict['prefix'])
+ self.assertEqual('myhost', component_dict['host'])
+ self.assertEqual('SRP_1', component_dict['srp'])
+ self.assertEqual('Gold', component_dict['slo'])
+ self.assertEqual('NONE', component_dict['workload'])
+ self.assertEqual('-F', component_dict['protocol'])
+ self.assertEqual('MV', component_dict['postfix'])
+
+ def test_get_masking_view_component_dict_host_with_dashes_no_slo(
+ self):
+ """Test for get_masking_view_component_dict, dashes in host."""
+ component_dict = self.migrate.get_masking_view_component_dict(
+ 'OS-host-with-dashes-No_SLO-I-MV', 'SRP_1')
+ self.assertEqual('OS', component_dict['prefix'])
+ self.assertEqual('host-with-dashes', component_dict['host'])
+ self.assertEqual('No_SLO', component_dict['no_slo'])
+ self.assertEqual('-I', component_dict['protocol'])
+ self.assertEqual('MV', component_dict['postfix'])
+
+ def test_get_masking_view_component_dict_host_with_dashes_slo(self):
+ """Test for get_masking_view_component_dict, dashes and slo."""
+ component_dict = self.migrate.get_masking_view_component_dict(
+ 'OS-host-with-dashes-SRP_1-Diamond-NONE-I-MV', 'SRP_1')
+ self.assertEqual('OS', component_dict['prefix'])
+ self.assertEqual('host-with-dashes', component_dict['host'])
+ self.assertEqual('SRP_1', component_dict['srp'])
+ self.assertEqual('Diamond', component_dict['slo'])
+ self.assertEqual('NONE', component_dict['workload'])
+ self.assertEqual('-I', component_dict['protocol'])
+ self.assertEqual('MV', component_dict['postfix'])
+
+ def test_get_masking_view_component_dict_replication_enabled(self):
+ """Test for get_masking_view_component_dict, replication enabled."""
+ component_dict = self.migrate.get_masking_view_component_dict(
+ 'OS-myhost-SRP_1-Diamond-OLTP-I-RE-MV', 'SRP_1')
+ self.assertEqual('OS', component_dict['prefix'])
+ self.assertEqual('myhost', component_dict['host'])
+ self.assertEqual('-I', component_dict['protocol'])
+ self.assertEqual('Diamond', component_dict['slo'])
+ self.assertEqual('OLTP', component_dict['workload'])
+ self.assertEqual('-RE', component_dict['RE'])
+
+ def test_get_masking_view_component_dict_compression_disabled(self):
+ """Test for get_masking_view_component_dict, compression disabled."""
+ component_dict = self.migrate.get_masking_view_component_dict(
+ 'OS-myhost-SRP_1-Bronze-DSS_REP-I-CD-MV', 'SRP_1')
+ self.assertEqual('OS', component_dict['prefix'])
+ self.assertEqual('myhost', component_dict['host'])
+ self.assertEqual('-I', component_dict['protocol'])
+ self.assertEqual('Bronze', component_dict['slo'])
+ self.assertEqual('DSS_REP', component_dict['workload'])
+ self.assertEqual('-CD', component_dict['CD'])
+
+ def test_get_masking_view_component_dict_CD_RE(self):
+ """Test for get_masking_view_component_dict, CD and RE."""
+ component_dict = self.migrate.get_masking_view_component_dict(
+ 'OS-myhost-SRP_1-Platinum-OLTP_REP-I-CD-RE-MV', 'SRP_1')
+ self.assertEqual('OS', component_dict['prefix'])
+ self.assertEqual('myhost', component_dict['host'])
+ self.assertEqual('-I', component_dict['protocol'])
+ self.assertEqual('Platinum', component_dict['slo'])
+ self.assertEqual('OLTP_REP', component_dict['workload'])
+ self.assertEqual('-CD', component_dict['CD'])
+ self.assertEqual('-RE', component_dict['RE'])
+
+ @mock.patch.object(migrate.PowerMaxMigrate,
+ '_perform_migration',
+ return_value=True)
+ @mock.patch.object(migrate.PowerMaxMigrate,
+ '_get_mvs_and_sgs_from_volume',
+ return_value=(tpd.PowerMaxData.legacy_mvs,
+ [tpd.PowerMaxData.legacy_shared_sg]))
+ @mock.patch.object(migrate.PowerMaxMigrate,
+ 'get_volume_host_list',
+ return_value=['myhostB'])
+ def test_do_migrate_if_candidate(
+ self, mock_os_host, mock_mvs, mock_migrate):
+ self.assertTrue(self.migrate.do_migrate_if_candidate(
+ self.data.array, self.data.srp, self.data.device_id,
+ self.data.test_volume, self.data.connector))
+
+ @mock.patch.object(migrate.PowerMaxMigrate,
+ '_get_mvs_and_sgs_from_volume',
+ return_value=([tpd.PowerMaxData.legacy_not_shared_mv],
+ [tpd.PowerMaxData.legacy_not_shared_sg]))
+ def test_do_migrate_if_candidate_not_shared(
+ self, mock_mvs):
+ self.assertFalse(self.migrate.do_migrate_if_candidate(
+ self.data.array, self.data.srp, self.data.device_id,
+ self.data.test_volume, self.data.connector))
+
+ @mock.patch.object(migrate.PowerMaxMigrate,
+ '_get_mvs_and_sgs_from_volume',
+ return_value=(tpd.PowerMaxData.legacy_mvs,
+ [tpd.PowerMaxData.legacy_shared_sg,
+ 'non_fast_sg']))
+ def test_do_migrate_if_candidate_in_multiple_sgs(
+ self, mock_mvs):
+ self.assertFalse(self.migrate.do_migrate_if_candidate(
+ self.data.array, self.data.srp, self.data.device_id,
+ self.data.test_volume, self.data.connector))
+
+ @mock.patch.object(migrate.PowerMaxMigrate,
+ '_perform_migration',
+ return_value=True)
+ @mock.patch.object(migrate.PowerMaxMigrate,
+ '_get_mvs_and_sgs_from_volume',
+ return_value=(tpd.PowerMaxData.legacy_mvs,
+ [tpd.PowerMaxData.legacy_shared_sg]))
+ @mock.patch.object(migrate.PowerMaxMigrate,
+ 'get_volume_host_list',
+ return_value=['myhostA', 'myhostB'])
+ def test_do_migrate_if_candidate_multiple_os_hosts(
+ self, mock_os_host, mock_mvs, mock_migrate):
+ self.assertFalse(self.migrate.do_migrate_if_candidate(
+ self.data.array, self.data.srp, self.data.device_id,
+ self.data.test_volume, self.data.connector))
+
+ @mock.patch.object(migrate.PowerMaxMigrate,
+ '_delete_staging_masking_views')
+ @mock.patch.object(migrate.PowerMaxMigrate,
+ '_get_mvs_and_sgs_from_volume',
+ side_effect=[(tpd.PowerMaxData.staging_mvs,
+ [tpd.PowerMaxData.staging_sg]),
+ ([tpd.PowerMaxData.staging_mv2],
+ [tpd.PowerMaxData.staging_sg])])
+ @mock.patch.object(migrate.PowerMaxMigrate,
+ '_create_stg_masking_views',
+ return_value=tpd.PowerMaxData.staging_mvs)
+ @mock.patch.object(migrate.PowerMaxMigrate,
+ '_create_stg_storage_group_with_vol',
+ return_value=tpd.PowerMaxData.staging_sg)
+ def test_perform_migration(self, mock_sg, mock_mvs, mock_new, mock_del):
+ """Test to perform migration"""
+ source_sg_name = 'OS-myhost-SRP_1-Diamond-OLTP-F-SG'
+ mv_details_list = list()
+ mv_details_list.append(self.migrate.get_masking_view_component_dict(
+ 'OS-myhostA-SRP_1-Diamond-OLTP-F-1b454e9f-MV', 'SRP_1'))
+ mv_details_list.append(self.migrate.get_masking_view_component_dict(
+ 'OS-myhostB-SRP_1-Diamond-OLTP-F-8970da0c-MV', 'SRP_1'))
+ self.assertTrue(self.migrate._perform_migration(
+ self.data.array, self.data.device_id, mv_details_list,
+ source_sg_name, 'myhostB'))
+
+ @mock.patch.object(migrate.PowerMaxMigrate,
+ '_create_stg_storage_group_with_vol',
+ return_value=None)
+ def test_perform_migration_storage_group_fail(self, mock_sg):
+ """Test to perform migration"""
+ source_sg_name = 'OS-myhost-SRP_1-Diamond-OLTP-F-SG'
+ mv_details_list = list()
+ mv_details_list.append(self.migrate.get_masking_view_component_dict(
+ 'OS-myhostA-SRP_1-Diamond-OLTP-F-1b454e9f-MV', 'SRP_1'))
+ mv_details_list.append(self.migrate.get_masking_view_component_dict(
+ 'OS-myhostB-SRP_1-Diamond-OLTP-F-8970da0c-MV', 'SRP_1'))
+ self.assertRaises(
+ exception.VolumeBackendAPIException,
+ self.migrate._perform_migration, self.data.array,
+ self.data.device_id, mv_details_list,
+ source_sg_name, 'myhostB')
+ with self.assertRaisesRegex(
+ exception.VolumeBackendAPIException,
+ 'MIGRATE - Unable to create staging storage group.'):
+ self.migrate._perform_migration(
+ self.data.array, self.data.device_id, mv_details_list,
+ source_sg_name, 'myhostB')
+
+ @mock.patch.object(migrate.PowerMaxMigrate,
+ '_create_stg_masking_views',
+ return_value=[])
+ @mock.patch.object(migrate.PowerMaxMigrate,
+ '_create_stg_storage_group_with_vol',
+ return_value=tpd.PowerMaxData.staging_sg)
+ def test_perform_migration_masking_views_fail(self, mock_sg, mock_mvs):
+ """Test to perform migration"""
+ source_sg_name = 'OS-myhost-SRP_1-Diamond-OLTP-F-SG'
+ mv_details_list = list()
+ mv_details_list.append(self.migrate.get_masking_view_component_dict(
+ 'OS-myhostA-SRP_1-Diamond-OLTP-F-1b454e9f-MV', 'SRP_1'))
+ mv_details_list.append(self.migrate.get_masking_view_component_dict(
+ 'OS-myhostB-SRP_1-Diamond-OLTP-F-8970da0c-MV', 'SRP_1'))
+ with self.assertRaisesRegex(
+ exception.VolumeBackendAPIException,
+ 'MIGRATE - Unable to create staging masking views.'):
+ self.migrate._perform_migration(
+ self.data.array, self.data.device_id, mv_details_list,
+ source_sg_name, 'myhostB')
+
+ @mock.patch.object(migrate.PowerMaxMigrate,
+ '_get_mvs_and_sgs_from_volume',
+ return_value=(tpd.PowerMaxData.staging_mvs,
+ [tpd.PowerMaxData.staging_sg,
+ tpd.PowerMaxData.staging_sg]))
+ @mock.patch.object(migrate.PowerMaxMigrate,
+ '_create_stg_masking_views',
+ return_value=tpd.PowerMaxData.staging_mvs)
+ @mock.patch.object(migrate.PowerMaxMigrate,
+ '_create_stg_storage_group_with_vol',
+ return_value=tpd.PowerMaxData.staging_sg)
+ def test_perform_migration_sg_list_len_fail(
+ self, mock_sg, mock_mvs, mock_new):
+ """Test to perform migration"""
+ source_sg_name = 'OS-myhost-SRP_1-Diamond-OLTP-F-SG'
+ mv_details_list = list()
+ mv_details_list.append(self.migrate.get_masking_view_component_dict(
+ 'OS-myhostA-SRP_1-Diamond-OLTP-F-1b454e9f-MV', 'SRP_1'))
+ mv_details_list.append(self.migrate.get_masking_view_component_dict(
+ 'OS-myhostB-SRP_1-Diamond-OLTP-F-8970da0c-MV', 'SRP_1'))
+
+ exception_message = (
+ r"MIGRATE - The current storage group list has 2 "
+ r"members. The list is "
+ r"\[\'STG-myhostB-4732de9b-98a4-4b6d-ae4b-3cafb3d34220-SG\', "
+ r"\'STG-myhostB-4732de9b-98a4-4b6d-ae4b-3cafb3d34220-SG\'\]. "
+ r"Will not proceed with cleanup. Please contact customer "
+ r"representative.")
+
+ with self.assertRaisesRegex(
+ exception.VolumeBackendAPIException,
+ exception_message):
+ self.migrate._perform_migration(
+ self.data.array, self.data.device_id, mv_details_list,
+ source_sg_name, 'myhostB')
+
+ @mock.patch.object(migrate.PowerMaxMigrate,
+ '_get_mvs_and_sgs_from_volume',
+ return_value=(tpd.PowerMaxData.staging_mvs,
+ ['not_staging_sg']))
+ @mock.patch.object(migrate.PowerMaxMigrate,
+ '_create_stg_masking_views',
+ return_value=tpd.PowerMaxData.staging_mvs)
+ @mock.patch.object(migrate.PowerMaxMigrate,
+ '_create_stg_storage_group_with_vol',
+ return_value=tpd.PowerMaxData.staging_sg)
+ def test_perform_migration_stg_sg_mismatch_fail(
+ self, mock_sg, mock_mvs, mock_new):
+ """Test to perform migration"""
+ source_sg_name = 'OS-myhost-SRP_1-Diamond-OLTP-F-SG'
+ mv_details_list = list()
+ mv_details_list.append(self.migrate.get_masking_view_component_dict(
+ 'OS-myhostA-SRP_1-Diamond-OLTP-F-1b454e9f-MV', 'SRP_1'))
+ mv_details_list.append(self.migrate.get_masking_view_component_dict(
+ 'OS-myhostB-SRP_1-Diamond-OLTP-F-8970da0c-MV', 'SRP_1'))
+ with self.assertRaisesRegex(
+ exception.VolumeBackendAPIException,
+ 'MIGRATE - The current storage group not_staging_sg does not '
+ 'match STG-myhostB-4732de9b-98a4-4b6d-ae4b-3cafb3d34220-SG. '
+ 'Will not proceed with cleanup. Please contact customer '
+ 'representative.'):
+ self.migrate._perform_migration(
+ self.data.array, self.data.device_id, mv_details_list,
+ source_sg_name, 'myhostB')
+
+ @mock.patch.object(rest.PowerMaxRest, 'delete_masking_view')
+ def test_delete_staging_masking_views(self, mock_del):
+ self.assertTrue(self.migrate._delete_staging_masking_views(
+ self.data.array, self.data.staging_mvs, 'myhostB'))
+ mock_del.assert_called_once()
+
+ @mock.patch.object(rest.PowerMaxRest, 'delete_masking_view')
+ def test_delete_staging_masking_views_no_host_match(self, mock_del):
+ self.assertFalse(self.migrate._delete_staging_masking_views(
+ self.data.array, self.data.staging_mvs, 'myhostC'))
+ mock_del.assert_not_called()
+
+ @mock.patch.object(rest.PowerMaxRest, 'create_masking_view')
+ @mock.patch.object(rest.PowerMaxRest, 'get_masking_view',
+ return_value=tpd.PowerMaxData.maskingview[0])
+ def test_create_stg_masking_views(self, mock_get, mock_create):
+ mv_detail_list = list()
+ for masking_view in self.data.legacy_mvs:
+ masking_view_dict = self.migrate.get_masking_view_component_dict(
+ masking_view, 'SRP_1')
+ if masking_view_dict:
+ mv_detail_list.append(masking_view_dict)
+ self.assertIsNotNone(self.migrate._create_stg_masking_views(
+ self.data.array, mv_detail_list, self.data.staging_sg,
+ self.data.extra_specs))
+ self.assertEqual(2, mock_create.call_count)
+
+ @mock.patch.object(rest.PowerMaxRest, 'create_masking_view')
+ @mock.patch.object(rest.PowerMaxRest, 'get_masking_view',
+ side_effect=[tpd.PowerMaxData.maskingview[0], None])
+ def test_create_stg_masking_views_mv_not_created(
+ self, mock_get, mock_create):
+ mv_detail_list = list()
+ for masking_view in self.data.legacy_mvs:
+ masking_view_dict = self.migrate.get_masking_view_component_dict(
+ masking_view, 'SRP_1')
+ if masking_view_dict:
+ mv_detail_list.append(masking_view_dict)
+ self.assertIsNone(self.migrate._create_stg_masking_views(
+ self.data.array, mv_detail_list, self.data.staging_sg,
+ self.data.extra_specs))
+
+ @mock.patch.object(provision.PowerMaxProvision, 'create_volume_from_sg')
+ @mock.patch.object(provision.PowerMaxProvision, 'create_storage_group',
+ return_value=tpd.PowerMaxData.staging_mvs[0])
+ def test_create_stg_storage_group_with_vol(self, mock_mv, mock_create):
+ self.migrate._create_stg_storage_group_with_vol(
+ self.data.array, 'myhostB', self.data.extra_specs)
+ mock_create.assert_called_once()
+
+ @mock.patch.object(provision.PowerMaxProvision, 'create_volume_from_sg')
+ @mock.patch.object(provision.PowerMaxProvision, 'create_storage_group',
+ return_value=None)
+ def test_create_stg_storage_group_with_vol_None(
+ self, mock_mv, mock_create):
+ self.assertIsNone(self.migrate._create_stg_storage_group_with_vol(
+ self.data.array, 'myhostB', self.data.extra_specs))
+
+ @mock.patch.object(rest.PowerMaxRest,
+ 'get_masking_views_from_storage_group',
+ return_value=tpd.PowerMaxData.legacy_mvs)
+ @mock.patch.object(rest.PowerMaxRest, 'get_storage_groups_from_volume',
+ return_value=[tpd.PowerMaxData.legacy_shared_sg])
+ def test_get_mvs_and_sgs_from_volume(self, mock_sgs, mock_mvs):
+ mv_list, sg_list = self.migrate._get_mvs_and_sgs_from_volume(
+ self.data.array, self.data.device_id)
+ mock_mvs.assert_called_once()
+ self.assertEqual([self.data.legacy_shared_sg], sg_list)
+ self.assertEqual(self.data.legacy_mvs, mv_list)
+
+ @mock.patch.object(rest.PowerMaxRest,
+ 'get_masking_views_from_storage_group')
+ @mock.patch.object(rest.PowerMaxRest, 'get_storage_groups_from_volume',
+ return_value=list())
+ def test_get_mvs_and_sgs_from_volume_empty_sg_list(
+ self, mock_sgs, mock_mvs):
+ mv_list, sg_list = self.migrate._get_mvs_and_sgs_from_volume(
+ self.data.array, self.data.device_id)
+ mock_mvs.assert_not_called()
+ self.assertTrue(len(sg_list) == 0)
+ self.assertTrue(len(mv_list) == 0)
+
+ def test_get_volume_host_list(self):
+ volume1 = deepcopy(self.data.test_volume)
+ volume1.volume_attachment.objects = [self.data.test_volume_attachment]
+ os_host_list = self.migrate.get_volume_host_list(
+ volume1, self.data.connector)
+ self.assertEqual('HostX', os_host_list[0])
+
+ def test_get_volume_host_list_no_attachments(self):
+ _volume_attachment = deepcopy(self.data.test_volume_attachment)
+ _volume_attachment.update({'connector': None})
+ volume1 = deepcopy(self.data.test_volume)
+ volume1.volume_attachment.objects = [_volume_attachment]
+ os_host_list = self.migrate.get_volume_host_list(
+ volume1, self.data.connector)
+ self.assertTrue(len(os_host_list) == 0)
+
+ @mock.patch.object(rest.PowerMaxRest,
+ 'delete_masking_view')
+ @mock.patch.object(rest.PowerMaxRest,
+ 'get_masking_views_from_storage_group',
+ return_value=[tpd.PowerMaxData.staging_mv1])
+ @mock.patch.object(rest.PowerMaxRest,
+ 'get_volumes_in_storage_group',
+ return_value=[tpd.PowerMaxData.volume_id])
+ def test_cleanup_staging_objects(self, mock_vols, mock_mvs, mock_del_mv):
+ self.migrate.cleanup_staging_objects(
+ self.data.array, [self.data.staging_sg], self.data.extra_specs)
+ mock_del_mv.assert_called_once_with(
+ self.data.array, self.data.staging_mv1)
+
+ @mock.patch.object(rest.PowerMaxRest,
+ 'delete_masking_view')
+ def test_cleanup_staging_objects_not_staging(self, mock_del_mv):
+ self.migrate.cleanup_staging_objects(
+ self.data.array, [self.data.storagegroup_name_f],
+ self.data.extra_specs)
+ mock_del_mv.assert_not_called()
+
+ @mock.patch.object(rest.PowerMaxRest,
+ 'get_masking_views_from_storage_group')
+ @mock.patch.object(rest.PowerMaxRest,
+ 'get_volumes_in_storage_group',
+ return_value=[tpd.PowerMaxData.device_id,
+ tpd.PowerMaxData.device_id2], )
+ def test_cleanup_staging_objects_multiple_vols(self, mock_vols, mock_mvs):
+ self.migrate.cleanup_staging_objects(
+ self.data.array, [self.data.storagegroup_name_f],
+ self.data.extra_specs)
+ mock_mvs.assert_not_called()
diff --git a/cinder/volume/drivers/dell_emc/powermax/common.py b/cinder/volume/drivers/dell_emc/powermax/common.py
index 5b8146054..88f860360 100644
--- a/cinder/volume/drivers/dell_emc/powermax/common.py
+++ b/cinder/volume/drivers/dell_emc/powermax/common.py
@@ -34,6 +34,7 @@ from cinder.utils import retry
from cinder.volume import configuration
from cinder.volume.drivers.dell_emc.powermax import masking
from cinder.volume.drivers.dell_emc.powermax import metadata as volume_metadata
+from cinder.volume.drivers.dell_emc.powermax import migrate
from cinder.volume.drivers.dell_emc.powermax import provision
from cinder.volume.drivers.dell_emc.powermax import rest
from cinder.volume.drivers.dell_emc.powermax import utils
@@ -167,6 +168,7 @@ class PowerMaxCommon(object):
self.provision = provision.PowerMaxProvision(self.rest)
self.volume_metadata = volume_metadata.PowerMaxVolumeMetadata(
self.rest, version, LOG.isEnabledFor(logging.DEBUG))
+ self.migrate = migrate.PowerMaxMigrate(prtcl, self.rest)
# Configuration/Attributes
self.protocol = prtcl
@@ -687,12 +689,17 @@ class PowerMaxCommon(object):
volume_name = volume.name
LOG.debug("Detaching volume %s.", volume_name)
reset = False if is_multiattach else True
+ if is_multiattach:
+ storage_group_names = self.rest.get_storage_groups_from_volume(
+ array, device_id)
self.masking.remove_and_reset_members(
array, volume, device_id, volume_name,
extra_specs, reset, connector, async_grp=async_grp)
if is_multiattach:
self.masking.return_volume_to_fast_managed_group(
array, device_id, extra_specs)
+ self.migrate.cleanup_staging_objects(
+ array, storage_group_names, extra_specs)
def _unmap_lun(self, volume, connector):
"""Unmaps a volume from the host.
@@ -816,7 +823,8 @@ class PowerMaxCommon(object):
if self.utils.is_volume_failed_over(volume):
extra_specs = rep_extra_specs
device_info_dict, is_multiattach = (
- self.find_host_lun_id(volume, connector['host'], extra_specs))
+ self.find_host_lun_id(volume, connector.get('host'), extra_specs,
+ connector=connector))
masking_view_dict = self._populate_masking_dict(
volume, connector, extra_specs)
masking_view_dict[utils.IS_MULTIATTACH] = is_multiattach
@@ -1444,20 +1452,29 @@ class PowerMaxCommon(object):
return founddevice_id
def find_host_lun_id(self, volume, host, extra_specs,
- rep_extra_specs=None):
+ rep_extra_specs=None, connector=None):
"""Given the volume dict find the host lun id for a volume.
:param volume: the volume dict
:param host: host from connector (can be None on a force-detach)
:param extra_specs: the extra specs
:param rep_extra_specs: rep extra specs, passed in if metro device
+ :param connector: connector object can be none.
:returns: dict -- the data dict
"""
maskedvols = {}
is_multiattach = False
volume_name = volume.name
device_id = self._find_device_on_array(volume, extra_specs)
- if rep_extra_specs is not None:
+ if connector:
+ if self.migrate.do_migrate_if_candidate(
+ extra_specs[utils.ARRAY], extra_specs[utils.SRP],
+ device_id, volume, connector):
+ LOG.debug("MIGRATE - Successfully migrated from device "
+ "%(dev)s from legacy shared storage groups, "
+ "pre Pike release.",
+ {'dev': device_id})
+ if rep_extra_specs:
device_id = self.get_remote_target_device(
extra_specs[utils.ARRAY], volume, device_id)[0]
extra_specs = rep_extra_specs
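For readers tracing the common.py change above: find_host_lun_id now receives the connector and, before resolving the host LUN id, asks migrate.do_migrate_if_candidate whether the volume still sits in a pre-Pike shared masking view layout. Below is a minimal, self-contained sketch of that candidacy gate; the function name, the simplified pattern, and the sample names are illustrative assumptions, not the driver's code.

    import re

    def is_legacy_migration_candidate(masking_views, storage_groups,
                                      os_hosts, srp='SRP_1'):
        # Mirrors the guard order in do_migrate_if_candidate (sketch only).
        legacy_mv = re.compile(
            r'^OS-.+?-(' + srp + r'-.+?-.+?|No_SLO)(-I|-F)?'
            r'(-CD)?(-RE)?(-[0-9A-Fa-f]{8})?-MV$')
        if not any(legacy_mv.match(mv) for mv in masking_views):
            return False  # no legacy (SMI-S era) masking view found
        if len(storage_groups) != 1:
            return False  # volume must sit in exactly one storage group
        if len(os_hosts) != 1:
            return False  # OpenStack must record exactly one attached host
        return True

    # Shared legacy layout from the new unit tests: one shared SG, two
    # legacy MVs, one recorded OpenStack host -> migration proceeds.
    print(is_legacy_migration_candidate(
        ['OS-myhostA-No_SLO-e14f48b8-MV', 'OS-myhostB-No_SLO-e14f48b8-MV'],
        ['OS-myhostA-No_SLO-SG'],
        ['myhostB']))  # True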
diff --git a/cinder/volume/drivers/dell_emc/powermax/fc.py b/cinder/volume/drivers/dell_emc/powermax/fc.py
index 86172039e..845a49efb 100644
--- a/cinder/volume/drivers/dell_emc/powermax/fc.py
+++ b/cinder/volume/drivers/dell_emc/powermax/fc.py
@@ -122,9 +122,10 @@ class PowerMaxFCDriver(san.SanDriver, driver.FibreChannelDriver):
4.1.4 - Legacy volume not found fix (#1867163)
4.1.5 - Allowing for default volume type in group (#1866871)
4.1.6 - Pools bug fix allowing 'None' variants (bug #1873253)
+ 4.1.7 - Fix to enable legacy volumes to live migrate (#1867163)
"""
- VERSION = "4.1.6"
+ VERSION = "4.1.7"
# ThirdPartySystems wiki
CI_WIKI_NAME = "EMC_VMAX_CI"
diff --git a/cinder/volume/drivers/dell_emc/powermax/iscsi.py b/cinder/volume/drivers/dell_emc/powermax/iscsi.py
index 32f447e65..a5e338934 100644
--- a/cinder/volume/drivers/dell_emc/powermax/iscsi.py
+++ b/cinder/volume/drivers/dell_emc/powermax/iscsi.py
@@ -127,9 +127,10 @@ class PowerMaxISCSIDriver(san.SanISCSIDriver):
4.1.4 - Legacy volume not found fix (#1867163)
4.1.5 - Allowing for default volume type in group (#1866871)
4.1.6 - Pools bug fix allowing 'None' variants (bug #1873253)
+ 4.1.7 - Fix to enable legacy volumes to live migrate (#1867163)
"""
- VERSION = "4.1.6"
+ VERSION = "4.1.7"
# ThirdPartySystems wiki
CI_WIKI_NAME = "EMC_VMAX_CI"
diff --git a/cinder/volume/drivers/dell_emc/powermax/masking.py b/cinder/volume/drivers/dell_emc/powermax/masking.py
index abeb8778d..1fe96331f 100644
--- a/cinder/volume/drivers/dell_emc/powermax/masking.py
+++ b/cinder/volume/drivers/dell_emc/powermax/masking.py
@@ -1063,6 +1063,7 @@ class PowerMaxMasking(object):
:param reset: flag to indicate if reset is required -- bool
:param async_grp: the async rep group
"""
+
move = False
short_host_name = None
storagegroup_names = (self.rest.get_storage_groups_from_volume(
@@ -1659,6 +1660,14 @@ class PowerMaxMasking(object):
sg_list = self.rest.get_storage_group_list(
serial_number, params={
'child': 'true', 'volumeId': device_id})
+ # Legacy case: a volume already in a single staging ('STG-') SG is mid-migration, so leave it there
+ if not sg_list.get('storageGroupId'):
+ storage_group_list = self.rest.get_storage_groups_from_volume(
+ serial_number, device_id)
+ if storage_group_list and len(storage_group_list) == 1:
+ if 'STG-' in storage_group_list[0]:
+ return mv_dict
+
split_pool = extra_specs['pool_name'].split('+')
src_slo = split_pool[0]
src_wl = split_pool[1] if len(split_pool) == 4 else 'NONE'
diff --git a/cinder/volume/drivers/dell_emc/powermax/migrate.py b/cinder/volume/drivers/dell_emc/powermax/migrate.py
new file mode 100644
index 000000000..73fda32a3
--- /dev/null
+++ b/cinder/volume/drivers/dell_emc/powermax/migrate.py
@@ -0,0 +1,423 @@
+# Copyright (c) 2020 Dell Inc. or its subsidiaries.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import uuid
+
+from oslo_log import log as logging
+
+from cinder import exception
+from cinder.i18n import _
+from cinder.volume.drivers.dell_emc.powermax import masking
+from cinder.volume.drivers.dell_emc.powermax import provision
+from cinder.volume.drivers.dell_emc.powermax import utils
+
+LOG = logging.getLogger(__name__)
+
+
+class PowerMaxMigrate(object):
+ """Upgrade class for Rest based PowerMax volume drivers.
+
+ This upgrade class is for Dell EMC PowerMax volume drivers
+ based on UniSphere Rest API.
+ It supports VMAX 3 and VMAX All Flash and PowerMax arrays.
+
+ """
+ def __init__(self, prtcl, rest):
+ self.rest = rest
+ self.utils = utils.PowerMaxUtils()
+ self.masking = masking.PowerMaxMasking(prtcl, self.rest)
+ self.provision = provision.PowerMaxProvision(self.rest)
+
+ def do_migrate_if_candidate(
+ self, array, srp, device_id, volume, connector):
+ """Check and migrate if the volume is a candidate
+
+ If the volume is in the legacy (SMIS) masking view structure
+ move it to staging storage group within a staging masking view.
+
+ :param array: array serial number
+ :param srp: the SRP
+ :param device_id: the volume device id
+ :param volume: the volume object
+ :param connector: the connector object
+ """
+ mv_detail_list = list()
+
+ masking_view_list, storage_group_list = (
+ self._get_mvs_and_sgs_from_volume(
+ array, device_id))
+
+ for masking_view in masking_view_list:
+ masking_view_dict = self.get_masking_view_component_dict(
+ masking_view, srp)
+ if masking_view_dict:
+ mv_detail_list.append(masking_view_dict)
+
+ if not mv_detail_list:
+ return False
+
+ if len(storage_group_list) != 1:
+ LOG.warning("MIGRATE - The volume %(dev_id)s is not in one "
+ "storage group as is expected for migration. "
+ "The volume is in storage groups %(sg_list)s."
+ "Migration will not proceed.",
+ {'dev_id': device_id,
+ 'sg_list': storage_group_list})
+ return False
+ else:
+ source_storage_group_name = storage_group_list[0]
+
+ # Get the host that OpenStack has volume exposed to (it should only
+ # be one host).
+ os_host_list = self.get_volume_host_list(volume, connector)
+ if len(os_host_list) != 1:
+ LOG.warning("MIGRATE - OpenStack has recorded that "
+ "%(dev_id)s is attached to hosts %(os_hosts)s "
+ "and not 1 host as is expected. "
+ "Migration will not proceed.",
+ {'dev_id': device_id,
+ 'os_hosts': os_host_list})
+ return False
+ else:
+ os_host_name = os_host_list[0]
+ LOG.info("MIGRATE - Volume %(dev_id)s is a candidate for "
+ "migration. The OpenStack host is %(os_host_name)s."
+ "The volume is in storage group %(sg_name)s.",
+ {'dev_id': device_id,
+ 'os_host_name': os_host_name,
+ 'sg_name': source_storage_group_name})
+ return self._perform_migration(
+ array, device_id, mv_detail_list, source_storage_group_name,
+ os_host_name)
+
+ def _perform_migration(
+ self, array, device_id, mv_detail_list, source_storage_group_name,
+ os_host_name):
+ """Perform steps so we can get the volume in a correct state.
+
+ :param array: the storage array
+ :param device_id: the device_id
+ :param mv_detail_list: the masking view list
+ :param source_storage_group_name: the source storage group
+ :param os_host_name: the host the volume is exposed to
+ :returns: boolean
+ """
+ extra_specs = {utils.INTERVAL: 3, utils.RETRIES: 200}
+ stg_sg_name = self._create_stg_storage_group_with_vol(
+ array, os_host_name, extra_specs)
+ if not stg_sg_name:
+ # Fail fast; migration cannot continue without a staging SG
+ exception_message = _("MIGRATE - Unable to create staging "
+ "storage group.")
+ LOG.error(exception_message)
+ raise exception.VolumeBackendAPIException(
+ message=exception_message)
+ LOG.info("MIGRATE - Staging storage group %(stg_sg_name)s has "
+ "been successfully created.", {'stg_sg_name': stg_sg_name})
+
+ new_stg_mvs = self._create_stg_masking_views(
+ array, mv_detail_list, stg_sg_name, extra_specs)
+ if not new_stg_mvs:
+ exception_message = _("MIGRATE - Unable to create staging "
+ "masking views.")
+ LOG.error(exception_message)
+ raise exception.VolumeBackendAPIException(
+ message=exception_message)
+
+ LOG.info("MIGRATE - Staging masking views %(new_stg_mvs)s have "
+ "been successfully created.", {'new_stg_mvs': new_stg_mvs})
+
+ # Move volume from old storage group to new staging storage group
+ self.move_volume_from_legacy_to_staging(
+ array, device_id, source_storage_group_name,
+ stg_sg_name, extra_specs)
+
+ LOG.info("MIGRATE - Device id %(device_id)s has been successfully "
+ "moved from %(src_sg)s to %(tgt_sg)s.",
+ {'device_id': device_id,
+ 'src_sg': source_storage_group_name,
+ 'tgt_sg': stg_sg_name})
+
+ new_masking_view_list, new_storage_group_list = (
+ self._get_mvs_and_sgs_from_volume(
+ array, device_id))
+
+ if len(new_storage_group_list) != 1:
+ exception_message = (_(
+ "MIGRATE - The current storage group list has %(list_len)d "
+ "members. The list is %(sg_list)s. Will not proceed with "
+ "cleanup. Please contact customer representative.") % {
+ 'list_len': len(new_storage_group_list),
+ 'sg_list': new_storage_group_list})
+ LOG.error(exception_message)
+ raise exception.VolumeBackendAPIException(
+ message=exception_message)
+ else:
+ current_storage_group_name = new_storage_group_list[0]
+ if current_storage_group_name.lower() != stg_sg_name.lower():
+ exception_message = (_(
+ "MIGRATE - The current storage group %(sg_1)s "
+ "does not match %(sg_2)s. Will not proceed with "
+ "cleanup. Please contact customer representative.") % {
+ 'sg_1': current_storage_group_name,
+ 'sg_2': stg_sg_name})
+ LOG.error(exception_message)
+ raise exception.VolumeBackendAPIException(
+ message=exception_message)
+
+ if not self._delete_staging_masking_views(
+ array, new_masking_view_list, os_host_name):
+ exception_message = _("MIGRATE - Unable to delete staging masking "
+ "views. Please contact customer "
+ "representative.")
+ LOG.error(exception_message)
+ raise exception.VolumeBackendAPIException(
+ message=exception_message)
+
+ final_masking_view_list, final_storage_group_list = (
+ self._get_mvs_and_sgs_from_volume(
+ array, device_id))
+ if len(final_masking_view_list) != 1:
+ exception_message = (_(
+ "MIGRATE - The final masking view list has %(list_len)d "
+ "entries and not 1 entry as is expected. The list is "
+ "%(mv_list)s. Please contact customer representative.") % {
+ 'list_len': len(final_masking_view_list),
+ 'mv_list': final_masking_view_list})
+ LOG.error(exception_message)
+ raise exception.VolumeBackendAPIException(
+ message=exception_message)
+
+ return True
+
+ def move_volume_from_legacy_to_staging(
+ self, array, device_id, source_storage_group_name,
+ stg_sg_name, extra_specs):
+ """Move the volume from legacy SG to staging SG
+
+ :param array: array serial number
+ :param device_id: the device id of the volume
+ :param source_storage_group_name: the source storage group
+ :param stg_sg_name: the target staging storage group
+ :param extra_specs: the extra specs
+ """
+ num_vol_in_sg = self.rest.get_num_vols_in_sg(
+ array, source_storage_group_name)
+ if num_vol_in_sg == 1:
+ # Can't move the last volume out and leave the masking view's
+ # storage group empty, so create a holder volume first
+ temp_vol_size = '1'
+ hold_vol_name = 'hold-' + str(uuid.uuid1())
+ self.provision.create_volume_from_sg(
+ array, hold_vol_name, source_storage_group_name,
+ temp_vol_size, extra_specs)
+ LOG.info("MIGRATE - Volume %(vol)s has been created because "
+ "there was only one volume remaining in storage group "
+ "%(src_sg)s and we are attempting a move it to staging "
+ "storage group %(tgt_sg)s.",
+ {'vol': hold_vol_name,
+ 'src_sg': source_storage_group_name,
+ 'tgt_sg': stg_sg_name})
+
+ self.rest.move_volume_between_storage_groups(
+ array, device_id, source_storage_group_name,
+ stg_sg_name, extra_specs)
+
+ def _delete_staging_masking_views(
+ self, array, masking_view_list, os_host_name):
+ """Delete the staging masking views
+
+ Delete the staging masking views except the masking view
+ exposed to the OpenStack compute
+
+ :param array: array serial number
+ :param masking_view_list: masking view name list
+ :param os_host_name: the host the volume is exposed to in OpenStack
+ :returns: boolean
+ """
+ delete_mv_list = list()
+ safe_to_delete = False
+ for masking_view_name in masking_view_list:
+ if os_host_name in masking_view_name:
+ safe_to_delete = True
+ else:
+ delete_mv_list.append(masking_view_name)
+ if safe_to_delete:
+ for delete_mv in delete_mv_list:
+ self.rest.delete_masking_view(array, delete_mv)
+ LOG.info("MIGRATE - Masking view %(delete_mv)s has been "
+ "successfully deleted.",
+ {'delete_mv': delete_mv})
+ return safe_to_delete
+
+ def _create_stg_masking_views(
+ self, array, mv_detail_list, stg_sg_name, extra_specs):
+ """Create a staging masking views
+
+ :param array: array serial number
+ :param mv_detail_list: masking view detail list
+ :param stg_sg_name: staging storage group name
+ :param extra_specs: the extra specs
+ :returns: masking view list
+ """
+ new_masking_view_list = list()
+ for mv_detail in mv_detail_list:
+ host_name = mv_detail.get('host')
+ masking_view_name = mv_detail.get('mv_name')
+ masking_view_components = self.rest.get_masking_view(
+ array, masking_view_name)
+ # Create a staging masking view
+ random_uuid = uuid.uuid1()
+ staging_mv_name = 'STG-' + host_name + '-' + str(
+ random_uuid) + '-MV'
+ if masking_view_components:
+ self.rest.create_masking_view(
+ array, staging_mv_name, stg_sg_name,
+ masking_view_components.get('portGroupId'),
+ masking_view_components.get('hostId'), extra_specs)
+ masking_view_dict = self.rest.get_masking_view(
+ array, staging_mv_name)
+ if masking_view_dict:
+ new_masking_view_list.append(staging_mv_name)
+ else:
+ LOG.warning("Failed to create staging masking view "
+ "%(mv_name)s. Migration cannot proceed.",
+ {'mv_name': staging_mv_name})
+ return None
+ return new_masking_view_list
+
+ def _create_stg_storage_group_with_vol(self, array, os_host_name,
+ extra_specs):
+ """Create a staging storage group and add volume
+
+ :param array: array serial number
+ :param os_host_name: the openstack host name
+ :param extra_specs: the extra specs
+ :returns: storage group name
+ """
+ random_uuid = uuid.uuid1()
+ # Create a staging SG
+ stg_sg_name = 'STG-' + os_host_name + '-' + (
+ str(random_uuid) + '-SG')
+ temp_vol_name = 'tempvol-' + str(random_uuid)
+ temp_vol_size = '1'
+
+ _stg_storage_group = self.provision.create_storage_group(
+ array, stg_sg_name,
+ None, None, None, extra_specs)
+ if _stg_storage_group:
+ self.provision.create_volume_from_sg(
+ array, temp_vol_name, stg_sg_name,
+ temp_vol_size, extra_specs)
+ return stg_sg_name
+ else:
+ return None
+
+ def _get_mvs_and_sgs_from_volume(self, array, device_id):
+ """Given a device Id get its storage groups and masking views.
+
+ :param array: array serial number
+ :param device_id: the volume device id
+ :returns: masking view list, storage group list
+ """
+ final_masking_view_list = []
+ storage_group_list = self.rest.get_storage_groups_from_volume(
+ array, device_id)
+ for sg in storage_group_list:
+ masking_view_list = self.rest.get_masking_views_from_storage_group(
+ array, sg)
+ final_masking_view_list.extend(masking_view_list)
+ return final_masking_view_list, storage_group_list
+
+ def get_masking_view_component_dict(
+ self, masking_view_name, srp):
+ """Get components from input string.
+
+ :param masking_view_name: the masking view name -- str
+ :param srp: the srp -- str
+ :returns: object components -- dict
+ """
+ regex_str_share = (
+ r'^(?P<prefix>OS)-(?P<host>.+?)((?P<srp>' + srp + r')-'
+ r'(?P<slo>.+?)-(?P<workload>.+?)|(?P<no_slo>No_SLO))'
+ r'((?P<protocol>-I|-F)|)'
+ r'(?P<CD>-CD|)(?P<RE>-RE|)'
+ r'(?P<uuid>-[0-9A-Fa-f]{8}|)'
+ r'-(?P<postfix>MV)$')
+
+ object_dict = self.utils.get_object_components_and_correct_host(
+ regex_str_share, masking_view_name)
+
+ if object_dict:
+ object_dict['mv_name'] = masking_view_name
+ return object_dict
+
+ def get_volume_host_list(self, volume, connector):
+ """Get host list attachments from connector object
+
+ :param volume: the volume object
+ :param connector: the connector object
+ :returns: os_host_list
+ """
+ os_host_list = list()
+ if connector is not None:
+ attachment_list = volume.volume_attachment
+ LOG.debug("Volume attachment list: %(atl)s. "
+ "Attachment type: %(at)s",
+ {'atl': attachment_list, 'at': type(attachment_list)})
+ try:
+ att_list = attachment_list.objects
+ except AttributeError:
+ att_list = attachment_list
+ if att_list is not None:
+ host_list = [att.connector['host'] for att in att_list if
+ att is not None and att.connector is not None]
+ for host_name in host_list:
+ os_host_list.append(self.utils.get_host_short_name(host_name))
+ return os_host_list
+
+ def cleanup_staging_objects(
+ self, array, storage_group_names, extra_specs):
+ """Delete the staging masking views and storage groups
+
+ :param array: the array serial number
+ :param storage_group_names: a list of storage group names
+ :param extra_specs: the extra specs
+ """
+ def _do_cleanup(sg_name, device_id):
+ masking_view_list = (
+ self.rest.get_masking_views_from_storage_group(
+ array, sg_name))
+ for masking_view in masking_view_list:
+ if 'STG-' in masking_view:
+ self.rest.delete_masking_view(array, masking_view)
+ self.rest.remove_vol_from_sg(
+ array, sg_name, device_id,
+ extra_specs)
+ self.rest.delete_volume(array, device_id)
+ self.rest.delete_storage_group(array, sg_name)
+
+ for storage_group_name in storage_group_names:
+ if 'STG-' in storage_group_name:
+ volume_list = self.rest.get_volumes_in_storage_group(
+ array, storage_group_name)
+ if len(volume_list) == 1:
+ try:
+ _do_cleanup(storage_group_name, volume_list[0])
+ except Exception:
+ LOG.warning("MIGRATE - An attempt was made to "
+ "cleanup after a legacy live migration, "
+ "but it failed. You may choose to "
+ "cleanup manually.")
diff --git a/cinder/volume/drivers/dell_emc/powermax/utils.py b/cinder/volume/drivers/dell_emc/powermax/utils.py
index c48e65684..dbb9ba985 100644
--- a/cinder/volume/drivers/dell_emc/powermax/utils.py
+++ b/cinder/volume/drivers/dell_emc/powermax/utils.py
@@ -1001,3 +1001,28 @@ class PowerMaxUtils(object):
and 'NONE' not in extra_specs.get(WORKLOAD)):
workload = extra_specs.get(WORKLOAD)
return service_level, workload
+
+ def get_object_components_and_correct_host(self, regex_str, input_str):
+ """Get components from input string.
+
+ :param regex_str: the regex -- str
+ :param input_str: the input string -- str
+ :returns: object components -- dict
+ """
+ object_dict = self.get_object_components(regex_str, input_str)
+ if object_dict and 'host' in object_dict:
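+ # The lazy 'host' regex group can capture a trailing dash (the
+ # pattern has no literal '-' before the srp/no_slo part); strip it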
+ if object_dict['host'].endswith('-'):
+ object_dict['host'] = object_dict['host'][:-1]
+ return object_dict
+
+ @staticmethod
+ def get_object_components(regex_str, input_str):
+ """Get components from input string.
+
+ :param regex_str: the regex -- str
+ :param input_str: the input string -- str
+ :returns: dict
+ """
+ full_str = re.compile(regex_str)
+ match = full_str.match(input_str)
+ return match.groupdict() if match else None
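To illustrate the helper pair above, here is how the migrate.py pattern (with 'SRP_1' substituted for the interpolated srp) decomposes one of the legacy masking view names exercised in the new unit tests. This is a standalone sketch; the trailing-dash fix mirrors what get_object_components_and_correct_host does.

    import re

    regex_str = (
        r'^(?P<prefix>OS)-(?P<host>.+?)((?P<srp>SRP_1)-'
        r'(?P<slo>.+?)-(?P<workload>.+?)|(?P<no_slo>No_SLO))'
        r'((?P<protocol>-I|-F)|)'
        r'(?P<CD>-CD|)(?P<RE>-RE|)'
        r'(?P<uuid>-[0-9A-Fa-f]{8}|)'
        r'-(?P<postfix>MV)$')

    components = re.compile(regex_str).match(
        'OS-myhost-SRP_1-Bronze-DSS-I-1b454e9f-MV').groupdict()
    # The lazy 'host' group keeps its trailing dash ('myhost-'), which
    # get_object_components_and_correct_host strips:
    if components['host'].endswith('-'):
        components['host'] = components['host'][:-1]
    # components now holds prefix='OS', host='myhost', srp='SRP_1',
    # slo='Bronze', workload='DSS', protocol='-I', uuid='-1b454e9f',
    # postfix='MV', matching legacy case 4 in the unit tests.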
diff --git a/releasenotes/notes/powermax-auto-migration-5cc57773c23fef02.yaml b/releasenotes/notes/powermax-auto-migration-5cc57773c23fef02.yaml
new file mode 100644
index 000000000..5f5716d1a
--- /dev/null
+++ b/releasenotes/notes/powermax-auto-migration-5cc57773c23fef02.yaml
@@ -0,0 +1,13 @@
+---
+features:
+ - |
+ The PowerMax driver now moves legacy shared volumes from the masking
+ view structure used in Ocata and prior releases (when SMI-S was
+ supported) to staging masking view(s) used in Pike and later releases
+ (Unisphere for PowerMax REST). In Ocata, the live migration process
+ shared the storage group containing the volume among the different
+ compute nodes. In Pike, the masking view structure changed to
+ facilitate a cleaner live migration process in which only the intended
+ volume is migrated, without exposing other volumes in the storage
+ group. The staging storage group and masking views make live migration
+ seamless in upgraded releases.