summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorZuul <zuul@review.opendev.org>2020-11-06 15:21:05 +0000
committerGerrit Code Review <review@openstack.org>2020-11-06 15:21:05 +0000
commit13d48f8589d8981cebac6978f4b22d840f5c9132 (patch)
tree93714c8b3a756359f6c59410e8fb1c85c48f57c3
parentd1eab14c0a968b6a2c2e4603b0ed855958cff438 (diff)
parent5fec81daa6b0cd694bb8987abdd88e0e6106bdfc (diff)
downloadcinder-13d48f8589d8981cebac6978f4b22d840f5c9132.tar.gz
Merge "NetApp SolidFire: Fix replication" into stable/steinstein-em14.3.1
-rw-r--r--cinder/tests/unit/volume/drivers/solidfire/test_solidfire.py942
-rw-r--r--cinder/volume/drivers/solidfire.py865
-rw-r--r--releasenotes/notes/fix-solidfire-replication-dcb3e59b29950933.yaml8
3 files changed, 1463 insertions, 352 deletions
diff --git a/cinder/tests/unit/volume/drivers/solidfire/test_solidfire.py b/cinder/tests/unit/volume/drivers/solidfire/test_solidfire.py
index 8e505d4c5..4bb95b615 100644
--- a/cinder/tests/unit/volume/drivers/solidfire/test_solidfire.py
+++ b/cinder/tests/unit/volume/drivers/solidfire/test_solidfire.py
@@ -27,8 +27,10 @@ from cinder import context
from cinder import exception
from cinder.objects import fields
from cinder import test
+from cinder.tests.unit.api import fakes
from cinder.tests.unit import fake_group_snapshot
from cinder.tests.unit import fake_snapshot
+from cinder.tests.unit import fake_volume
from cinder.tests.unit.image import fake as fake_image
from cinder.tests.unit import utils as test_utils
from cinder.volume import configuration as conf
@@ -67,7 +69,8 @@ class SolidFireVolumeTestCase(test.TestCase):
def setUp(self):
self.ctxt = context.get_admin_context()
- self.configuration = conf.Configuration(None)
+ self.configuration = conf.BackendGroupConfiguration(
+ [], conf.SHARED_CONF_GROUP)
self.configuration.sf_allow_tenant_qos = True
self.configuration.san_is_local = True
self.configuration.sf_emulate_512 = True
@@ -97,14 +100,17 @@ class SolidFireVolumeTestCase(test.TestCase):
'compressionPercent': 100,
'deDuplicationPercent': 100,
'thinProvisioningPercent': 100}}}
- self.mock_volume = {'project_id': 'testprjid',
- 'name': 'testvol',
- 'size': 1,
- 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
- 'volume_type_id': 'fast',
- 'created_at': timeutils.utcnow(),
- 'attributes':
- {'uuid': '262b9ce2-a71a-4fbe-830c-c20c5596caea'}}
+ vol_updates = {'project_id': 'testprjid',
+ 'name': 'testvol',
+ 'size': 1,
+ 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
+ 'volume_type_id': 'fast',
+ 'created_at': timeutils.utcnow(),
+ 'attributes':
+ {'uuid': '262b9ce2-a71a-4fbe-830c-c20c5596caea'}}
+ ctx = context.get_admin_context()
+ self.mock_volume = fake_volume.fake_volume_obj(ctx, **vol_updates)
+
self.fake_image_meta = {'id': '17c550bb-a411-44c0-9aaf-0d96dd47f501',
'updated_at': datetime.datetime(2013, 9,
28, 15,
@@ -136,6 +142,28 @@ class SolidFireVolumeTestCase(test.TestCase):
'qos': None,
'iqn': 'super_fake_iqn'}
+ self.cluster_pairs = (
+ [{'uniqueID': 'lu9f',
+ 'endpoint': {'passwd': 'admin', 'port': 443,
+ 'url': 'https://192.168.139.102:443',
+ 'svip': '10.10.8.134',
+ 'mvip': '192.168.139.102',
+ 'login': 'admin'},
+ 'name': 'AutoTest2-6AjG-FOR-TEST-ONLY',
+ 'clusterPairID': 33,
+ 'uuid': '9c499d4b-8fff-48b4-b875-27601d5d9889',
+ 'svip': '10.10.23.2',
+ 'mvipNodeID': 1,
+ 'repCount': 1,
+ 'encryptionAtRestState': 'disabled',
+ 'attributes': {},
+ 'mvip': '192.168.139.102',
+ 'ensemble': ['10.10.5.130'],
+ 'svipNodeID': 1}])
+
+ self.mvip = '192.168.139.102'
+ self.svip = '10.10.8.134'
+
self.fake_sfsnap_name = '%s%s' % (self.configuration.sf_volume_prefix,
self.snap.id)
self.fake_sfsnaps = [{'snapshotID': '5',
@@ -210,7 +238,7 @@ class SolidFireVolumeTestCase(test.TestCase):
return {'result': {'volumeID': 6}, 'id': 2}
elif method is 'ModifyVolume':
- return
+ return {'result': {}, 'id': 1}
elif method is 'ListVolumesForAccount' and version == '1.0':
test_name = 'OS-VOLID-a720b3c0-d1f0-11e1-9b23-0800200c9a66'
@@ -379,6 +407,9 @@ class SolidFireVolumeTestCase(test.TestCase):
'maxIOPS': '2000',
'burstIOPS': '3000'}}
+ ctx = context.get_admin_context()
+ testvol = fake_volume.fake_volume_obj(ctx, **testvol)
+
def _fake_get_volume_type(ctxt, type_id):
return test_type
@@ -428,6 +459,9 @@ class SolidFireVolumeTestCase(test.TestCase):
'targetSecret': 'shhhh',
'username': 'prefix-testprjid'}]
+ ctx = context.get_admin_context()
+ testvol = fake_volume.fake_volume_obj(ctx, **testvol)
+
sfv = solidfire.SolidFireDriver(configuration=self.configuration)
with mock.patch.object(sfv,
'_get_sfaccounts_for_tenant',
@@ -453,6 +487,10 @@ class SolidFireVolumeTestCase(test.TestCase):
'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
'volume_type_id': None,
'created_at': timeutils.utcnow()}
+
+ ctx = context.get_admin_context()
+ testvol = fake_volume.fake_volume_obj(ctx, **testvol)
+
fake_sfaccounts = [{'accountID': 5,
'targetSecret': 'shhhh',
'username': 'prefix-testprjid'}]
@@ -475,14 +513,19 @@ class SolidFireVolumeTestCase(test.TestCase):
model_update.get('provider_geometry', None))
def test_create_delete_snapshot(self):
- testsnap = {'project_id': 'testprjid',
- 'name': 'testvol',
- 'volume_size': 1,
- 'id': 'b831c4d1-d1f0-11e1-9b23-0800200c9a66',
- 'volume_id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
- 'volume_type_id': None,
- 'created_at': timeutils.utcnow(),
- 'provider_id': '8 99 None'}
+ ctx = context.get_admin_context()
+ testvol = fake_volume.fake_volume_obj(ctx)
+
+ testsnap_dict = {'project_id': 'testprjid',
+ 'name': testvol.name,
+ 'volume_size': testvol.size,
+ 'id': 'b831c4d1-d1f0-11e1-9b23-0800200c9a66',
+ 'volume_id': testvol.id,
+ 'volume_type_id': None,
+ 'created_at': timeutils.utcnow(),
+ 'provider_id': '8 99 None',
+ 'volume': testvol}
+ testsnap = fake_snapshot.fake_snapshot_obj(ctx, **testsnap_dict)
sfv = solidfire.SolidFireDriver(configuration=self.configuration)
fake_uuid = 'UUID-b831c4d1-d1f0-11e1-9b23-0800200c9a66'
@@ -496,7 +539,11 @@ class SolidFireVolumeTestCase(test.TestCase):
'_get_sfaccounts_for_tenant',
return_value=[{'accountID': 5,
'username':
- 'prefix-testprjid'}]):
+ 'prefix-testprjid'}]),\
+ mock.patch.object(sfv, '_retrieve_replication_settings',
+ return_value=["Async", {}]),\
+ mock.patch.object(sfv, '_get_sf_volume',
+ return_value={'volumeID': 33}):
sfv.create_snapshot(testsnap)
sfv.delete_snapshot(testsnap)
@@ -513,19 +560,23 @@ class SolidFireVolumeTestCase(test.TestCase):
'name': 'UUID-a720b3c0-d1f0-11e1-9b23-0800200c9a66',
'attributes': {}})
- testvol = {'project_id': 'testprjid',
- 'name': 'testvol',
- 'size': 1,
- 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
- 'volume_type_id': None,
- 'created_at': timeutils.utcnow()}
+ updates_vol_a = {'project_id': 'testprjid',
+ 'name': 'testvol',
+ 'size': 1,
+ 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
+ 'volume_type_id': None,
+ 'created_at': timeutils.utcnow()}
- testvol_b = {'project_id': 'testprjid',
- 'name': 'testvol',
- 'size': 1,
- 'id': 'b831c4d1-d1f0-11e1-9b23-0800200c9a66',
- 'volume_type_id': None,
- 'created_at': timeutils.utcnow()}
+ updates_vol_b = {'project_id': 'testprjid',
+ 'name': 'testvol',
+ 'size': 1,
+ 'id': 'b831c4d1-d1f0-11e1-9b23-0800200c9a66',
+ 'volume_type_id': None,
+ 'created_at': timeutils.utcnow()}
+
+ ctx = context.get_admin_context()
+ testvol = fake_volume.fake_volume_obj(ctx, **updates_vol_a)
+ testvol_b = fake_volume.fake_volume_obj(ctx, **updates_vol_b)
sfv = solidfire.SolidFireDriver(configuration=self.configuration)
with mock.patch.object(sfv,
@@ -1066,6 +1117,70 @@ class SolidFireVolumeTestCase(test.TestCase):
sfv.extend_volume,
testvol, 2)
+ @mock.patch.object(solidfire.SolidFireDriver, '_get_sfaccount')
+ @mock.patch.object(solidfire.SolidFireDriver, '_get_sf_volume')
+ @mock.patch.object(solidfire.SolidFireDriver, '_retrieve_qos_setting')
+ @mock.patch.object(solidfire.SolidFireDriver, '_issue_api_request')
+ @mock.patch.object(solidfire.SolidFireDriver,
+ '_retrieve_replication_settings')
+ @mock.patch.object(solidfire.SolidFireDriver, '_create_cluster_reference')
+ def test_extend_replicated_volume(self, mock_create_cluster_reference,
+ mock_retrieve_replication_settings,
+ mock_issue_api_request,
+ mock_retrieve_qos_setting,
+ mock_get_sf_volume,
+ mock_get_sfaccount):
+
+ mock_create_cluster_reference.return_value = {
+ 'mvip': self.mvip,
+ 'svip': self.svip}
+
+ mock_retrieve_replication_settings.return_value = "Async"
+ mock_retrieve_qos_setting.return_value = None
+ self.fake_sfvol['volumePairs'] = [{'remoteVolumeID': 26}]
+ mock_get_sf_volume.return_value = self.fake_sfvol
+ mock_get_sfaccount.return_value = self.fake_sfaccount
+
+ ctx = context.get_admin_context()
+ utc_now = timeutils.utcnow().isoformat()
+ vol_fields = {
+ 'id': f_uuid,
+ 'created_at': utc_now
+ }
+ vol = fake_volume.fake_volume_obj(ctx, **vol_fields)
+
+ sfv = solidfire.SolidFireDriver(configuration=self.configuration)
+ sfv.replication_enabled = True
+ sfv.cluster_pairs = self.cluster_pairs
+ sfv.active_cluster['mvip'] = self.mvip
+ sfv.active_cluster['svip'] = self.svip
+
+ mock_issue_api_request.reset_mock()
+ updates = sfv.extend_volume(vol, vol.size + 10)
+ self.assertIsNone(updates)
+
+ modify_params = {
+ 'volumeID': self.fake_sfvol['volumeID'],
+ 'totalSize': int((vol.size + 10) * units.Gi),
+ 'qos': None
+ }
+ modify_params2 = modify_params.copy()
+ modify_params2['volumeID'] = 26
+
+ expected_calls = [
+ mock.call("ModifyVolume", modify_params, version='5.0'),
+ mock.call("ModifyVolume", modify_params2, version='5.0',
+ endpoint=self.cluster_pairs[0]['endpoint'])
+ ]
+
+ mock_issue_api_request.assert_has_calls(expected_calls)
+ mock_create_cluster_reference.assert_called()
+ mock_retrieve_replication_settings.assert_called_with(vol)
+ mock_retrieve_qos_setting.assert_called_with(vol, vol.size + 10)
+ mock_get_sf_volume.assert_called_with(
+ vol.id, {'accountID': self.fake_sfaccount['accountID']})
+ mock_get_sfaccount.assert_called_with(vol.project_id)
+
def test_set_by_qos_spec_with_scoping(self):
size = 1
sfv = solidfire.SolidFireDriver(configuration=self.configuration)
@@ -1168,15 +1283,19 @@ class SolidFireVolumeTestCase(test.TestCase):
'qos:minIOPS': ('1000', u'500'),
'qos:maxIOPS': ('10000', u'1000')}}
host = None
- testvol = {'project_id': 'testprjid',
+ updates = {'project_id': 'testprjid',
'name': 'test_volume',
'size': 1,
'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
'created_at': timeutils.utcnow()}
- self.assertTrue(sfv.retype(self.ctxt,
- testvol,
- type_ref, diff, host))
+ ctx = context.get_admin_context()
+ testvol = fake_volume.fake_volume_obj(ctx, **updates)
+
+ migrated, updates = sfv.retype(self.ctxt, testvol, type_ref,
+ diff, host)
+ self.assertTrue(migrated)
+ self.assertEqual({}, updates)
def test_retype_with_qos_spec(self):
test_type = {'name': 'sf-1',
@@ -1214,16 +1333,112 @@ class SolidFireVolumeTestCase(test.TestCase):
'minIOPS': ('1000', '500'),
'maxIOPS': ('10000', '1000')}}
host = None
- testvol = {'project_id': 'testprjid',
+ updates = {'project_id': 'testprjid',
'name': 'test_volume',
'size': 1,
'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
'created_at': timeutils.utcnow()}
+ ctx = context.get_admin_context()
+ testvol = fake_volume.fake_volume_obj(ctx, **updates)
sfv = solidfire.SolidFireDriver(configuration=self.configuration)
- self.assertTrue(sfv.retype(self.ctxt,
- testvol,
- test_type, diff, host))
+ migrated, updates = sfv.retype(self.ctxt, testvol, test_type,
+ diff, host)
+ self.assertTrue(migrated)
+ self.assertEqual({}, updates)
+
+ @mock.patch.object(solidfire.SolidFireDriver, '_get_sfaccount')
+ @mock.patch.object(solidfire.SolidFireDriver, '_get_sf_volume')
+ @mock.patch.object(solidfire.SolidFireDriver, '_set_rep_by_volume_type')
+ @mock.patch.object(solidfire.SolidFireDriver,
+ '_retrieve_replication_settings')
+ @mock.patch.object(solidfire.SolidFireDriver, '_get_default_volume_params')
+ @mock.patch.object(solidfire.SolidFireDriver, '_replicate_volume')
+ @mock.patch.object(solidfire.SolidFireDriver, '_disable_replication')
+ @mock.patch.object(solidfire.SolidFireDriver, '_set_qos_by_volume_type')
+ def test_retype_replicated(self,
+ mock_set_qos_by_volume_type,
+ mock_disable_replication,
+ mock_replicate_volume,
+ mock_get_default_volume_params,
+ mock_retrieve_replication_settings,
+ mock_set_rep_by_volume_type,
+ mock_get_sf_volume,
+ mock_get_sfaccount):
+
+ all_mocks = locals()
+ mock_get_sf_volume.return_value = None
+ mock_get_sfaccount.return_value = self.fake_sfaccount
+ mock_retrieve_replication_settings.return_value = 'Async'
+
+ ctx = context.get_admin_context()
+ type_fields = {'extra_specs': {'replication_enabled': '<is> True'},
+ 'id': fakes.get_fake_uuid()}
+ src_vol_type = fake_volume.fake_volume_type_obj(ctx, **type_fields)
+
+ fake_provider_id = "%s %s %s" % (
+ self.fake_sfvol['volumeID'],
+ fakes.FAKE_UUID,
+ self.cluster_pairs[0]['uuid'])
+ utc_now = timeutils.utcnow().isoformat()
+ vol_fields = {
+ 'id': fakes.FAKE_UUID,
+ 'created_at': utc_now,
+ 'volume_type': src_vol_type,
+ 'volume_type_id': src_vol_type.id,
+ 'provider_id': fake_provider_id
+ }
+
+ vol = fake_volume.fake_volume_obj(ctx, **vol_fields)
+ dst_vol_type = fake_volume.fake_volume_type_obj(ctx)
+
+ sfv = solidfire.SolidFireDriver(configuration=self.configuration)
+ sfv.replication_enabled = True
+ sfv.cluster_pairs = self.cluster_pairs
+ sfv.active_cluster['mvip'] = self.mvip
+ sfv.active_cluster['svip'] = self.svip
+
+ self.assertRaises(exception.VolumeNotFound,
+ sfv.retype, ctx, vol, dst_vol_type, None, None)
+ mock_get_sfaccount.assert_called_once_with(vol.project_id)
+ mock_get_sf_volume.assert_called_once_with(
+ vol.id, {'accountID': self.fake_sfaccount['accountID']})
+
+ mock_get_sfaccount.reset_mock()
+ mock_get_sf_volume.reset_mock()
+ expected = {"key": "value"}
+ mock_get_sf_volume.return_value = self.fake_sfvol
+ mock_replicate_volume.return_value = expected
+ mock_set_rep_by_volume_type.side_effect = [src_vol_type, dst_vol_type]
+
+ retyped, updates = sfv.retype(ctx, vol, dst_vol_type, None, None)
+ self.assertDictEqual(expected, updates)
+
+ mock_get_sfaccount.assert_called_once_with(vol.project_id)
+ mock_get_sf_volume.assert_called_once_with(
+ vol.id, {'accountID': self.fake_sfaccount['accountID']})
+ mock_get_default_volume_params.assert_called()
+ mock_disable_replication.assert_not_called()
+ mock_replicate_volume.assert_called_once()
+ mock_retrieve_replication_settings.assert_called_once()
+ mock_set_qos_by_volume_type.assert_called_once()
+
+ expected = {}
+ for mk in all_mocks.values():
+ if isinstance(mk, mock.MagicMock):
+ mk.reset_mock()
+
+ mock_set_rep_by_volume_type.side_effect = [src_vol_type, None]
+ retyped, updates = sfv.retype(ctx, vol, dst_vol_type, None, None)
+ self.assertDictEqual(expected, updates)
+ mock_get_sfaccount.assert_called_once_with(vol.project_id)
+ mock_get_sf_volume.assert_called_once_with(
+ vol.id, {'accountID': self.fake_sfaccount['accountID']})
+ mock_get_default_volume_params.assert_not_called()
+ mock_disable_replication.assert_called_with(vol)
+ mock_replicate_volume.assert_not_called()
+ mock_retrieve_replication_settings.assert_not_called()
+ mock_set_qos_by_volume_type.assert_called_once()
def test_update_cluster_status(self):
self.mock_object(solidfire.SolidFireDriver,
@@ -1267,11 +1482,14 @@ class SolidFireVolumeTestCase(test.TestCase):
def test_manage_existing_volume(self):
external_ref = {'name': 'existing volume', 'source-id': 5}
- testvol = {'project_id': 'testprjid',
+ updates = {'project_id': 'testprjid',
'name': 'testvol',
'size': 1,
'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
'created_at': timeutils.utcnow()}
+ ctx = context.get_admin_context()
+ testvol = fake_volume.fake_volume_obj(ctx, **updates)
+
self.mock_object(solidfire.SolidFireDriver,
'_issue_api_request',
self.fake_issue_api_request)
@@ -1296,12 +1514,181 @@ class SolidFireVolumeTestCase(test.TestCase):
@mock.patch.object(solidfire.SolidFireDriver, '_issue_api_request')
@mock.patch.object(solidfire.SolidFireDriver, '_create_template_account')
+ @mock.patch.object(solidfire.SolidFireDriver, '_get_create_account')
+ @mock.patch.object(solidfire.SolidFireDriver, '_get_default_volume_params')
+ @mock.patch.object(solidfire.SolidFireDriver,
+ '_retrieve_replication_settings')
+ @mock.patch.object(solidfire.SolidFireDriver, '_replicate_volume')
+ @mock.patch.object(solidfire.SolidFireDriver, '_get_model_info')
+ @mock.patch.object(solidfire.SolidFireDriver, '_update_cluster_status')
+ @mock.patch.object(solidfire.SolidFireDriver, '_create_cluster_reference')
+ def test_manage_existing_replicated_fail(
+ self,
+ mock_create_cluster_reference,
+ mock_update_cluster_status,
+ mock_get_model_info,
+ mock_replicate_volume,
+ mock_retrieve_replication_settings,
+ mock_get_default_volume_params,
+ mock_get_create_account,
+ mock_create_template_account,
+ mock_issue_api_request):
+
+ mock_retrieve_replication_settings.return_value = 'Async'
+ mock_get_default_volume_params.return_value = {'totalSize': 50}
+ mock_get_create_account.return_value = self.fake_sfaccount
+ mock_replicate_volume.side_effect = solidfire.SolidFireAPIException
+
+ ctx = context.get_admin_context()
+ type_fields = {'extra_specs': {'replication_enabled': '<is> True'},
+ 'id': fakes.get_fake_uuid()}
+ vol_type = fake_volume.fake_volume_type_obj(ctx, **type_fields)
+
+ fake_provider_id = "%s %s %s" % (
+ self.fake_sfvol['volumeID'],
+ fakes.FAKE_UUID,
+ self.cluster_pairs[0]['uuid'])
+ utc_now = timeutils.utcnow().isoformat()
+ vol_fields = {
+ 'id': fakes.FAKE_UUID,
+ 'created_at': utc_now,
+ 'volume_type': vol_type,
+ 'volume_type_id': vol_type.id,
+ 'provider_id': fake_provider_id
+ }
+
+ vol = fake_volume.fake_volume_obj(ctx, **vol_fields)
+ sfv = solidfire.SolidFireDriver(configuration=self.configuration)
+ sfv.replication_enabled = True
+ sfv.active_cluster['mvip'] = self.mvip
+ sfv.active_cluster['svip'] = self.svip
+
+ external_ref = {}
+ self.assertRaises(solidfire.SolidFireAPIException,
+ sfv.manage_existing, vol, external_ref)
+
+ self.fake_sfvol['volumePairs'] = [{'remoteVolumeID': 26}]
+ mock_issue_api_request.return_value = {
+ 'result': {'volumes': [self.fake_sfvol]}}
+ external_ref = {'source-id': 6, 'name': 'new-being-managed'}
+ self.assertRaises(solidfire.SolidFireDriverException,
+ sfv.manage_existing, vol, external_ref)
+
+ mock_get_default_volume_params.return_value = {'totalSize': 50}
+ self.fake_sfvol['volumePairs'] = []
+ mock_issue_api_request.return_value = {
+ 'result': {'volumes': [self.fake_sfvol]}}
+ self.assertRaises(solidfire.SolidFireAPIException,
+ sfv.manage_existing, vol, external_ref)
+
+ modify_attributes = {'uuid': vol.id,
+ 'is_clone': 'False',
+ 'os_imported_at': utc_now + "+00:00",
+ 'old_name': 'new-being-managed'}
+ modify_params1 = {'volumeID': self.fake_sfvol['volumeID'],
+ 'attributes': modify_attributes}
+ modify_params2 = {'volumeID': self.fake_sfvol['volumeID'],
+ 'attributes': self.fake_sfvol['attributes']}
+ calls = [mock.call('ListActiveVolumes',
+ {'startVolumeID': self.fake_sfvol['volumeID'],
+ 'limit': 1}),
+ mock.call('ModifyVolume', modify_params1, version='5.0'),
+ mock.call('ModifyVolume', modify_params2, version='5.0')]
+
+ mock_issue_api_request.assert_has_calls(calls)
+ mock_get_model_info.assert_not_called()
+ mock_create_cluster_reference.assert_called_once()
+ mock_update_cluster_status.assert_called_once()
+ mock_replicate_volume.assert_called()
+ mock_retrieve_replication_settings.assert_called_with(vol)
+ mock_get_default_volume_params.assert_called_with(vol)
+ mock_get_create_account.assert_called_with(vol.project_id)
+
+ @mock.patch.object(solidfire.SolidFireDriver, '_get_sfaccount')
+ @mock.patch.object(solidfire.SolidFireDriver, '_get_sf_volume')
+ @mock.patch.object(solidfire.SolidFireDriver, '_set_rep_by_volume_type')
+ @mock.patch.object(solidfire.SolidFireDriver,
+ '_retrieve_replication_settings')
+ @mock.patch.object(solidfire.SolidFireDriver, '_get_default_volume_params')
+ @mock.patch.object(solidfire.SolidFireDriver, '_replicate_volume')
+ @mock.patch.object(solidfire.SolidFireDriver, '_disable_replication')
+ @mock.patch.object(solidfire.SolidFireDriver, '_set_qos_by_volume_type')
+ def test_manage_existing_replicated(
+ self,
+ mock_set_qos_by_volume_type,
+ mock_disable_replication,
+ mock_replicate_volume,
+ mock_get_default_volume_params,
+ mock_retrieve_replication_settings,
+ mock_set_rep_by_volume_type,
+ mock_get_sf_volume,
+ mock_get_sfaccount):
+
+ mock_get_sf_volume.return_value = None
+ mock_get_sfaccount.return_value = self.fake_sfaccount
+ mock_retrieve_replication_settings.return_value = 'Async'
+
+ ctx = context.get_admin_context()
+ type_fields = {'extra_specs': {'replication_enabled': '<is> True'},
+ 'id': fakes.get_fake_uuid()}
+ src_vol_type = fake_volume.fake_volume_type_obj(ctx, **type_fields)
+
+ fake_provider_id = "%s %s %s" % (
+ self.fake_sfvol['volumeID'],
+ fakes.FAKE_UUID,
+ self.cluster_pairs[0]['uuid'])
+ utc_now = timeutils.utcnow().isoformat()
+ vol_fields = {
+ 'id': fakes.FAKE_UUID,
+ 'created_at': utc_now,
+ 'volume_type': src_vol_type,
+ 'volume_type_id': src_vol_type.id,
+ 'provider_id': fake_provider_id
+ }
+
+ vol = fake_volume.fake_volume_obj(ctx, **vol_fields)
+ dst_vol_type = fake_volume.fake_volume_type_obj(ctx)
+
+ sfv = solidfire.SolidFireDriver(configuration=self.configuration)
+ sfv.replication_enabled = True
+ sfv.cluster_pairs = self.cluster_pairs
+ sfv.active_cluster['mvip'] = self.mvip
+ sfv.active_cluster['svip'] = self.svip
+
+ self.assertRaises(exception.VolumeNotFound,
+ sfv.retype, ctx, vol, dst_vol_type, None, None)
+ mock_get_sfaccount.assert_called_once_with(vol.project_id)
+ mock_get_sf_volume.assert_called_once_with(
+ vol.id, {'accountID': self.fake_sfaccount['accountID']})
+
+ mock_get_sfaccount.reset_mock()
+ mock_get_sf_volume.reset_mock()
+ expected = {"key": "value"}
+ mock_get_sf_volume.return_value = self.fake_sfvol
+ mock_replicate_volume.return_value = expected
+ mock_set_rep_by_volume_type.side_effect = [src_vol_type, dst_vol_type]
+
+ retyped, updates = sfv.retype(ctx, vol, dst_vol_type, None, None)
+ self.assertDictEqual(expected, updates)
+
+ mock_get_sfaccount.assert_called_once_with(vol.project_id)
+ mock_get_sf_volume.assert_called_once_with(
+ vol.id, {'accountID': self.fake_sfaccount['accountID']})
+ mock_get_default_volume_params.assert_called()
+ mock_disable_replication.assert_not_called()
+ mock_replicate_volume.assert_called_once()
+ mock_retrieve_replication_settings.assert_called_once()
+ mock_set_qos_by_volume_type.assert_called_once()
+ mock_set_rep_by_volume_type.assert_called()
+
+ @mock.patch.object(solidfire.SolidFireDriver, '_issue_api_request')
+ @mock.patch.object(solidfire.SolidFireDriver, '_create_template_account')
def test_create_volume_for_migration(self,
_mock_create_template_account,
_mock_issue_api_request):
_mock_issue_api_request.side_effect = self.fake_issue_api_request
_mock_create_template_account.return_value = 1
- testvol = {'project_id': 'testprjid',
+ testvol = {'project_id': 'testpsrjid',
'name': 'testvol',
'size': 1,
'id': 'b830b3c0-d1f0-11e1-9b23-1900200c9a77',
@@ -1309,6 +1696,8 @@ class SolidFireVolumeTestCase(test.TestCase):
'created_at': timeutils.utcnow(),
'migration_status': 'target:'
'a720b3c0-d1f0-11e1-9b23-0800200c9a66'}
+ ctx = context.get_admin_context()
+ testvol = fake_volume.fake_volume_obj(ctx, **testvol)
fake_sfaccounts = [{'accountID': 5,
'targetSecret': 'shhhh',
'username': 'prefix-testprjid'}]
@@ -1327,7 +1716,7 @@ class SolidFireVolumeTestCase(test.TestCase):
'_do_volume_create',
side_effect=_fake_do_v_create):
- proj_id, sf_vol_object = sfv.create_volume(testvol)
+ project_id, sf_vol_object = sfv.create_volume(testvol)
self.assertEqual('a720b3c0-d1f0-11e1-9b23-0800200c9a66',
sf_vol_object['attributes']['uuid'])
self.assertEqual('b830b3c0-d1f0-11e1-9b23-1900200c9a77',
@@ -1607,7 +1996,7 @@ class SolidFireVolumeTestCase(test.TestCase):
'qos': None,
'iqn': test_name}]}}
- def _fake_issue_api_req(method, params, version=0):
+ def _fake_issue_api_req(method, params, version=0, endpoint=None):
return fake_response
with mock.patch.object(
@@ -2470,32 +2859,24 @@ class SolidFireVolumeTestCase(test.TestCase):
def test_set_rep_by_volume_type(self):
sfv = solidfire.SolidFireDriver(configuration=self.configuration)
- sfv.cluster_pairs = [{'cluster_id': 'fake-id', 'cluster_mvip':
- 'fake-mvip'}]
+ sfv.cluster_pairs = self.cluster_pairs
ctxt = None
type_id = '290edb2a-f5ea-11e5-9ce9-5e5517507c66'
fake_type = {'extra_specs': {'replication_enabled': '<is> True'}}
with mock.patch.object(volume_types,
'get_volume_type',
return_value=fake_type):
- self.assertEqual('fake-id', sfv._set_rep_by_volume_type(
- ctxt,
- type_id)['targets']['cluster_id'])
+ self.assertEqual('Async', sfv._set_rep_by_volume_type(
+ ctxt, type_id))
def test_replicate_volume(self):
+ replication_status = fields.ReplicationStatus.ENABLED
+ fake_vol = {'project_id': 1, 'volumeID': 1, 'size': 1}
+ params = {'attributes': {}}
+ sf_account = {'initiatorSecret': 'shhh', 'targetSecret': 'dont-tell'}
+ model_update = {'provider_id': '1 2 xxxx'}
sfv = solidfire.SolidFireDriver(configuration=self.configuration)
- sfv.cluster_pairs = (
- [{'uniqueID': 'lu9f', 'endpoint': {'passwd': 'admin', 'port':
- 443, 'url':
- 'https://192.168.139.102:443',
- 'svip': '10.10.8.134', 'mvip':
- '192.168.139.102', 'login':
- 'admin'}, 'name':
- 'AutoTest2-6AjG-FOR-TEST-ONLY', 'clusterPairID': 33, 'uuid':
- '9c499d4b-8fff-48b4-b875-27601d5d9889', 'svip': '10.10.23.2',
- 'mvipNodeID': 1, 'repCount': 1, 'encryptionAtRestState':
- 'disabled', 'attributes': {}, 'mvip': '192.168.139.102',
- 'ensemble': ['10.10.5.130'], 'svipNodeID': 1}])
+ sfv.cluster_pairs = self.cluster_pairs
with mock.patch.object(sfv,
'_issue_api_request',
@@ -2505,14 +2886,10 @@ class SolidFireVolumeTestCase(test.TestCase):
return_value={'accountID': 1}),\
mock.patch.object(sfv,
'_do_volume_create',
- return_value={'provider_id': '1 2 xxxx'}):
- self.assertEqual({'provider_id': '1 2 xxxx'},
- sfv._replicate_volume(
- {'project_id': 1, 'volumeID': 1},
- {'attributes': {}},
- {'initiatorSecret': 'shhh',
- 'targetSecret': 'dont-tell'},
- {}))
+ return_value=model_update):
+ self.assertEqual({'replication_status': replication_status},
+ sfv._replicate_volume(fake_vol, params,
+ sf_account, {}))
def test_pythons_try_except(self):
def _fake_retrieve_rep(vol):
@@ -2525,7 +2902,7 @@ class SolidFireVolumeTestCase(test.TestCase):
return_value={'accountID': 5}),\
mock.patch.object(sfv,
'_retrieve_qos_setting',
- return_value=None),\
+ return_value=None), \
mock.patch.object(sfv,
'_do_volume_create',
return_value={'provider_id': '1 2 xxxx'}),\
@@ -2645,3 +3022,430 @@ class SolidFireVolumeTestCase(test.TestCase):
self.assertRaises(exception.VolumeSnapshotNotFound,
sfv.revert_to_snapshot,
self.ctxt, self.vol, self.snap)
+
+ @mock.patch.object(solidfire.SolidFireDriver, '_get_create_account')
+ @mock.patch.object(solidfire.SolidFireDriver, '_set_cluster_pairs')
+ @mock.patch.object(solidfire.SolidFireDriver, '_snapshot_discovery')
+ @mock.patch.object(solidfire.SolidFireDriver, '_issue_api_request')
+ @mock.patch.object(solidfire.SolidFireDriver, '_get_model_info')
+ @mock.patch.object(solidfire.SolidFireDriver, '_update_attributes')
+ @mock.patch.object(solidfire.SolidFireDriver, '_update_cluster_status')
+ @mock.patch.object(solidfire.SolidFireDriver, '_set_cluster_pairs')
+ @mock.patch.object(solidfire.SolidFireDriver, '_get_default_volume_params')
+ @mock.patch.object(solidfire.SolidFireDriver,
+ '_retrieve_replication_settings')
+ @mock.patch.object(solidfire.SolidFireDriver, '_replicate_volume')
+ @mock.patch.object(solidfire.SolidFireDriver, '_create_cluster_reference')
+ def test_do_clone_volume_rep_disabled(self,
+ mock_create_cluster_reference,
+ mock_replicate_volume,
+ mock_retrieve_replication_settings,
+ mock_get_default_volume_params,
+ mock_set_cluster_pairs,
+ mock_update_cluster_status,
+ mock_update_attributes,
+ mock_get_model_info,
+ mock_issue_api_request,
+ mock_snapshot_discovery,
+ mock_test_set_cluster_pairs,
+ mock_get_create_account):
+
+ all_mocks = locals()
+
+ def reset_mocks():
+ for mk in all_mocks.values():
+ if isinstance(mk, mock.MagicMock):
+ mk.reset_mock()
+
+ sf_volume_params = {'volumeID': 1, 'snapshotID': 2, 'newSize': 3}
+ mock_snapshot_discovery.return_value = (sf_volume_params, True,
+ self.fake_sfvol)
+ mock_get_create_account.return_value = self.fake_sfaccount
+
+ ctx = context.get_admin_context()
+ vol_fields = {'updated_at': timeutils.utcnow(),
+ 'created_at': timeutils.utcnow()}
+ src_vol = fake_volume.fake_volume_obj(ctx)
+ dst_vol = fake_volume.fake_volume_obj(ctx, **vol_fields)
+
+ mock_create_cluster_reference.return_value = {
+ 'mvip': self.mvip,
+ 'svip': self.svip}
+
+ sfv = solidfire.SolidFireDriver(configuration=self.configuration)
+ sfv.replication_enabled = False
+
+ reset_mocks()
+ mock_issue_api_request.return_value = {
+ 'error': {'code': 000, 'name': 'DummyError',
+ 'message': 'This is a fake error response'},
+ 'id': 1}
+
+ self.assertRaises(solidfire.SolidFireAPIException,
+ sfv._do_clone_volume, src_vol.id,
+ dst_vol, sf_src_snap=self.fake_sfsnaps[0])
+
+ clone_vol_params = {
+ 'snapshotID': self.fake_sfsnaps[0]['snapshotID'],
+ 'volumeID': self.fake_sfsnaps[0]['volumeID'],
+ 'newSize': dst_vol.size * units.Gi,
+ 'name': '%(prefix)s%(id)s' % {
+ 'prefix': self.configuration.sf_volume_prefix,
+ 'id': dst_vol.id},
+ 'newAccountID': self.fake_sfaccount['accountID']}
+
+ mock_get_create_account.assert_called_with(dst_vol.project_id)
+ mock_issue_api_request.assert_called_once_with(
+ 'CloneVolume', clone_vol_params, version='6.0')
+ mock_test_set_cluster_pairs.assert_not_called()
+ mock_update_attributes.assert_not_called()
+ mock_get_model_info.assert_not_called()
+ mock_snapshot_discovery.assert_not_called()
+
+ reset_mocks()
+ mock_issue_api_request.side_effect = self.fake_issue_api_request
+ mock_get_default_volume_params.return_value = {}
+ mock_get_model_info.return_value = None
+ self.assertRaises(solidfire.SolidFireAPIException,
+ sfv._do_clone_volume, src_vol.id,
+ dst_vol, sf_src_snap=self.fake_sfsnaps[0])
+
+ mock_get_create_account.assert_called_with(dst_vol.project_id)
+ calls = [mock.call('CloneVolume', clone_vol_params, version='6.0'),
+ mock.call('ModifyVolume', {'volumeID': 6})]
+ mock_issue_api_request.assert_has_calls(calls)
+ mock_test_set_cluster_pairs.assert_not_called()
+ mock_update_attributes.assert_not_called()
+ mock_get_model_info.assert_called_once()
+ mock_snapshot_discovery.assert_not_called()
+
+ reset_mocks()
+ mock_retrieve_replication_settings.return_value = 'Async'
+ update = {'replication_status': fields.ReplicationStatus.ENABLED}
+ mock_replicate_volume.side_effect = solidfire.SolidFireDriverException
+ mock_update_attributes.return_value = {'result': {}, 'id': 1}
+ mock_get_model_info.return_value = {
+ 'provider_location': '1.1.1.1 iqn 0',
+ 'provider_auth': 'CHAP stack-1-a60e2611875f40199931f2c76370d66b '
+ '2FE0CQ8J196R',
+ 'provider_id': '%s %s cluster-id-01' % (
+ self.fake_sfvol['volumeID'],
+ self.fake_sfaccount['accountID'])
+ }
+
+ data, account, updates = sfv._do_clone_volume(
+ src_vol.id, dst_vol, sf_src_snap=self.fake_sfsnaps[0])
+
+ self.assertEqual({'result': {}, 'id': 1}, data)
+ self.assertEqual(25, account['accountID'])
+ self.assertEqual(self.fake_sfvol['volumeID'],
+ int(updates['provider_id'].split()[0]))
+
+ mock_get_create_account.assert_called_with(dst_vol.project_id)
+ calls = [mock.call('CloneVolume', clone_vol_params, version='6.0'),
+ mock.call('ModifyVolume', {'volumeID': 6})]
+
+ mock_issue_api_request.assert_has_calls(calls)
+ mock_test_set_cluster_pairs.assert_not_called()
+ mock_update_attributes.assert_not_called()
+ mock_get_model_info.assert_called_once()
+ mock_snapshot_discovery.assert_not_called()
+
+ @mock.patch.object(solidfire.SolidFireDriver, '_get_create_account')
+ @mock.patch.object(solidfire.SolidFireDriver, '_retrieve_qos_setting')
+ @mock.patch.object(solidfire.SolidFireDriver,
+ '_extract_sf_attributes_from_extra_specs')
+ def test_get_default_volume_params(
+ self, mock_extract_sf_attributes_from_extra_specs,
+ mock_retrieve_qos_setting, mock_get_create_account):
+
+ mock_extract_sf_attributes_from_extra_specs.return_value = [{
+ 'key1': 'value1',
+ 'key2': 'value2'
+ }]
+ mock_retrieve_qos_setting.return_value = None
+ mock_get_create_account.return_value = self.fake_sfaccount
+
+ ctx = context.get_admin_context()
+ type_fields = {'extra_specs': {'replication_enabled': '<is> True'}}
+ vol_type = fake_volume.fake_volume_type_obj(ctx, **type_fields)
+ utc_now = timeutils.utcnow().isoformat()
+ vol_fields = {
+ 'id': fakes.FAKE_UUID,
+ 'created_at': utc_now,
+ 'volume_type': vol_type,
+ 'volume_type_id': vol_type.id
+ }
+
+ vol = fake_volume.fake_volume_obj(ctx, **vol_fields)
+
+ vol_name = '%s%s' % (self.configuration.sf_volume_prefix, vol.id)
+ expected_attr = {
+ 'uuid': vol.id,
+ 'is_clone': False,
+ 'created_at': utc_now + "+00:00",
+ 'cinder-name': vol.get('display_name', ""),
+ 'key1': 'value1',
+ 'key2': 'value2',
+ }
+
+ expected_params = {
+ 'name': vol_name,
+ 'accountID': self.fake_sfaccount['accountID'],
+ 'sliceCount': 1,
+ 'totalSize': int(vol.size * units.Gi),
+ 'enable512e': self.configuration.sf_emulate_512,
+ 'attributes': expected_attr,
+ 'qos': None
+ }
+
+ sfv = solidfire.SolidFireDriver(configuration=self.configuration)
+ sfv.replication_enabled = True
+
+ params = sfv._get_default_volume_params(vol, False)
+
+ self.assertDictEqual(expected_params, params)
+ mock_extract_sf_attributes_from_extra_specs.assert_called()
+ mock_retrieve_qos_setting.assert_called()
+ mock_get_create_account.assert_called()
+
+ @mock.patch.object(solidfire.SolidFireDriver, '_get_sfvol_by_cinder_vref')
+ def test_disable_replication_fail(self, mock_get_sfvol_by_cinder_vref):
+
+ self.fake_sfvol['volumePairs'] = []
+ mock_get_sfvol_by_cinder_vref.return_value = self.fake_sfvol
+
+ ctx = context.get_admin_context()
+ utc_now = timeutils.utcnow().isoformat()
+ vol_fields = {
+ 'id': f_uuid,
+ 'created_at': utc_now
+ }
+ vol = fake_volume.fake_volume_obj(ctx, **vol_fields)
+
+ sfv = solidfire.SolidFireDriver(configuration=self.configuration)
+ sfv.replication_enabled = True
+ sfv.cluster_pairs = self.cluster_pairs
+
+ expected = {'replication_status': fields.ReplicationStatus.DISABLED}
+ updates = sfv._disable_replication(vol)
+
+ self.assertDictEqual(expected, updates)
+
+ @mock.patch.object(solidfire.SolidFireDriver, '_get_sfvol_by_cinder_vref')
+ @mock.patch.object(solidfire.SolidFireDriver, '_issue_api_request')
+ @mock.patch.object(solidfire.SolidFireDriver, '_create_cluster_reference')
+ def test_disable_replication(self, mock_create_cluster_reference,
+ mock_issue_api_request,
+ mock_get_sfvol_by_cinder_vref):
+
+ mock_create_cluster_reference.return_value = {
+ 'mvip': self.mvip,
+ 'svip': self.svip}
+
+ self.fake_sfvol['volumePairs'] = [{"remoteVolumeID": 26}]
+ mock_get_sfvol_by_cinder_vref.return_value = self.fake_sfvol
+
+ ctx = context.get_admin_context()
+ utc_now = timeutils.utcnow().isoformat()
+ vol_fields = {
+ 'id': f_uuid,
+ 'created_at': utc_now
+ }
+ vol = fake_volume.fake_volume_obj(ctx, **vol_fields)
+
+ sfv = solidfire.SolidFireDriver(configuration=self.configuration)
+ sfv.replication_enabled = True
+ sfv.cluster_pairs = self.cluster_pairs
+ sfv.active_cluster['mvip'] = self.mvip
+ sfv.active_cluster['svip'] = self.svip
+
+ expected = {'replication_status': fields.ReplicationStatus.DISABLED}
+ mock_issue_api_request.reset_mock()
+ updates = sfv._disable_replication(vol)
+
+ self.assertDictEqual(expected, updates)
+
+ expected = [
+ mock.call("RemoveVolumePair",
+ {'volumeID': self.fake_sfvol['volumeID']}, '8.0'),
+ mock.call("RemoveVolumePair", {'volumeID': 26}, '8.0',
+ endpoint=sfv.cluster_pairs[0]['endpoint']),
+ mock.call("DeleteVolume", {'volumeID': 26},
+ endpoint=sfv.cluster_pairs[0]['endpoint']),
+ mock.call("PurgeDeletedVolume", {'volumeID': 26},
+ endpoint=sfv.cluster_pairs[0]['endpoint'])
+ ]
+
+ mock_issue_api_request.assert_has_calls(expected)
+ mock_create_cluster_reference.assert_called()
+ mock_get_sfvol_by_cinder_vref.assert_called()
+
+ @mock.patch.object(solidfire.SolidFireDriver, '_issue_api_request')
+ @mock.patch.object(solidfire.SolidFireDriver, '_create_cluster_reference')
+ @mock.patch.object(solidfire.SolidFireDriver, '_set_cluster_pairs')
+ @mock.patch.object(solidfire.SolidFireDriver, '_update_cluster_status')
+ @mock.patch.object(solidfire.SolidFireDriver, '_get_cluster_info')
+ @mock.patch.object(solidfire.SolidFireDriver, '_map_sf_volumes')
+ @mock.patch.object(solidfire.SolidFireDriver, '_failover_volume')
+ @mock.patch.object(solidfire.SolidFireDriver, '_get_create_account')
+ @mock.patch.object(solidfire.SolidFireDriver, '_get_remote_info_by_id')
+ def test_failover_host(self, mock_get_remote_info_by_id,
+ mock_get_create_account,
+ mock_failover_volume,
+ mock_map_sf_volumes,
+ mock_get_cluster_info,
+ mock_update_cluster_status,
+ mock_set_cluster_pairs,
+ mock_create_cluster_reference,
+ mock_issue_api_request):
+
+ all_mocks = locals()
+
+ def reset_mocks():
+ for mk in all_mocks.values():
+ if isinstance(mk, mock.MagicMock):
+ mk.reset_mock()
+
+ ctx = context.get_admin_context()
+ vol_fields = {'updated_at': timeutils.utcnow(),
+ 'created_at': timeutils.utcnow()}
+
+ cinder_vols = []
+ sf_vols = []
+ for i in range(1, 6):
+ vol = fake_volume.fake_volume_obj(ctx, **vol_fields)
+ sf_vol = self.fake_sfvol.copy()
+ sf_vol['volumeID'] = i
+ sf_vol['name'] = '%s%s' % (self.configuration.sf_volume_prefix,
+ vol.id)
+ sf_vol['access'] = 'replicationTarget'
+ sf_vol['attributes'] = {'uuid': vol.id}
+ sf_vol['cinder_id'] = vol.id
+
+ sf_vols.append(sf_vol)
+ cinder_vols.append(vol)
+
+ mock_map_sf_volumes.return_value = sf_vols
+ mock_create_cluster_reference.return_value = self.cluster_pairs[0]
+
+ fake_replication_device = {'backend_id': 'fake',
+ 'mvip': '0.0.0.0',
+ 'login': 'fake_login',
+ 'password': 'fake_pwd'}
+
+ self.configuration.replication_device = [fake_replication_device]
+
+ reset_mocks()
+ drv_args = {'active_backend_id': None}
+ sfv = solidfire.SolidFireDriver(configuration=self.configuration,
+ **drv_args)
+ self.assertRaises(exception.InvalidReplicationTarget,
+ sfv.failover_host, ctx, cinder_vols, 'default', None)
+ mock_map_sf_volumes.assert_not_called()
+
+ reset_mocks()
+ drv_args = {'active_backend_id': 'default'}
+ sfv = solidfire.SolidFireDriver(configuration=self.configuration,
+ **drv_args)
+ self.assertRaises(exception.UnableToFailOver,
+ sfv.failover_host, ctx, cinder_vols, 'default', None)
+ mock_map_sf_volumes.assert_not_called()
+
+ reset_mocks()
+ drv_args = {'active_backend_id': None}
+ sfv = solidfire.SolidFireDriver(configuration=self.configuration,
+ **drv_args)
+ self.assertRaises(exception.InvalidReplicationTarget,
+ sfv.failover_host, ctx, cinder_vols,
+ secondary_id='not_fake_id', groups=None)
+ mock_map_sf_volumes.assert_not_called()
+
+ reset_mocks()
+ drv_args = {'active_backend_id': None}
+ sfv = solidfire.SolidFireDriver(configuration=self.configuration,
+ **drv_args)
+ sfv.cluster_pairs = [None]
+ self.assertRaises(exception.UnableToFailOver,
+ sfv.failover_host, ctx, cinder_vols,
+ secondary_id='fake', groups=None)
+ mock_map_sf_volumes.assert_not_called()
+
+ reset_mocks()
+ drv_args = {'active_backend_id': None}
+ sfv = solidfire.SolidFireDriver(configuration=self.configuration,
+ **drv_args)
+ sfv.cluster_pairs = self.cluster_pairs
+ sfv.cluster_pairs[0]['backend_id'] = 'fake'
+ sfv.replication_enabled = True
+ cluster_id, updates, _ = sfv.failover_host(
+ ctx, cinder_vols, secondary_id='fake', groups=None)
+ self.assertEqual(5, len(updates))
+ for update in updates:
+ self.assertEqual(fields.ReplicationStatus.FAILED_OVER,
+ update['updates']['replication_status'])
+
+ self.assertEqual('fake', cluster_id)
+ mock_get_create_account.assert_called()
+ mock_failover_volume.assert_called()
+ mock_map_sf_volumes.assert_called()
+ mock_get_cluster_info.assert_not_called()
+ mock_update_cluster_status.assert_called()
+ mock_set_cluster_pairs.assert_called()
+ mock_create_cluster_reference.assert_called()
+ mock_issue_api_request.assert_not_called()
+
+ @mock.patch.object(solidfire.SolidFireDriver, '_issue_api_request')
+ @mock.patch.object(solidfire.SolidFireDriver, '_create_cluster_reference')
+ @mock.patch.object(solidfire.SolidFireDriver, '_update_cluster_status')
+ def test_failover_volume(self, mock_update_cluster_status,
+ mock_create_cluster_reference,
+ mock_issue_api_request):
+
+ all_mocks = locals()
+
+ def reset_mocks():
+ for mk in all_mocks.values():
+ if isinstance(mk, mock.MagicMock):
+ mk.reset_mock()
+
+ mock_issue_api_request.return_value = self.fake_sfaccount
+
+ sfv = solidfire.SolidFireDriver(configuration=self.configuration)
+ sfv.replication_enabled = True
+
+ fake_src_sfvol = {'volumeID': 600,
+ 'name': 'test_volume',
+ 'accountID': 25,
+ 'sliceCount': 1,
+ 'totalSize': 1 * units.Gi,
+ 'enable512e': True,
+ 'access': "replicationTarget",
+ 'status': "active",
+ 'attributes': {'uuid': f_uuid[0]},
+ 'qos': None,
+ 'iqn': 'super_fake_iqn'}
+
+ expected_src_params = {'volumeID': fake_src_sfvol['volumeID'],
+ 'access': 'replicationTarget'}
+
+ expected_tgt_params = {'volumeID': self.fake_sfvol['volumeID'],
+ 'access': 'readWrite'}
+
+ sfv._failover_volume(self.fake_sfvol, self.cluster_pairs[0],
+ fake_src_sfvol)
+
+ mock_issue_api_request.assert_has_calls(
+ [mock.call("ModifyVolume", expected_src_params),
+ mock.call("ModifyVolume", expected_tgt_params,
+ endpoint=self.cluster_pairs[0]['endpoint'])]
+ )
+ reset_mocks()
+
+ sfv._failover_volume(self.fake_sfvol, self.cluster_pairs[0])
+
+ mock_issue_api_request.assert_called_with(
+ "ModifyVolume",
+ expected_tgt_params,
+ endpoint=self.cluster_pairs[0]['endpoint']
+ )
diff --git a/cinder/volume/drivers/solidfire.py b/cinder/volume/drivers/solidfire.py
index 3c520c3a9..5b22d06ff 100644
--- a/cinder/volume/drivers/solidfire.py
+++ b/cinder/volume/drivers/solidfire.py
@@ -115,6 +115,35 @@ xVolumeAccessGroupIDDoesNotExist = 'xVolumeAccessGroupIDDoesNotExist'
xNotInVolumeAccessGroup = 'xNotInVolumeAccessGroup'
+class DuplicateSfVolumeNames(exception.Duplicate):
+ message = _("Detected more than one volume with name %(vol_name)s")
+
+
+class SolidFireAPIException(exception.VolumeBackendAPIException):
+ message = _("Bad response from SolidFire API")
+
+
+class SolidFireDriverException(exception.VolumeDriverException):
+ message = _("SolidFire Cinder Driver exception")
+
+
+class SolidFireAPIDataException(SolidFireAPIException):
+ message = _("Error in SolidFire API response: data=%(data)s")
+
+
+class SolidFireAccountNotFound(SolidFireDriverException):
+ message = _("Unable to locate account %(account_name)s on "
+ "Solidfire device")
+
+
+class SolidFireRetryableException(exception.VolumeBackendAPIException):
+ message = _("Retryable SolidFire Exception encountered")
+
+
+class SolidFireReplicationPairingError(exception.VolumeBackendAPIException):
+ message = _("Error on SF Keys")
+
+
def retry(exc_tuple, tries=5, delay=1, backoff=2):
def retry_dec(f):
@six.wraps(f)
@@ -207,9 +236,10 @@ class SolidFireDriver(san.SanISCSIDriver):
2.0.11 - Add ability to failback replicating volumes
2.0.12 - Fix bug #1744005
2.0.14 - Fix bug #1782588 qos settings on extend
+ 2.0.15 - Fix bug #1834013 NetApp SolidFire replication errors
"""
- VERSION = '2.0.14'
+ VERSION = '2.0.15'
# ThirdPartySystems wiki page
CI_WIKI_NAME = "NetApp_SolidFire_CI"
@@ -261,25 +291,44 @@ class SolidFireDriver(san.SanISCSIDriver):
self.verify_ssl = self.configuration.driver_ssl_cert_verify
self.target_driver = SolidFireISCSI(solidfire_driver=self,
configuration=self.configuration)
- self.default_cluster = self._create_cluster_reference()
- self.active_cluster = self.default_cluster
+
+ self._check_replication_configs()
# If we're failed over, we need to parse things out and set the active
# cluster appropriately
if self.failed_over_id:
- self.failed_over = True
- remote_info = self._get_remote_info_by_id(self.failed_over_id)
- if remote_info:
- self.active_cluster = self._create_cluster_reference(
- remote_info['endpoint'])
- else:
+ LOG.info("Running on failed-over mode. "
+ "Active backend-id: %s", self.failed_over_id)
+
+ repl_target = self.configuration.get('replication_device', [])
+
+ if not repl_target:
LOG.error('Failed to initialize SolidFire driver to '
'a remote cluster specified at id: %s',
self.failed_over_id)
+ raise SolidFireDriverException
+
+ remote_endpoint = self._build_repl_endpoint_info(
+ **repl_target[0])
+
+ self.active_cluster = self._create_cluster_reference(
+ remote_endpoint)
+
+ # When in failed-over state, we have only endpoint info from the
+ # primary cluster.
+ self.primary_cluster = {"endpoint": self._build_endpoint_info()}
+ self.failed_over = True
+ else:
+ self.primary_cluster = self._create_cluster_reference()
+ self.active_cluster = self.primary_cluster
+ if self.configuration.replication_device:
+ self._set_cluster_pairs()
+
+ LOG.debug("Active cluster: %s", self.active_cluster)
# NOTE(jdg): This works even in a failed over state, because what we
# do is use self.active_cluster in issue_api_request so by default we
- # always use the currently active cluster, override that by provding
+ # always use the currently active cluster, override that by providing
# an endpoint to issue_api_request if needed
try:
self._update_cluster_status()
@@ -290,8 +339,6 @@ class SolidFireDriver(san.SanISCSIDriver):
account = self.configuration.sf_template_account_name
self.template_account_id = self._create_template_account(account)
- self._set_cluster_pairs()
-
@staticmethod
def get_driver_options():
return sf_opts
@@ -308,7 +355,7 @@ class SolidFireDriver(san.SanISCSIDriver):
for rd in self.configuration.get('replication_device', []):
if rd.get('backend_id', None) == backend_id:
remote_endpoint = self._build_endpoint_info(**rd)
- remote_info = self._get_remote_cluster_info(remote_endpoint)
+ remote_info = self._get_cluster_info(remote_endpoint)
remote_info['endpoint'] = remote_endpoint
if not remote_info['endpoint']['svip']:
remote_info['endpoint']['svip'] = (
@@ -324,55 +371,81 @@ class SolidFireDriver(san.SanISCSIDriver):
{'clusterPairingKey': pairing_info['clusterPairingKey']},
version='8.0',
endpoint=remote_device['endpoint'])['result']['clusterPairID']
- except exception.SolidFireAPIException as ex:
- if 'xPairingAlreadExists' in ex.msg:
+ except SolidFireAPIException as ex:
+ if 'xPairingAlreadyExists' in ex.msg:
LOG.debug('Pairing already exists during init.')
else:
with excutils.save_and_reraise_exception():
LOG.error('Cluster pairing failed: %s', ex.msg)
- LOG.debug(('Initialized Cluster pair with ID: %s'), pair_id)
+ LOG.debug('Initialized Cluster pair with ID: %s', pair_id)
remote_device['clusterPairID'] = pair_id
return pair_id
- def _get_remote_cluster_info(self, remote_endpoint):
- return self._issue_api_request(
- 'GetClusterInfo',
- {},
- endpoint=remote_endpoint)['result']['clusterInfo']
-
- def _set_cluster_pairs(self):
- if not self.configuration.get('replication_device', None):
- self.replication = False
+ def _get_cluster_info(self, remote_endpoint):
+ try:
+ return self._issue_api_request(
+ 'GetClusterInfo', {},
+ endpoint=remote_endpoint)['result']['clusterInfo']
+ except SolidFireAPIException:
+ msg = _("Replication device is unreachable!")
+ LOG.exception(msg)
+ raise
+
+ def _check_replication_configs(self):
+ repl_configs = self.configuration.replication_device
+ if not repl_configs:
return
+        # We only support one replication target. Check whether the user
+        # is trying to add more than one.
+ if len(repl_configs) > 1:
+ msg = _("SolidFire driver only supports one replication target "
+ "device.")
+ LOG.error(msg)
+ raise SolidFireDriverException(msg)
+
+ repl_configs = repl_configs[0]
+
+        # Ensure the user is not using the same MVIP for both the source
+        # and the replication target.
+ if repl_configs['mvip'] == self.configuration.san_ip:
+ msg = _("Source mvip cannot be the same "
+ "as the replication target.")
+ LOG.error(msg)
+ raise SolidFireDriverException(msg)
+
+ def _set_cluster_pairs(self):
+ repl_configs = self.configuration.replication_device[0]
existing_pairs = self._issue_api_request(
'ListClusterPairs',
{},
version='8.0')['result']['clusterPairs']
+ LOG.debug("Existing cluster pairs: %s", existing_pairs)
+
remote_pair = {}
- for rd in self.configuration.get('replication_device', []):
- remote_endpoint = self._build_endpoint_info(**rd)
- remote_info = self._get_remote_cluster_info(remote_endpoint)
- remote_info['endpoint'] = remote_endpoint
- if not remote_info['endpoint']['svip']:
- remote_info['endpoint']['svip'] = remote_info['svip'] + ':3260'
-
- for ep in existing_pairs:
- if rd['backend_id'] == ep['mvip']:
- remote_pair = ep
- LOG.debug("Found remote pair: %s", remote_pair)
- remote_info['clusterPairID'] = ep['clusterPairID']
- break
- if (not remote_pair and
- remote_info['mvip'] != self.active_cluster['mvip']):
- # NOTE(jdg): create_remote_pairing sets the
- # clusterPairID in remote_info for us
- self._create_remote_pairing(remote_info)
- self.cluster_pairs.append(remote_info)
- LOG.debug("Setting replication_enabled to True.")
- self.replication_enabled = True
+ remote_endpoint = self._build_repl_endpoint_info(**repl_configs)
+ remote_info = self._create_cluster_reference(remote_endpoint)
+ remote_info['backend_id'] = repl_configs['backend_id']
+
+ for ep in existing_pairs:
+ if repl_configs['mvip'] == ep['mvip']:
+ remote_pair = ep
+ LOG.debug("Found remote pair: %s", remote_pair)
+ remote_info['clusterPairID'] = ep['clusterPairID']
+ break
+
+ if (not remote_pair and
+ remote_info['mvip'] != self.active_cluster['mvip']):
+ LOG.debug("Setting up new cluster pairs.")
+ # NOTE(jdg): create_remote_pairing sets the
+ # clusterPairID in remote_info for us
+ self._create_remote_pairing(remote_info)
+
+ self.cluster_pairs.append(remote_info)
+ LOG.debug("Available cluster pairs: %s", self.cluster_pairs)
+ self.replication_enabled = True
def _create_cluster_reference(self, endpoint=None):
cluster_ref = {}
@@ -393,13 +466,21 @@ class SolidFireDriver(san.SanISCSIDriver):
{}, endpoint=cluster_ref['endpoint'])
['result']['clusterAPIVersion'])
- # FIXME(jdg): This is fine for the default/base cluster, but
- # if we have a secondary configured, and are using vlans etc
- # we don't use what's in the config (that's the primary only),
- # we need to set this from the replication_device config
- if self.configuration.get('sf_svip', None):
- cluster_ref['svip'] = (
- self.configuration.get('sf_svip'))
+ # NOTE(sfernand): If a custom svip is configured, we update the
+ # default storage ip to the configuration value.
+ # Otherwise, we update endpoint info with the default storage ip
+ # retrieved from GetClusterInfo API call.
+ svip = cluster_ref['endpoint'].get('svip')
+
+ if not svip:
+ svip = cluster_ref['svip']
+
+ if ':' not in svip:
+ svip += ':3260'
+
+ cluster_ref['svip'] = svip
+ cluster_ref['endpoint']['svip'] = svip
+
return cluster_ref
def _set_active_cluster(self, endpoint=None):
@@ -493,6 +574,18 @@ class SolidFireDriver(san.SanISCSIDriver):
params)['result']['accountID']
return id
+ def _build_repl_endpoint_info(self, **repl_device):
+ endpoint = {
+ 'mvip': repl_device.get('mvip'),
+ 'login': repl_device.get('login'),
+ 'passwd': repl_device.get('password'),
+ 'port': repl_device.get('port', 443),
+ 'url': 'https://%s:%s' % (repl_device.get('mvip'),
+ repl_device.get('port', 443)),
+ 'svip': repl_device.get('svip')
+ }
+ return endpoint
+
def _build_endpoint_info(self, **kwargs):
endpoint = {}
@@ -538,7 +631,13 @@ class SolidFireDriver(san.SanISCSIDriver):
'SolidFire API call.' % response['error']['name'])
LOG.debug(msg)
LOG.debug("API response: %s", response)
- raise exception.SolidFireRetryableException(message=msg)
+
+ raise SolidFireRetryableException(message=msg)
+
+ if (('error' in response) and
+ response['error']['name'] == 'xInvalidPairingKey'):
+ LOG.debug("Error on volume pairing!")
+ raise SolidFireReplicationPairingError
if 'error' in response:
msg = _('API response: %s') % response
@@ -554,11 +653,12 @@ class SolidFireDriver(san.SanISCSIDriver):
params,
endpoint=endpoint)['result']['volumes']
- def _get_volumes_for_account(self, sf_account_id, cinder_uuid=None):
+ def _get_volumes_for_account(self, sf_account_id, cinder_uuid=None,
+ endpoint=None):
# ListVolumesForAccount gives both Active and Deleted
# we require the solidfire accountID, uuid of volume
# is optional
- vols = self._get_volumes_by_sfaccount(sf_account_id)
+ vols = self._get_volumes_by_sfaccount(sf_account_id, endpoint=endpoint)
if cinder_uuid:
vlist = [v for v in vols if
cinder_uuid in v['name']]
@@ -647,7 +747,7 @@ class SolidFireDriver(san.SanISCSIDriver):
return sfaccount
- def _create_sfaccount(self, sf_account_name):
+ def _create_sfaccount(self, sf_account_name, endpoint=None):
"""Create account on SolidFire device if it doesn't already exist.
We're first going to check if the account already exists, if it does
@@ -655,7 +755,8 @@ class SolidFireDriver(san.SanISCSIDriver):
"""
- sfaccount = self._get_sfaccount_by_name(sf_account_name)
+ sfaccount = self._get_sfaccount_by_name(sf_account_name,
+ endpoint=endpoint)
if sfaccount is None:
LOG.debug('solidfire account: %s does not exist, create it...',
sf_account_name)
@@ -664,8 +765,10 @@ class SolidFireDriver(san.SanISCSIDriver):
'initiatorSecret': chap_secret,
'targetSecret': chap_secret,
'attributes': {}}
- self._issue_api_request('AddAccount', params)
- sfaccount = self._get_sfaccount_by_name(sf_account_name)
+ self._issue_api_request('AddAccount', params,
+ endpoint=endpoint)
+ sfaccount = self._get_sfaccount_by_name(sf_account_name,
+ endpoint=endpoint)
return sfaccount
@@ -676,7 +779,7 @@ class SolidFireDriver(san.SanISCSIDriver):
length=length,
symbolgroups=(string.ascii_uppercase + string.digits))
- def _get_model_info(self, sfaccount, sf_volume_id, endpoint=None):
+ def _build_connection_info(self, sfaccount, vol, endpoint=None):
"""Gets the connection info for specified account and volume."""
if endpoint:
iscsi_portal = endpoint['svip']
@@ -687,39 +790,42 @@ class SolidFireDriver(san.SanISCSIDriver):
iscsi_portal += ':3260'
chap_secret = sfaccount['targetSecret']
+ vol_id = vol['volumeID']
+ iqn = vol['iqn']
+
+ conn_info = {
+ # NOTE(john-griffith): SF volumes are always at lun 0
+ 'provider_location': ('%s %s %s' % (iscsi_portal, iqn, 0)),
+ 'provider_auth': ('CHAP %s %s' % (sfaccount['username'],
+ chap_secret))
+ }
+
+ if not self.configuration.sf_emulate_512:
+ conn_info['provider_geometry'] = ('%s %s' % (4096, 4096))
+
+ conn_info['provider_id'] = (
+ self._create_provider_id_string(vol_id, sfaccount['accountID']))
+ return conn_info
- found_volume = False
+ def _get_model_info(self, sfaccount, sf_volume_id, endpoint=None):
+ volume = None
iteration_count = 0
- while not found_volume and iteration_count < 600:
+ while not volume and iteration_count < 600:
volume_list = self._get_volumes_by_sfaccount(
sfaccount['accountID'], endpoint=endpoint)
- iqn = None
for v in volume_list:
if v['volumeID'] == sf_volume_id:
- iqn = v['iqn']
- found_volume = True
+ volume = v
break
- if not found_volume:
- time.sleep(2)
iteration_count += 1
- if not found_volume:
+ if not volume:
LOG.error('Failed to retrieve volume SolidFire-'
'ID: %s in get_by_account!', sf_volume_id)
raise exception.VolumeNotFound(volume_id=sf_volume_id)
- model_update = {}
- # NOTE(john-griffith): SF volumes are always at lun 0
- model_update['provider_location'] = ('%s %s %s'
- % (iscsi_portal, iqn, 0))
- model_update['provider_auth'] = ('CHAP %s %s'
- % (sfaccount['username'],
- chap_secret))
- if not self.configuration.sf_emulate_512:
- model_update['provider_geometry'] = ('%s %s' % (4096, 4096))
- model_update['provider_id'] = (
- self._create_provider_id_string(sf_volume_id,
- sfaccount['accountID']))
+ model_update = self._build_connection_info(sfaccount, volume,
+ endpoint=endpoint)
return model_update
def _snapshot_discovery(self, src_uuid, params, vref):
@@ -748,15 +854,17 @@ class SolidFireDriver(san.SanISCSIDriver):
def _do_clone_volume(self, src_uuid,
vref, sf_src_snap=None):
"""Create a clone of an existing volume or snapshot."""
- attributes = {}
+
+ LOG.debug("Creating cloned volume from vol %(src)s to %(dst)s.",
+ {'src': src_uuid, 'dst': vref.id})
+
sf_account = self._get_create_account(vref['project_id'])
params = {'name': '%(prefix)s%(id)s' %
- {'prefix': self.configuration.sf_volume_prefix,
- 'id': vref['id']},
+ {'prefix': self.configuration.sf_volume_prefix,
+ 'id': vref['id']},
'newAccountID': sf_account['accountID']}
is_clone = False
- sf_vol = None
if sf_src_snap:
# In some scenarios we are passed the snapshot information that we
# are supposed to clone.
@@ -764,40 +872,43 @@ class SolidFireDriver(san.SanISCSIDriver):
params['volumeID'] = sf_src_snap['volumeID']
params['newSize'] = int(vref['size'] * units.Gi)
else:
- params, is_clone, sf_vol = self._snapshot_discovery(src_uuid,
- params,
- vref)
+ params, is_clone, sf_src_vol = self._snapshot_discovery(
+ src_uuid, params, vref)
data = self._issue_api_request('CloneVolume', params, version='6.0')
if (('result' not in data) or ('volumeID' not in data['result'])):
msg = _("API response: %s") % data
- raise exception.SolidFireAPIException(msg)
+ raise SolidFireAPIException(msg)
- sf_volume_id = data['result']['volumeID']
+ sf_cloned_id = data['result']['volumeID']
# NOTE(jdg): all attributes are copied via clone, need to do an update
# to set any that were provided
- qos = self._retrieve_qos_setting(vref)
- params = {'volumeID': sf_volume_id}
- if qos:
- params['qos'] = qos
- create_time = vref['created_at'].isoformat()
- attributes = {'uuid': vref['id'],
- 'is_clone': 'True',
- 'src_uuid': src_uuid,
- 'created_at': create_time}
-
- params['attributes'] = attributes
+ params = self._get_default_volume_params(vref, is_clone=is_clone)
+ params['volumeID'] = sf_cloned_id
data = self._issue_api_request('ModifyVolume', params)
- model_update = self._get_model_info(sf_account, sf_volume_id)
+ model_update = self._get_model_info(sf_account, sf_cloned_id)
if model_update is None:
mesg = _('Failed to get model update from clone')
- raise exception.SolidFireAPIException(mesg)
+ raise SolidFireAPIException(mesg)
+ rep_settings = self._retrieve_replication_settings(vref)
+ if self.replication_enabled and rep_settings:
+ try:
+ vref['volumeID'] = sf_cloned_id
+ rep_updates = self._replicate_volume(
+ vref, params, sf_account, rep_settings)
+ model_update.update(rep_updates)
+ except SolidFireDriverException:
+ with excutils.save_and_reraise_exception():
+ self._issue_api_request('DeleteVolume',
+ {'volumeID': sf_cloned_id})
+ self._issue_api_request('PurgeDeletedVolume',
+ {'volumeID': sf_cloned_id})
# Increment the usage count, just for data collection
# We're only doing this for clones, not create_from snaps
if is_clone:
- data = self._update_attributes(sf_vol)
+ data = self._update_attributes(sf_src_vol)
return (data, sf_account, model_update)
def _update_attributes(self, sf_vol):
@@ -928,14 +1039,15 @@ class SolidFireDriver(san.SanISCSIDriver):
raise exception.InvalidQoSSpecs(reason=msg)
return qos
- def _get_sf_volume(self, uuid, params=None):
+ def _get_sf_volume(self, uuid, params=None, endpoint=None):
if params:
vols = [v for v in self._issue_api_request(
- 'ListVolumesForAccount', params)['result']['volumes'] if
- v['status'] == "active"]
+ 'ListVolumesForAccount',
+ params)['result']['volumes'] if v['status'] == "active"]
else:
vols = self._issue_api_request(
- 'ListActiveVolumes', params)['result']['volumes']
+ 'ListActiveVolumes', params,
+ endpoint=endpoint)['result']['volumes']
found_count = 0
sf_volref = None
@@ -1092,9 +1204,9 @@ class SolidFireDriver(san.SanISCSIDriver):
image_service,
image_meta['id'])
- def _get_sfaccounts_for_tenant(self, cinder_project_id):
+ def _get_sfaccounts_for_tenant(self, cinder_project_id, endpoint=None):
accounts = self._issue_api_request(
- 'ListAccounts', {})['result']['accounts']
+ 'ListAccounts', {}, endpoint=endpoint)['result']['accounts']
# Note(jdg): On SF we map account-name to OpenStack's tenant ID
# we use tenantID in here to get secondaries that might exist
@@ -1127,29 +1239,34 @@ class SolidFireDriver(san.SanISCSIDriver):
deleted_vols = [v for v in vols]
return deleted_vols
- def _get_account_create_availability(self, accounts):
+ def _get_account_create_availability(self, accounts, endpoint=None):
# we'll check both the primary and the secondary
# if it exists and return whichever one has count
# available.
for acc in accounts:
if len(self._get_volumes_for_account(
- acc['accountID'])) < self.max_volumes_per_account:
+ acc['accountID'],
+ endpoint=endpoint)) < self.max_volumes_per_account:
return acc
if len(accounts) == 1:
- sfaccount = self._create_sfaccount(accounts[0]['username'] + '_')
+ sfaccount = self._create_sfaccount(accounts[0]['username'] + '_',
+ endpoint=endpoint)
return sfaccount
return None
- def _get_create_account(self, proj_id):
+ def _get_create_account(self, proj_id, endpoint=None):
# Retrieve SolidFire accountID to be used for creating volumes.
- sf_accounts = self._get_sfaccounts_for_tenant(proj_id)
+ sf_accounts = self._get_sfaccounts_for_tenant(
+ proj_id, endpoint=endpoint)
if not sf_accounts:
sf_account_name = self._get_sf_account_name(proj_id)
- sf_account = self._create_sfaccount(sf_account_name)
+ sf_account = self._create_sfaccount(
+ sf_account_name, endpoint=endpoint)
else:
# Check availability for creates
- sf_account = self._get_account_create_availability(sf_accounts)
+ sf_account = self._get_account_create_availability(
+ sf_accounts, endpoint=endpoint)
if not sf_account:
msg = _('Volumes/account exceeded on both primary and '
'secondary SolidFire accounts.')
@@ -1366,6 +1483,36 @@ class SolidFireDriver(san.SanISCSIDriver):
> 0 else volume.get('size'))
return qos
+ def _get_default_volume_params(self, volume, is_clone=False):
+
+ sf_account = self._get_create_account(volume.project_id)
+ qos = self._retrieve_qos_setting(volume)
+
+ create_time = volume.created_at.isoformat()
+ attributes = {
+ 'uuid': volume.id,
+ 'is_clone': is_clone,
+ 'created_at': create_time,
+ 'cinder-name': volume.get('display_name', "")
+ }
+
+ if volume.volume_type_id:
+ for attr in self._extract_sf_attributes_from_extra_specs(
+ volume.volume_type_id):
+ for k, v in attr.items():
+ attributes[k] = v
+
+ vol_name = '%s%s' % (self.configuration.sf_volume_prefix, volume.id)
+ params = {'name': vol_name,
+ 'accountID': sf_account['accountID'],
+ 'sliceCount': 1,
+ 'totalSize': int(volume.size * units.Gi),
+ 'enable512e': self.configuration.sf_emulate_512,
+ 'attributes': attributes,
+ 'qos': qos}
+
+ return params
+
def create_volume(self, volume):
"""Create volume on SolidFire device.
@@ -1380,32 +1527,9 @@ class SolidFireDriver(san.SanISCSIDriver):
does not already exist, we'll go ahead and create it.
"""
- slice_count = 1
- attributes = {}
sf_account = self._get_create_account(volume['project_id'])
- qos = self._retrieve_qos_setting(volume)
-
- create_time = volume['created_at'].isoformat()
- attributes = {'uuid': volume['id'],
- 'is_clone': 'False',
- 'created_at': create_time}
- attributes['cinder-name'] = volume.get('display_name', "")
-
- if volume['volume_type_id']:
- for setting in self._extract_sf_attributes_from_extra_specs(
- volume['volume_type_id']):
- for k, v in setting.items():
- attributes[k] = v
-
- vname = '%s%s' % (self.configuration.sf_volume_prefix, volume['id'])
- params = {'name': vname,
- 'accountID': sf_account['accountID'],
- 'sliceCount': slice_count,
- 'totalSize': int(volume['size'] * units.Gi),
- 'enable512e': self.configuration.sf_emulate_512,
- 'attributes': attributes,
- 'qos': qos}
+ params = self._get_default_volume_params(volume)
# NOTE(jdg): Check if we're a migration tgt, if so
# use the old volume-id here for the SF Name
@@ -1423,9 +1547,12 @@ class SolidFireDriver(san.SanISCSIDriver):
if self.replication_enabled and rep_settings:
volume['volumeID'] = (
int(model_update['provider_id'].split()[0]))
- self._replicate_volume(volume, params,
- sf_account, rep_settings)
- except exception.SolidFireAPIException:
+ rep_updates = self._replicate_volume(volume, params,
+ sf_account, rep_settings)
+ if rep_updates:
+ model_update.update(rep_updates)
+
+ except SolidFireAPIException:
# NOTE(jdg): Something went wrong after the source create, due to
# the way TFLOW works and it's insistence on retrying the same
# command over and over coupled with the fact that the introduction
@@ -1442,7 +1569,7 @@ class SolidFireDriver(san.SanISCSIDriver):
return model_update
def _retrieve_replication_settings(self, volume):
- rep_data = {}
+ rep_data = "Async"
ctxt = context.get_admin_context()
type_id = volume.get('volume_type_id', None)
if type_id is not None:
@@ -1450,29 +1577,28 @@ class SolidFireDriver(san.SanISCSIDriver):
return rep_data
def _set_rep_by_volume_type(self, ctxt, type_id):
- rep_opts = {}
+ rep_type = None
type_ref = volume_types.get_volume_type(ctxt, type_id)
specs = type_ref.get('extra_specs')
- # We use the replication_enabled flag for both the trigger in the
- # driver, as well as capabilities for scheduler. Note we don't
- # require or check for the additional "replication:True|False"
- # spec in the type any longer.
+ # TODO(erlon): Add support for sync/snapshot replication
if specs.get('replication_enabled', "") == "<is> True":
- rep_opts['targets'] = specs.get(
- 'solidfire:replication_targets', self.cluster_pairs[0])
- return rep_opts
+ rep_type = 'Async'
- def _replicate_volume(self, volume, src_params,
+ return rep_type
+
+ def _replicate_volume(self, volume, params,
parent_sfaccount, rep_info):
- params = {}
- # TODO(jdg): Right now we just go to first pair,
- # need to add parsing of rep_info eventually
- # in other words "rep_info" is not used yet!
+ updates = {}
+ rep_success_status = fields.ReplicationStatus.ENABLED
+
+ # NOTE(erlon): Right now we only support 1 remote target, so we always
+ # get cluster_pairs[0]
tgt_endpoint = self.cluster_pairs[0]['endpoint']
- LOG.debug("Replicating volume on remote cluster: %s", tgt_endpoint)
- params['attributes'] = src_params['attributes']
+ LOG.debug("Replicating volume on remote cluster: %(tgt)s\n params: "
+ "%(params)s", {'tgt': tgt_endpoint, 'params': params})
+
params['username'] = self._get_sf_account_name(volume['project_id'])
try:
params['initiatorSecret'] = parent_sfaccount['initiatorSecret']
@@ -1490,7 +1616,6 @@ class SolidFireDriver(san.SanISCSIDriver):
endpoint=tgt_endpoint))
# Create the volume on the remote cluster w/same params as original
- params = src_params
params['accountID'] = remote_account['accountID']
LOG.debug("Create remote volume on: %(endpoint)s with account: "
"%(account)s",
@@ -1505,24 +1630,79 @@ class SolidFireDriver(san.SanISCSIDriver):
'8.0',
endpoint=tgt_endpoint)
- # Enable volume pairing
- LOG.debug("Start volume pairing on volume ID: %s",
- volume['volumeID'])
- params = {'volumeID': volume['volumeID']}
- rep_key = self._issue_api_request('StartVolumePairing',
- params,
- '8.0')['result']['volumePairingKey']
- params = {'volumeID': tgt_sfid,
- 'volumePairingKey': rep_key}
- LOG.debug("Issue CompleteVolumePairing request on remote: "
- "%(endpoint)s, %(parameters)s",
- {'endpoint': tgt_endpoint['url'], 'parameters': params})
- self._issue_api_request('CompleteVolumePairing',
- params,
- '8.0',
- endpoint=tgt_endpoint)
+ # NOTE(erlon): For some reason the SF cluster randomly fails the
+ # replication of volumes. The generated keys are deemed invalid by the
+ # target backend. When that happens, we re-start the volume pairing
+ # process.
+ @retry(SolidFireReplicationPairingError, tries=6)
+ def _pair_volumes():
+ # Enable volume pairing
+ LOG.debug("Start volume pairing on volume ID: %s",
+ volume['volumeID'])
+
+ # Make sure we split any pair the volume have
+ params = {'volumeID': volume['volumeID'], 'mode': rep_info}
+ self._issue_api_request('RemoveVolumePair', params, '8.0')
+
+ rep_key = self._issue_api_request(
+ 'StartVolumePairing', params,
+ '8.0')['result']['volumePairingKey']
+ params = {'volumeID': tgt_sfid,
+ 'volumePairingKey': rep_key}
+ LOG.debug("Sending issue CompleteVolumePairing request on remote: "
+ "%(endpoint)s, %(parameters)s",
+ {'endpoint': tgt_endpoint['url'], 'parameters': params})
+ self._issue_api_request('CompleteVolumePairing',
+ params,
+ '8.0',
+ endpoint=tgt_endpoint)
+
+ try:
+ _pair_volumes()
+ except SolidFireAPIException:
+ with excutils.save_and_reraise_exception():
+ params = {'volumeID': tgt_sfid}
+ LOG.debug("Error pairing volume on remote cluster. Rolling "
+ "back and deleting volume %(vol)s at cluster "
+ "%(cluster)s.",
+ {'vol': tgt_sfid, 'cluster': tgt_endpoint})
+ self._issue_api_request('DeleteVolume', params,
+ endpoint=tgt_endpoint)
+ self._issue_api_request('PurgeDeletedVolume', params,
+ endpoint=tgt_endpoint)
+
+ updates['replication_status'] = rep_success_status
+
LOG.debug("Completed volume pairing.")
- return model_update
+ return updates
+
+ def _disable_replication(self, volume):
+
+ updates = {}
+ tgt_endpoint = self.cluster_pairs[0]['endpoint']
+
+ sfvol = self._get_sfvol_by_cinder_vref(volume)
+ if len(sfvol['volumePairs']) != 1:
+ LOG.warning("Trying to disable replication on volume %s but "
+ "volume does not have pairs.", volume.id)
+
+ updates['replication_status'] = fields.ReplicationStatus.DISABLED
+ return updates
+
+ params = {'volumeID': sfvol['volumeID']}
+ self._issue_api_request('RemoveVolumePair', params, '8.0')
+
+ remote_sfid = sfvol['volumePairs'][0]['remoteVolumeID']
+ params = {'volumeID': remote_sfid}
+ self._issue_api_request('RemoveVolumePair',
+ params, '8.0', endpoint=tgt_endpoint)
+ self._issue_api_request('DeleteVolume', params,
+ endpoint=tgt_endpoint)
+ self._issue_api_request('PurgeDeletedVolume', params,
+ endpoint=tgt_endpoint)
+
+ updates['replication_status'] = fields.ReplicationStatus.DISABLED
+ return updates
@locked_source_id_operation
def create_cloned_volume(self, volume, source):
@@ -1609,6 +1789,11 @@ class SolidFireDriver(san.SanISCSIDriver):
params = {'volumeID': sf_vol['volumeID'],
'name': '%s%s' % (self.configuration.sf_volume_prefix,
snapshot['id'])}
+
+ rep_settings = self._retrieve_replication_settings(snapshot.volume)
+ if self.replication_enabled and rep_settings:
+ params['enableRemoteReplication'] = True
+
return self._do_snapshot_create(params)
@locked_source_id_operation
@@ -1731,6 +1916,8 @@ class SolidFireDriver(san.SanISCSIDriver):
vols = self._issue_api_request(
'ListActiveVolumes', {},
endpoint=endpoint)['result']['volumes']
+ # FIXME(erlon): When we fetch only for the volume name, we miss
+ # volumes that were brought to Cinder via cinder-manage.
vlist = (
[sfvol for sfvol in vols for cv in cinder_volumes if cv['id'] in
sfvol['name']])
@@ -1915,6 +2102,21 @@ class SolidFireDriver(san.SanISCSIDriver):
self._issue_api_request('ModifyVolume',
params, version='5.0')
+ rep_settings = self._retrieve_replication_settings(volume)
+ if self.replication_enabled and rep_settings:
+ if len(sf_vol['volumePairs']) != 1:
+ LOG.error("Can't find remote pair while extending the "
+ "volume or multiple replication pairs found!")
+ raise exception.VolumeNotFound(volume_id=volume['id'])
+
+ tgt_endpoint = self.cluster_pairs[0]['endpoint']
+ target_vol_id = sf_vol['volumePairs'][0]['remoteVolumeID']
+ params2 = params.copy()
+ params2['volumeID'] = target_vol_id
+ self._issue_api_request('ModifyVolume',
+ params2, version='5.0',
+ endpoint=tgt_endpoint)
+
def _get_provisioned_capacity(self):
response = self._issue_api_request('ListVolumes', {}, version='8.0')
volumes = response['result']['volumes']
@@ -1929,7 +2131,6 @@ class SolidFireDriver(san.SanISCSIDriver):
def _update_cluster_status(self):
"""Retrieve status info for the Cluster."""
params = {}
-
data = {}
backend_name = self.configuration.safe_get('volume_backend_name')
data["volume_backend_name"] = backend_name or self.__class__.__name__
@@ -2071,7 +2272,8 @@ class SolidFireDriver(san.SanISCSIDriver):
def retype(self, ctxt, volume, new_type, diff, host):
"""Convert the volume to be of the new type.
- Returns a boolean indicating whether the retype occurred.
+ Returns a boolean indicating whether the retype occurred and a dict
+ with the updates on the volume.
:param ctxt: Context
:param volume: A dictionary describing the volume to migrate
@@ -2082,8 +2284,10 @@ class SolidFireDriver(san.SanISCSIDriver):
dictionary of its reported capabilities (Not Used).
"""
- qos = {}
- attributes = {}
+ model_update = {}
+
+ LOG.debug("Retyping volume %(vol)s to new type %(type)s",
+ {'vol': volume.id, 'type': new_type})
sfaccount = self._get_sfaccount(volume['project_id'])
params = {'accountID': sfaccount['accountID']}
@@ -2092,9 +2296,30 @@ class SolidFireDriver(san.SanISCSIDriver):
if sf_vol is None:
raise exception.VolumeNotFound(volume_id=volume['id'])
+ if self.replication_enabled:
+ ctxt = context.get_admin_context()
+ src_rep_type = self._set_rep_by_volume_type(
+ ctxt, volume.volume_type_id)
+ dst_rep_type = self._set_rep_by_volume_type(ctxt, new_type['id'])
+
+ if src_rep_type != dst_rep_type:
+ if dst_rep_type:
+ rep_settings = self._retrieve_replication_settings(volume)
+ rep_params = self._get_default_volume_params(volume)
+ volume['volumeID'] = (
+ int(volume.provider_id.split()[0]))
+ rep_updates = self._replicate_volume(volume, rep_params,
+ sfaccount,
+ rep_settings)
+ else:
+ rep_updates = self._disable_replication(volume)
+
+ if rep_updates:
+ model_update.update(rep_updates)
+
attributes = sf_vol['attributes']
attributes['retyped_at'] = timeutils.utcnow().isoformat()
- params = {'volumeID': sf_vol['volumeID']}
+ params = {'volumeID': sf_vol['volumeID'], 'attributes': attributes}
qos = self._set_qos_by_volume_type(ctxt, new_type['id'],
volume.get('size'))
@@ -2102,19 +2327,23 @@ class SolidFireDriver(san.SanISCSIDriver):
params['qos'] = qos
self._issue_api_request('ModifyVolume', params)
- return True
+ return True, model_update
def manage_existing(self, volume, external_ref):
"""Manages an existing SolidFire Volume (import to Cinder).
Renames the Volume to match the expected name for the volume.
- Also need to consider things like QoS, Emulation, account/tenant.
+ Also need to consider things like QoS, Emulation, account/tenant and
+ replication settings.
"""
sfid = external_ref.get('source-id', None)
sfname = external_ref.get('name', None)
+
+ LOG.debug("Managing volume %(id)s to ref %(ref)s",
+ {'id': volume.id, 'ref': external_ref})
if sfid is None:
- raise exception.SolidFireAPIException(_("Manage existing volume "
- "requires 'source-id'."))
+ raise SolidFireAPIException(_("Manage existing volume "
+ "requires 'source-id'."))
# First get the volume on the SF cluster (MUST be active)
params = {'startVolumeID': sfid,
@@ -2125,26 +2354,51 @@ class SolidFireDriver(san.SanISCSIDriver):
sf_ref = vols[0]
sfaccount = self._get_create_account(volume['project_id'])
- attributes = {}
- qos = self._retrieve_qos_setting(volume)
-
import_time = volume['created_at'].isoformat()
attributes = {'uuid': volume['id'],
'is_clone': 'False',
'os_imported_at': import_time,
'old_name': sfname}
- params = {'name': volume['name'],
- 'volumeID': sf_ref['volumeID'],
- 'accountID': sfaccount['accountID'],
- 'enable512e': self.configuration.sf_emulate_512,
- 'attributes': attributes,
- 'qos': qos}
-
+ params = self._get_default_volume_params(volume)
+ params['volumeID'] = sf_ref['volumeID']
+ params['attributes'] = attributes
+ params.pop('totalSize')
self._issue_api_request('ModifyVolume',
params, version='5.0')
- return self._get_model_info(sfaccount, sf_ref['volumeID'])
+ try:
+ rep_updates = {}
+ rep_settings = self._retrieve_replication_settings(volume)
+ if self.replication_enabled and rep_settings:
+ if len(sf_ref['volumePairs']) != 0:
+ msg = _("Not possible to manage a volume with "
+ "replicated pair! Please split the volume pairs.")
+ LOG.error(msg)
+ raise SolidFireDriverException(msg)
+ else:
+ params = self._get_default_volume_params(volume)
+ params['volumeID'] = sf_ref['volumeID']
+ volume['volumeID'] = sf_ref['volumeID']
+ params['totalSize'] = sf_ref['totalSize']
+ rep_updates = self._replicate_volume(
+ volume, params, sfaccount, rep_settings)
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ # When the replication fails mid-process, we need to
+ # set the volume properties back the way they were before.
+ LOG.error("Error trying to replicate volume %s",
+ volume.id)
+ params = {'volumeID': sf_ref['volumeID']}
+ params['attributes'] = sf_ref['attributes']
+ self._issue_api_request('ModifyVolume',
+ params, version='5.0')
+
+ model_update = self._get_model_info(sfaccount, sf_ref['volumeID'])
+
+ model_update.update(rep_updates)
+
+ return model_update
def manage_existing_get_size(self, volume, external_ref):
"""Return size of an existing LV for manage_existing.
@@ -2161,6 +2415,10 @@ class SolidFireDriver(san.SanISCSIDriver):
'limit': 1}
vols = self._issue_api_request(
'ListActiveVolumes', params)['result']['volumes']
+ if len(vols) != 1:
+ msg = _("Provided volume id does not exist on SolidFire backend.")
+ raise SolidFireDriverException(msg)
+
return int(math.ceil(float(vols[0]['totalSize']) / units.Gi))
def unmanage(self, volume):
@@ -2187,18 +2445,20 @@ class SolidFireDriver(san.SanISCSIDriver):
self._issue_api_request('ModifyVolume',
params, version='5.0')
- def _failover_volume(self, src_vol, tgt_vol, tgt_cluster):
+ def _failover_volume(self, tgt_vol, tgt_cluster, src_vol=None):
"""Modify remote volume to R/W mode."""
- # Put the src in tgt mode assuming it's still available
- # catch the exception if the cluster isn't available and
- # continue on
- params = {'volumeID': src_vol['volumeID'],
- 'access': 'replicationTarget'}
- try:
- self._issue_api_request('ModifyVolume', params)
- except exception.SolidFireAPIException:
- # FIXME
- pass
+
+ if src_vol:
+ # Put the src in tgt mode assuming it's still available
+ # catch the exception if the cluster isn't available and
+ # continue on
+ params = {'volumeID': src_vol['volumeID'],
+ 'access': 'replicationTarget'}
+ try:
+ self._issue_api_request('ModifyVolume', params)
+ except SolidFireAPIException:
+ # FIXME
+ pass
# Now call out to the remote and make the tgt our new src
params = {'volumeID': tgt_vol['volumeID'],
@@ -2213,62 +2473,39 @@ class SolidFireDriver(san.SanISCSIDriver):
using secondary_id option. You can do this simply by specifying:
`secondary_id=default`
"""
+ remote = None
failback = False
volume_updates = []
- remote = None
+
+ LOG.info("Failing over. Secondary ID is: %s",
+ secondary_id)
+
+ # NOTE(erlon): For now we only support one replication target device.
+ # So, there are two cases we have to deal with here:
+ # 1. Caller specified a backend target to fail over to (this must be
+ # the backend_id as defined in replication_device; any other value
+ # will raise an error). If the user does not specify anything, we
+ # also fall into this case.
+ # 2. Caller wants to failback and therefore sets backend_id=default.
secondary_id = secondary_id.lower() if secondary_id else None
- # FIXME(jdg): There's an awful lot going on in this if/else block
- # it's pretty simple in terms of what it does, but would be
- # good to come back and clean it up and make it a bit more
- # readable/maintainable.
-
- # There's two cases we have to deal with
- # 1. Caller specified a backend target to fail too
- # 2. Caller just wants to failover to anything available
- # In case `1` we need to check if they specified the default
- # and want to failback, so make sure we're even failed-over
- #
- # In case `2` they didn't specify a target, but if we're failed
- # over already, can't just grab a target off the list, we might
- # already be on that target, so check that and try and go back to
- # whence you came
- if secondary_id:
- if secondary_id == "default" and not self.failed_over:
- LOG.error("SolidFire driver received failover_host "
- "specifying failback to default, the "
- "host however is not in `failed_over` "
- "state, so can't failback.")
- raise exception.InvalidReplicationTarget
- elif secondary_id == "default" and self.failed_over:
- remote = self.default_cluster
- failback = True
- # TODO(jdg): Add a simple check here to make
- # sure the default is online
- else:
- for rc in self.cluster_pairs:
- if rc['mvip'] == secondary_id:
- remote = rc
- break
- if not remote:
- LOG.error("SolidFire driver received failover_host "
- "but was unable to find specified replication "
- "pair with id: %s.", secondary_id)
- raise exception.InvalidReplicationTarget
+ if secondary_id == "default" and not self.failed_over:
+ msg = _("SolidFire driver received failover_host "
+ "specifying failback to default, the "
+ "host however is not in `failed_over` "
+ "state.")
+ raise exception.InvalidReplicationTarget(msg)
+ elif secondary_id == "default" and self.failed_over:
+ remote = self.primary_cluster
+ failback = True
else:
- # Otherwise, we just grab a target off the list
- # but beware, we may already be failed over and there
- # may not be another target left, so recycle back to
- # the default
- if self.failed_over:
- for cp in self.cluster_pairs:
- if cp['endpoint'] != self.active_cluster['endpoint']:
- remote = cp
- if not remote:
- remote = self.default_cluster
- failback = True
- else:
- remote = self.cluster_pairs[0]
+ repl_configs = self.configuration.replication_device[0]
+ if secondary_id and repl_configs['backend_id'] != secondary_id:
+ msg = _("Replication id (%s) does not match the configured"
+ "on cinder.conf.") % secondary_id
+ raise exception.InvalidReplicationTarget(msg)
+
+ remote = self.cluster_pairs[0]
if not remote or not self.replication_enabled:
LOG.error("SolidFire driver received failover_host "
@@ -2279,33 +2516,92 @@ class SolidFireDriver(san.SanISCSIDriver):
"on non replicated "
"backend."))
- # Ok, that was annoying; get on with it
target_vols = self._map_sf_volumes(volumes,
endpoint=remote['endpoint'])
- primary_vols = self._map_sf_volumes(volumes)
+ LOG.debug("Mapped target_vols: %s", target_vols)
+
+ primary_vols = None
+ try:
+ primary_vols = self._map_sf_volumes(volumes)
+ LOG.debug("Mapped Primary_vols: %s", target_vols)
+ except SolidFireAPIException:
+ # API Request failed on source. Failover/failback will skip next
+ # calls to it.
+ pass
+
for v in volumes:
+ if v['status'] == "error":
+ LOG.debug("Skipping operation for Volume %s as it is "
+ "on error state.")
+ continue
+
target_vlist = [sfv for sfv in target_vols
if sfv['cinder_id'] == v['id']]
if len(target_vlist) > 0:
target_vol = target_vlist[0]
- # BOOKMARK This fails on failback using 'default'
- #
- primary_vol = [sfv for sfv in primary_vols if
- sfv['cinder_id'] == v['id']][0]
- self._failover_volume(primary_vol, target_vol, remote)
-
- # Now we need to update the iqn of the volume to match
- # the target svip etc
- iqn = target_vol['iqn']
- volume_updates.append(
- {'volume_id': v['id'],
- 'updates': {
- 'provider_location': ('%s %s %s' %
- (remote['endpoint']['svip'],
- iqn,
- 0)),
- 'replication_status': 'failed-over'}})
+
+ if primary_vols:
+ vols = [sfv for sfv in primary_vols
+ if sfv['cinder_id'] == v['id']]
+
+ if not vols:
+ LOG.error("SolidFire driver cannot proceed. "
+ "Could not find volume %s in "
+ "back-end storage.", v['id'])
+ raise exception.UnableToFailOver(
+ reason=_("Cannot find cinder volume in "
+ "back-end storage."))
+
+ # Have at least one cinder volume in storage
+ primary_vol = vols[0]
+ else:
+ primary_vol = None
+
+ LOG.debug('Failing-over volume %s, target vol %s, '
+ 'primary vol %s', v, target_vol, primary_vol)
+
+ try:
+ self._failover_volume(target_vol, remote, primary_vol)
+
+ sf_account = self._get_create_account(
+ v.project_id, endpoint=remote['endpoint'])
+
+ conn_info = self._build_connection_info(
+ sf_account, target_vol, endpoint=remote['endpoint'])
+
+ # volume status defaults to failed-over
+ replication_status = 'failed-over'
+
+ # in case of a failback, volume status must be reset to its
+ # original state
+ if failback:
+ replication_status = 'enabled'
+
+ vol_updates = {
+ 'volume_id': v['id'],
+ 'updates': {
+ 'replication_status': replication_status
+ }
+ }
+ vol_updates['updates'].update(conn_info)
+
+ volume_updates.append(vol_updates)
+ LOG.debug("Updates for volume: %(id)s %(updates)s",
+ {'id': v.id, 'updates': vol_updates})
+
+ except Exception as e:
+ volume_updates.append({'volume_id': v['id'],
+ 'updates': {'status': 'error', }})
+
+ if failback:
+ LOG.error("Error trying to failback volume %s", v.id)
+ else:
+ LOG.error("Error trying to failover volume %s", v.id)
+
+ msg = e.message if hasattr(e, 'message') else e
+ LOG.exception(msg)
+
else:
volume_updates.append({'volume_id': v['id'],
'updates': {'status': 'error', }})
@@ -2315,11 +2611,14 @@ class SolidFireDriver(san.SanISCSIDriver):
# active/active HA c-vol services with SolidFire. The introduction of
# the active_cluster and failed_over attributes is going to break that
# but for now that's going to be the trade off of using replication
- active_cluster_id = remote['mvip']
- self.active_cluster = remote
- self.failed_over = True
if failback:
- active_cluster_id = 'default'
+ active_cluster_id = None
+ self.failed_over = False
+ else:
+ active_cluster_id = remote['backend_id']
+ self.failed_over = True
+
+ self.active_cluster = remote
return active_cluster_id, volume_updates, []
diff --git a/releasenotes/notes/fix-solidfire-replication-dcb3e59b29950933.yaml b/releasenotes/notes/fix-solidfire-replication-dcb3e59b29950933.yaml
new file mode 100644
index 000000000..62c06a560
--- /dev/null
+++ b/releasenotes/notes/fix-solidfire-replication-dcb3e59b29950933.yaml
@@ -0,0 +1,8 @@
+---
+fixes:
+ - |
+ The SolidFire replication was fixed. Several bugs were addressed (creating
+ replicated volumes from snapshots, from volumes, retyping a volume to a
+ replicated type, managing a volume to a replicated type, correctly
+ updating portal info on failover/failback, and some minor
+ other fixes). Closes bugs #1834013, #1751932.