From 3eb8bb739b3ea2a44d2a847d4c0f77233f7317eb Mon Sep 17 00:00:00 2001
From: Atsushi Kawai
Date: Mon, 27 Feb 2023 03:31:07 +0000
Subject: Hitachi: Fix key error when backend is down

This patch fixes the cause of a KeyError in the Cinder scheduler when a
backend is down. It also fixes the same bug in the OEM drivers (NEC V
and HPE XP).

Closes-Bug: #2004140
Change-Id: I2735d902af256f979fc75a697f605b7a8ae65178
---
 .../drivers/hitachi/test_hitachi_hbsd_rest_fc.py   | 49 ++++++++++++++++++++++
 cinder/volume/drivers/hitachi/hbsd_common.py       | 14 ++++---
 cinder/volume/drivers/hitachi/hbsd_fc.py           |  1 +
 cinder/volume/drivers/hitachi/hbsd_iscsi.py        |  1 +
 cinder/volume/drivers/hitachi/hbsd_utils.py        |  2 +-
 ...-keyerr-when-backend-down-a5a35b15dc8f1132.yaml |  6 +++
 6 files changed, 66 insertions(+), 7 deletions(-)
 create mode 100644 releasenotes/notes/hitachi-vsp-fix-keyerr-when-backend-down-a5a35b15dc8f1132.yaml

diff --git a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_fc.py b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_fc.py
index db3e706ec..3743a2cb3 100644
--- a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_fc.py
+++ b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_fc.py
@@ -18,6 +18,7 @@ import functools
 from unittest import mock
 
 from oslo_config import cfg
+from oslo_utils import units
 import requests
 from requests import models
 
@@ -877,11 +878,59 @@ class HBSDRESTFCDriverTest(test.TestCase):
         get_goodness_function.return_value = None
         stats = self.driver.get_volume_stats(True)
         self.assertEqual('Hitachi', stats['vendor_name'])
+        self.assertEqual(self.configuration.volume_backend_name,
+                         stats["pools"][0]['pool_name'])
+        self.assertEqual(self.configuration.reserved_percentage,
+                         stats["pools"][0]['reserved_percentage'])
+        self.assertTrue(stats["pools"][0]['thin_provisioning_support'])
+        self.assertFalse(stats["pools"][0]['thick_provisioning_support'])
         self.assertTrue(stats["pools"][0]['multiattach'])
+        self.assertTrue(stats["pools"][0]['consistencygroup_support'])
+        self.assertTrue(stats["pools"][0]['consistent_group_snapshot_enabled'])
+        self.assertEqual(self.configuration.max_over_subscription_ratio,
+                         stats["pools"][0]['max_over_subscription_ratio'])
+        self.assertEqual(
+            GET_POOL_RESULT['totalPoolCapacity'] // units.Ki,
+            stats["pools"][0]['total_capacity_gb'])
+        self.assertEqual(
+            GET_POOL_RESULT['availableVolumeCapacity'] // units.Ki,
+            stats["pools"][0]['free_capacity_gb'])
+        self.assertEqual(
+            GET_POOL_RESULT['totalLocatedCapacity'] // units.Ki,
+            stats["pools"][0]['provisioned_capacity_gb'])
+        self.assertEqual('up', stats["pools"][0]['backend_state'])
         self.assertEqual(1, request.call_count)
         self.assertEqual(1, get_filter_function.call_count)
         self.assertEqual(1, get_goodness_function.call_count)
 
+    @mock.patch.object(driver.FibreChannelDriver, "get_goodness_function")
+    @mock.patch.object(driver.FibreChannelDriver, "get_filter_function")
+    @mock.patch.object(hbsd_rest.HBSDREST, "get_pool_info")
+    def test_get_volume_stats_error(
+            self, get_pool_info, get_filter_function, get_goodness_function):
+        get_pool_info.side_effect = exception.VolumeDriverException(data='')
+        get_filter_function.return_value = None
+        get_goodness_function.return_value = None
+        stats = self.driver.get_volume_stats(True)
+        self.assertEqual('Hitachi', stats['vendor_name'])
+        self.assertEqual(self.configuration.volume_backend_name,
+                         stats["pools"][0]['pool_name'])
+        self.assertEqual(self.configuration.reserved_percentage,
+                         stats["pools"][0]['reserved_percentage'])
+        self.assertTrue(stats["pools"][0]['thin_provisioning_support'])
+        self.assertFalse(stats["pools"][0]['thick_provisioning_support'])
+        self.assertTrue(stats["pools"][0]['multiattach'])
+        self.assertTrue(stats["pools"][0]['consistencygroup_support'])
+        self.assertTrue(stats["pools"][0]['consistent_group_snapshot_enabled'])
+        self.assertEqual(self.configuration.max_over_subscription_ratio,
+                         stats["pools"][0]['max_over_subscription_ratio'])
+        self.assertEqual(0, stats["pools"][0]['total_capacity_gb'])
+        self.assertEqual(0, stats["pools"][0]['free_capacity_gb'])
+        self.assertEqual(0, stats["pools"][0]['provisioned_capacity_gb'])
+        self.assertEqual('down', stats["pools"][0]['backend_state'])
+        self.assertEqual(1, get_filter_function.call_count)
+        self.assertEqual(1, get_goodness_function.call_count)
+
     @mock.patch.object(requests.Session, "request")
     @mock.patch.object(volume_types, 'get_volume_type_extra_specs')
     @mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get)
diff --git a/cinder/volume/drivers/hitachi/hbsd_common.py b/cinder/volume/drivers/hitachi/hbsd_common.py
index 467886581..8512c8b7d 100644
--- a/cinder/volume/drivers/hitachi/hbsd_common.py
+++ b/cinder/volume/drivers/hitachi/hbsd_common.py
@@ -462,14 +462,21 @@ class HBSDCommon():
             pool_name=pool_name,
             reserved_percentage=self.conf.safe_get('reserved_percentage'),
             QoS_support=False,
+            thin_provisioning_support=True,
             thick_provisioning_support=False,
             multiattach=True,
             consistencygroup_support=True,
             consistent_group_snapshot_enabled=True,
+            max_over_subscription_ratio=(
+                volume_utils.get_max_over_subscription_ratio(
+                    self.conf.safe_get('max_over_subscription_ratio'),
+                    True)),
             location_info=location_info
         ))
         if cap_data is None:
             single_pool.update(dict(
+                total_capacity_gb=0,
+                free_capacity_gb=0,
                 provisioned_capacity_gb=0,
                 backend_state='down'))
             self.output_log(MSG.POOL_INFO_RETRIEVAL_FAILED, pool=pool_name)
@@ -478,12 +485,7 @@ class HBSDCommon():
         single_pool.update(dict(
             total_capacity_gb=total_capacity,
             free_capacity_gb=free_capacity,
-            provisioned_capacity_gb=provisioned_capacity,
-            max_over_subscription_ratio=(
-                volume_utils.get_max_over_subscription_ratio(
-                    self.conf.safe_get('max_over_subscription_ratio'),
-                    True)),
-            thin_provisioning_support=True
+            provisioned_capacity_gb=provisioned_capacity
         ))
         single_pool.update(dict(backend_state='up'))
         return single_pool
diff --git a/cinder/volume/drivers/hitachi/hbsd_fc.py b/cinder/volume/drivers/hitachi/hbsd_fc.py
index c701ed581..4ebee7ae2 100644
--- a/cinder/volume/drivers/hitachi/hbsd_fc.py
+++ b/cinder/volume/drivers/hitachi/hbsd_fc.py
@@ -79,6 +79,7 @@ class HBSDFCDriver(driver.FibreChannelDriver):
         2.3.2 - Add specifies format of the names HostGroups/iSCSI Targets.
         2.3.3 - Add GAD volume support.
         2.3.4 - Support data deduplication and compression.
+        2.3.5 - Fix key error when backend is down.
 
     """
 
diff --git a/cinder/volume/drivers/hitachi/hbsd_iscsi.py b/cinder/volume/drivers/hitachi/hbsd_iscsi.py
index eb89adae1..d06f665cd 100644
--- a/cinder/volume/drivers/hitachi/hbsd_iscsi.py
+++ b/cinder/volume/drivers/hitachi/hbsd_iscsi.py
@@ -79,6 +79,7 @@ class HBSDISCSIDriver(driver.ISCSIDriver):
         2.3.2 - Add specifies format of the names HostGroups/iSCSI Targets.
         2.3.3 - Add GAD volume support.
         2.3.4 - Support data deduplication and compression.
+        2.3.5 - Fix key error when backend is down.
""" diff --git a/cinder/volume/drivers/hitachi/hbsd_utils.py b/cinder/volume/drivers/hitachi/hbsd_utils.py index 9efba7fce..b5f2ee90a 100644 --- a/cinder/volume/drivers/hitachi/hbsd_utils.py +++ b/cinder/volume/drivers/hitachi/hbsd_utils.py @@ -25,7 +25,7 @@ from oslo_utils import units from cinder import exception from cinder import utils as cinder_utils -VERSION = '2.3.4' +VERSION = '2.3.5' CI_WIKI_NAME = 'Hitachi_VSP_CI' PARAM_PREFIX = 'hitachi' VENDOR_NAME = 'Hitachi' diff --git a/releasenotes/notes/hitachi-vsp-fix-keyerr-when-backend-down-a5a35b15dc8f1132.yaml b/releasenotes/notes/hitachi-vsp-fix-keyerr-when-backend-down-a5a35b15dc8f1132.yaml new file mode 100644 index 000000000..f996280d9 --- /dev/null +++ b/releasenotes/notes/hitachi-vsp-fix-keyerr-when-backend-down-a5a35b15dc8f1132.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + Hitachi, NEC V, HPE XP drivers `bug #2004140 + `_: Fixed + ``KeyError`` when a backend is down. \ No newline at end of file -- cgit v1.2.1