-rw-r--r-- api-ref/source/v3/samples/versions/version-show-response.json | 4
-rw-r--r-- api-ref/source/v3/samples/versions/versions-response.json | 4
-rw-r--r-- cinder/api/microversions.py | 2
-rw-r--r-- cinder/api/openstack/api_version_request.py | 5
-rw-r--r-- cinder/api/openstack/rest_api_version_history.rst | 5
-rw-r--r-- cinder/api/v3/volume_transfer.py | 8
-rw-r--r-- cinder/cmd/backup.py | 2
-rw-r--r-- cinder/cmd/volume.py | 2
-rw-r--r-- cinder/compute/nova.py | 2
-rw-r--r-- cinder/context.py | 15
-rw-r--r-- cinder/db/api.py | 3
-rw-r--r-- cinder/db/sqlalchemy/api.py | 11
-rw-r--r-- cinder/exception.py | 7
-rw-r--r-- cinder/keymgr/transfer.py | 107
-rw-r--r-- cinder/opts.py | 1
-rw-r--r-- cinder/scheduler/filters/capacity_filter.py | 50
-rw-r--r-- cinder/tests/unit/__init__.py | 42
-rw-r--r-- cinder/tests/unit/api/v3/test_volume_transfer.py | 144
-rw-r--r-- cinder/tests/unit/api/views/test_versions.py | 7
-rw-r--r-- cinder/tests/unit/backup/test_backup.py | 3
-rw-r--r-- cinder/tests/unit/compute/test_nova.py | 4
-rw-r--r-- cinder/tests/unit/db/test_volume_type.py | 19
-rw-r--r-- cinder/tests/unit/keymgr/test_transfer.py | 178
-rw-r--r-- cinder/tests/unit/known_issues.py | 23
-rw-r--r-- cinder/tests/unit/policies/test_volume_transfers.py | 23
-rw-r--r-- cinder/tests/unit/scheduler/test_capacity_weigher.py | 2
-rw-r--r-- cinder/tests/unit/scheduler/test_host_filters.py | 6
-rw-r--r-- cinder/tests/unit/scheduler/test_host_manager.py | 6
-rw-r--r-- cinder/tests/unit/test.py | 20
-rw-r--r-- cinder/tests/unit/test_cmd.py | 4
-rw-r--r-- cinder/tests/unit/test_db_api.py | 105
-rw-r--r-- cinder/tests/unit/test_utils.py | 96
-rw-r--r-- cinder/tests/unit/utils.py | 44
-rw-r--r-- cinder/tests/unit/volume/drivers/dell_emc/powerflex/__init__.py | 3
-rw-r--r-- cinder/tests/unit/volume/drivers/dell_emc/powerflex/test_create_snapshot.py | 24
-rw-r--r-- cinder/tests/unit/volume/drivers/dell_emc/powermax/powermax_data.py | 2
-rw-r--r-- cinder/tests/unit/volume/drivers/dell_emc/powermax/powermax_fake_objects.py | 3
-rw-r--r-- cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_common.py | 224
-rw-r--r-- cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_fc.py | 16
-rw-r--r-- cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_iscsi.py | 11
-rw-r--r-- cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_masking.py | 10
-rw-r--r-- cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_migrate.py | 6
-rw-r--r-- cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_performance.py | 6
-rw-r--r-- cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_provision.py | 6
-rw-r--r-- cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_replication.py | 31
-rw-r--r-- cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_rest.py | 106
-rw-r--r-- cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_utils.py | 23
-rw-r--r-- cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_fc.py | 49
-rw-r--r-- cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_iscsi.py | 48
-rw-r--r-- cinder/tests/unit/volume/drivers/hpe/test_hpe3par.py | 147
-rw-r--r-- cinder/tests/unit/volume/drivers/ibm/test_xiv_proxy.py | 2
-rw-r--r-- cinder/tests/unit/volume/drivers/inspur/as13000/test_as13000_driver.py | 8
-rw-r--r-- cinder/tests/unit/volume/drivers/lightos/test_lightos_storage.py | 158
-rw-r--r-- cinder/tests/unit/volume/drivers/netapp/dataontap/client/fakes.py | 1489
-rw-r--r-- cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_api.py | 321
-rw-r--r-- cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_client_cmode.py | 46
-rw-r--r-- cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_client_cmode_rest.py | 3709
-rw-r--r-- cinder/tests/unit/volume/drivers/netapp/dataontap/fakes.py | 248
-rw-r--r-- cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_cmode.py | 36
-rw-r--r-- cinder/tests/unit/volume/drivers/netapp/dataontap/test_nfs_cmode.py | 205
-rw-r--r-- cinder/tests/unit/volume/drivers/netapp/dataontap/utils/test_capabilities.py | 40
-rw-r--r-- cinder/tests/unit/volume/drivers/netapp/dataontap/utils/test_data_motion.py | 24
-rw-r--r-- cinder/tests/unit/volume/drivers/netapp/dataontap/utils/test_utils.py | 52
-rw-r--r-- cinder/tests/unit/volume/drivers/netapp/test_utils.py | 5
-rw-r--r-- cinder/tests/unit/volume/drivers/test_infinidat.py | 553
-rw-r--r-- cinder/tests/unit/volume/drivers/test_linstordrv.py | 13
-rw-r--r-- cinder/tests/unit/volume/drivers/test_nfs.py | 74
-rw-r--r-- cinder/tests/unit/volume/drivers/test_pure.py | 245
-rw-r--r-- cinder/tests/unit/volume/drivers/test_quobyte.py | 32
-rw-r--r-- cinder/tests/unit/volume/drivers/test_rbd.py | 248
-rw-r--r-- cinder/tests/unit/zonemanager/test_brcd_fc_zone_driver.py | 16
-rw-r--r-- cinder/tests/unit/zonemanager/test_cisco_fc_zone_driver.py | 23
-rw-r--r-- cinder/tests/unit/zonemanager/test_volume_driver.py | 2
-rw-r--r-- cinder/transfer/api.py | 42
-rw-r--r-- cinder/utils.py | 130
-rw-r--r-- cinder/volume/driver_utils.py | 5
-rw-r--r-- cinder/volume/drivers/dell_emc/powerflex/rest_client.py | 8
-rw-r--r-- cinder/volume/drivers/dell_emc/powermax/common.py | 7
-rw-r--r-- cinder/volume/drivers/dell_emc/powermax/rest.py | 55
-rw-r--r-- cinder/volume/drivers/dell_emc/powermax/utils.py | 3
-rw-r--r-- cinder/volume/drivers/hitachi/hbsd_common.py | 132
-rw-r--r-- cinder/volume/drivers/hitachi/hbsd_fc.py | 3
-rw-r--r-- cinder/volume/drivers/hitachi/hbsd_iscsi.py | 1
-rw-r--r-- cinder/volume/drivers/hitachi/hbsd_rest.py | 6
-rw-r--r-- cinder/volume/drivers/hitachi/hbsd_rest_api.py | 2
-rw-r--r-- cinder/volume/drivers/hitachi/hbsd_rest_fc.py | 174
-rw-r--r-- cinder/volume/drivers/hitachi/hbsd_rest_iscsi.py | 14
-rw-r--r-- cinder/volume/drivers/hitachi/hbsd_utils.py | 60
-rw-r--r-- cinder/volume/drivers/hpe/hpe_3par_base.py | 14
-rw-r--r-- cinder/volume/drivers/hpe/hpe_3par_common.py | 103
-rw-r--r-- cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py | 4
-rw-r--r-- cinder/volume/drivers/infinidat.py | 449
-rw-r--r-- cinder/volume/drivers/lightos.py | 25
-rw-r--r-- cinder/volume/drivers/netapp/dataontap/block_base.py | 16
-rw-r--r-- cinder/volume/drivers/netapp/dataontap/block_cmode.py | 77
-rw-r--r-- cinder/volume/drivers/netapp/dataontap/client/api.py | 243
-rw-r--r-- cinder/volume/drivers/netapp/dataontap/client/client_cmode.py | 79
-rw-r--r-- cinder/volume/drivers/netapp/dataontap/client/client_cmode_rest.py | 2521
-rw-r--r-- cinder/volume/drivers/netapp/dataontap/nfs_base.py | 21
-rw-r--r-- cinder/volume/drivers/netapp/dataontap/nfs_cmode.py | 137
-rw-r--r-- cinder/volume/drivers/netapp/dataontap/utils/capabilities.py | 50
-rw-r--r-- cinder/volume/drivers/netapp/dataontap/utils/data_motion.py | 25
-rw-r--r-- cinder/volume/drivers/netapp/dataontap/utils/utils.py | 32
-rw-r--r-- cinder/volume/drivers/netapp/options.py | 29
-rw-r--r-- cinder/volume/drivers/netapp/utils.py | 7
-rw-r--r-- cinder/volume/drivers/pure.py | 7
-rw-r--r-- cinder/volume/drivers/rbd.py | 197
-rw-r--r-- cinder/volume/targets/iet.py | 4
-rw-r--r-- doc/source/cli/cli-manage-volumes.rst | 3
-rw-r--r-- doc/source/configuration/block-storage/drivers/ceph-rbd-volume-driver.rst | 64
-rw-r--r-- doc/source/configuration/block-storage/drivers/dell-emc-unity-driver.rst | 4
-rw-r--r-- doc/source/configuration/block-storage/drivers/hitachi-vsp-driver.rst | 29
-rw-r--r-- doc/source/configuration/block-storage/drivers/infinidat-volume-driver.rst | 19
-rw-r--r-- doc/source/configuration/block-storage/drivers/pure-storage-driver.rst | 9
-rw-r--r-- doc/source/reference/support-matrix.ini | 2
-rw-r--r-- mypy-files.txt | 1
-rw-r--r-- releasenotes/notes/bp-infinidat-add-snapshot-revert-1bab97e85ff10780.yaml | 4
-rw-r--r-- releasenotes/notes/bug-1936848-6ecc78e0e970419a.yaml | 8
-rw-r--r-- releasenotes/notes/bug-1978729-cinder-backup-4cd87c4d71b7713e.yaml | 8
-rw-r--r-- releasenotes/notes/bug-1981354-infinidat-iscsi-fix-multipath-3f8a0be5f541c66e.yaml | 7
-rw-r--r-- releasenotes/notes/bug-1981982-infinidat-fix-ssl-options-6ddd852c24b16760.yaml | 9
-rw-r--r-- releasenotes/notes/bug-1982078-Driver_initialization_error_w.r.t_default_portset-3992a060cca2adcb.yaml | 7
-rw-r--r-- releasenotes/notes/fix-powerflex-volume-cache-da3fa1769ef78ae8.yaml | 10
-rw-r--r-- releasenotes/notes/hitachi-vsp-fix-resource-lock-msg-5a119426e6c65998.yaml | 6
-rw-r--r-- releasenotes/notes/hitachi-vsp-port-scheduler-207e01b3cd13350b.yaml | 14
-rw-r--r-- releasenotes/notes/hpe-3par-add-get-manageable-2926f21116c98599.yaml | 5
-rw-r--r-- releasenotes/notes/infinidat-manage-unmanage-ccc42b79d741369f.yaml | 6
-rw-r--r-- releasenotes/notes/lock_path-940af881b2112bbe.yaml | 12
-rw-r--r-- releasenotes/notes/netapp-nfs-copy-offload-image-812c7152d9fe4aae.yaml | 9
-rw-r--r-- releasenotes/notes/netapp-nfs-deprecate-copy-offload-option-f9d6fe8e3dfafb04.yaml | 5
-rw-r--r-- releasenotes/notes/netapp-ontap-rest-api-client-d889cfa895f01249.yaml | 19
-rw-r--r-- releasenotes/notes/pure-storage-nvme-driver-f4217c00379c4827.yaml | 2
-rw-r--r-- releasenotes/notes/rbd-backend-qos-implementation-0e141b742e277d26.yaml | 4
-rw-r--r-- releasenotes/notes/slug-b6a0fc3db0a2dd45.yaml | 8
-rw-r--r-- releasenotes/notes/transfer-encrypted-volume-2f040a6993435e79.yaml | 8
-rw-r--r-- releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po | 1130
-rw-r--r-- requirements.txt | 2
-rw-r--r-- test-requirements.txt | 2
-rw-r--r-- tools/config/cinder-config-generator.conf | 1
-rw-r--r-- tox.ini | 13
140 files changed, 14209 insertions, 1121 deletions
diff --git a/api-ref/source/v3/samples/versions/version-show-response.json b/api-ref/source/v3/samples/versions/version-show-response.json
index 352098ba0..29eeff522 100644
--- a/api-ref/source/v3/samples/versions/version-show-response.json
+++ b/api-ref/source/v3/samples/versions/version-show-response.json
@@ -21,8 +21,8 @@
],
"min_version": "3.0",
"status": "CURRENT",
- "updated": "2022-03-30T00:00:00Z",
- "version": "3.69"
+ "updated": "2022-08-31T00:00:00Z",
+ "version": "3.70"
}
]
}
diff --git a/api-ref/source/v3/samples/versions/versions-response.json b/api-ref/source/v3/samples/versions/versions-response.json
index 779a0f1a2..f04870912 100644
--- a/api-ref/source/v3/samples/versions/versions-response.json
+++ b/api-ref/source/v3/samples/versions/versions-response.json
@@ -21,8 +21,8 @@
],
"min_version": "3.0",
"status": "CURRENT",
- "updated": "2022-03-30T00:00:00Z",
- "version": "3.69"
+ "updated": "2022-08-31T00:00:00Z",
+ "version": "3.70"
}
]
}
diff --git a/cinder/api/microversions.py b/cinder/api/microversions.py
index 425af5d24..fcf228d10 100644
--- a/cinder/api/microversions.py
+++ b/cinder/api/microversions.py
@@ -177,6 +177,8 @@ SUPPORT_REIMAGE_VOLUME = '3.68'
SHARED_TARGETS_TRISTATE = '3.69'
+TRANSFER_ENCRYPTED_VOLUME = '3.70'
+
def get_mv_header(version):
"""Gets a formatted HTTP microversion header.
diff --git a/cinder/api/openstack/api_version_request.py b/cinder/api/openstack/api_version_request.py
index b9d7864b8..b81f1afdc 100644
--- a/cinder/api/openstack/api_version_request.py
+++ b/cinder/api/openstack/api_version_request.py
@@ -155,14 +155,15 @@ REST_API_VERSION_HISTORY = """
* 3.67 - API URLs no longer need to include a project_id parameter.
* 3.68 - Support re-image volume
* 3.69 - Allow null value for shared_targets
+ * 3.70 - Support encrypted volume transfers
"""
# The minimum and maximum versions of the API supported
# The default api version request is defined to be the
# minimum version of the API supported.
_MIN_API_VERSION = "3.0"
-_MAX_API_VERSION = "3.69"
-UPDATED = "2022-04-20T00:00:00Z"
+_MAX_API_VERSION = "3.70"
+UPDATED = "2022-08-31T00:00:00Z"
# NOTE(cyeoh): min and max versions declared as functions so we can
diff --git a/cinder/api/openstack/rest_api_version_history.rst b/cinder/api/openstack/rest_api_version_history.rst
index d2c974598..bf7cc6622 100644
--- a/cinder/api/openstack/rest_api_version_history.rst
+++ b/cinder/api/openstack/rest_api_version_history.rst
@@ -528,3 +528,8 @@ following meanings:
manual scans.
- ``false``: Never do locking.
- ``null``: Forced locking regardless of the iSCSI initiator.
+
+3.70
+----
+
+Add the ability to transfer encrypted volumes and their snapshots.
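
(Context: a rough client-side sketch of opting in to the new behaviour; the endpoint, token and volume ID below are placeholders, not values from this change. The header is the standard OpenStack-API-Version microversion header, the same one the unit tests below build via mv.get_mv_header().)

# Hypothetical client request: creating a transfer of an encrypted volume
# requires asking for microversion 3.70 or later.
import requests

CINDER_ENDPOINT = 'http://controller:8776/v3'   # placeholder
TOKEN = '<keystone-token>'                      # placeholder

resp = requests.post(
    f'{CINDER_ENDPOINT}/volume-transfers',
    headers={'X-Auth-Token': TOKEN,
             'OpenStack-API-Version': 'volume 3.70',
             'Content-Type': 'application/json'},
    json={'transfer': {'name': 'transfer1', 'volume_id': '<volume-uuid>'}},
)
resp.raise_for_status()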
diff --git a/cinder/api/v3/volume_transfer.py b/cinder/api/v3/volume_transfer.py
index 830d1b0b2..0fc6c383f 100644
--- a/cinder/api/v3/volume_transfer.py
+++ b/cinder/api/v3/volume_transfer.py
@@ -96,11 +96,15 @@ class VolumeTransferController(volume_transfer_v2.VolumeTransferController):
no_snapshots = strutils.bool_from_string(transfer.get('no_snapshots',
False))
+ req_version = req.api_version_request
+ allow_encrypted = req_version.matches(mv.TRANSFER_ENCRYPTED_VOLUME)
+
LOG.info("Creating transfer of volume %s", volume_id)
try:
- new_transfer = self.transfer_api.create(context, volume_id, name,
- no_snapshots=no_snapshots)
+ new_transfer = self.transfer_api.create(
+ context, volume_id, name,
+ no_snapshots=no_snapshots, allow_encrypted=allow_encrypted)
# Not found exception will be handled at the wsgi level
except exception.Invalid as error:
raise exc.HTTPBadRequest(explanation=error.msg)
diff --git a/cinder/cmd/backup.py b/cinder/cmd/backup.py
index a3381364f..2f10be147 100644
--- a/cinder/cmd/backup.py
+++ b/cinder/cmd/backup.py
@@ -34,6 +34,7 @@ import __original_module_threading as orig_threading # pylint: disable=E0401
import threading # noqa
orig_threading.current_thread.__globals__['_active'] = threading._active
+import os_brick
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
@@ -108,6 +109,7 @@ def main():
priv_context.init(root_helper=shlex.split(utils.get_root_helper()))
utils.monkey_patch()
gmr.TextGuruMeditation.setup_autorun(version, conf=CONF)
+ os_brick.setup(CONF)
global LOG
LOG = logging.getLogger(__name__)
semaphore = utils.semaphore_factory(CONF.backup_max_operations,
diff --git a/cinder/cmd/volume.py b/cinder/cmd/volume.py
index 4fae53d3e..508bc6c07 100644
--- a/cinder/cmd/volume.py
+++ b/cinder/cmd/volume.py
@@ -40,6 +40,7 @@ import __original_module_threading as orig_threading # pylint: disable=E0401
import threading # noqa
orig_threading.current_thread.__globals__['_active'] = threading._active
+import os_brick
from oslo_config import cfg
from oslo_log import log as logging
from oslo_privsep import priv_context
@@ -178,6 +179,7 @@ def main():
priv_context.init(root_helper=shlex.split(utils.get_root_helper()))
utils.monkey_patch()
gmr.TextGuruMeditation.setup_autorun(version, conf=CONF)
+ os_brick.setup(CONF)
global LOG
LOG = logging.getLogger(__name__)
diff --git a/cinder/compute/nova.py b/cinder/compute/nova.py
index 17bf3da93..b0a0952ff 100644
--- a/cinder/compute/nova.py
+++ b/cinder/compute/nova.py
@@ -226,7 +226,7 @@ class API(base.Base):
return result
def reimage_volume(self, context, server_ids, volume_id):
- api_version = '2.91'
+ api_version = '2.93'
events = [self._get_volume_reimaged_event(server_id, volume_id)
for server_id in server_ids]
result = self._send_events(context, events, api_version=api_version)
diff --git a/cinder/context.py b/cinder/context.py
index 6cb7dda0a..147609302 100644
--- a/cinder/context.py
+++ b/cinder/context.py
@@ -93,6 +93,9 @@ class RequestContext(context.RequestContext):
quota_class=None,
service_catalog: Optional[dict] = None,
user_auth_plugin=None,
+                 message_resource_id=None,
+                 message_resource_type=None,
+                 message_action=None,
**kwargs):
"""Initialize RequestContext.
@@ -119,9 +122,9 @@ class RequestContext(context.RequestContext):
timestamp = timeutils.parse_isotime(timestamp)
self.timestamp = timestamp
self.quota_class = quota_class
- self.message_resource_id = None
- self.message_resource_type = None
- self.message_action = None
+ self.message_resource_id = message_resource_id
+ self.message_resource_type = message_resource_type
+ self.message_action = message_action
if service_catalog:
# Only include required parts of service_catalog
@@ -176,6 +179,9 @@ class RequestContext(context.RequestContext):
result['quota_class'] = self.quota_class
result['service_catalog'] = self.service_catalog
result['request_id'] = self.request_id
+ result['message_resource_id'] = self.message_resource_id
+ result['message_resource_type'] = self.message_resource_type
+ result['message_action'] = self.message_action
return result
@classmethod
@@ -196,6 +202,9 @@ class RequestContext(context.RequestContext):
auth_token=values.get('auth_token'),
user_domain_id=values.get('user_domain_id'),
project_domain_id=values.get('project_domain_id'),
+                   message_resource_id=values.get('message_resource_id'),
+                   message_resource_type=values.get('message_resource_type'),
+                   message_action=values.get('message_action')
)
def authorize(self,
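
(Aside: a minimal sketch, not part of the patch, of the round trip this enables; the field values are made up.)

# The message_* fields now survive a to_dict()/from_dict() round trip,
# e.g. when the context is serialized and rebuilt for RPC.
from cinder import context

ctxt = context.RequestContext('fake-user', 'fake-project',
                              message_resource_id='<volume-uuid>',
                              message_resource_type='VOLUME',
                              message_action=None)
restored = context.RequestContext.from_dict(ctxt.to_dict())
assert restored.message_resource_id == '<volume-uuid>'
assert restored.message_resource_type == 'VOLUME'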
diff --git a/cinder/db/api.py b/cinder/db/api.py
index 0ab5220dd..03b0ab33b 100644
--- a/cinder/db/api.py
+++ b/cinder/db/api.py
@@ -1784,9 +1784,6 @@ def driver_initiator_data_insert_by_key(context, initiator,
"""Updates DriverInitiatorData entry.
Sets the value for the specified key within the namespace.
-
- If the entry already exists return False, if it inserted successfully
- return True.
"""
return IMPL.driver_initiator_data_insert_by_key(context,
initiator,
diff --git a/cinder/db/sqlalchemy/api.py b/cinder/db/sqlalchemy/api.py
index 71861471e..c59dea44c 100644
--- a/cinder/db/sqlalchemy/api.py
+++ b/cinder/db/sqlalchemy/api.py
@@ -8302,11 +8302,14 @@ def driver_initiator_data_insert_by_key(
data.key = key
data.value = value
try:
- with main_context_manager.writer.savepoint.using(context):
- data.save(context.session)
- return True
+ data.save(context.session)
except db_exc.DBDuplicateEntry:
- return False
+ raise exception.DriverInitiatorDataExists(
+ initiator=initiator,
+ namespace=namespace,
+ key=key,
+ )
+ return data
@require_context
diff --git a/cinder/exception.py b/cinder/exception.py
index fa7823918..906ed3923 100644
--- a/cinder/exception.py
+++ b/cinder/exception.py
@@ -1085,3 +1085,10 @@ class CinderAcceleratorError(CinderException):
class SnapshotLimitReached(CinderException):
message = _("Exceeded the configured limit of "
"%(set_limit)s snapshots per volume.")
+
+
+class DriverInitiatorDataExists(Duplicate):
+ message = _(
+ "Driver initiator data for initiator '%(initiator)s' and backend "
+ "'%(namespace)s' with key '%(key)s' already exists."
+ )
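
(Aside: an illustrative sketch of the caller-side handling this implies now that the boolean return value is gone; remember_initiator is a hypothetical helper, not cinder code.)

from cinder import db
from cinder import exception


def remember_initiator(ctxt, initiator, namespace, key, value):
    try:
        db.driver_initiator_data_insert_by_key(
            ctxt, initiator, namespace, key, value)
    except exception.DriverInitiatorDataExists:
        # Another request already stored this key; nothing more to do.
        pass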
diff --git a/cinder/keymgr/transfer.py b/cinder/keymgr/transfer.py
new file mode 100644
index 000000000..cceda1aea
--- /dev/null
+++ b/cinder/keymgr/transfer.py
@@ -0,0 +1,107 @@
+# Copyright 2022 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from castellan.common.credentials import keystone_password
+from castellan.common import exception as castellan_exception
+from castellan import key_manager as castellan_key_manager
+from oslo_config import cfg
+from oslo_log import log as logging
+from oslo_utils import excutils
+
+from cinder import context
+from cinder import objects
+
+LOG = logging.getLogger(__name__)
+
+CONF = cfg.CONF
+
+
+class KeyTransfer(object):
+ def __init__(self, conf: cfg.ConfigOpts):
+ self.conf = conf
+ self._service_context = keystone_password.KeystonePassword(
+ password=conf.keystone_authtoken.password,
+ auth_url=conf.keystone_authtoken.auth_url,
+ username=conf.keystone_authtoken.username,
+ user_domain_name=conf.keystone_authtoken.user_domain_name,
+ project_name=conf.keystone_authtoken.project_name,
+ project_domain_name=conf.keystone_authtoken.project_domain_name)
+
+ @property
+ def service_context(self):
+ """Returns the cinder service's context."""
+ return self._service_context
+
+ def transfer_key(self,
+ volume: objects.volume.Volume,
+ src_context: context.RequestContext,
+ dst_context: context.RequestContext) -> None:
+ """Transfer the key from the src_context to the dst_context."""
+ key_manager = castellan_key_manager.API(self.conf)
+
+ old_encryption_key_id = volume.encryption_key_id
+ secret = key_manager.get(src_context, old_encryption_key_id)
+ try:
+ new_encryption_key_id = key_manager.store(dst_context, secret)
+ except castellan_exception.KeyManagerError:
+ with excutils.save_and_reraise_exception():
+ LOG.error("Failed to transfer the encryption key. This is "
+ "likely because the cinder service lacks the "
+ "privilege to create secrets.")
+
+ volume.encryption_key_id = new_encryption_key_id
+ volume.save()
+
+ snapshots = objects.snapshot.SnapshotList.get_all_for_volume(
+ context.get_admin_context(),
+ volume.id)
+ for snapshot in snapshots:
+ snapshot.encryption_key_id = new_encryption_key_id
+ snapshot.save()
+
+ key_manager.delete(src_context, old_encryption_key_id)
+
+
+def transfer_create(context: context.RequestContext,
+ volume: objects.volume.Volume,
+ conf: cfg.ConfigOpts = CONF) -> None:
+ """Transfer the key from the owner to the cinder service."""
+ LOG.info("Initiating transfer of encryption key for volume %s", volume.id)
+ key_transfer = KeyTransfer(conf)
+ key_transfer.transfer_key(volume,
+ src_context=context,
+ dst_context=key_transfer.service_context)
+
+
+def transfer_accept(context: context.RequestContext,
+ volume: objects.volume.Volume,
+ conf: cfg.ConfigOpts = CONF) -> None:
+ """Transfer the key from the cinder service to the recipient."""
+ LOG.info("Accepting transfer of encryption key for volume %s", volume.id)
+ key_transfer = KeyTransfer(conf)
+ key_transfer.transfer_key(volume,
+ src_context=key_transfer.service_context,
+ dst_context=context)
+
+
+def transfer_delete(context: context.RequestContext,
+ volume: objects.volume.Volume,
+ conf: cfg.ConfigOpts = CONF) -> None:
+ """Transfer the key from the cinder service back to the owner."""
+ LOG.info("Cancelling transfer of encryption key for volume %s", volume.id)
+ key_transfer = KeyTransfer(conf)
+ key_transfer.transfer_key(volume,
+ src_context=key_transfer.service_context,
+ dst_context=context)
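
(Aside: a minimal usage sketch assuming cinder/transfer/api.py, also updated in this change, drives these helpers roughly as below; the wrapper names are illustrative only.)

from cinder.keymgr import transfer as key_transfer


def on_transfer_create(ctxt, volume):
    if volume.encryption_key_id is not None:
        # Re-key the volume's secret under the cinder service user.
        key_transfer.transfer_create(ctxt, volume)


def on_transfer_accept(ctxt, volume):
    if volume.encryption_key_id is not None:
        # Re-key the secret under the accepting user's project.
        key_transfer.transfer_accept(ctxt, volume)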
diff --git a/cinder/opts.py b/cinder/opts.py
index 2579d523f..0cc5df5c6 100644
--- a/cinder/opts.py
+++ b/cinder/opts.py
@@ -348,6 +348,7 @@ def list_opts():
cinder_volume_drivers_fujitsu_eternus_dx_eternusdxcommon.
FJ_ETERNUS_DX_OPT_opts,
cinder_volume_drivers_hitachi_hbsdcommon.COMMON_VOLUME_OPTS,
+ cinder_volume_drivers_hitachi_hbsdcommon.COMMON_PORT_OPTS,
cinder_volume_drivers_hitachi_hbsdrest.REST_VOLUME_OPTS,
cinder_volume_drivers_hitachi_hbsdrestfc.FC_VOLUME_OPTS,
cinder_volume_drivers_hpe_hpe3parcommon.hpe3par_opts,
diff --git a/cinder/scheduler/filters/capacity_filter.py b/cinder/scheduler/filters/capacity_filter.py
index d1eb7fb7b..bd4cce839 100644
--- a/cinder/scheduler/filters/capacity_filter.py
+++ b/cinder/scheduler/filters/capacity_filter.py
@@ -17,11 +17,10 @@
# under the License.
-import math
-
from oslo_log import log as logging
from cinder.scheduler import filters
+from cinder import utils
LOG = logging.getLogger(__name__)
@@ -103,10 +102,6 @@ class CapacityFilter(filters.BaseBackendFilter):
"grouping_name": backend_state.backend_id})
return False
- # Calculate how much free space is left after taking into account
- # the reserved space.
- free = free_space - math.floor(total * reserved)
-
# NOTE(xyang): If 'provisioning:type' is 'thick' in extra_specs,
# we will not use max_over_subscription_ratio and
# provisioned_capacity_gb to determine whether a volume can be
@@ -118,18 +113,45 @@ class CapacityFilter(filters.BaseBackendFilter):
if provision_type == 'thick':
thin = False
+ thin_support = backend_state.thin_provisioning_support
+ if thin_support:
+ max_over_subscription_ratio = (
+ backend_state.max_over_subscription_ratio
+ )
+ else:
+ max_over_subscription_ratio = 1
+
+ # NOTE(hemna): this takes into consideration all major factors
+ # including reserved space, free_space (reported by driver),
+ # and over subscription ratio.
+ factors = utils.calculate_capacity_factors(
+ total_space,
+ free_space,
+ backend_state.provisioned_capacity_gb,
+ thin_support,
+ max_over_subscription_ratio,
+ backend_state.reserved_percentage,
+ thin
+ )
+ virtual_free_space = factors["virtual_free_capacity"]
+ LOG.debug("Storage Capacity factors %s", factors)
+
msg_args = {"grouping_name": backend_state.backend_id,
"grouping": grouping,
"requested": requested_size,
- "available": free}
+ "available": virtual_free_space}
+
# Only evaluate using max_over_subscription_ratio if
# thin_provisioning_support is True. Check if the ratio of
# provisioned capacity over total capacity has exceeded over
# subscription ratio.
if (thin and backend_state.thin_provisioning_support and
backend_state.max_over_subscription_ratio >= 1):
- provisioned_ratio = ((backend_state.provisioned_capacity_gb +
- requested_size) / total)
+ provisioned_ratio = (
+ (backend_state.provisioned_capacity_gb + requested_size) / (
+ factors["total_available_capacity"]
+ )
+ )
LOG.debug("Checking provisioning for request of %s GB. "
"Backend: %s", requested_size, backend_state)
if provisioned_ratio > backend_state.max_over_subscription_ratio:
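
(Aside: a rough reconstruction of the arithmetic behind these factors, using the thick-provisioning case from the new TestCalculateVirtualFree data further down; it is not the actual cinder.utils.calculate_capacity_factors implementation.)

import math

total, free, reserved_percentage = 30.01, 28.01, 5   # GB, GB, percent

reserved_capacity = math.floor(total * reserved_percentage / 100)   # 1 GB
total_available_capacity = total - reserved_capacity                # 29.01 GB
virtual_free_capacity = free - reserved_capacity                    # 27.01 GB
# 27.01 matches expected_virtual_free in the new tests; with thin
# provisioning the over-subscription ratio is also applied before the
# filter compares this value against the requested size.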
@@ -149,14 +171,12 @@ class CapacityFilter(filters.BaseBackendFilter):
else:
# Thin provisioning is enabled and projected over-subscription
# ratio does not exceed max_over_subscription_ratio. The host
- # passes if "adjusted" free virtual capacity is enough to
+ # passes if virtual free capacity is enough to
# accommodate the volume. Adjusted free virtual capacity is
# the currently available free capacity (taking into account
# of reserved space) which we can over-subscribe.
- adjusted_free_virtual = (
- free * backend_state.max_over_subscription_ratio)
- msg_args["available"] = adjusted_free_virtual
- res = adjusted_free_virtual >= requested_size
+ msg_args["available"] = virtual_free_space
+ res = virtual_free_space >= requested_size
if not res:
LOG.warning("Insufficient free virtual space "
"(%(available)sGB) to accommodate thin "
@@ -179,7 +199,7 @@ class CapacityFilter(filters.BaseBackendFilter):
"grouping_name": backend_state.backend_id})
return False
- if free < requested_size:
+ if virtual_free_space < requested_size:
LOG.warning("Insufficient free space for volume creation "
"on %(grouping)s %(grouping_name)s (requested / "
"avail): %(requested)s/%(available)s",
diff --git a/cinder/tests/unit/__init__.py b/cinder/tests/unit/__init__.py
index cff971fd9..0b1b8be93 100644
--- a/cinder/tests/unit/__init__.py
+++ b/cinder/tests/unit/__init__.py
@@ -22,11 +22,36 @@
:platform: Unix
"""
+import os
+import sys
+
import eventlet
+# Monkey patching must go before the oslo.log import, otherwise
+# oslo.context will not use greenthread thread local and all greenthreads
+# will share the same context.
+if os.name == 'nt':
+ # eventlet monkey patching the os module causes subprocess.Popen to fail
+ # on Windows when using pipes due to missing non-blocking IO support.
+ eventlet.monkey_patch(os=False)
+else:
+ eventlet.monkey_patch()
+# Monkey patch the original current_thread to use the up-to-date _active
+# global variable. See https://bugs.launchpad.net/bugs/1863021 and
+# https://github.com/eventlet/eventlet/issues/592
+import __original_module_threading as orig_threading # pylint: disable=E0401
+import threading # noqa
+orig_threading.current_thread.__globals__['_active'] = threading._active
+
+from oslo_config import cfg
+from oslo_reports import guru_meditation_report as gmr
+from oslo_reports import opts as gmr_opts
+from oslo_service import loopingcall
from cinder import objects
+from cinder.tests.unit import utils as test_utils
+from cinder import version
-eventlet.monkey_patch()
+CONF = cfg.CONF
# NOTE(alaski): Make sure this is done after eventlet monkey patching otherwise
# the threading.local() store used in oslo_messaging will be initialized to
@@ -36,3 +61,18 @@ eventlet.monkey_patch()
# at module import time, because we may be using mock decorators in our
# tests that run at import time.
objects.register_all()
+
+gmr_opts.set_defaults(CONF)
+gmr.TextGuruMeditation.setup_autorun(version, conf=CONF)
+
+# Keep track of looping calls
+looping_call_tracker = test_utils.InstanceTracker(loopingcall.LoopingCallBase)
+
+
+def stop_looping_calls():
+ for loop in looping_call_tracker.instances:
+ try:
+ loop.stop()
+ except Exception:
+ sys.stderr.write(f'Error stopping loop call {loop}\n')
+ looping_call_tracker.clear()
diff --git a/cinder/tests/unit/api/v3/test_volume_transfer.py b/cinder/tests/unit/api/v3/test_volume_transfer.py
index d12d77b68..341367ce7 100644
--- a/cinder/tests/unit/api/v3/test_volume_transfer.py
+++ b/cinder/tests/unit/api/v3/test_volume_transfer.py
@@ -27,12 +27,14 @@ from cinder.api import microversions as mv
from cinder.api.v3 import volume_transfer as volume_transfer_v3
from cinder import context
from cinder import db
+from cinder import exception
from cinder.objects import fields
from cinder import quota
from cinder.tests.unit.api import fakes
from cinder.tests.unit.api.v2 import fakes as v2_fakes
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import test
+from cinder.tests.unit import utils as test_utils
import cinder.transfer
@@ -358,3 +360,145 @@ class VolumeTransferAPITestCase357(VolumeTransferAPITestCase):
microversion = mv.TRANSFER_WITH_HISTORY
DETAIL_LEN = 9
expect_transfer_history = True
+
+
+@ddt.ddt
+class VolumeTransferEncryptedAPITestCase(test.TestCase):
+ # NOTE:
+ # - The TRANSFER_ENCRYPTED_VOLUME microversion is only relevant when
+ # creating a volume transfer. The microversion specified when accepting
+ # or deleting a transfer is not relevant.
+ # - The tests take advantage of the fact that a project_id is no longer
+ # required in API URLs.
+
+ def setUp(self):
+ super(VolumeTransferEncryptedAPITestCase, self).setUp()
+ self.volume_transfer_api = cinder.transfer.API()
+ self.controller = volume_transfer_v3.VolumeTransferController()
+ self.user_ctxt = context.RequestContext(
+ fake.USER_ID, fake.PROJECT_ID, auth_token=True)
+ self.admin_ctxt = context.get_admin_context()
+
+ def _create_volume(self, encryption_key_id):
+ vol_type = test_utils.create_volume_type(self.admin_ctxt,
+ name='fake_vol_type',
+ testcase_instance=self)
+ volume = test_utils.create_volume(self.user_ctxt,
+ volume_type_id=vol_type.id,
+ testcase_instance=self,
+ encryption_key_id=encryption_key_id)
+ return volume
+
+ @mock.patch('cinder.keymgr.transfer.transfer_create')
+ def _create_transfer(self, volume_id, mock_key_transfer_create):
+ transfer = self.volume_transfer_api.create(self.admin_ctxt,
+ volume_id,
+ display_name='test',
+ allow_encrypted=True)
+ return transfer
+
+ @ddt.data(None, fake.ENCRYPTION_KEY_ID)
+ @mock.patch('cinder.keymgr.transfer.transfer_create')
+ def test_create_transfer(self,
+ encryption_key_id,
+ mock_key_transfer_create):
+ volume = self._create_volume(encryption_key_id)
+ body = {"transfer": {"name": "transfer1",
+ "volume_id": volume.id}}
+
+ req = webob.Request.blank('/v3/volume-transfers')
+ req.method = 'POST'
+ req.headers = mv.get_mv_header(mv.TRANSFER_ENCRYPTED_VOLUME)
+ req.headers['Content-Type'] = 'application/json'
+ req.body = jsonutils.dump_as_bytes(body)
+ res = req.get_response(fakes.wsgi_app(
+ fake_auth_context=self.user_ctxt))
+
+ self.assertEqual(HTTPStatus.ACCEPTED, res.status_int)
+
+ call_count = 0 if encryption_key_id is None else 1
+ self.assertEqual(mock_key_transfer_create.call_count, call_count)
+
+ def test_create_transfer_encrypted_volume_not_supported(self):
+ volume = self._create_volume(fake.ENCRYPTION_KEY_ID)
+ body = {"transfer": {"name": "transfer1",
+ "volume_id": volume.id}}
+
+ req = webob.Request.blank('/v3/volume-transfers')
+ req.method = 'POST'
+ req.headers = mv.get_mv_header(
+ mv.get_prior_version(mv.TRANSFER_ENCRYPTED_VOLUME))
+ req.headers['Content-Type'] = 'application/json'
+ req.body = jsonutils.dump_as_bytes(body)
+ res = req.get_response(fakes.wsgi_app(
+ fake_auth_context=self.user_ctxt))
+
+ res_dict = jsonutils.loads(res.body)
+
+ self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int)
+ self.assertEqual(('Invalid volume: '
+ 'transferring encrypted volume is not supported'),
+ res_dict['badRequest']['message'])
+
+ @mock.patch('cinder.keymgr.transfer.transfer_create',
+ side_effect=exception.KeyManagerError('whoops!'))
+ def test_create_transfer_key_transfer_failed(self,
+ mock_key_transfer_create):
+ volume = self._create_volume(fake.ENCRYPTION_KEY_ID)
+ body = {"transfer": {"name": "transfer1",
+ "volume_id": volume.id}}
+
+ req = webob.Request.blank('/v3/volume-transfers')
+ req.method = 'POST'
+ req.headers = mv.get_mv_header(mv.TRANSFER_ENCRYPTED_VOLUME)
+ req.headers['Content-Type'] = 'application/json'
+ req.body = jsonutils.dump_as_bytes(body)
+ res = req.get_response(fakes.wsgi_app(
+ fake_auth_context=self.user_ctxt))
+
+ self.assertEqual(HTTPStatus.INTERNAL_SERVER_ERROR, res.status_int)
+
+ @ddt.data(None, fake.ENCRYPTION_KEY_ID)
+ @mock.patch('cinder.keymgr.transfer.transfer_accept')
+ @mock.patch('cinder.volume.api.API.accept_transfer')
+ def test_accept_transfer(self,
+ encryption_key_id,
+ mock_volume_accept_transfer,
+ mock_key_transfer_accept):
+ volume = self._create_volume(encryption_key_id)
+ transfer = self._create_transfer(volume.id)
+
+ body = {"accept": {"auth_key": transfer['auth_key']}}
+
+ req = webob.Request.blank('/v3/volume-transfers/%s/accept' % (
+ transfer['id']))
+ req.method = 'POST'
+ req.headers['Content-Type'] = 'application/json'
+ req.body = jsonutils.dump_as_bytes(body)
+ res = req.get_response(fakes.wsgi_app(
+ fake_auth_context=self.user_ctxt))
+
+ self.assertEqual(HTTPStatus.ACCEPTED, res.status_int)
+
+ call_count = 0 if encryption_key_id is None else 1
+ self.assertEqual(mock_key_transfer_accept.call_count, call_count)
+
+ @ddt.data(None, fake.ENCRYPTION_KEY_ID)
+ @mock.patch('cinder.keymgr.transfer.transfer_delete')
+ def test_delete_transfer(self,
+ encryption_key_id,
+ mock_key_transfer_delete):
+ volume = self._create_volume(encryption_key_id)
+ transfer = self._create_transfer(volume.id)
+
+ req = webob.Request.blank('/v3/volume-transfers/%s' % (
+ transfer['id']))
+ req.method = 'DELETE'
+ req.headers['Content-Type'] = 'application/json'
+ res = req.get_response(fakes.wsgi_app(
+ fake_auth_context=self.user_ctxt))
+
+ self.assertEqual(HTTPStatus.ACCEPTED, res.status_int)
+
+ call_count = 0 if encryption_key_id is None else 1
+ self.assertEqual(mock_key_transfer_delete.call_count, call_count)
diff --git a/cinder/tests/unit/api/views/test_versions.py b/cinder/tests/unit/api/views/test_versions.py
index ea64f38af..065a519dd 100644
--- a/cinder/tests/unit/api/views/test_versions.py
+++ b/cinder/tests/unit/api/views/test_versions.py
@@ -83,11 +83,12 @@ class ViewBuilderTestCase(test.TestCase):
'_build_links',
return_value=FAKE_LINKS)
- result = self._get_builder().build_versions(FAKE_VERSIONS)
+ fake_versions = copy.deepcopy(FAKE_VERSIONS)
+ result = self._get_builder().build_versions(fake_versions)
result_no_slash = self._get_builder_no_slash().build_versions(
- FAKE_VERSIONS)
+ fake_versions)
- expected = {'versions': list(FAKE_VERSIONS.values())}
+ expected = {'versions': list(fake_versions.values())}
expected['versions'][0]['links'] = FAKE_LINKS
self.assertEqual(expected, result)
diff --git a/cinder/tests/unit/backup/test_backup.py b/cinder/tests/unit/backup/test_backup.py
index 29ee7983c..eca450637 100644
--- a/cinder/tests/unit/backup/test_backup.py
+++ b/cinder/tests/unit/backup/test_backup.py
@@ -41,6 +41,7 @@ from cinder import quota
from cinder.tests import fake_driver
from cinder.tests.unit.api.v2 import fakes as v2_fakes
from cinder.tests.unit import fake_constants as fake
+from cinder.tests.unit import known_issues as issues
from cinder.tests.unit import test
from cinder.tests.unit import utils
from cinder.volume import rpcapi as volume_rpcapi
@@ -1879,6 +1880,7 @@ class BackupTestCase(BaseBackupTest):
backup = self._create_backup_db_entry(volume_id=vol_id)
self.assertFalse(backup.has_dependent_backups)
+ @test.testtools.skipIf(issues.TPOOL_KILLALL_ISSUE, 'tpool.killall bug')
def test_default_tpool_size(self):
"""Test we can set custom tpool size."""
tpool._nthreads = 20
@@ -1889,6 +1891,7 @@ class BackupTestCase(BaseBackupTest):
self.assertEqual(60, tpool._nthreads)
self.assertListEqual([], tpool._threads)
+ @test.testtools.skipIf(issues.TPOOL_KILLALL_ISSUE, 'tpool.killall bug')
def test_tpool_size(self):
"""Test we can set custom tpool size."""
self.assertNotEqual(100, tpool._nthreads)
diff --git a/cinder/tests/unit/compute/test_nova.py b/cinder/tests/unit/compute/test_nova.py
index cc7646994..c788e124f 100644
--- a/cinder/tests/unit/compute/test_nova.py
+++ b/cinder/tests/unit/compute/test_nova.py
@@ -295,7 +295,7 @@ class NovaApiTestCase(test.TestCase):
mock_novaclient.assert_called_once_with(self.ctx,
privileged_user=True,
- api_version='2.91')
+ api_version='2.93')
mock_create_event.assert_called_once_with([
{'name': 'volume-reimaged',
'server_uuid': 'server-id-1',
@@ -323,7 +323,7 @@ class NovaApiTestCase(test.TestCase):
mock_novaclient.assert_called_once_with(self.ctx,
privileged_user=True,
- api_version='2.91')
+ api_version='2.93')
mock_create.assert_called_once_with(
self.ctx,
message_field.Action.REIMAGE_VOLUME,
diff --git a/cinder/tests/unit/db/test_volume_type.py b/cinder/tests/unit/db/test_volume_type.py
index ba3d83548..a1f49c699 100644
--- a/cinder/tests/unit/db/test_volume_type.py
+++ b/cinder/tests/unit/db/test_volume_type.py
@@ -68,6 +68,25 @@ class VolumeTypeTestCase(test.TestCase):
db.group_destroy(self.ctxt, group['id'])
volume_types.destroy(self.ctxt, volume_type['id'])
+ def test_volume_type_mark_in_use_exists(self):
+ volume_type = db.volume_type_create(
+ self.ctxt,
+ {'name': 'fake volume type'},
+ )
+ group = db.group_create(self.ctxt, {})
+ db.group_volume_type_mapping_create(
+ self.ctxt,
+ group['id'],
+ volume_type['id'],
+ )
+ self.assertRaises(
+ exception.GroupVolumeTypeMappingExists,
+ db.group_volume_type_mapping_create,
+ self.ctxt,
+ group['id'],
+ volume_type['id'],
+ )
+
def test_volume_type_delete_with_consistencygroups_in_use(self):
volume_type = db.volume_type_create(self.ctxt, {'name':
'fake volume type'})
diff --git a/cinder/tests/unit/keymgr/test_transfer.py b/cinder/tests/unit/keymgr/test_transfer.py
new file mode 100644
index 000000000..2e058c249
--- /dev/null
+++ b/cinder/tests/unit/keymgr/test_transfer.py
@@ -0,0 +1,178 @@
+# Copyright 2022 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""Tests for encryption key transfer."""
+
+from unittest import mock
+
+from castellan.common.credentials import keystone_password
+from oslo_config import cfg
+
+from cinder.common import constants
+from cinder import context
+from cinder.keymgr import conf_key_mgr
+from cinder.keymgr import transfer
+from cinder import objects
+from cinder.tests.unit import fake_constants as fake
+from cinder.tests.unit import test
+from cinder.tests.unit import utils as test_utils
+
+CONF = cfg.CONF
+
+ENCRYPTION_SECRET = 'the_secret'
+CINDER_USERNAME = 'cinder'
+CINDER_PASSWORD = 'key_transfer_test'
+
+
+class KeyTransferTestCase(test.TestCase):
+ OLD_ENCRYPTION_KEY_ID = fake.ENCRYPTION_KEY_ID
+ NEW_ENCRYPTION_KEY_ID = fake.ENCRYPTION_KEY2_ID
+
+ key_manager_class = ('castellan.key_manager.barbican_key_manager.'
+ 'BarbicanKeyManager')
+
+ def setUp(self):
+ super(KeyTransferTestCase, self).setUp()
+ self.conf = CONF
+ self.conf.set_override('backend',
+ self.key_manager_class,
+ group='key_manager')
+ self.conf.set_override('username',
+ CINDER_USERNAME,
+ group='keystone_authtoken')
+ self.conf.set_override('password',
+ CINDER_PASSWORD,
+ group='keystone_authtoken')
+
+ self.context = context.RequestContext(fake.USER_ID, fake.PROJECT_ID)
+
+ def _create_volume_and_snapshots(self):
+ volume = test_utils.create_volume(
+ self.context,
+ testcase_instance=self,
+ encryption_key_id=self.OLD_ENCRYPTION_KEY_ID)
+
+ _ = test_utils.create_snapshot(
+ self.context,
+ volume.id,
+ display_name='snap_1',
+ testcase_instance=self,
+ encryption_key_id=self.OLD_ENCRYPTION_KEY_ID)
+
+ _ = test_utils.create_snapshot(
+ self.context,
+ volume.id,
+ display_name='snap_2',
+ testcase_instance=self,
+ encryption_key_id=self.OLD_ENCRYPTION_KEY_ID)
+
+ return volume
+
+ def _verify_service_context(self, mocked_call):
+ service_context = mocked_call.call_args.args[0]
+ self.assertIsInstance(service_context,
+ keystone_password.KeystonePassword)
+ self.assertEqual(service_context.username, CINDER_USERNAME)
+ self.assertEqual(service_context.password, CINDER_PASSWORD)
+
+ def _verify_encryption_key_id(self, volume_id, encryption_key_id):
+ volume = objects.Volume.get_by_id(self.context, volume_id)
+ self.assertEqual(volume.encryption_key_id, encryption_key_id)
+
+ snapshots = objects.snapshot.SnapshotList.get_all_for_volume(
+ self.context, volume.id)
+ self.assertEqual(len(snapshots), 2)
+ for snapshot in snapshots:
+ self.assertEqual(snapshot.encryption_key_id, encryption_key_id)
+
+ def _test_transfer_from_user_to_cinder(self, transfer_fn):
+ volume = self._create_volume_and_snapshots()
+ with mock.patch(
+ self.key_manager_class + '.get',
+ return_value=ENCRYPTION_SECRET) as mock_key_get, \
+ mock.patch(
+ self.key_manager_class + '.store',
+ return_value=self.NEW_ENCRYPTION_KEY_ID) as mock_key_store, \
+ mock.patch(
+ self.key_manager_class + '.delete') as mock_key_delete:
+
+ transfer_fn(self.context, volume)
+
+ # Verify the user's context was used to fetch and delete the
+ # volume's current key ID.
+ mock_key_get.assert_called_once_with(
+ self.context, self.OLD_ENCRYPTION_KEY_ID)
+ mock_key_delete.assert_called_once_with(
+ self.context, self.OLD_ENCRYPTION_KEY_ID)
+
+ # Verify the cinder service created the new key ID.
+ mock_key_store.assert_called_once_with(
+ mock.ANY, ENCRYPTION_SECRET)
+ self._verify_service_context(mock_key_store)
+
+ # Verify the volume (and its snaps) reference the new key ID.
+ self._verify_encryption_key_id(volume.id, self.NEW_ENCRYPTION_KEY_ID)
+
+ def _test_transfer_from_cinder_to_user(self, transfer_fn):
+ volume = self._create_volume_and_snapshots()
+ with mock.patch(
+ self.key_manager_class + '.get',
+ return_value=ENCRYPTION_SECRET) as mock_key_get, \
+ mock.patch(
+ self.key_manager_class + '.store',
+ return_value=self.NEW_ENCRYPTION_KEY_ID) as mock_key_store, \
+ mock.patch(
+ self.key_manager_class + '.delete') as mock_key_delete:
+
+ transfer_fn(self.context, volume)
+
+ # Verify the cinder service was used to fetch and delete the
+ # volume's current key ID.
+ mock_key_get.assert_called_once_with(
+ mock.ANY, self.OLD_ENCRYPTION_KEY_ID)
+ self._verify_service_context(mock_key_get)
+
+ mock_key_delete.assert_called_once_with(
+ mock.ANY, self.OLD_ENCRYPTION_KEY_ID)
+ self._verify_service_context(mock_key_delete)
+
+ # Verify the user's context created the new key ID.
+ mock_key_store.assert_called_once_with(
+ self.context, ENCRYPTION_SECRET)
+
+ # Verify the volume (and its snaps) reference the new key ID.
+ self._verify_encryption_key_id(volume.id, self.NEW_ENCRYPTION_KEY_ID)
+
+ def test_transfer_create(self):
+ self._test_transfer_from_user_to_cinder(transfer.transfer_create)
+
+ def test_transfer_accept(self):
+ self._test_transfer_from_cinder_to_user(transfer.transfer_accept)
+
+ def test_transfer_delete(self):
+ self._test_transfer_from_cinder_to_user(transfer.transfer_delete)
+
+
+class KeyTransferFixedKeyTestCase(KeyTransferTestCase):
+ OLD_ENCRYPTION_KEY_ID = constants.FIXED_KEY_ID
+ NEW_ENCRYPTION_KEY_ID = constants.FIXED_KEY_ID
+
+ key_manager_class = 'cinder.keymgr.conf_key_mgr.ConfKeyManager'
+
+ def setUp(self):
+ super(KeyTransferFixedKeyTestCase, self).setUp()
+        self.conf.register_opts(conf_key_mgr.key_mgr_opts,
+                                group='key_manager')
+ self.conf.set_override('fixed_key',
+ 'df393fca58657e6dc76a6fea31c3e7e0',
+ group='key_manager')
diff --git a/cinder/tests/unit/known_issues.py b/cinder/tests/unit/known_issues.py
new file mode 100644
index 000000000..9fc2689c5
--- /dev/null
+++ b/cinder/tests/unit/known_issues.py
@@ -0,0 +1,23 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# KNOWN ISSUES RUNNING UNIT TESTS
+
+# We've seen the tpool.killall method freeze everything. The issue seems to
+# be resolved by calling killall during the cleanup after stopping all
+# remaining looping calls, but we cannot be 100% sure of it, so we have this
+# flag to quickly disable the cleanup and the tests that would break with the
+# change if necessary.
+# If we find that an stestr child runner is blocking, we can trigger the Guru
+# Meditation Report (kill -USR2 <child_pid>) and check whether a green thread
+# is stuck on tpool.killall.
+TPOOL_KILLALL_ISSUE = False
diff --git a/cinder/tests/unit/policies/test_volume_transfers.py b/cinder/tests/unit/policies/test_volume_transfers.py
index be9115634..e482630c5 100644
--- a/cinder/tests/unit/policies/test_volume_transfers.py
+++ b/cinder/tests/unit/policies/test_volume_transfers.py
@@ -19,7 +19,6 @@ import ddt
from cinder.api.v3 import volume_transfer
from cinder import context
from cinder import exception
-from cinder.objects import volume as volume_obj
from cinder.policies import volume_transfer as vol_transfer_policies
from cinder.tests.unit.api import fakes as fake_api
from cinder.tests.unit.policies import base
@@ -98,19 +97,15 @@ class VolumeTransferPolicyTest(base.BasePolicyTest):
testcase_instance=self)
return volume
- @mock.patch.object(volume_obj.Volume, 'get_by_id')
- def _create_volume_transfer(self, mock_get_vol, volume=None):
+ def _create_volume_transfer(self, volume=None):
if not volume:
volume = self._create_volume()
- mock_get_vol.return_value = volume
return self.volume_transfer_api.create(context.get_admin_context(),
volume.id, 'test-transfer')
@ddt.data(*base.all_users)
- @mock.patch.object(volume_obj.Volume, 'get_by_id')
- def test_create_volume_transfer_policy(self, user_id, mock_get_vol):
+ def test_create_volume_transfer_policy(self, user_id):
volume = self._create_volume()
- mock_get_vol.return_value = volume
rule_name = vol_transfer_policies.CREATE_POLICY
url = self.api_path
req = fake_api.HTTPRequest.blank(url)
@@ -126,8 +121,7 @@ class VolumeTransferPolicyTest(base.BasePolicyTest):
body=body)
@ddt.data(*base.all_users)
- @mock.patch.object(volume_obj.Volume, 'get_by_id')
- def test_get_volume_transfer_policy(self, user_id, mock_get_vol):
+ def test_get_volume_transfer_policy(self, user_id):
vol_transfer = self._create_volume_transfer()
rule_name = vol_transfer_policies.GET_POLICY
url = '%s/%s' % (self.api_path, vol_transfer['id'])
@@ -176,10 +170,8 @@ class VolumeTransferPolicyTest(base.BasePolicyTest):
self.assertEqual(transfer_count, len(transfers))
@ddt.data(*base.all_users)
- @mock.patch.object(volume_obj.Volume, 'get_by_id')
@mock.patch.object(volume_utils, 'notify_about_volume_usage')
- def test_delete_volume_transfer_policy(self, user_id, mock_get_vol,
- mock_notify):
+ def test_delete_volume_transfer_policy(self, user_id, mock_notify):
vol_transfer = self._create_volume_transfer()
rule_name = vol_transfer_policies.DELETE_POLICY
url = '%s/%s' % (self.api_path, vol_transfer['id'])
@@ -196,13 +188,10 @@ class VolumeTransferPolicyTest(base.BasePolicyTest):
@ddt.data(*base.all_users)
@mock.patch('cinder.transfer.api.QUOTAS')
- @mock.patch.object(volume_obj.Volume, 'get_by_id')
@mock.patch.object(volume_utils, 'notify_about_volume_usage')
def test_accept_volume_transfer_policy(self, user_id, mock_notify,
- mock_get_vol, mock_quotas):
- volume = self._create_volume()
- vol_transfer = self._create_volume_transfer(volume=volume)
- mock_get_vol.return_value = volume
+ mock_quotas):
+ vol_transfer = self._create_volume_transfer()
rule_name = vol_transfer_policies.ACCEPT_POLICY
url = '%s/%s/accept' % (self.api_path, vol_transfer['id'])
req = fake_api.HTTPRequest.blank(url)
diff --git a/cinder/tests/unit/scheduler/test_capacity_weigher.py b/cinder/tests/unit/scheduler/test_capacity_weigher.py
index 8a90e94ee..81259f373 100644
--- a/cinder/tests/unit/scheduler/test_capacity_weigher.py
+++ b/cinder/tests/unit/scheduler/test_capacity_weigher.py
@@ -114,7 +114,7 @@ class CapacityWeigherTestCase(test.TestCase):
{'volume_type': {'extra_specs': {'provisioning:type': 'thin'}},
'winner': 'host4'},
{'volume_type': {'extra_specs': {'provisioning:type': 'thick'}},
- 'winner': 'host2'},
+ 'winner': 'host4'},
{'volume_type': {'extra_specs': {}},
'winner': 'host4'},
{'volume_type': {},
diff --git a/cinder/tests/unit/scheduler/test_host_filters.py b/cinder/tests/unit/scheduler/test_host_filters.py
index 815a1c611..767b649e9 100644
--- a/cinder/tests/unit/scheduler/test_host_filters.py
+++ b/cinder/tests/unit/scheduler/test_host_filters.py
@@ -99,7 +99,7 @@ class CapacityFilterTestCase(BackendFiltersTestCase):
def test_filter_fails(self, _mock_serv_is_up):
_mock_serv_is_up.return_value = True
filt_cls = self.class_map['CapacityFilter']()
- filter_properties = {'size': 100,
+ filter_properties = {'size': 121,
'request_spec': {'volume_id': fake.VOLUME_ID}}
service = {'disabled': False}
host = fakes.FakeBackendState('host1',
@@ -282,7 +282,7 @@ class CapacityFilterTestCase(BackendFiltersTestCase):
def test_filter_thin_true_passes2(self, _mock_serv_is_up):
_mock_serv_is_up.return_value = True
filt_cls = self.class_map['CapacityFilter']()
- filter_properties = {'size': 3000,
+ filter_properties = {'size': 2400,
'capabilities:thin_provisioning_support':
'<is> True',
'capabilities:thick_provisioning_support':
@@ -462,7 +462,7 @@ class CapacityFilterTestCase(BackendFiltersTestCase):
def test_filter_reserved_thin_thick_true_fails(self, _mock_serv_is_up):
_mock_serv_is_up.return_value = True
filt_cls = self.class_map['CapacityFilter']()
- filter_properties = {'size': 100,
+ filter_properties = {'size': 151,
'capabilities:thin_provisioning_support':
'<is> True',
'capabilities:thick_provisioning_support':
diff --git a/cinder/tests/unit/scheduler/test_host_manager.py b/cinder/tests/unit/scheduler/test_host_manager.py
index c61346a8e..c1fd829ef 100644
--- a/cinder/tests/unit/scheduler/test_host_manager.py
+++ b/cinder/tests/unit/scheduler/test_host_manager.py
@@ -1029,7 +1029,7 @@ class HostManagerTestCase(test.TestCase):
"free": 18.01,
"allocated": 2.0,
"provisioned": 2.0,
- "virtual_free": 37.02,
+ "virtual_free": 36.02,
"reported_at": 40000},
{"name_to_id": 'host1@backend1',
"type": "backend",
@@ -1037,7 +1037,7 @@ class HostManagerTestCase(test.TestCase):
"free": 46.02,
"allocated": 4.0,
"provisioned": 4.0,
- "virtual_free": 64.03,
+ "virtual_free": 63.03,
"reported_at": 40000}]
expected2 = [
@@ -1055,7 +1055,7 @@ class HostManagerTestCase(test.TestCase):
"free": 46.02,
"allocated": 4.0,
"provisioned": 4.0,
- "virtual_free": 95.04,
+ "virtual_free": 94.04,
"reported_at": 40000}]
def sort_func(data):
diff --git a/cinder/tests/unit/test.py b/cinder/tests/unit/test.py
index 21f2d1eb8..66659b275 100644
--- a/cinder/tests/unit/test.py
+++ b/cinder/tests/unit/test.py
@@ -51,8 +51,10 @@ from cinder.objects import base as objects_base
from cinder import rpc
from cinder import service
from cinder.tests import fixtures as cinder_fixtures
+from cinder.tests import unit as cinder_unit
from cinder.tests.unit import conf_fixture
from cinder.tests.unit import fake_notifier
+from cinder.tests.unit import known_issues as issues
from cinder.volume import configuration
from cinder.volume import driver as vol_driver
from cinder.volume import volume_types
@@ -290,11 +292,6 @@ class TestCase(testtools.TestCase):
coordination.COORDINATOR.start()
self.addCleanup(coordination.COORDINATOR.stop)
- # Ensure we have the default tpool size value and we don't carry
- # threads from other test runs.
- tpool.killall()
- tpool._nthreads = 20
-
# NOTE(mikal): make sure we don't load a privsep helper accidentally
self.useFixture(cinder_fixtures.PrivsepNoHelperFixture())
@@ -333,19 +330,28 @@ class TestCase(testtools.TestCase):
def _common_cleanup(self):
"""Runs after each test method to tear down test environment."""
- # Kill any services
+ # Stop any services (this stops RPC handlers)
for x in self._services:
try:
- x.kill()
+ x.stop()
except Exception:
pass
+ # Stop any looping call that has not yet been stopped
+ cinder_unit.stop_looping_calls()
+
# Delete attributes that don't start with _ so they don't pin
# memory around unnecessarily for the duration of the test
# suite
for key in [k for k in self.__dict__ if k[0] != '_']:
del self.__dict__[key]
+ if not issues.TPOOL_KILLALL_ISSUE:
+ # Ensure we have the default tpool size value and we don't carry
+ # threads from other test runs.
+ tpool.killall()
+ tpool._nthreads = 20
+
def override_config(self, name, override, group=None):
"""Cleanly override CONF variables."""
CONF.set_override(name, override, group)
diff --git a/cinder/tests/unit/test_cmd.py b/cinder/tests/unit/test_cmd.py
index 0813666f8..610d9d22e 100644
--- a/cinder/tests/unit/test_cmd.py
+++ b/cinder/tests/unit/test_cmd.py
@@ -1152,7 +1152,7 @@ class TestCinderManageCmd(test.TestCase):
script_name = 'cinder-manage'
sys.argv = [script_name, 'cluster', 'remove', 'abinary', 'acluster']
- cinder_manage.CONF = cfg.ConfigOpts()
+ self.mock_object(cinder_manage, 'CONF', cfg.ConfigOpts())
cinder_manage.main()
expected_argument = (['cluster_name'],
@@ -1183,7 +1183,7 @@ class TestCinderManageCmd(test.TestCase):
def test_main_missing_action(self):
sys.argv = ['cinder-manage', 'backup']
- cinder_manage.CONF = cfg.ConfigOpts()
+ self.mock_object(cinder_manage, 'CONF', cfg.ConfigOpts())
stdout = io.StringIO()
with mock.patch('sys.stdout', new=stdout):
diff --git a/cinder/tests/unit/test_db_api.py b/cinder/tests/unit/test_db_api.py
index 34180cde3..caccd2ffe 100644
--- a/cinder/tests/unit/test_db_api.py
+++ b/cinder/tests/unit/test_db_api.py
@@ -2373,20 +2373,18 @@ class DBAPICgsnapshotTestCase(BaseTest):
class DBAPIVolumeTypeTestCase(BaseTest):
-
"""Tests for the db.api.volume_type_* methods."""
- def setUp(self):
- self.ctxt = context.get_admin_context()
- super(DBAPIVolumeTypeTestCase, self).setUp()
-
- def test_volume_type_create_exists(self):
- self.assertRaises(exception.VolumeTypeExists,
- db.volume_type_create,
- self.ctxt,
- {'name': 'n2', 'id': self.vt['id']})
+ def test_volume_type_create__exists(self):
+ vt = db.volume_type_create(self.ctxt, {'name': 'n2'})
+ self.assertRaises(
+ exception.VolumeTypeExists,
+ db.volume_type_create,
+ self.ctxt,
+ {'name': 'n2', 'id': vt['id']},
+ )
- def test_volume_type_access_remove(self):
+ def test_volume_type_access_add_remove(self):
vt = db.volume_type_create(self.ctxt, {'name': 'n2'})
db.volume_type_access_add(self.ctxt, vt['id'], 'fake_project')
vtas = db.volume_type_access_get_all(self.ctxt, vt['id'])
@@ -2395,7 +2393,20 @@ class DBAPIVolumeTypeTestCase(BaseTest):
vtas = db.volume_type_access_get_all(self.ctxt, vt['id'])
self.assertEqual(0, len(vtas))
- def test_volume_type_access_remove_high_id(self):
+ def test_volume_type_access_add__exists(self):
+ vt = db.volume_type_create(self.ctxt, {'name': 'n2'})
+ db.volume_type_access_add(self.ctxt, vt['id'], 'fake_project')
+ vtas = db.volume_type_access_get_all(self.ctxt, vt['id'])
+ self.assertEqual(1, len(vtas))
+ self.assertRaises(
+ exception.VolumeTypeAccessExists,
+ db.volume_type_access_add,
+ self.ctxt,
+ vt['id'],
+ 'fake_project',
+ )
+
+ def test_volume_type_access_remove__high_id(self):
vt = db.volume_type_create(self.ctxt, {'name': 'n2'})
vta = db.volume_type_access_add(self.ctxt, vt['id'], 'fake_project')
vtas = db.volume_type_access_get_all(self.ctxt, vt['id'])
@@ -3343,22 +3354,35 @@ class DBAPIDriverInitiatorDataTestCase(BaseTest):
initiator = 'iqn.1993-08.org.debian:01:222'
namespace = 'test_ns'
- def _test_insert(self, key, value, expected_result=True):
- result = db.driver_initiator_data_insert_by_key(
- self.ctxt, self.initiator, self.namespace, key, value)
- self.assertEqual(expected_result, result)
+ def test_insert(self):
+ key = 'key1'
+ value = 'foo'
- data = db.driver_initiator_data_get(self.ctxt, self.initiator,
- self.namespace)
+ db.driver_initiator_data_insert_by_key(
+ self.ctxt, self.initiator, self.namespace, key, value,
+ )
+ data = db.driver_initiator_data_get(
+ self.ctxt, self.initiator, self.namespace,
+ )
self.assertEqual(data[0].key, key)
self.assertEqual(data[0].value, value)
- def test_insert(self):
- self._test_insert('key1', 'foo')
-
def test_insert_already_exists(self):
- self._test_insert('key2', 'bar')
- self._test_insert('key2', 'bar', expected_result=False)
+ key = 'key1'
+ value = 'foo'
+
+ db.driver_initiator_data_insert_by_key(
+ self.ctxt, self.initiator, self.namespace, key, value,
+ )
+ self.assertRaises(
+ exception.DriverInitiatorDataExists,
+ db.driver_initiator_data_insert_by_key,
+ self.ctxt,
+ self.initiator,
+ self.namespace,
+ key,
+ value,
+ )
@ddt.ddt
@@ -3720,7 +3744,40 @@ class DBAPIBackendTestCase(BaseTest):
@ddt.ddt
-class DBAPIGroupTestCase(BaseTest):
+class DBAPIGroupTypeTestCase(BaseTest):
+ """Tests for the db.api.group_type_* methods."""
+
+ def test_group_type_create__exists(self):
+ gt = db.group_type_create(self.ctxt, {'name': 'n2'})
+ self.assertRaises(
+ exception.GroupTypeExists,
+ db.group_type_create,
+ self.ctxt,
+ {'name': gt['name'], 'id': gt['id']},
+ )
+
+ def test_group_type_access_add_remove(self):
+ gt = db.group_type_create(self.ctxt, {'name': 'n2'})
+ db.group_type_access_add(self.ctxt, gt['id'], 'fake_project')
+ gtas = db.group_type_access_get_all(self.ctxt, gt['id'])
+ self.assertEqual(1, len(gtas))
+ db.group_type_access_remove(self.ctxt, gt['id'], 'fake_project')
+ gtas = db.group_type_access_get_all(self.ctxt, gt['id'])
+ self.assertEqual(0, len(gtas))
+
+ def test_group_type_access_add__exists(self):
+ gt = db.group_type_create(self.ctxt, {'name': 'my_group_type'})
+ db.group_type_access_add(self.ctxt, gt['id'], 'fake_project')
+ gtas = db.group_type_access_get_all(self.ctxt, gt['id'])
+ self.assertEqual(1, len(gtas))
+ self.assertRaises(
+ exception.GroupTypeAccessExists,
+ db.group_type_access_add,
+ self.ctxt,
+ gt['id'],
+ 'fake_project',
+ )
+
def test_group_get_all_by_host(self):
grp_type = db.group_type_create(self.ctxt, {'name': 'my_group_type'})
groups = []
diff --git a/cinder/tests/unit/test_utils.py b/cinder/tests/unit/test_utils.py
index e41841792..6676f2181 100644
--- a/cinder/tests/unit/test_utils.py
+++ b/cinder/tests/unit/test_utils.py
@@ -1083,10 +1083,10 @@ class TestCalculateVirtualFree(test.TestCase):
'is_thin_lun': False, 'expected': 27.01},
{'total': 20.01, 'free': 18.01, 'provisioned': 2.0, 'max_ratio': 2.0,
'thin_support': True, 'thick_support': False,
- 'is_thin_lun': True, 'expected': 37.02},
+ 'is_thin_lun': True, 'expected': 36.02},
{'total': 20.01, 'free': 18.01, 'provisioned': 2.0, 'max_ratio': 2.0,
'thin_support': True, 'thick_support': True,
- 'is_thin_lun': True, 'expected': 37.02},
+ 'is_thin_lun': True, 'expected': 36.02},
{'total': 30.01, 'free': 28.01, 'provisioned': 2.0, 'max_ratio': 2.0,
'thin_support': True, 'thick_support': True,
'is_thin_lun': False, 'expected': 27.01},
@@ -1114,6 +1114,98 @@ class TestCalculateVirtualFree(test.TestCase):
self.assertEqual(expected, free_capacity)
+ @ddt.data(
+ {'total': 30.01, 'free': 28.01, 'provisioned': 2.0, 'max_ratio': 1.0,
+ 'thin_support': False, 'thick_support': True,
+ 'is_thin_lun': False, 'reserved_percentage': 5,
+ 'expected_total_capacity': 30.01,
+ 'expected_reserved_capacity': 1,
+ 'expected_free_capacity': 28.01,
+ 'expected_total_available_capacity': 29.01,
+ 'expected_virtual_free': 27.01,
+ 'expected_free_percent': 93.11,
+ 'expected_provisioned_type': 'thick',
+ 'expected_provisioned_ratio': 0.07},
+ {'total': 20.01, 'free': 18.01, 'provisioned': 2.0, 'max_ratio': 2.0,
+ 'thin_support': True, 'thick_support': False,
+ 'is_thin_lun': True, 'reserved_percentage': 10,
+ 'expected_total_capacity': 20.01,
+ 'expected_reserved_capacity': 2,
+ 'expected_free_capacity': 18.01,
+ 'expected_total_available_capacity': 36.02,
+ 'expected_virtual_free': 34.02,
+ 'expected_free_percent': 94.45,
+ 'expected_provisioned_type': 'thin',
+ 'expected_provisioned_ratio': 0.06},
+ {'total': 20.01, 'free': 18.01, 'provisioned': 2.0, 'max_ratio': 2.0,
+ 'thin_support': True, 'thick_support': True,
+ 'is_thin_lun': True, 'reserved_percentage': 20,
+ 'expected_total_capacity': 20.01,
+ 'expected_reserved_capacity': 4,
+ 'expected_free_capacity': 18.01,
+ 'expected_total_available_capacity': 32.02,
+ 'expected_virtual_free': 30.02,
+ 'expected_free_percent': 93.75,
+ 'expected_provisioned_type': 'thin',
+ 'expected_provisioned_ratio': 0.06},
+ {'total': 30.01, 'free': 28.01, 'provisioned': 2.0, 'max_ratio': 2.0,
+ 'thin_support': True, 'thick_support': True,
+ 'is_thin_lun': False, 'reserved_percentage': 10,
+ 'expected_total_capacity': 30.01,
+ 'expected_reserved_capacity': 3,
+ 'expected_free_capacity': 28.01,
+ 'expected_total_available_capacity': 27.01,
+ 'expected_virtual_free': 25.01,
+ 'expected_free_percent': 92.6,
+ 'expected_provisioned_type': 'thick',
+ 'expected_provisioned_ratio': 0.07},
+ )
+ @ddt.unpack
+ def test_utils_calculate_capacity_factors(
+ self, total, free, provisioned, max_ratio, thin_support,
+ thick_support, is_thin_lun, reserved_percentage,
+ expected_total_capacity,
+ expected_reserved_capacity,
+ expected_free_capacity,
+ expected_total_available_capacity,
+ expected_virtual_free,
+ expected_free_percent,
+ expected_provisioned_type,
+ expected_provisioned_ratio):
+ host_stat = {'total_capacity_gb': total,
+ 'free_capacity_gb': free,
+ 'provisioned_capacity_gb': provisioned,
+ 'max_over_subscription_ratio': max_ratio,
+ 'thin_provisioning_support': thin_support,
+ 'thick_provisioning_support': thick_support,
+ 'reserved_percentage': reserved_percentage}
+
+ factors = utils.calculate_capacity_factors(
+ host_stat['total_capacity_gb'],
+ host_stat['free_capacity_gb'],
+ host_stat['provisioned_capacity_gb'],
+ host_stat['thin_provisioning_support'],
+ host_stat['max_over_subscription_ratio'],
+ host_stat['reserved_percentage'],
+ is_thin_lun)
+
+ self.assertEqual(expected_total_capacity,
+ factors['total_capacity'])
+ self.assertEqual(expected_reserved_capacity,
+ factors['reserved_capacity'])
+ self.assertEqual(expected_free_capacity,
+ factors['free_capacity'])
+ self.assertEqual(expected_total_available_capacity,
+ factors['total_available_capacity'])
+ self.assertEqual(expected_virtual_free,
+ factors['virtual_free_capacity'])
+ self.assertEqual(expected_free_percent,
+ factors['free_percent'])
+ self.assertEqual(expected_provisioned_type,
+ factors['provisioned_type'])
+ self.assertEqual(expected_provisioned_ratio,
+ factors['provisioned_ratio'])
+
class Comparable(utils.ComparableMixin):
def __init__(self, value):
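For reference, the expected figures in the second ddt case above (thin pool, 10% reserved) can be reproduced with the arithmetic below; the formulas are an assumption about how utils.calculate_capacity_factors derives its output, not a copy of the implementation:

    import math

    total, free, provisioned, max_ratio, reserved_pct = 20.01, 18.01, 2.0, 2.0, 10

    reserved = math.floor(total * reserved_pct / 100)               # 2
    # Thin pools scale the usable space by the over-subscription ratio.
    total_available = round((total - reserved) * max_ratio, 2)      # 36.02
    virtual_free = round(total_available - provisioned, 2)          # 34.02
    free_percent = round(virtual_free / total_available * 100, 2)   # 94.45
    provisioned_ratio = round(provisioned / total_available, 2)     # 0.06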
diff --git a/cinder/tests/unit/utils.py b/cinder/tests/unit/utils.py
index 955e4b937..163cfba86 100644
--- a/cinder/tests/unit/utils.py
+++ b/cinder/tests/unit/utils.py
@@ -18,6 +18,7 @@ import functools
import socket
from unittest import mock
import uuid
+import weakref
import fixtures
from oslo_config import cfg
@@ -595,3 +596,46 @@ def time_format(at=None):
# Need to handle either iso8601 or python UTC format
date_string += ('Z' if tz in ['UTC', 'UTC+00:00'] else tz)
return date_string
+
+
+class InstanceTracker(object):
+ """Track instances of a given class.
+
+ Going through the garbage collector's tracked objects searching for
+ instances makes tests take up to 12 times longer.
+
+ The slower GC-based alternative that was compared against was something
+ like:
+
+ for obj in gc.get_objects():
+ try:
+ if isinstance(obj, cls):
+ <do_something>
+ except ReferenceError:
+ pass
+ """
+ def __init__(self, cls):
+ self.cls = cls
+ self.refs = []
+ self.init_method = getattr(cls, '__init__')
+ setattr(cls, '__init__', self._track_instances())
+
+ def _track_instances(self):
+ def track(init_self, *args, **kwargs):
+ # Use weak references so the garbage collector doesn't count these
+ # references.
+ self.refs.append(weakref.ref(init_self))
+ return self.init_method(init_self, *args, **kwargs)
+ return track
+
+ def clear(self):
+ self.refs.clear()
+
+ @property
+ def instances(self):
+ result = []
+ for ref in self.refs:
+ inst = ref()
+ # Only return instances that have not been garbage collected
+ if inst is not None:
+ result.append(inst)
+ return result
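A hypothetical use of the new helper (FakeThing is an invented class, purely for illustration):

    class FakeThing(object):
        pass

    tracker = InstanceTracker(FakeThing)
    a, b = FakeThing(), FakeThing()
    assert len(tracker.instances) == 2   # only live instances are returned
    del a                                # its weak reference now resolves to None
    tracker.clear()                      # drop all recorded references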
diff --git a/cinder/tests/unit/volume/drivers/dell_emc/powerflex/__init__.py b/cinder/tests/unit/volume/drivers/dell_emc/powerflex/__init__.py
index 52a333b9c..322923b92 100644
--- a/cinder/tests/unit/volume/drivers/dell_emc/powerflex/__init__.py
+++ b/cinder/tests/unit/volume/drivers/dell_emc/powerflex/__init__.py
@@ -69,12 +69,14 @@ class TestPowerFlexDriver(test.TestCase):
Invalid='1',
BadStatus='2',
ValidVariant='3',
+ BadStatusWithDetails='4',
))
__RESPONSE_MODE_NAMES = {
'0': 'Valid',
'1': 'Invalid',
'2': 'BadStatus',
'3': 'ValidVariant',
+ '4': 'BadStatusWithDetails',
}
BAD_STATUS_RESPONSE = mocks.MockHTTPSResponse(
@@ -175,6 +177,7 @@ class TestPowerFlexDriver(test.TestCase):
RESPONSE_MODE.Valid: Respond with valid data
RESPONSE_MODE.Invalid: Respond with invalid data
RESPONSE_MODE.BadStatus: Respond with a not-OK status code.
+ RESPONSE_MODE.BadStatusWithDetails: As BadStatus, but with "details".
"""
self.__https_response_mode = mode
diff --git a/cinder/tests/unit/volume/drivers/dell_emc/powerflex/test_create_snapshot.py b/cinder/tests/unit/volume/drivers/dell_emc/powerflex/test_create_snapshot.py
index 411a6e522..e872f90ad 100644
--- a/cinder/tests/unit/volume/drivers/dell_emc/powerflex/test_create_snapshot.py
+++ b/cinder/tests/unit/volume/drivers/dell_emc/powerflex/test_create_snapshot.py
@@ -24,6 +24,7 @@ from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import fake_volume
from cinder.tests.unit.volume.drivers.dell_emc import powerflex
from cinder.tests.unit.volume.drivers.dell_emc.powerflex import mocks
+from cinder.volume.drivers.dell_emc.powerflex import rest_client
from cinder.volume.drivers.dell_emc.powerflex import utils as flex_utils
@@ -84,6 +85,20 @@ class TestCreateSnapShot(powerflex.TestPowerFlexDriver):
'instances/System/action/snapshotVolumes':
self.BAD_STATUS_RESPONSE,
},
+ self.RESPONSE_MODE.BadStatusWithDetails: {
+ 'instances/System/action/snapshotVolumes':
+ mocks.MockHTTPSResponse(
+ {
+ 'errorCode': 0,
+ 'message': 'Error with details',
+ 'details': [
+ {
+ 'rc': rest_client.TOO_MANY_SNAPS_ERROR,
+ },
+ ],
+ }, 500
+ ),
+ },
self.RESPONSE_MODE.Invalid: {
'types/Volume/instances/getByName::' +
self.volume_name_2x_enc: None,
@@ -116,3 +131,12 @@ class TestCreateSnapShot(powerflex.TestPowerFlexDriver):
def test_create_snapshot(self):
self.set_https_response_mode(self.RESPONSE_MODE.Valid)
self.driver.create_snapshot(self.snapshot)
+
+ def test_create_snapshot_limit_reached(self):
+ self.set_https_response_mode(
+ self.RESPONSE_MODE.BadStatusWithDetails)
+ self.assertRaises(
+ exception.SnapshotLimitReached,
+ self.driver.create_snapshot,
+ self.snapshot
+ )
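The new response mode exercises the error-detail parsing path that maps the PowerFlex "too many snapshots" error to SnapshotLimitReached. A sketch of the kind of check presumably involved (the helper name and response shape are assumptions based on the mock above, not the actual rest_client code):

    def hit_snapshot_limit(response_body):
        # The mocked 500 response carries the limit error code under 'details'.
        details = response_body.get('details') or []
        return any(d.get('rc') == rest_client.TOO_MANY_SNAPS_ERROR
                   for d in details)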
diff --git a/cinder/tests/unit/volume/drivers/dell_emc/powermax/powermax_data.py b/cinder/tests/unit/volume/drivers/dell_emc/powermax/powermax_data.py
index e83da161f..3c6a3568c 100644
--- a/cinder/tests/unit/volume/drivers/dell_emc/powermax/powermax_data.py
+++ b/cinder/tests/unit/volume/drivers/dell_emc/powermax/powermax_data.py
@@ -600,7 +600,7 @@ class PowerMaxData(object):
'connector': connector,
'device_id': device_id,
'init_group_name': initiatorgroup_name_f,
- 'initiator_check': None,
+ 'initiator_check': False,
'maskingview_name': masking_view_name_f,
'parent_sg_name': parent_sg_f,
'srp': srp,
diff --git a/cinder/tests/unit/volume/drivers/dell_emc/powermax/powermax_fake_objects.py b/cinder/tests/unit/volume/drivers/dell_emc/powermax/powermax_fake_objects.py
index 4a8947049..229f4b767 100644
--- a/cinder/tests/unit/volume/drivers/dell_emc/powermax/powermax_fake_objects.py
+++ b/cinder/tests/unit/volume/drivers/dell_emc/powermax/powermax_fake_objects.py
@@ -323,6 +323,9 @@ class FakeConfiguration(object):
self.filter_function = None
self.goodness_function = None
self.san_is_local = False
+ self.initiator_check = False
+ self.powermax_service_level = None
+ self.vmax_workload = None
if replication_device:
self.replication_device = replication_device
for key, value in kwargs.items():
diff --git a/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_common.py b/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_common.py
index 5ed8461fc..c6d1eccb3 100644
--- a/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_common.py
+++ b/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_common.py
@@ -55,8 +55,8 @@ class PowerMaxCommonTest(test.TestCase):
powermax_port_groups=[self.data.port_group_name_f],
powermax_port_group_name_template='portGroupName',
replication_device=replication_device)
- rest.PowerMaxRest._establish_rest_session = mock.Mock(
- return_value=tpfo.FakeRequestsSession())
+ self.mock_object(rest.PowerMaxRest, '_establish_rest_session',
+ return_value=tpfo.FakeRequestsSession())
driver = fc.PowerMaxFCDriver(configuration=configuration)
self.driver = driver
self.common = self.driver.common
@@ -64,9 +64,10 @@ class PowerMaxCommonTest(test.TestCase):
self.provision = self.common.provision
self.rest = self.common.rest
self.utils = self.common.utils
- self.utils.get_volumetype_extra_specs = (
- mock.Mock(return_value=self.data.vol_type_extra_specs))
- self.rest.is_snap_id = True
+ self.mock_object(
+ self.utils, 'get_volumetype_extra_specs',
+ return_value=deepcopy(self.data.vol_type_extra_specs))
+ self.mock_object(self.rest, 'is_snap_id', True)
@mock.patch.object(rest.PowerMaxRest, 'get_array_ucode_version',
return_value=tpd.PowerMaxData.next_gen_ucode)
@@ -102,14 +103,10 @@ class PowerMaxCommonTest(test.TestCase):
common.PowerMaxCommon, '_get_attributes_from_config')
def test_gather_info_rep_enabled_duplicate_serial_numbers(
self, mck_get_cnf, mck_get_c_cnf, mck_set, mck_model, mck_ucode):
- is_enabled = self.common.replication_enabled
- targets = self.common.replication_targets
- self.common.replication_enabled = True
- self.common.replication_targets = [self.data.array]
+ self.mock_object(self.common, 'replication_enabled', True)
+ self.mock_object(self.common, 'replication_targets', [self.data.array])
self.assertRaises(
exception.InvalidConfigurationValue, self.common._gather_info)
- self.common.replication_enabled = is_enabled
- self.common.replication_targets = targets
@mock.patch.object(common.PowerMaxCommon,
'_gather_info')
@@ -160,35 +157,35 @@ class PowerMaxCommonTest(test.TestCase):
self.assertIsNone(driver.common.powermax_port_group_name_template)
def test_get_slo_workload_combinations_powermax(self):
- self.common.next_gen = True
- self.common.array_model = 'PowerMax_2000'
+ self.mock_object(self.common, 'next_gen', True)
+ self.mock_object(self.common, 'array_model', 'PowerMax_2000')
array_info = {}
pools = self.common._get_slo_workload_combinations(array_info)
self.assertTrue(len(pools) == 24)
def test_get_slo_workload_combinations_afa_powermax(self):
- self.common.next_gen = True
- self.common.array_model = 'VMAX250F'
+ self.mock_object(self.common, 'next_gen', True)
+ self.mock_object(self.common, 'array_model', 'VMAX250F')
array_info = {}
pools = self.common._get_slo_workload_combinations(array_info)
self.assertTrue(len(pools) == 28)
def test_get_slo_workload_combinations_afa_hypermax(self):
- self.common.next_gen = False
- self.common.array_model = 'VMAX250F'
+ self.mock_object(self.common, 'next_gen', False)
+ self.mock_object(self.common, 'array_model', 'VMAX250F')
array_info = {}
pools = self.common._get_slo_workload_combinations(array_info)
self.assertTrue(len(pools) == 16)
def test_get_slo_workload_combinations_hybrid(self):
- self.common.next_gen = False
- self.common.array_model = 'VMAX100K'
+ self.mock_object(self.common, 'next_gen', False)
+ self.mock_object(self.common, 'array_model', 'VMAX100K')
array_info = {}
pools = self.common._get_slo_workload_combinations(array_info)
self.assertTrue(len(pools) == 44)
def test_get_slo_workload_combinations_failed(self):
- self.common.array_model = 'xxxxxx'
+ self.mock_object(self.common, 'array_model', 'xxxxxx')
array_info = {}
self.assertRaises(
exception.VolumeBackendAPIException,
@@ -665,9 +662,8 @@ class PowerMaxCommonTest(test.TestCase):
deepcopy(self.data.test_volume_attachment)]
extra_specs = deepcopy(self.data.rep_extra_specs_rep_config)
extra_specs[utils.FORCE_VOL_EDIT] = True
- self.common.promotion = True
+ self.mock_object(self.common, 'promotion', True)
self.common._unmap_lun(volume, connector)
- self.common.promotion = False
self.assertEqual(1, mck_rem.call_count)
@mock.patch.object(common.PowerMaxCommon, '_unmap_lun')
@@ -848,11 +844,10 @@ class PowerMaxCommonTest(test.TestCase):
connector = self.data.connector
with mock.patch.object(
self.common, '_unmap_lun_promotion') as mock_unmap:
- self.common.promotion = True
+ self.mock_object(self.common, 'promotion', True)
self.common.terminate_connection(volume, connector)
mock_unmap.assert_called_once_with(
volume, connector)
- self.common.promotion = False
@mock.patch.object(provision.PowerMaxProvision, 'extend_volume')
@mock.patch.object(common.PowerMaxCommon, '_extend_vol_validation_checks')
@@ -879,7 +874,7 @@ class PowerMaxCommonTest(test.TestCase):
def test_extend_vol_rep_success_next_gen(
self, mck_setup, mck_val_chk, mck_get_rdf, mck_ode, mck_extend,
mck_validate):
- self.common.next_gen = True
+ self.mock_object(self.common, 'next_gen', True)
volume = self.data.test_volume
array = self.data.array
device_id = self.data.device_id
@@ -907,8 +902,8 @@ class PowerMaxCommonTest(test.TestCase):
def test_extend_vol_rep_success_next_gen_legacy_r2(
self, mck_setup, mck_val_chk, mck_get_rdf, mck_ode, mck_leg_extend,
mck_extend, mck_validate):
- self.common.next_gen = True
- self.common.rep_config = self.data.rep_config
+ self.mock_object(self.common, 'next_gen', True)
+ self.mock_object(self.common, 'rep_configs', [self.data.rep_config])
volume = self.data.test_volume
array = self.data.array
device_id = self.data.device_id
@@ -938,8 +933,8 @@ class PowerMaxCommonTest(test.TestCase):
def test_extend_vol_rep_success_legacy(
self, mck_setup, mck_val_chk, mck_get_rdf, mck_ode, mck_leg_extend,
mck_extend, mck_validate):
- self.common.rep_config = self.data.rep_config
- self.common.next_gen = False
+ self.mock_object(self.common, 'rep_configs', [self.data.rep_config])
+ self.mock_object(self.common, 'next_gen', False)
volume = self.data.test_volume
array = self.data.array
device_id = self.data.device_id
@@ -967,8 +962,8 @@ class PowerMaxCommonTest(test.TestCase):
return_value=tpd.PowerMaxData.ex_specs_rep_config_no_extend)
def test_extend_vol_rep_success_legacy_allow_extend_false(
self, mck_setup, mck_val_chk, mck_get_rdf, mck_ode, mck_validate):
- self.common.rep_config = self.data.rep_config
- self.common.next_gen = False
+ self.mock_object(self.common, 'rep_configs', [self.data.rep_config])
+ self.mock_object(self.common, 'next_gen', False)
volume = self.data.test_volume
new_size = self.data.test_volume.size
self.assertRaises(exception.VolumeBackendAPIException,
@@ -1080,8 +1075,8 @@ class PowerMaxCommonTest(test.TestCase):
self, mock_vol, mock_mvs, mock_mv_conns):
expected_dict = {'hostlunid': '1', 'maskingview': 'OS-HostX-I-PG-MV',
'array': '000197800123', 'device_id': '00001'}
- self.common.powermax_short_host_name_template = (
- 'shortHostName[:7]finance')
+ self.mock_object(self.common, 'powermax_short_host_name_template',
+ 'shortHostName[:7]finance')
masked_vols, is_multiattach = self.common.find_host_lun_id(
self.data.test_volume, 'HostX',
self.data.extra_specs)
@@ -1161,8 +1156,9 @@ class PowerMaxCommonTest(test.TestCase):
extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f
extra_specs[utils.WORKLOAD] = self.data.workload
ref_mv_dict = self.data.masking_view_dict
- self.common.next_gen = False
- self.common.powermax_port_group_name_template = 'portGroupName'
+ self.mock_object(self.common, 'next_gen', False)
+ self.mock_object(self.common, 'powermax_port_group_name_template',
+ 'portGroupName')
extra_specs.pop(utils.IS_RE, None)
masking_view_dict = self.common._populate_masking_dict(
volume, connector, extra_specs)
@@ -1208,7 +1204,7 @@ class PowerMaxCommonTest(test.TestCase):
connector = self.data.connector
extra_specs = deepcopy(self.data.extra_specs)
extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f
- self.common.next_gen = True
+ self.mock_object(self.common, 'next_gen', True)
masking_view_dict = self.common._populate_masking_dict(
volume, connector, extra_specs)
self.assertEqual('NONE', masking_view_dict[utils.WORKLOAD])
@@ -1408,7 +1404,7 @@ class PowerMaxCommonTest(test.TestCase):
volume_name = '1'
volume_size = self.data.test_volume.size
extra_specs = self.data.extra_specs
- self.common.next_gen = True
+ self.mock_object(self.common, 'next_gen', True)
with mock.patch.object(
self.utils, 'is_compression_disabled', return_value=True):
with mock.patch.object(
@@ -1598,20 +1594,22 @@ class PowerMaxCommonTest(test.TestCase):
with mock.patch.object(self.rest, 'is_compression_capable',
return_value=True):
srp_record = self.common.get_attributes_from_cinder_config()
- extra_specs = self.common._set_vmax_extra_specs(
- self.data.vol_type_extra_specs_compr_disabled, srp_record)
+ specs = deepcopy(self.data.vol_type_extra_specs_compr_disabled)
+ extra_specs = self.common._set_vmax_extra_specs(specs, srp_record)
ref_extra_specs = deepcopy(self.data.extra_specs_intervals_set)
ref_extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f
ref_extra_specs[utils.DISABLECOMPRESSION] = "true"
self.assertEqual(ref_extra_specs, extra_specs)
def test_set_vmax_extra_specs_compr_disabled_not_compr_capable(self):
- srp_record = self.common.get_attributes_from_cinder_config()
- extra_specs = self.common._set_vmax_extra_specs(
- self.data.vol_type_extra_specs_compr_disabled, srp_record)
- ref_extra_specs = deepcopy(self.data.extra_specs_intervals_set)
- ref_extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f
- self.assertEqual(ref_extra_specs, extra_specs)
+ with mock.patch.object(self.rest, 'is_compression_capable',
+ return_value=False):
+ srp_record = self.common.get_attributes_from_cinder_config()
+ specs = deepcopy(self.data.vol_type_extra_specs_compr_disabled)
+ extra_specs = self.common._set_vmax_extra_specs(specs, srp_record)
+ ref_extra_specs = deepcopy(self.data.extra_specs_intervals_set)
+ ref_extra_specs[utils.PORTGROUPNAME] = self.data.port_group_name_f
+ self.assertEqual(ref_extra_specs, extra_specs)
def test_set_vmax_extra_specs_portgroup_as_spec(self):
srp_record = self.common.get_attributes_from_cinder_config()
@@ -1631,7 +1629,7 @@ class PowerMaxCommonTest(test.TestCase):
def test_set_vmax_extra_specs_next_gen(self):
srp_record = self.common.get_attributes_from_cinder_config()
- self.common.next_gen = True
+ self.mock_object(self.common, 'next_gen', True)
extra_specs = self.common._set_vmax_extra_specs(
self.data.vol_type_extra_specs, srp_record)
ref_extra_specs = deepcopy(self.data.extra_specs_intervals_set)
@@ -1762,7 +1760,8 @@ class PowerMaxCommonTest(test.TestCase):
'interval': 1, 'retries': 1, 'slo': 'Diamond',
'workload': 'DSS'}
host_template = 'shortHostName[:10]uuid[:5]'
- self.common.powermax_short_host_name_template = host_template
+ self.mock_object(self.common, 'powermax_short_host_name_template',
+ host_template)
self.common.get_target_wwns_from_masking_view(
self.data.test_volume, connector)
mock_label.assert_called_once_with(
@@ -2274,9 +2273,8 @@ class PowerMaxCommonTest(test.TestCase):
volume = self.data.test_rep_volume
new_type = {'extra_specs': {}}
host = {'host': self.data.new_host}
- self.common.promotion = True
+ self.mock_object(self.common, 'promotion', True)
self.common.retype(volume, new_type, host)
- self.common.promotion = False
mck_migrate.assert_called_once_with(
device_id, volume, host, volume_name, new_type, rep_extra_specs)
@@ -2664,12 +2662,11 @@ class PowerMaxCommonTest(test.TestCase):
host = {'host': self.data.new_host}
volume_name = self.data.test_volume.name
ref_return = (False, None, None)
- self.common.promotion = True
+ self.mock_object(self.common, 'promotion', True)
return_val = self.common._is_valid_for_storage_assisted_migration(
device_id, host, self.data.array,
self.data.srp, volume_name, True, False, self.data.slo_silver,
self.data.workload, False)
- self.common.promotion = False
self.assertEqual(ref_return, return_val)
def test_is_valid_for_storage_assisted_migration_promotion_change_slo(
@@ -2678,12 +2675,11 @@ class PowerMaxCommonTest(test.TestCase):
host = {'host': self.data.new_host}
volume_name = self.data.test_volume.name
ref_return = (False, None, None)
- self.common.promotion = True
+ self.mock_object(self.common, 'promotion', True)
return_val = self.common._is_valid_for_storage_assisted_migration(
device_id, host, self.data.array,
self.data.srp, volume_name, False, False, self.data.slo,
self.data.workload, False)
- self.common.promotion = False
self.assertEqual(ref_return, return_val)
def test_is_valid_for_storage_assisted_migration_promotion_change_workload(
@@ -2692,12 +2688,11 @@ class PowerMaxCommonTest(test.TestCase):
host = {'host': self.data.new_host}
volume_name = self.data.test_volume.name
ref_return = (False, None, None)
- self.common.promotion = True
+ self.mock_object(self.common, 'promotion', True)
return_val = self.common._is_valid_for_storage_assisted_migration(
device_id, host, self.data.array,
self.data.srp, volume_name, False, False, self.data.slo_silver,
'fail_workload', False)
- self.common.promotion = False
self.assertEqual(ref_return, return_val)
def test_is_valid_for_storage_assisted_migration_promotion_target_not_rep(
@@ -2706,12 +2701,11 @@ class PowerMaxCommonTest(test.TestCase):
host = {'host': self.data.new_host}
volume_name = self.data.test_volume.name
ref_return = (False, None, None)
- self.common.promotion = True
+ self.mock_object(self.common, 'promotion', True)
return_val = self.common._is_valid_for_storage_assisted_migration(
device_id, host, self.data.array,
self.data.srp, volume_name, False, False, self.data.slo_silver,
'OLTP', True)
- self.common.promotion = False
self.assertEqual(ref_return, return_val)
@mock.patch.object(
@@ -2943,11 +2937,10 @@ class PowerMaxCommonTest(test.TestCase):
group = self.data.test_group_1
add_vols = []
remove_vols = [self.data.test_volume_group_member]
- self.common.failover = True
+ self.mock_object(self.common, 'failover', True)
self.assertRaises(
exception.VolumeBackendAPIException, self.common.update_group,
group, add_vols, remove_vols)
- self.common.failover = False
@mock.patch.object(volume_utils, 'is_group_a_type',
return_value=False)
@@ -2960,13 +2953,14 @@ class PowerMaxCommonTest(test.TestCase):
add_vols = []
remove_vols = [self.data.test_volume_group_member]
ref_model_update = {'status': fields.GroupStatus.AVAILABLE}
- self.common.promotion = True
+ self.mock_object(self.common, 'promotion', True)
model_update, __, __ = self.common.update_group(
group, add_vols, remove_vols)
- self.common.promotion = False
mck_update.assert_called_once_with(group, add_vols, remove_vols)
self.assertEqual(ref_model_update, model_update)
+ @mock.patch.object(common.PowerMaxCommon, '_find_volume_group',
+ return_value=tpd.PowerMaxData.test_rep_group)
@mock.patch.object(rest.PowerMaxRest, 'is_volume_in_storagegroup',
return_value=True)
@mock.patch.object(
@@ -2983,7 +2977,7 @@ class PowerMaxCommonTest(test.TestCase):
masking.PowerMaxMasking, 'remove_volumes_from_storage_group')
def test_update_group_promotion(
self, mck_rem, mock_cg_type, mock_type_check, mck_setup, mck_rep,
- mck_in_sg):
+ mck_in_sg, mck_group):
group = self.data.test_rep_group
add_vols = []
remove_vols = [self.data.test_volume_group_member]
@@ -3230,10 +3224,10 @@ class PowerMaxCommonTest(test.TestCase):
powermax_array=self.data.array, powermax_srp='SRP_1',
san_password='smc', san_api_port=8443,
powermax_port_groups=[self.data.port_group_name_i])
- self.common.configuration = configuration
+ self.mock_object(self.common, 'configuration', configuration)
kwargs_returned = self.common.get_attributes_from_cinder_config()
self.assertEqual(kwargs_expected, kwargs_returned)
- self.common.configuration = old_conf
+ self.mock_object(self.common, 'configuration', old_conf)
kwargs = self.common.get_attributes_from_cinder_config()
self.assertIsNone(kwargs)
@@ -3248,7 +3242,7 @@ class PowerMaxCommonTest(test.TestCase):
powermax_array=self.data.array, powermax_srp='SRP_1',
san_password='smc', san_api_port=3448,
powermax_port_groups=[self.data.port_group_name_i])
- self.common.configuration = configuration
+ self.mock_object(self.common, 'configuration', configuration)
kwargs_returned = self.common.get_attributes_from_cinder_config()
self.assertEqual(kwargs_expected, kwargs_returned)
@@ -3263,7 +3257,7 @@ class PowerMaxCommonTest(test.TestCase):
powermax_array=self.data.array, powermax_srp='SRP_1',
san_password='smc',
powermax_port_groups=[self.data.port_group_name_i])
- self.common.configuration = configuration
+ self.mock_object(self.common, 'configuration', configuration)
kwargs_returned = self.common.get_attributes_from_cinder_config()
self.assertEqual(kwargs_expected, kwargs_returned)
@@ -3276,7 +3270,7 @@ class PowerMaxCommonTest(test.TestCase):
driver_ssl_cert_verify=True,
driver_ssl_cert_path='/path/to/cert')
- self.common.configuration = conf
+ self.mock_object(self.common, 'configuration', conf)
conf_returned = self.common.get_attributes_from_cinder_config()
self.assertEqual('/path/to/cert', conf_returned['SSLVerify'])
@@ -3473,12 +3467,12 @@ class PowerMaxCommonTest(test.TestCase):
self.common.revert_to_snapshot, volume, snapshot)
def test_get_initiator_check_flag(self):
- self.common.configuration.initiator_check = False
+ self.mock_object(self.common.configuration, 'initiator_check', False)
initiator_check = self.common._get_initiator_check_flag()
self.assertFalse(initiator_check)
def test_get_initiator_check_flag_true(self):
- self.common.configuration.initiator_check = True
+ self.mock_object(self.common.configuration, 'initiator_check', True)
initiator_check = self.common._get_initiator_check_flag()
self.assertTrue(initiator_check)
@@ -3594,14 +3588,15 @@ class PowerMaxCommonTest(test.TestCase):
self.assertEqual(vols_lists, expected_response)
def test_get_slo_workload_combo_from_cinder_conf(self):
- self.common.configuration.powermax_service_level = 'Diamond'
- self.common.configuration.vmax_workload = 'DSS'
+ self.mock_object(self.common.configuration, 'powermax_service_level',
+ 'Diamond')
+ self.mock_object(self.common.configuration, 'vmax_workload', 'DSS')
response1 = self.common.get_attributes_from_cinder_config()
self.assertEqual('Diamond', response1['ServiceLevel'])
self.assertEqual('DSS', response1['Workload'])
- self.common.configuration.powermax_service_level = 'Diamond'
- self.common.configuration.vmax_workload = None
+ # powermax_service_level is already set to Diamond
+ self.mock_object(self.common.configuration, 'vmax_workload', None)
response2 = self.common.get_attributes_from_cinder_config()
self.assertEqual(self.common.configuration.powermax_service_level,
response2['ServiceLevel'])
@@ -3613,13 +3608,14 @@ class PowerMaxCommonTest(test.TestCase):
'SerialNumber': '000197800123', 'srpName': 'SRP_1',
'PortGroup': ['OS-fibre-PG']}
- self.common.configuration.powermax_service_level = None
- self.common.configuration.vmax_workload = 'DSS'
+ self.mock_object(self.common.configuration, 'powermax_service_level',
+ None)
+ self.mock_object(self.common.configuration, 'vmax_workload', 'DSS')
response3 = self.common.get_attributes_from_cinder_config()
self.assertEqual(expected_response, response3)
- self.common.configuration.powermax_service_level = None
- self.common.configuration.vmax_workload = None
+ # powermax_service_level is already set to None
+ self.mock_object(self.common.configuration, 'vmax_workload', None)
response4 = self.common.get_attributes_from_cinder_config()
self.assertEqual(expected_response, response4)
@@ -3633,7 +3629,7 @@ class PowerMaxCommonTest(test.TestCase):
u4p_failover_retries='3', u4p_failover_timeout='10',
u4p_primary='10.10.10.10', powermax_array=self.data.array,
powermax_srp=self.data.srp)
- self.common.configuration = configuration
+ self.mock_object(self.common, 'configuration', configuration)
self.common._get_u4p_failover_info()
self.assertTrue(self.rest.u4p_failover_enabled)
self.assertIsNotNone(self.rest.u4p_failover_targets)
@@ -3667,21 +3663,21 @@ class PowerMaxCommonTest(test.TestCase):
'RestUserName': 'test', 'RestPassword': 'test',
'SerialNumber': '000197800123', 'srpName': 'SRP_1',
'PortGroup': None, 'SSLVerify': True}}
- self.common.configuration = configuration
+ self.mock_object(self.common, 'configuration', configuration)
self.common._get_u4p_failover_info()
self.assertIsNotNone(self.rest.u4p_failover_targets)
mck_set_fo.assert_called_once_with(expected_u4p_failover_config)
def test_update_vol_stats_retest_u4p(self):
- self.rest.u4p_in_failover = True
- self.rest.u4p_failover_autofailback = True
+ self.mock_object(self.rest, 'u4p_in_failover', True)
+ self.mock_object(self.rest, 'u4p_failover_autofailback', True)
with mock.patch.object(
self.common, 'retest_primary_u4p') as mock_retest:
self.common.update_volume_stats()
mock_retest.assert_called_once()
- self.rest.u4p_in_failover = True
- self.rest.u4p_failover_autofailback = False
+ self.mock_object(self.rest, 'u4p_in_failover', False)
+ self.mock_object(self.rest, 'u4p_failover_autofailback', False)
with mock.patch.object(
self.common, 'retest_primary_u4p') as mock_retest:
self.common.update_volume_stats()
@@ -3730,7 +3726,7 @@ class PowerMaxCommonTest(test.TestCase):
device_id = self.data.device_id
new_size = self.data.test_volume.size + 1
extra_specs = deepcopy(self.data.extra_specs)
- self.common.next_gen = False
+ self.mock_object(self.common, 'next_gen', False)
self.assertRaises(
exception.VolumeBackendAPIException,
self.common._extend_vol_validation_checks,
@@ -3753,7 +3749,7 @@ class PowerMaxCommonTest(test.TestCase):
def test_array_ode_capabilities_check_non_next_gen_local(self):
"""Rep enabled, neither array next gen, returns F,F,F,F"""
array = self.data.powermax_model_details['symmetrixId']
- self.common.next_gen = False
+ self.mock_object(self.common, 'next_gen', False)
(r1_ode, r1_ode_metro,
r2_ode, r2_ode_metro) = self.common._array_ode_capabilities_check(
array, self.data.rep_config_metro, True)
@@ -3770,8 +3766,8 @@ class PowerMaxCommonTest(test.TestCase):
self, mock_rdf, mock_det):
"""Rep disabled, local array next gen, pre elm, returns T,F,F,F"""
array = self.data.powermax_model_details['symmetrixId']
- self.common.ucode_level = '5978.1.1'
- self.common.next_gen = True
+ self.mock_object(self.common, 'ucode_level', '5978.1.1')
+ self.mock_object(self.common, 'next_gen', True)
(r1_ode, r1_ode_metro,
r2_ode, r2_ode_metro) = self.common._array_ode_capabilities_check(
array, self.data.rep_config_metro, False)
@@ -3788,8 +3784,9 @@ class PowerMaxCommonTest(test.TestCase):
self, mock_rdf, mock_det):
"""Rep enabled, remote not next gen, returns T,T,F,F"""
array = self.data.powermax_model_details['symmetrixId']
- self.common.ucode_level = self.data.powermax_model_details['ucode']
- self.common.next_gen = True
+ self.mock_object(self.common, 'ucode_level',
+ self.data.powermax_model_details['ucode'])
+ self.mock_object(self.common, 'next_gen', True)
(r1_ode, r1_ode_metro,
r2_ode, r2_ode_metro) = self.common._array_ode_capabilities_check(
array, self.data.rep_config_metro, True)
@@ -3806,8 +3803,9 @@ class PowerMaxCommonTest(test.TestCase):
self, mock_rdf, mock_det):
"""Rep enabled, both array next gen, tgt<5978.221, returns T,T,T,F"""
array = self.data.powermax_model_details['symmetrixId']
- self.common.ucode_level = self.data.powermax_model_details['ucode']
- self.common.next_gen = True
+ self.mock_object(self.common, 'ucode_level',
+ self.data.powermax_model_details['ucode'])
+ self.mock_object(self.common, 'next_gen', True)
(r1_ode, r1_ode_metro,
r2_ode, r2_ode_metro) = self.common._array_ode_capabilities_check(
array, self.data.rep_config_metro, True)
@@ -3824,8 +3822,9 @@ class PowerMaxCommonTest(test.TestCase):
self, mock_rdf, mock_det):
"""Rep enabled, both array next gen, tgt>5978.221 returns T,T,T,T"""
array = self.data.powermax_model_details['symmetrixId']
- self.common.ucode_level = self.data.powermax_model_details['ucode']
- self.common.next_gen = True
+ self.mock_object(self.common, 'ucode_level',
+ self.data.powermax_model_details['ucode'])
+ self.mock_object(self.common, 'next_gen', True)
(r1_ode, r1_ode_metro,
r2_ode, r2_ode_metro) = self.common._array_ode_capabilities_check(
array, self.data.rep_config_metro, True)
@@ -3879,7 +3878,7 @@ class PowerMaxCommonTest(test.TestCase):
powermax_array=self.data.array, powermax_srp='SRP_1',
san_password='smc', san_api_port=1234,
powermax_port_groups=[self.data.port_group_name_i])
- self.common.configuration = configuration
+ self.mock_object(self.common, 'configuration', configuration)
port = self.common._get_unisphere_port()
self.assertEqual(1234, port)
@@ -3889,7 +3888,7 @@ class PowerMaxCommonTest(test.TestCase):
powermax_array=self.data.array, powermax_srp='SRP_1',
san_password='smc',
powermax_port_groups=[self.data.port_group_name_i])
- self.common.configuration = configuration
+ self.mock_object(self.common, 'configuration', configuration)
ref_port = utils.DEFAULT_PORT
port = self.common._get_unisphere_port()
self.assertEqual(ref_port, port)
@@ -4186,13 +4185,18 @@ class PowerMaxCommonTest(test.TestCase):
mock_verify.assert_not_called()
def test_set_config_file_and_get_extra_specs(self):
- self.common.rep_config = {
- 'mode': utils.REP_METRO, utils.METROBIAS: True}
- with mock.patch.object(self.utils, 'get_volumetype_extra_specs',
- return_value=self.data.rep_extra_specs_metro):
- extra_specs, __ = self.common._set_config_file_and_get_extra_specs(
- self.data.test_volume, None)
- self.assertEqual(self.data.rep_extra_specs_metro, extra_specs)
+ self.mock_object(self.common, 'rep_configs',
+ [{'mode': utils.REP_METRO, utils.METROBIAS: True}])
+ original_specs = deepcopy(self.data.rep_extra_specs_metro)
+ try:
+ with mock.patch.object(
+ self.utils, 'get_volumetype_extra_specs',
+ return_value=self.data.rep_extra_specs_metro):
+ specs, __ = self.common._set_config_file_and_get_extra_specs(
+ self.data.test_volume, None)
+ self.assertEqual(self.data.rep_extra_specs_metro, specs)
+ finally:
+ self.data.rep_extra_specs_metro = original_specs
@mock.patch.object(utils.PowerMaxUtils, 'get_rdf_management_group_name')
def test_retype_volume_promotion_get_extra_specs_mgmt_group(self, mck_get):
@@ -4207,11 +4211,10 @@ class PowerMaxCommonTest(test.TestCase):
target_extra_specs = deepcopy(self.data.extra_specs)
target_extra_specs[utils.DISABLECOMPRESSION] = False
extra_specs[utils.REP_CONFIG] = self.data.rep_config_async
- self.common.promotion = True
+ self.mock_object(self.common, 'promotion', True)
self.common._retype_volume(
array, srp, device_id, volume, volume_name, extra_specs,
target_slo, target_workload, target_extra_specs)
- self.common.promotion = False
mck_get.assert_called_once_with(extra_specs[utils.REP_CONFIG])
@mock.patch.object(rest.PowerMaxRest, 'is_volume_in_storagegroup',
@@ -4458,8 +4461,8 @@ class PowerMaxCommonTest(test.TestCase):
@mock.patch.object(utils.PowerMaxUtils, 'get_volume_group_utils',
return_value=(None, {'interval': 1, 'retries': 1}))
def test_get_volume_group_info(self, mock_group_utils):
- self.common.interval = 1
- self.common.retries = 1
+ self.mock_object(self.common, 'interval', 1)
+ self.mock_object(self.common, 'retries', 1)
with mock.patch.object(
tpfo.FakeConfiguration, 'safe_get') as mock_array:
self.common._get_volume_group_info(
@@ -4481,9 +4484,9 @@ class PowerMaxCommonTest(test.TestCase):
port_load_metric='PercentBusy')
ref_perf_conf = self.data.performance_config
- volume_utils.get_max_over_subscription_ratio = mock.Mock()
- rest.PowerMaxRest._establish_rest_session = mock.Mock(
- return_value=tpfo.FakeRequestsSession())
+ self.mock_object(volume_utils, 'get_max_over_subscription_ratio')
+ self.mock_object(rest.PowerMaxRest, '_establish_rest_session',
+ return_value=tpfo.FakeRequestsSession())
driver = fc.PowerMaxFCDriver(configuration=ref_cinder_conf)
self.assertEqual(ref_perf_conf, driver.common.performance.config)
@@ -4521,7 +4524,8 @@ class PowerMaxCommonTest(test.TestCase):
"""
extra_specs = {utils.ARRAY: self.data.array}
pool_record = {utils.PORT_GROUP: self.data.perf_port_groups}
- self.common.performance.config = self.data.performance_config
+ self.mock_object(self.common.performance, 'config',
+ self.data.performance_config)
with mock.patch.object(
self.common.performance, 'process_port_group_load',
side_effect=(
diff --git a/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_fc.py b/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_fc.py
index 84ff92257..a2e6ba624 100644
--- a/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_fc.py
+++ b/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_fc.py
@@ -13,6 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+import copy
from unittest import mock
from cinder import exception
@@ -32,21 +33,22 @@ class PowerMaxFCTest(test.TestCase):
def setUp(self):
self.data = tpd.PowerMaxData()
super(PowerMaxFCTest, self).setUp()
- volume_utils.get_max_over_subscription_ratio = mock.Mock()
+ self.mock_object(volume_utils, 'get_max_over_subscription_ratio')
self.configuration = tpfo.FakeConfiguration(
None, 'FCTests', 1, 1, san_ip='1.1.1.1', san_login='smc',
powermax_array=self.data.array, powermax_srp='SRP_1',
san_password='smc', san_api_port=8443,
powermax_port_groups=[self.data.port_group_name_i])
- rest.PowerMaxRest._establish_rest_session = mock.Mock(
- return_value=tpfo.FakeRequestsSession())
+ self.mock_object(rest.PowerMaxRest, '_establish_rest_session',
+ return_value=tpfo.FakeRequestsSession())
driver = fc.PowerMaxFCDriver(configuration=self.configuration)
self.driver = driver
self.common = self.driver.common
self.masking = self.common.masking
self.utils = self.common.utils
- self.utils.get_volumetype_extra_specs = (
- mock.Mock(return_value=self.data.vol_type_extra_specs))
+ self.mock_object(
+ self.utils, 'get_volumetype_extra_specs',
+ return_value=copy.deepcopy(self.data.vol_type_extra_specs))
def test_create_volume(self):
with mock.patch.object(self.common, 'create_volume') as mock_create:
@@ -138,11 +140,11 @@ class PowerMaxFCTest(test.TestCase):
def test_get_zoning_mappings(self):
ref_mappings = self.data.zoning_mappings
zoning_mappings = self.driver._get_zoning_mappings(
- self.data.test_volume, self.data.connector)
+ self.data.test_volume, copy.deepcopy(self.data.connector))
self.assertEqual(ref_mappings, zoning_mappings)
# Legacy vol
zoning_mappings2 = self.driver._get_zoning_mappings(
- self.data.test_legacy_vol, self.data.connector)
+ self.data.test_legacy_vol, copy.deepcopy(self.data.connector))
self.assertEqual(ref_mappings, zoning_mappings2)
def test_get_zoning_mappings_no_mv(self):
diff --git a/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_iscsi.py b/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_iscsi.py
index 5025e7610..080942819 100644
--- a/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_iscsi.py
+++ b/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_iscsi.py
@@ -31,21 +31,22 @@ class PowerMaxISCSITest(test.TestCase):
def setUp(self):
self.data = tpd.PowerMaxData()
super(PowerMaxISCSITest, self).setUp()
- volume_utils.get_max_over_subscription_ratio = mock.Mock()
+ self.mock_object(volume_utils, 'get_max_over_subscription_ratio')
configuration = tpfo.FakeConfiguration(
None, 'ISCSITests', 1, 1, san_ip='1.1.1.1', san_login='smc',
powermax_array=self.data.array, powermax_srp='SRP_1',
san_password='smc', san_api_port=8443,
powermax_port_groups=[self.data.port_group_name_i])
- rest.PowerMaxRest._establish_rest_session = mock.Mock(
- return_value=tpfo.FakeRequestsSession())
+ self.mock_object(rest.PowerMaxRest, '_establish_rest_session',
+ return_value=tpfo.FakeRequestsSession())
driver = iscsi.PowerMaxISCSIDriver(configuration=configuration)
self.driver = driver
self.common = self.driver.common
self.masking = self.common.masking
self.utils = self.common.utils
- self.utils.get_volumetype_extra_specs = (
- mock.Mock(return_value=self.data.vol_type_extra_specs))
+ self.mock_object(
+ self.utils, 'get_volumetype_extra_specs',
+ return_value=deepcopy(self.data.vol_type_extra_specs))
def test_create_volume(self):
with mock.patch.object(self.common, 'create_volume') as mock_create:
diff --git a/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_masking.py b/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_masking.py
index 7dc542ec0..0f32724fe 100644
--- a/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_masking.py
+++ b/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_masking.py
@@ -34,7 +34,7 @@ class PowerMaxMaskingTest(test.TestCase):
def setUp(self):
self.data = tpd.PowerMaxData()
super(PowerMaxMaskingTest, self).setUp()
- volume_utils.get_max_over_subscription_ratio = mock.Mock()
+ self.mock_object(volume_utils, 'get_max_over_subscription_ratio')
self.replication_device = self.data.sync_rep_device
configuration = tpfo.FakeConfiguration(
None, 'MaskingTests', 1, 1, san_ip='1.1.1.1',
@@ -43,10 +43,10 @@ class PowerMaxMaskingTest(test.TestCase):
powermax_port_groups=[self.data.port_group_name_f],
replication_device=self.replication_device)
self._gather_info = common.PowerMaxCommon._gather_info
- common.PowerMaxCommon._get_u4p_failover_info = mock.Mock()
- common.PowerMaxCommon._gather_info = mock.Mock()
- rest.PowerMaxRest._establish_rest_session = mock.Mock(
- return_value=tpfo.FakeRequestsSession())
+ self.mock_object(common.PowerMaxCommon, '_get_u4p_failover_info')
+ self.mock_object(common.PowerMaxCommon, '_gather_info')
+ self.mock_object(rest.PowerMaxRest, '_establish_rest_session',
+ return_value=tpfo.FakeRequestsSession())
driver = common.PowerMaxCommon(
'iSCSI', self.data.version, configuration=configuration)
driver_fc = common.PowerMaxCommon(
diff --git a/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_migrate.py b/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_migrate.py
index 12deed669..eb5788bb5 100644
--- a/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_migrate.py
+++ b/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_migrate.py
@@ -32,15 +32,15 @@ from cinder.volume import volume_utils
class PowerMaxMigrateTest(test.TestCase):
def setUp(self):
self.data = tpd.PowerMaxData()
- volume_utils.get_max_over_subscription_ratio = mock.Mock()
+ self.mock_object(volume_utils, 'get_max_over_subscription_ratio')
super(PowerMaxMigrateTest, self).setUp()
configuration = tpfo.FakeConfiguration(
None, 'MaskingTests', 1, 1, san_ip='1.1.1.1',
san_login='smc', powermax_array=self.data.array,
powermax_srp='SRP_1', san_password='smc', san_api_port=8443,
vmax_port_groups=[self.data.port_group_name_f])
- rest.PowerMaxRest._establish_rest_session = mock.Mock(
- return_value=tpfo.FakeRequestsSession())
+ self.mock_object(rest.PowerMaxRest, '_establish_rest_session',
+ return_value=tpfo.FakeRequestsSession())
driver = iscsi.PowerMaxISCSIDriver(configuration=configuration)
self.driver = driver
self.common = self.driver.common
diff --git a/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_performance.py b/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_performance.py
index fdbc29ece..e6b716542 100644
--- a/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_performance.py
+++ b/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_performance.py
@@ -50,9 +50,9 @@ class PowerMaxPerformanceTest(test.TestCase):
super(PowerMaxPerformanceTest, self).setUp()
- volume_utils.get_max_over_subscription_ratio = mock.Mock()
- rest.PowerMaxRest._establish_rest_session = mock.Mock(
- return_value=tpfo.FakeRequestsSession())
+ self.mock_object(volume_utils, 'get_max_over_subscription_ratio')
+ self.mock_object(rest.PowerMaxRest, '_establish_rest_session',
+ return_value=tpfo.FakeRequestsSession())
driver = iscsi.PowerMaxISCSIDriver(
configuration=self.reference_cinder_conf)
self.driver = driver
diff --git a/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_provision.py b/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_provision.py
index d6dc3cffb..e4d8ae920 100644
--- a/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_provision.py
+++ b/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_provision.py
@@ -33,14 +33,14 @@ class PowerMaxProvisionTest(test.TestCase):
def setUp(self):
self.data = tpd.PowerMaxData()
super(PowerMaxProvisionTest, self).setUp()
- volume_utils.get_max_over_subscription_ratio = mock.Mock()
+ self.mock_object(volume_utils, 'get_max_over_subscription_ratio')
configuration = tpfo.FakeConfiguration(
None, 'ProvisionTests', 1, 1, san_ip='1.1.1.1', san_login='smc',
powermax_array=self.data.array, powermax_srp='SRP_1',
san_password='smc', san_api_port=8443,
powermax_port_groups=[self.data.port_group_name_i])
- rest.PowerMaxRest._establish_rest_session = mock.Mock(
- return_value=tpfo.FakeRequestsSession())
+ self.mock_object(rest.PowerMaxRest, '_establish_rest_session',
+ return_value=tpfo.FakeRequestsSession())
driver = iscsi.PowerMaxISCSIDriver(configuration=configuration)
self.driver = driver
self.common = self.driver.common
diff --git a/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_replication.py b/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_replication.py
index 4ab8ba3f1..f883ebff9 100644
--- a/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_replication.py
+++ b/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_replication.py
@@ -42,15 +42,15 @@ class PowerMaxReplicationTest(test.TestCase):
self.data = tpd.PowerMaxData()
super(PowerMaxReplicationTest, self).setUp()
self.replication_device = self.data.sync_rep_device
- volume_utils.get_max_over_subscription_ratio = mock.Mock()
+ self.mock_object(volume_utils, 'get_max_over_subscription_ratio')
configuration = tpfo.FakeConfiguration(
None, 'CommonReplicationTests', interval=1, retries=1,
san_ip='1.1.1.1', san_login='smc', powermax_array=self.data.array,
powermax_srp='SRP_1', san_password='smc', san_api_port=8443,
powermax_port_groups=[self.data.port_group_name_f],
replication_device=self.replication_device)
- rest.PowerMaxRest._establish_rest_session = mock.Mock(
- return_value=tpfo.FakeRequestsSession())
+ self.mock_object(rest.PowerMaxRest, '_establish_rest_session',
+ return_value=tpfo.FakeRequestsSession())
driver = fc.PowerMaxFCDriver(configuration=configuration)
iscsi_config = tpfo.FakeConfiguration(
None, 'CommonReplicationTests', interval=1, retries=1,
@@ -66,9 +66,9 @@ class PowerMaxReplicationTest(test.TestCase):
self.provision = self.common.provision
self.rest = self.common.rest
self.utils = self.common.utils
- self.utils.get_volumetype_extra_specs = (
- mock.Mock(
- return_value=self.data.vol_type_extra_specs_rep_enabled))
+ self.mock_object(
+ self.utils, 'get_volumetype_extra_specs',
+ return_value=deepcopy(self.data.vol_type_extra_specs_rep_enabled))
self.extra_specs = deepcopy(self.data.extra_specs_rep_enabled)
self.extra_specs['retries'] = 1
self.extra_specs['interval'] = 1
@@ -370,8 +370,9 @@ class PowerMaxReplicationTest(test.TestCase):
@mock.patch.object(common.PowerMaxCommon,
'_update_volume_list_from_sync_vol_list',
return_value={'vol_updates'})
- @mock.patch.object(common.PowerMaxCommon, '_initial_setup',
- return_value=tpd.PowerMaxData.ex_specs_rep_config_sync)
+ @mock.patch.object(
+ common.PowerMaxCommon, '_initial_setup',
+ return_value=tpd.PowerMaxData.ex_specs_rep_config_sync.copy())
@mock.patch.object(common.PowerMaxCommon, 'failover_replication',
return_value=('grp_updates', {'grp_vol_updates'}))
def test_populate_volume_and_group_update_lists(
@@ -396,7 +397,7 @@ class PowerMaxReplicationTest(test.TestCase):
self.assertEqual(group_updates_ref, group_updates)
@mock.patch.object(common.PowerMaxCommon, '_initial_setup',
- return_value=tpd.PowerMaxData.extra_specs)
+ return_value=tpd.PowerMaxData.extra_specs.copy())
def test_populate_volume_and_group_update_lists_promotion_non_rep(
self, mck_setup):
volumes = [self.data.test_volume]
@@ -663,7 +664,7 @@ class PowerMaxReplicationTest(test.TestCase):
self.assertEqual(ref_vol_update, vols_model_update[0])
@mock.patch.object(common.PowerMaxCommon, '_initial_setup',
- return_value=tpd.PowerMaxData.extra_specs)
+ return_value=tpd.PowerMaxData.extra_specs.copy())
def test_populate_volume_and_group_update_lists_group_update_vol_list(
self, mck_setup):
volume = deepcopy(self.data.test_volume)
@@ -704,7 +705,10 @@ class PowerMaxReplicationTest(test.TestCase):
mck_validate.assert_called_once_with(
self.common.rep_configs, extra_specs_list)
- def test_enable_replication(self):
+ @mock.patch.object(common.PowerMaxCommon, '_find_volume_group',
+ side_effect=[tpd.PowerMaxData.test_group,
+ None])
+ def test_enable_replication(self, mock_vg):
# Case 1: Group not replicated
with mock.patch.object(volume_utils, 'is_group_a_type',
return_value=False):
@@ -729,7 +733,10 @@ class PowerMaxReplicationTest(test.TestCase):
self.assertEqual(fields.ReplicationStatus.ERROR,
model_update['replication_status'])
- def test_disable_replication(self):
+ @mock.patch.object(common.PowerMaxCommon, '_find_volume_group',
+ side_effect=[tpd.PowerMaxData.test_group,
+ None])
+ def test_disable_replication(self, mock_vg):
# Case 1: Group not replicated
with mock.patch.object(volume_utils, 'is_group_a_type',
return_value=False):
diff --git a/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_rest.py b/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_rest.py
index 90b2dd3dc..bf6ca99b8 100644
--- a/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_rest.py
+++ b/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_rest.py
@@ -36,19 +36,19 @@ class PowerMaxRestTest(test.TestCase):
def setUp(self):
self.data = tpd.PowerMaxData()
super(PowerMaxRestTest, self).setUp()
- volume_utils.get_max_over_subscription_ratio = mock.Mock()
+ self.mock_object(volume_utils, 'get_max_over_subscription_ratio')
configuration = tpfo.FakeConfiguration(
None, 'RestTests', 1, 1, san_ip='1.1.1.1', san_login='smc',
powermax_array=self.data.array, powermax_srp='SRP_1',
san_password='smc', san_api_port=8443,
powermax_port_groups=[self.data.port_group_name_i])
- rest.PowerMaxRest._establish_rest_session = mock.Mock(
- return_value=tpfo.FakeRequestsSession())
+ self.mock_object(rest.PowerMaxRest, '_establish_rest_session',
+ return_value=tpfo.FakeRequestsSession())
driver = fc.PowerMaxFCDriver(configuration=configuration)
self.driver = driver
self.common = self.driver.common
self.rest = self.common.rest
- self.rest.is_snap_id = True
+ self.mock_object(self.rest, 'is_snap_id', True)
self.utils = self.common.utils
def test_rest_request_no_response(self):
@@ -84,22 +84,21 @@ class PowerMaxRestTest(test.TestCase):
with mock.patch.object(self.rest.session, 'request',
side_effect=[requests.ConnectionError,
response]):
- self.rest.u4p_failover_enabled = True
+ self.mock_object(self.rest, 'u4p_failover_enabled', True)
self.rest.request('/fake_uri', 'GET')
mock_fail.assert_called_once()
@mock.patch.object(time, 'sleep')
def test_rest_request_failover_escape(self, mck_sleep):
- self.rest.u4p_failover_lock = True
+ self.mock_object(self.rest, 'u4p_failover_lock', True)
response = tpfo.FakeResponse(200, 'Success')
with mock.patch.object(self.rest, '_handle_u4p_failover')as mock_fail:
with mock.patch.object(self.rest.session, 'request',
side_effect=[requests.ConnectionError,
response]):
- self.rest.u4p_failover_enabled = True
+ self.mock_object(self.rest, 'u4p_failover_enabled', True)
self.rest.request('/fake_uri', 'GET')
mock_fail.assert_called_once()
- self.rest.u4p_failover_lock = False
def test_wait_for_job_complete(self):
rc, job, status, task = self.rest.wait_for_job_complete(
@@ -761,8 +760,9 @@ class PowerMaxRestTest(test.TestCase):
return_value=[])
def test_delete_volume(self, mock_sgs):
device_id = self.data.device_id
- self.rest.ucode_major_level = utils.UCODE_5978
- self.rest.ucode_minor_level = utils.UCODE_5978_HICKORY
+ self.mock_object(self.rest, 'ucode_major_level', utils.UCODE_5978)
+ self.mock_object(self.rest, 'ucode_minor_level',
+ utils.UCODE_5978_HICKORY)
with mock.patch.object(
self.rest, 'delete_resource') as mock_delete:
self.rest.delete_volume(self.data.array, device_id)
@@ -773,8 +773,9 @@ class PowerMaxRestTest(test.TestCase):
return_value=['OS-SG'])
def test_delete_volume_in_sg(self, mock_sgs):
device_id = self.data.device_id
- self.rest.ucode_major_level = utils.UCODE_5978
- self.rest.ucode_minor_level = utils.UCODE_5978_HICKORY
+ self.mock_object(self.rest, 'ucode_major_level', utils.UCODE_5978)
+ self.mock_object(self.rest, 'ucode_minor_level',
+ utils.UCODE_5978_HICKORY)
self.assertRaises(
exception.VolumeBackendAPIException,
self.rest.delete_volume, self.data.array, device_id)
@@ -1878,7 +1879,8 @@ class PowerMaxRestTest(test.TestCase):
self.rest.u4p_failover_targets[1]['san_ip'])
def test_handle_u4p_failover_with_targets(self):
- self.rest.u4p_failover_targets = self.data.u4p_failover_target
+ self.mock_object(self.rest, 'u4p_failover_targets',
+ self.data.u4p_failover_target)
self.rest._handle_u4p_failover()
self.assertTrue(self.rest.u4p_in_failover)
@@ -1889,7 +1891,7 @@ class PowerMaxRestTest(test.TestCase):
self.rest.base_uri)
def test_handle_u4p_failover_no_targets_exception(self):
- self.rest.u4p_failover_targets = []
+ self.mock_object(self.rest, 'u4p_failover_targets', [])
self.assertRaises(exception.VolumeBackendAPIException,
self.rest._handle_u4p_failover)
@@ -2112,7 +2114,7 @@ class PowerMaxRestTest(test.TestCase):
array_id = self.data.array
rdf_group_no = self.data.rdf_group_no_1
sg_name = self.data.default_sg_re_enabled
- rep_extra_specs = self.data.rep_extra_specs
+ rep_extra_specs = deepcopy(self.data.rep_extra_specs)
rep_extra_specs[utils.REP_CONFIG] = self.data.rep_config_async
rep_extra_specs[utils.REP_MODE] = utils.REP_ASYNC
@@ -2249,7 +2251,7 @@ class PowerMaxRestTest(test.TestCase):
mode = utils.REP_ASYNC
device_id = self.data.device_id
tgt_device_id = self.data.device_id2
- rep_extra_specs = self.data.rep_extra_specs
+ rep_extra_specs = deepcopy(self.data.rep_extra_specs)
rep_extra_specs['array'] = remote_array
ref_payload = {
@@ -2284,7 +2286,7 @@ class PowerMaxRestTest(test.TestCase):
mode = utils.REP_SYNC
device_id = self.data.device_id
tgt_device_id = self.data.device_id2
- rep_extra_specs = self.data.rep_extra_specs
+ rep_extra_specs = deepcopy(self.data.rep_extra_specs)
rep_extra_specs[utils.ARRAY] = remote_array
ref_payload = {
@@ -2320,7 +2322,7 @@ class PowerMaxRestTest(test.TestCase):
rdf_group_no = self.data.rdf_group_no_1
mode = utils.REP_ASYNC
device_id = self.data.device_id
- rep_extra_specs = self.data.rep_extra_specs
+ rep_extra_specs = deepcopy(self.data.rep_extra_specs)
rep_extra_specs['array'] = remote_array
self.assertRaises(exception.VolumeBackendAPIException,
@@ -2536,9 +2538,75 @@ class PowerMaxRestTest(test.TestCase):
[{'snap_name': 'generation_string',
'generation': '0'}]])
def test_get_snap_id_legacy_generation(self, mock_snaps):
- self.rest.is_snap_id = False
+ self.mock_object(self.rest, 'is_snap_id', False)
for x in range(0, 2):
snap_id = self.rest.get_snap_id(
self.data.array, self.data.device_id,
self.data.test_snapshot_snap_name)
self.assertEqual('0', snap_id)
+
+ @mock.patch.object(
+ rest.PowerMaxRest, 'get_storage_group_list')
+ @mock.patch.object(
+ rest.PowerMaxRest, 'get_storage_group_rep',
+ side_effect=[{'rdf': False}, None])
+ def test_get_or_rename_storage_group_rep(
+ self, mock_sg_rep, mock_sg_list):
+ # Success - no need for rename
+ rep_info = self.rest.get_or_rename_storage_group_rep(
+ self.data.array, self.data.storagegroup_name_f,
+ self.data.extra_specs)
+ mock_sg_list.assert_not_called()
+ self.assertIsNotNone(rep_info)
+
+ # Fail - cannot find sg but no filter set
+ rep_info = self.rest.get_or_rename_storage_group_rep(
+ self.data.array, self.data.storagegroup_name_f,
+ self.data.extra_specs)
+ mock_sg_list.assert_not_called()
+ self.assertIsNone(rep_info)
+
+ @mock.patch.object(
+ rest.PowerMaxRest, '_rename_storage_group')
+ @mock.patch.object(
+ rest.PowerMaxRest, 'get_storage_group_list',
+ return_value=({'storageGroupId': ['user-name+uuid']}))
+ @mock.patch.object(
+ rest.PowerMaxRest, 'get_storage_group_rep',
+ side_effect=[None, ({'rdf': False}), ({'rdf': False})])
+ def test_get_or_rename_storage_group_rep_exists(
+ self, mock_sg_rep, mock_sg_list, mock_rename):
+ sg_filter = '<like>uuid'
+ rep_info = self.rest.get_or_rename_storage_group_rep(
+ self.data.array, self.data.storagegroup_name_f,
+ self.data.extra_specs, sg_filter=sg_filter)
+ mock_sg_list.assert_called_once_with(
+ self.data.array,
+ params={'storageGroupId': sg_filter})
+ group_list_return = {'storageGroupId': ['user-name+uuid']}
+ mock_rename.assert_called_once_with(
+ self.data.array,
+ group_list_return['storageGroupId'][0],
+ self.data.storagegroup_name_f,
+ self.data.extra_specs)
+ self.assertIsNotNone(rep_info)
+
+ @mock.patch.object(
+ rest.PowerMaxRest, '_rename_storage_group')
+ @mock.patch.object(
+ rest.PowerMaxRest, 'get_storage_group_list',
+ return_value=({'storageGroupId': ['user-name+uuid']}))
+ @mock.patch.object(
+ rest.PowerMaxRest, 'get_storage_group_rep',
+ side_effect=[None, None])
+ def test_get_or_rename_storage_group_rep_does_not_exist(
+ self, mock_sg_rep, mock_sg_list, mock_rename):
+ sg_filter = '<like>uuid'
+ rep_info = self.rest.get_or_rename_storage_group_rep(
+ self.data.array, self.data.storagegroup_name_f,
+ self.data.extra_specs, sg_filter=sg_filter)
+ mock_sg_list.assert_called_once_with(
+ self.data.array,
+ params={'storageGroupId': sg_filter})
+ mock_rename.assert_not_called()
+ self.assertIsNone(rep_info)
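[Editor's note] The new tests above lean on side_effect lists: a mock configured with a list returns its entries on successive calls, so one test body can exercise the "replication info found" and "not found" branches in order. A minimal sketch of that behaviour (generic mock usage, not the PowerMax API):

    from unittest import mock

    get_rep = mock.Mock(side_effect=[{'rdf': False}, None])

    assert get_rep('array-1', 'sg-1') == {'rdf': False}   # first call
    assert get_rep('array-1', 'sg-1') is None             # second call
    # A third call would raise StopIteration: the side_effect list is exhausted.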
diff --git a/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_utils.py b/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_utils.py
index f1cff7afd..bb6c7d60b 100644
--- a/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_utils.py
+++ b/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_utils.py
@@ -39,7 +39,7 @@ from cinder.volume import volume_utils
class PowerMaxUtilsTest(test.TestCase):
def setUp(self):
self.data = tpd.PowerMaxData()
- volume_utils.get_max_over_subscription_ratio = mock.Mock()
+ self.mock_object(volume_utils, 'get_max_over_subscription_ratio')
super(PowerMaxUtilsTest, self).setUp()
self.replication_device = self.data.sync_rep_device
configuration = tpfo.FakeConfiguration(
@@ -48,8 +48,8 @@ class PowerMaxUtilsTest(test.TestCase):
san_password='smc', san_api_port=8443,
powermax_port_groups=[self.data.port_group_name_i],
replication_device=self.replication_device)
- rest.PowerMaxRest._establish_rest_session = mock.Mock(
- return_value=tpfo.FakeRequestsSession())
+ self.mock_object(rest.PowerMaxRest, '_establish_rest_session',
+ return_value=tpfo.FakeRequestsSession())
driver = iscsi.PowerMaxISCSIDriver(configuration=configuration)
self.driver = driver
self.common = self.driver.common
@@ -59,17 +59,18 @@ class PowerMaxUtilsTest(test.TestCase):
with mock.patch.object(volume_types, 'get_volume_type_extra_specs',
return_value={'specs'}) as type_mock:
# path 1: volume_type_id not passed in
- self.data.test_volume.volume_type_id = (
- self.data.test_volume_type.id)
- self.utils.get_volumetype_extra_specs(self.data.test_volume)
+ volume = deepcopy(self.data.test_volume)
+ volume.volume_type_id = self.data.test_volume_type.id
+ self.utils.get_volumetype_extra_specs(volume)
type_mock.assert_called_once_with(self.data.test_volume_type.id)
type_mock.reset_mock()
# path 2: volume_type_id passed in
- self.utils.get_volumetype_extra_specs(self.data.test_volume, '123')
+ self.utils.get_volumetype_extra_specs(volume, '123')
type_mock.assert_called_once_with('123')
type_mock.reset_mock()
# path 3: no type_id
- self.utils.get_volumetype_extra_specs(self.data.test_clone_volume)
+ volume = deepcopy(self.data.test_clone_volume)
+ self.utils.get_volumetype_extra_specs(volume)
type_mock.assert_not_called()
def test_get_volumetype_extra_specs_exception(self):
@@ -1265,7 +1266,7 @@ class PowerMaxUtilsTest(test.TestCase):
def test_is_retype_supported(self):
# Volume source type not replicated, target type Metro replicated,
# volume is detached, host-assisted retype supported
- volume = self.data.test_volume
+ volume = deepcopy(self.data.test_volume)
volume.attach_status = 'detached'
src_extra_specs = deepcopy(self.data.extra_specs)
@@ -1620,7 +1621,7 @@ class PowerMaxUtilsTest(test.TestCase):
self.assertEqual(expected_msg, e.msg)
def test_get_migration_delete_extra_specs_replicated(self):
- volume = self.data.test_volume
+ volume = deepcopy(self.data.test_volume)
metadata = deepcopy(self.data.volume_metadata)
metadata[utils.IS_RE_CAMEL] = 'True'
metadata['ReplicationMode'] = utils.REP_SYNC
@@ -1639,7 +1640,7 @@ class PowerMaxUtilsTest(test.TestCase):
self.assertEqual(ref_extra_specs, updated_extra_specs)
def test_get_migration_delete_extra_specs_non_replicated(self):
- volume = self.data.test_volume
+ volume = deepcopy(self.data.test_volume)
volume.metadata = self.data.volume_metadata
extra_specs = deepcopy(self.data.extra_specs)
extra_specs[utils.IS_RE] = True
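[Editor's note] The recurring change in this file replaces direct references to the shared tpd.PowerMaxData fixtures with deepcopy() before mutation, so one test's edits cannot leak into another. A small sketch of the hazard being avoided (generic Python, illustrative values):

    from copy import deepcopy

    shared_specs = {'array': 'local-array', 'rep_mode': 'Synchronous'}  # shared fixture

    local_specs = deepcopy(shared_specs)
    local_specs['array'] = 'remote-array'            # only the per-test copy changes

    assert shared_specs['array'] == 'local-array'    # fixture stays pristine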
diff --git a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_fc.py b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_fc.py
index db3db2ef4..de4e24a65 100644
--- a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_fc.py
+++ b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_fc.py
@@ -52,7 +52,7 @@ CONFIG_MAP = {
'port_id': 'CL1-A',
'host_grp_name': 'HBSD-0123456789abcdef',
'host_mode': 'LINUX/IRIX',
- 'host_wwn': '0123456789abcdef',
+ 'host_wwn': ['0123456789abcdef', '0123456789abcdeg'],
'target_wwn': '1111111123456789',
'user_id': 'user',
'user_pass': 'password',
@@ -64,21 +64,28 @@ CONFIG_MAP = {
# Dummy response for FC zoning device mapping
DEVICE_MAP = {
'fabric_name': {
- 'initiator_port_wwn_list': [CONFIG_MAP['host_wwn']],
+ 'initiator_port_wwn_list': [CONFIG_MAP['host_wwn'][0]],
'target_port_wwn_list': [CONFIG_MAP['target_wwn']]}}
DEFAULT_CONNECTOR = {
'host': 'host',
'ip': CONFIG_MAP['my_ip'],
- 'wwpns': [CONFIG_MAP['host_wwn']],
+ 'wwpns': [CONFIG_MAP['host_wwn'][0]],
'multipath': False,
}
-DEFAULT_CONNECTOR_AIX = {
- 'os_type': 'aix',
+DEVICE_MAP_MULTI_WWN = {
+ 'fabric_name': {
+ 'initiator_port_wwn_list': [
+ CONFIG_MAP['host_wwn'][0],
+ CONFIG_MAP['host_wwn'][1]
+ ],
+ 'target_port_wwn_list': [CONFIG_MAP['target_wwn']]}}
+
+DEFAULT_CONNECTOR_MULTI_WWN = {
'host': 'host',
'ip': CONFIG_MAP['my_ip'],
- 'wwpns': [CONFIG_MAP['host_wwn']],
+ 'wwpns': [CONFIG_MAP['host_wwn'][0], CONFIG_MAP['host_wwn'][1]],
'multipath': False,
}
@@ -170,7 +177,7 @@ GET_HOST_WWNS_RESULT = {
"data": [
{
"hostGroupNumber": 0,
- "hostWwn": CONFIG_MAP['host_wwn'],
+ "hostWwn": CONFIG_MAP['host_wwn'][0],
},
],
}
@@ -329,10 +336,10 @@ def _brick_get_connector_properties(multipath=False, enforce_multipath=False):
return DEFAULT_CONNECTOR
-def _brick_get_connector_properties_aix(
+def _brick_get_connector_properties_multi_wwn(
multipath=False, enforce_multipath=False):
"""Return a predefined connector object."""
- return DEFAULT_CONNECTOR_AIX
+ return DEFAULT_CONNECTOR_MULTI_WWN
def reduce_retrying_time(func):
@@ -382,6 +389,14 @@ class FakeLookupService():
return DEVICE_MAP
+class FakeLookupServiceMultiWwn():
+ """Dummy FC zoning mapping lookup service class."""
+
+ def get_device_mapping_from_network(self, initiator_wwns, target_wwns):
+ """Return predefined FC zoning mapping."""
+ return DEVICE_MAP_MULTI_WWN
+
+
class FakeResponse():
def __init__(self, status_code, data=None, headers=None):
@@ -449,6 +464,7 @@ class HBSDRESTFCDriverTest(test.TestCase):
self.configuration.hitachi_copy_speed = 3
self.configuration.hitachi_copy_check_interval = 3
self.configuration.hitachi_async_copy_check_interval = 10
+ self.configuration.hitachi_port_scheduler = False
self.configuration.san_login = CONFIG_MAP['user_id']
self.configuration.san_password = CONFIG_MAP['user_pass']
@@ -599,13 +615,16 @@ class HBSDRESTFCDriverTest(test.TestCase):
@mock.patch.object(requests.Session, "request")
@mock.patch.object(
volume_utils, 'brick_get_connector_properties',
- side_effect=_brick_get_connector_properties_aix)
- def test_do_setup_create_hg_aix(
+ side_effect=_brick_get_connector_properties_multi_wwn)
+ def test_do_setup_create_hg_port_scheduler(
self, brick_get_connector_properties, request):
- """Normal case: The host group not exists in AIX."""
+        """Normal case: The host group does not exist when the port scheduler is enabled."""
drv = hbsd_fc.HBSDFCDriver(
configuration=self.configuration)
self._setup_config()
+ self.configuration.hitachi_port_scheduler = True
+ self.configuration.hitachi_zoning_request = True
+ drv.common._lookup_service = FakeLookupServiceMultiWwn()
request.side_effect = [FakeResponse(200, POST_SESSIONS_RESULT),
FakeResponse(200, GET_PORTS_RESULT),
FakeResponse(200, NOTFOUND_RESULT),
@@ -613,15 +632,14 @@ class HBSDRESTFCDriverTest(test.TestCase):
FakeResponse(200, NOTFOUND_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
+ FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
drv.do_setup(None)
self.assertEqual(
{CONFIG_MAP['port_id']: CONFIG_MAP['target_wwn']},
drv.common.storage_info['wwns'])
self.assertEqual(1, brick_get_connector_properties.call_count)
- self.assertEqual(8, request.call_count)
- kargs1 = request.call_args_list[6][1]
- self.assertEqual('AIX', kargs1['json']['hostMode'])
+ self.assertEqual(9, request.call_count)
# stop the Loopingcall within the do_setup treatment
self.driver.common.client.keep_session_loop.stop()
self.driver.common.client.keep_session_loop.wait()
@@ -1180,6 +1198,7 @@ class HBSDRESTFCDriverTest(test.TestCase):
_get_oslo_driver_opts.return_value = []
ret = self.driver.get_driver_options()
actual = (hbsd_common.COMMON_VOLUME_OPTS +
+ hbsd_common.COMMON_PORT_OPTS +
hbsd_rest.REST_VOLUME_OPTS +
hbsd_rest_fc.FC_VOLUME_OPTS)
self.assertEqual(actual, ret)
diff --git a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_iscsi.py b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_iscsi.py
index dde0ef939..f1ce79a1a 100644
--- a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_iscsi.py
+++ b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_iscsi.py
@@ -64,14 +64,6 @@ DEFAULT_CONNECTOR = {
'multipath': False,
}
-DEFAULT_CONNECTOR_AIX = {
- 'os_type': 'aix',
- 'host': 'host',
- 'ip': CONFIG_MAP['my_ip'],
- 'initiator': CONFIG_MAP['host_iscsi_name'],
- 'multipath': False,
-}
-
CTXT = cinder_context.get_admin_context()
TEST_VOLUME = []
@@ -274,12 +266,6 @@ def _brick_get_connector_properties(multipath=False, enforce_multipath=False):
return DEFAULT_CONNECTOR
-def _brick_get_connector_properties_aix(
- multipath=False, enforce_multipath=False):
- """Return a predefined connector object."""
- return DEFAULT_CONNECTOR_AIX
-
-
class FakeResponse():
def __init__(self, status_code, data=None, headers=None):
@@ -347,6 +333,7 @@ class HBSDRESTISCSIDriverTest(test.TestCase):
self.configuration.hitachi_copy_speed = 3
self.configuration.hitachi_copy_check_interval = 3
self.configuration.hitachi_async_copy_check_interval = 10
+ self.configuration.hitachi_port_scheduler = False
self.configuration.san_login = CONFIG_MAP['user_id']
self.configuration.san_password = CONFIG_MAP['user_pass']
@@ -503,39 +490,6 @@ class HBSDRESTISCSIDriverTest(test.TestCase):
self.driver.common.client.keep_session_loop.wait()
@mock.patch.object(requests.Session, "request")
- @mock.patch.object(
- volume_utils, 'brick_get_connector_properties',
- side_effect=_brick_get_connector_properties_aix)
- def test_do_setup_create_hg_aix(
- self, brick_get_connector_properties, request):
- """Normal case: The host group not exists in AIX."""
- drv = hbsd_iscsi.HBSDISCSIDriver(
- configuration=self.configuration)
- self._setup_config()
- request.side_effect = [FakeResponse(200, POST_SESSIONS_RESULT),
- FakeResponse(200, GET_PORTS_RESULT),
- FakeResponse(200, GET_PORT_RESULT),
- FakeResponse(200, NOTFOUND_RESULT),
- FakeResponse(200, NOTFOUND_RESULT),
- FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
- FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
- FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
- drv.do_setup(None)
- self.assertEqual(
- {CONFIG_MAP['port_id']:
- '%(ip)s:%(port)s' % {
- 'ip': CONFIG_MAP['ipv4Address'],
- 'port': CONFIG_MAP['tcpPort']}},
- drv.common.storage_info['portals'])
- self.assertEqual(1, brick_get_connector_properties.call_count)
- self.assertEqual(8, request.call_count)
- kargs1 = request.call_args_list[6][1]
- self.assertEqual('AIX', kargs1['json']['hostMode'])
- # stop the Loopingcall within the do_setup treatment
- self.driver.common.client.keep_session_loop.stop()
- self.driver.common.client.keep_session_loop.wait()
-
- @mock.patch.object(requests.Session, "request")
def test_extend_volume(self, request):
request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
FakeResponse(200, GET_LDEV_RESULT),
diff --git a/cinder/tests/unit/volume/drivers/hpe/test_hpe3par.py b/cinder/tests/unit/volume/drivers/hpe/test_hpe3par.py
index bf48ce037..d19824208 100644
--- a/cinder/tests/unit/volume/drivers/hpe/test_hpe3par.py
+++ b/cinder/tests/unit/volume/drivers/hpe/test_hpe3par.py
@@ -26,6 +26,7 @@ from oslo_utils import uuidutils
from cinder import context
from cinder import exception
from cinder.objects import fields
+from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import fake_volume
from cinder.tests.unit import test
from cinder.tests.unit.volume.drivers.hpe \
@@ -320,6 +321,14 @@ class HPE3PARBaseDriver(test.TestCase):
'display_description': 'description',
'volume_name': 'name'}
+ snapshot_obj = fake_snapshot.fake_snapshot_obj(
+ context.get_admin_context(),
+ name=SNAPSHOT_NAME,
+ id=SNAPSHOT_ID,
+ display_name='Foo Snapshot',
+ volume_size=2,
+ volume_id=VOLUME_ID_SNAP)
+
wwn = ["123456789012345", "123456789054321"]
connector = {'ip': '10.0.0.2',
@@ -4844,6 +4853,113 @@ class TestHPE3PARDriverBase(HPE3PARBaseDriver):
self.driver.unmanage_snapshot,
snapshot=snapshot)
+ def _test_get_manageable(self, cinder_list, expected_output, vol_name,
+ attached=False, snap_name=None):
+ # common test function for:
+ # [a] get_manageable_volumes
+ # [b] get_manageable_snapshots
+
+ mock_client = self.setup_driver()
+
+ mock_client.getVolumes.return_value = {
+ 'members': [
+ {'name': vol_name,
+ 'sizeMiB': 2048,
+ 'userCPG': 'OpenStackCPG'}]}
+
+ if attached:
+ mock_client.getVLUN.return_value = {
+ 'hostname': 'cssosbe02-b04',
+ }
+ else:
+ mock_client.getVLUN.side_effect = hpeexceptions.HTTPNotFound
+
+ if snap_name:
+ mock_client.getSnapshotsOfVolume.return_value = [snap_name]
+
+ with mock.patch.object(hpecommon.HPE3PARCommon,
+ '_create_client') as mock_create_client:
+ mock_create_client.return_value = mock_client
+
+ common = self.driver._login()
+ if snap_name:
+ actual_output = common.get_manageable_snapshots(
+ cinder_list, None, 1000, 0, ['size'], ['asc'])
+ else:
+ actual_output = self.driver.get_manageable_volumes(
+ cinder_list, None, 1000, 0, ['size'], ['asc'])
+
+ expected_calls = []
+ expected_calls.append(mock.call.getVolumes())
+ if attached:
+ expected_calls.append(mock.call.getVLUN(vol_name))
+ if snap_name:
+ expected_calls.append(
+ mock.call.getSnapshotsOfVolume('OpenStackCPG', vol_name))
+
+ mock_client.assert_has_calls(expected_calls)
+ self.assertEqual(expected_output, actual_output)
+
+ # (i) volume already managed
+ # (ii) volume currently not managed; but attached to some other host
+ # (iii) volume currently not managed
+ @ddt.data({'cinder_vol': [HPE3PARBaseDriver.volume],
+ 'vol_name': 'osv-0DM4qZEVSKON-DXN-NwVpw',
+ 'safe': False,
+ 'reason': 'Volume already managed',
+ 'cinder_id': 'd03338a9-9115-48a3-8dfc-35cdfcdc15a7'},
+ {'cinder_vol': [],
+ 'vol_name': 'volume_2',
+ 'safe': False,
+ 'reason': 'Volume attached to host cssosbe02-b04',
+ 'cinder_id': None,
+ 'attached': True},
+ {'cinder_vol': [],
+ 'vol_name': 'volume_2',
+ 'safe': True,
+ 'reason': None,
+ 'cinder_id': None})
+ @ddt.unpack
+ def test_get_manageable_volumes(self, cinder_vol, vol_name, safe, reason,
+ cinder_id, attached=False):
+ expected_output = [
+ {'reference': {'name': vol_name},
+ 'size': 2,
+ 'safe_to_manage': safe,
+ 'reason_not_safe': reason,
+ 'cinder_id': cinder_id}
+ ]
+ self._test_get_manageable(cinder_vol, expected_output, vol_name,
+ attached)
+
+ # (i) snapshot already managed
+ # (ii) snapshot currently not managed
+ @ddt.data({'cinder_snapshot': [HPE3PARBaseDriver.snapshot_obj],
+ 'snap_name': 'oss-L4I73ONuTci9Fd4ceij-MQ',
+ 'vol_name': 'osv-CX7Ilh.dQ2.XdNpmqW408A',
+ 'safe': False,
+ 'reason': 'Snapshot already managed',
+ 'cinder_id': '2f823bdc-e36e-4dc8-bd15-de1c7a28ff31'},
+ {'cinder_snapshot': [],
+ 'snap_name': 'snap_2',
+ 'vol_name': 'volume_2',
+ 'safe': True,
+ 'reason': None,
+ 'cinder_id': None})
+ @ddt.unpack
+ def test_get_manageable_snapshots(self, cinder_snapshot, snap_name,
+ vol_name, safe, reason, cinder_id):
+ expected_output = [
+ {'reference': {'name': snap_name},
+ 'size': 2,
+ 'safe_to_manage': safe,
+ 'reason_not_safe': reason,
+ 'cinder_id': cinder_id,
+ 'source_reference': {'name': vol_name}}
+ ]
+ self._test_get_manageable(cinder_snapshot, expected_output, vol_name,
+ False, snap_name)
+
@ddt.data(True, False)
def test__safe_hostname(self, in_shared):
config = self._set_unique_fqdn_override(True, in_shared)
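[Editor's note] The manageable-volume and manageable-snapshot tests above use ddt to run one test body against several input dictionaries. A minimal self-contained sketch of that pattern (not tied to the 3PAR driver):

    import unittest

    import ddt


    @ddt.ddt
    class ExampleTest(unittest.TestCase):

        @ddt.data({'name': 'vol-1', 'safe': True},
                  {'name': 'vol-2', 'safe': False})
        @ddt.unpack
        def test_safety_flag(self, name, safe):
            # Each dict above becomes one generated test case, with its keys
            # unpacked into keyword arguments.
            self.assertIsInstance(name, str)
            self.assertIn(safe, (True, False))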
@@ -5795,7 +5911,7 @@ class TestHPE3PARDriverBase(HPE3PARBaseDriver):
typ_info.return_value = type_info
source_volume = self.volume_src_cg
- volume = self.volume_tiramisu
+ volume = self.volume_tiramisu.copy()
volume['source_volid'] = source_volume['id']
common = hpecommon.HPE3PARCommon(None)
vol_name = common._get_3par_vol_name(volume.get('id'))
@@ -7161,8 +7277,8 @@ class TestHPE3PARFCDriver(HPE3PARBaseDriver):
mock_create_client.return_value = mock_client
mock_replication_client.return_value = mock_replicated_client
- volume = self.volume
- volume['replication_status'] = 'enabled'
+ volume = copy.deepcopy(self.volume)
+ volume.replication_status = 'enabled'
result = self.driver.initialize_connection(
volume,
@@ -7521,8 +7637,8 @@ class TestHPE3PARFCDriver(HPE3PARBaseDriver):
mock.call.getHostVLUNs(self.FAKE_HOST),
mock.call.getPorts()]
- volume = self.volume
- volume['replication_status'] = 'enabled'
+ volume = copy.deepcopy(self.volume)
+ volume.replication_status = 'enabled'
with mock.patch.object(
hpecommon.HPE3PARCommon,
@@ -8484,8 +8600,8 @@ class TestHPE3PARISCSIDriver(HPE3PARBaseDriver):
with mock.patch.object(hpecommon.HPE3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
- volume = self.volume
- volume['replication_status'] = 'disabled'
+ volume = copy.deepcopy(self.volume)
+ volume.replication_status = 'disabled'
result = self.driver.initialize_connection(
volume,
self.connector_multipath_enabled)
@@ -8546,8 +8662,8 @@ class TestHPE3PARISCSIDriver(HPE3PARBaseDriver):
with mock.patch.object(hpecommon.HPE3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
- volume = self.volume
- volume['replication_status'] = 'disabled'
+ volume = copy.deepcopy(self.volume)
+ volume.replication_status = 'disabled'
result = self.driver.initialize_connection(
volume,
self.connector_multipath_enabled)
@@ -8620,9 +8736,10 @@ class TestHPE3PARISCSIDriver(HPE3PARBaseDriver):
expected +
self.standard_logout)
- expected_properties = self.properties
+ expected_properties = self.properties.copy()
+ expected_properties['data'] = self.properties['data'].copy()
expected_properties['data']['encrypted'] = True
- self.assertDictEqual(self.properties, result)
+ self.assertDictEqual(expected_properties, result)
@mock.patch.object(volume_types, 'get_volume_type')
def test_initialize_connection_peer_persistence(self, _mock_volume_types):
@@ -8697,8 +8814,8 @@ class TestHPE3PARISCSIDriver(HPE3PARBaseDriver):
mock_create_client.return_value = mock_client
mock_replication_client.return_value = mock_replicated_client
- volume = self.volume
- volume['replication_status'] = 'enabled'
+ volume = copy.deepcopy(self.volume)
+ volume.replication_status = 'enabled'
result = self.driver.initialize_connection(
volume,
@@ -10549,8 +10666,8 @@ class TestHPE3PARISCSIDriver(HPE3PARBaseDriver):
}]
}
- volume = self.volume
- volume['replication_status'] = 'enabled'
+ volume = copy.deepcopy(self.volume)
+ volume.replication_status = 'enabled'
with mock.patch.object(
hpecommon.HPE3PARCommon,
diff --git a/cinder/tests/unit/volume/drivers/ibm/test_xiv_proxy.py b/cinder/tests/unit/volume/drivers/ibm/test_xiv_proxy.py
index ae355b647..7f9ccb0a1 100644
--- a/cinder/tests/unit/volume/drivers/ibm/test_xiv_proxy.py
+++ b/cinder/tests/unit/volume/drivers/ibm/test_xiv_proxy.py
@@ -2610,7 +2610,7 @@ class XIVProxyTest(test.TestCase):
test_mock.cinder.exception,
driver)
- xiv_replication.VolumeReplication = mock.MagicMock()
+ self.mock_object(xiv_replication, 'VolumeReplication')
grp = testutils.create_group(self.ctxt, name='bla', group_type_id='1')
volume = testutils.create_volume(self.ctxt, display_name='bla',
volume_type_id=self.vt['id'])
diff --git a/cinder/tests/unit/volume/drivers/inspur/as13000/test_as13000_driver.py b/cinder/tests/unit/volume/drivers/inspur/as13000/test_as13000_driver.py
index 5c96cd287..90cd4ed8e 100644
--- a/cinder/tests/unit/volume/drivers/inspur/as13000/test_as13000_driver.py
+++ b/cinder/tests/unit/volume/drivers/inspur/as13000/test_as13000_driver.py
@@ -685,8 +685,10 @@ class AS13000DriverTestCase(test.TestCase):
self.as13000_san.delete_snapshot, snapshot)
mock_cv.assert_called_once_with(snapshot.volume)
- @ddt.data((time.time() - 3000), (time.time() - 4000))
- def test__update_volume_stats(self, time_token):
+ @mock.patch('time.time')
+ @ddt.data(2000, 1000)
+ def test__update_volume_stats(self, time_token, mock_time):
+ mock_time.return_value = 5000
self.as13000_san.VENDOR = 'INSPUR'
self.as13000_san.VERSION = 'V1.3.1'
self.as13000_san.PROTOCOL = 'iSCSI'
@@ -705,7 +707,7 @@ class AS13000DriverTestCase(test.TestCase):
self.as13000_san._update_volume_stats()
backend_data = {'driver_version': 'V1.3.1',
- 'pools': [{'pool_name': 'fake_pool'}],
+ 'pools': fake_pool_backend,
'storage_protocol': 'iSCSI',
'vendor_name': 'INSPUR',
'volume_backend_name': 'fake_backend_name'}
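[Editor's note] Mocking time.time() (instead of computing offsets from the real clock, as the removed ddt data did) makes the stats-cache age check deterministic. A small generic sketch of the pattern, with illustrative values:

    import time
    from unittest import mock


    def is_stale(time_token, max_age=60):
        # True when the cached entry is older than max_age seconds.
        return (time.time() - time_token) > max_age


    with mock.patch('time.time', return_value=5000):
        assert is_stale(1000)        # 4000 seconds old: stale
        assert not is_stale(4990)    # 10 seconds old: still fresh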
diff --git a/cinder/tests/unit/volume/drivers/lightos/test_lightos_storage.py b/cinder/tests/unit/volume/drivers/lightos/test_lightos_storage.py
index 352aabb58..35fddb1bb 100644
--- a/cinder/tests/unit/volume/drivers/lightos/test_lightos_storage.py
+++ b/cinder/tests/unit/volume/drivers/lightos/test_lightos_storage.py
@@ -44,6 +44,17 @@ FAKE_LIGHTOS_CLUSTER_NODES: Dict[str, List] = {
"nvmeEndpoint": "192.168.75.12:4420"}
]
}
+IPV6_LIST = ['::192:168:75:10', '::192:168:75:11', '::192:168:75:12']
+FAKE_LIGHTOS_CLUSTER_NODES_IPV6: Dict[str, List] = {
+ "nodes": [
+ {"UUID": "926e6df8-73e1-11ec-a624-000000000001",
+ "nvmeEndpoint": "[{}]:4420".format(IPV6_LIST[0])},
+ {"UUID": "926e6df8-73e1-11ec-a624-000000000002",
+ "nvmeEndpoint": "[{}]:4420".format(IPV6_LIST[1])},
+ {"UUID": "926e6df8-73e1-11ec-a624-000000000003",
+ "nvmeEndpoint": "[{}]:4420".format(IPV6_LIST[2])}
+ ]
+}
FAKE_LIGHTOS_CLUSTER_INFO: Dict[str, str] = {
'UUID': "926e6df8-73e1-11ec-a624-07ba3880f6cc",
@@ -399,7 +410,7 @@ class LightOSStorageVolumeDriverTest(test.TestCase):
self.driver.delete_volume(volume)
db.volume_destroy(self.ctxt, volume.id)
- def test_create_volume_in_failed_state(self):
+ def _create_volume_in_failed_state(self, vol_state):
"""Verify scenario of created volume in failed state:
Driver is expected to issue a deletion command and raise exception
@@ -415,25 +426,16 @@ class LightOSStorageVolumeDriverTest(test.TestCase):
"compression": kwargs["compression"],
"src_snapshot_name": kwargs["src_snapshot_name"],
"acl": {'values': kwargs.get('acl')},
- "state": "Failed",
+ "state": vol_state,
}
volume["ETag"] = get_vol_etag(volume)
code, new_vol = self.db.create_volume(volume)
return (code, new_vol)
- elif cmd == "delete_volume":
- return self.db.delete_volume(kwargs["project_name"],
- kwargs["volume_uuid"])
- elif cmd == "get_volume":
- return self.db.get_volume_by_uuid(kwargs["project_name"],
- kwargs["volume_uuid"])
- elif cmd == "get_volume_by_name":
- return self.db.get_volume_by_name(kwargs["project_name"],
- kwargs["volume_name"])
else:
- raise RuntimeError(
- f"'{cmd}' is not implemented. kwargs: {kwargs}")
+ return cluster_send_cmd(cmd, **kwargs)
self.driver.do_setup(None)
+ cluster_send_cmd = deepcopy(self.driver.cluster.send_cmd)
self.driver.cluster.send_cmd = send_cmd_mock
vol_type = test_utils.create_volume_type(self.ctxt, self,
name='my_vol_type')
@@ -446,6 +448,48 @@ class LightOSStorageVolumeDriverTest(test.TestCase):
self.assertEqual(0, len(actual_volumes))
db.volume_destroy(self.ctxt, volume.id)
+ def test_create_volume_in_failed_state(self):
+ self._create_volume_in_failed_state("Failed")
+
+ def test_create_volume_in_rollback_state(self):
+ self._create_volume_in_failed_state("Rollback")
+
+ def test_create_volume_in_migrating_state_succeed(self):
+ """Verify scenario of created volume in migrating state:
+
+ Driver is expected to succeed.
+ """
+ def send_cmd_mock(cmd, **kwargs):
+ if cmd == "create_volume":
+ project_name = kwargs["project_name"]
+ volume = {
+ "project_name": project_name,
+ "name": kwargs["name"],
+ "size": kwargs["size"],
+ "n_replicas": kwargs["n_replicas"],
+ "compression": kwargs["compression"],
+ "src_snapshot_name": kwargs["src_snapshot_name"],
+ "acl": {'values': kwargs.get('acl')},
+ "state": "Migrating",
+ }
+ volume["ETag"] = get_vol_etag(volume)
+ code, new_vol = self.db.create_volume(volume)
+ return (code, new_vol)
+ else:
+ return cluster_send_cmd(cmd, **kwargs)
+ self.driver.do_setup(None)
+ cluster_send_cmd = deepcopy(self.driver.cluster.send_cmd)
+ self.driver.cluster.send_cmd = send_cmd_mock
+ vol_type = test_utils.create_volume_type(self.ctxt, self,
+ name='my_vol_type')
+ volume = test_utils.create_volume(self.ctxt, size=4,
+ volume_type_id=vol_type.id)
+ self.driver.create_volume(volume)
+ proj = self.db.data["projects"][lightos.LIGHTOS_DEFAULT_PROJECT_NAME]
+ actual_volumes = proj["volumes"]
+ self.assertEqual(1, len(actual_volumes))
+ db.volume_destroy(self.ctxt, volume.id)
+
def test_delete_volume_fail_if_not_created(self):
"""Test that lightos_client fail creating an already exists volume."""
self.driver.do_setup(None)
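[Editor's note] The new LightOS tests stub send_cmd for a single command and delegate every other command to the original callable captured beforehand, instead of re-implementing the whole fake protocol. A self-contained sketch of that wrap-and-delegate pattern (the cluster object here is a stand-in, not the LightOS client):

    class FakeCluster:
        def send_cmd(self, cmd, **kwargs):
            return 200, {'cmd': cmd}


    cluster = FakeCluster()
    real_send_cmd = cluster.send_cmd              # keep the bound original

    def send_cmd_stub(cmd, **kwargs):
        if cmd == 'create_volume':
            return 200, {'state': 'Migrating'}    # force the state under test
        return real_send_cmd(cmd, **kwargs)       # everything else passes through

    cluster.send_cmd = send_cmd_stub
    assert cluster.send_cmd('create_volume')[1]['state'] == 'Migrating'
    assert cluster.send_cmd('get_volume')[1]['cmd'] == 'get_volume'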
@@ -617,6 +661,82 @@ class LightOSStorageVolumeDriverTest(test.TestCase):
self.driver.delete_volume(volume)
db.volume_destroy(self.ctxt, volume.id)
+    def test_initialize_connection_migrating_volume(self):
+ InitialConnectorMock.nqn = "hostnqn1"
+ InitialConnectorMock.found_discovery_client = True
+
+ def send_cmd_mock(cmd, **kwargs):
+ if cmd == "create_volume":
+ project_name = kwargs["project_name"]
+ volume = {
+ "project_name": project_name,
+ "name": kwargs["name"],
+ "size": kwargs["size"],
+ "n_replicas": kwargs["n_replicas"],
+ "compression": kwargs["compression"],
+ "src_snapshot_name": kwargs["src_snapshot_name"],
+ "acl": {'values': kwargs.get('acl')},
+ "state": "Migrating",
+ }
+ volume["ETag"] = get_vol_etag(volume)
+ code, new_vol = self.db.create_volume(volume)
+ return (code, new_vol)
+ else:
+ return cluster_send_cmd(cmd, **kwargs)
+ self.driver.do_setup(None)
+ cluster_send_cmd = deepcopy(self.driver.cluster.send_cmd)
+ self.driver.cluster.send_cmd = send_cmd_mock
+ vol_type = test_utils.create_volume_type(self.ctxt, self,
+ name='my_vol_type')
+ volume = test_utils.create_volume(self.ctxt, size=4,
+ volume_type_id=vol_type.id)
+ self.driver.create_volume(volume)
+ connection_props = (
+ self.driver.initialize_connection(volume,
+ get_connector_properties()))
+ self.assertIn('driver_volume_type', connection_props)
+ self.assertEqual('lightos', connection_props['driver_volume_type'])
+ self.assertEqual(FAKE_LIGHTOS_CLUSTER_INFO['subsystemNQN'],
+ connection_props['data']['subsysnqn'])
+ self.assertEqual(
+ self.db.data['projects']['default']['volumes'][0]['UUID'],
+ connection_props['data']['uuid'])
+
+ self.driver.delete_volume(volume)
+ db.volume_destroy(self.ctxt, volume.id)
+
+ def test_initialize_connection_ipv6(self):
+ def side_effect(cmd, timeout, **kwargs):
+ if cmd == "get_nodes":
+ return (httpstatus.OK, FAKE_LIGHTOS_CLUSTER_NODES_IPV6)
+ else:
+ return cluster_send_cmd(cmd, timeout, **kwargs)
+ cluster_send_cmd = deepcopy(self.driver.cluster.send_cmd)
+ self.driver.cluster.send_cmd = side_effect
+ InitialConnectorMock.nqn = "hostnqn1"
+ InitialConnectorMock.found_discovery_client = True
+ self.driver.do_setup(None)
+ vol_type = test_utils.create_volume_type(self.ctxt, self,
+ name='my_vol_type')
+ volume = test_utils.create_volume(self.ctxt, size=4,
+ volume_type_id=vol_type.id)
+ self.driver.create_volume(volume)
+ connection_props = (
+ self.driver.initialize_connection(volume,
+ get_connector_properties()))
+ self.assertIn('driver_volume_type', connection_props)
+ self.assertEqual('lightos', connection_props['driver_volume_type'])
+ self.assertEqual(FAKE_LIGHTOS_CLUSTER_INFO['subsystemNQN'],
+ connection_props['data']['subsysnqn'])
+ self.assertEqual(
+ self.db.data['projects']['default']['volumes'][0]['UUID'],
+ connection_props['data']['uuid'])
+ for connection in connection_props['data']['lightos_nodes']:
+ self.assertIn(connection, IPV6_LIST)
+
+ self.driver.delete_volume(volume)
+ db.volume_destroy(self.ctxt, volume.id)
+
def test_initialize_connection_no_hostnqn_should_fail(self):
InitialConnectorMock.nqn = ""
InitialConnectorMock.found_discovery_client = True
@@ -710,6 +830,18 @@ class LightOSStorageVolumeDriverTest(test.TestCase):
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.check_for_setup_error)
+ def test_check_ip_format(self):
+ InitialConnectorMock.nqn = ""
+ InitialConnectorMock.found_discovery_client = True
+ self.driver.do_setup(None)
+ host = "1.1.1.1"
+ port = 8009
+ endpoint = self.driver.cluster._format_endpoint(host, port)
+ self.assertEqual("1.1.1.1:8009", endpoint)
+ host = "::1111"
+ endpoint = self.driver.cluster._format_endpoint(host, port)
+ self.assertEqual("[::1111]:8009", endpoint)
+
def test_check_for_setup_error_no_dsc_should_succeed(self):
InitialConnectorMock.nqn = "hostnqn1"
InitialConnectorMock.found_discovery_client = False
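[Editor's note] test_check_ip_format above asserts that IPv6 literals are wrapped in brackets before the port is appended. One way such a helper can be written (a sketch, not necessarily the driver's _format_endpoint implementation):

    import ipaddress


    def format_endpoint(host, port):
        try:
            is_v6 = ipaddress.ip_address(host).version == 6
        except ValueError:
            is_v6 = False                  # hostnames are left untouched
        return f'[{host}]:{port}' if is_v6 else f'{host}:{port}'


    assert format_endpoint('1.1.1.1', 8009) == '1.1.1.1:8009'
    assert format_endpoint('::1111', 8009) == '[::1111]:8009'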
diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/client/fakes.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/client/fakes.py
index 19c53674f..8f6a5ad8c 100644
--- a/cinder/tests/unit/volume/drivers/netapp/dataontap/client/fakes.py
+++ b/cinder/tests/unit/volume/drivers/netapp/dataontap/client/fakes.py
@@ -76,6 +76,7 @@ LUN_NAME = 'fake-lun-name'
DEST_LUN_NAME = 'new-fake-lun-name'
FILE_NAME = 'fake-file-name'
DEST_FILE_NAME = 'new-fake-file-name'
+FAKE_UUID = 'b32bab78-82be-11ec-a8a3-0242ac120002'
FAKE_QUERY = {'volume-attributes': None}
@@ -646,6 +647,7 @@ AGGR_INFO_SSC = {
AGGR_SIZE_TOTAL = 107374182400
AGGR_SIZE_AVAILABLE = 59055800320
AGGR_USED_PERCENT = 45
+AGGR_SIZE_USED = 58888957952
AGGR_GET_ITER_CAPACITY_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
@@ -720,7 +722,6 @@ VOLUME_GET_ITER_CAPACITY_RESPONSE = etree.XML("""
'volume': VOLUME_GET_ITER_CAPACITY_ATTR_STR,
})
-
VOLUME_GET_ITER_STYLE_RESPONSE = etree.XML("""
<results status="passed">
<num-records>3</num-records>
@@ -826,7 +827,7 @@ VOLUME_GET_ITER_SSC_RESPONSE_STR = """
<snapshot-policy>default</snapshot-policy>
</volume-snapshot-attributes>
<volume-language-attributes>
- <language-code>en_US</language-code>
+ <language-code>c.utf_8</language-code>
</volume-language-attributes>
</volume-attributes>
""" % {
@@ -876,7 +877,7 @@ VOLUME_GET_ITER_SSC_RESPONSE_STR_FLEXGROUP = """
<snapshot-policy>default</snapshot-policy>
</volume-snapshot-attributes>
<volume-language-attributes>
- <language-code>en_US</language-code>
+ <language-code>c.utf_8</language-code>
</volume-language-attributes>
</volume-attributes>
""" % {
@@ -903,7 +904,7 @@ VOLUME_INFO_SSC = {
'junction-path': '/%s' % VOLUME_NAMES[0],
'aggregate': VOLUME_AGGREGATE_NAMES[0],
'space-guarantee-enabled': True,
- 'language': 'en_US',
+ 'language': 'c.utf_8',
'percentage-snapshot-reserve': '5',
'snapshot-policy': 'default',
'type': 'rw',
@@ -919,7 +920,7 @@ VOLUME_INFO_SSC_FLEXGROUP = {
'junction-path': '/%s' % VOLUME_NAMES[0],
'aggregate': [VOLUME_AGGREGATE_NAMES[0]],
'space-guarantee-enabled': True,
- 'language': 'en_US',
+ 'language': 'c.utf_8',
'percentage-snapshot-reserve': '5',
'snapshot-policy': 'default',
'type': 'rw',
@@ -1019,7 +1020,7 @@ VOLUME_GET_ITER_ENCRYPTION_SSC_RESPONSE = etree.XML("""
<snapshot-policy>default</snapshot-policy>
</volume-snapshot-attributes>
<volume-language-attributes>
- <language-code>en_US</language-code>
+ <language-code>c.utf_8</language-code>
</volume-language-attributes>
</volume-attributes>
</attributes-list>
@@ -1349,6 +1350,8 @@ SM_SOURCE_VSERVER = 'fake_source_vserver'
SM_SOURCE_VOLUME = 'fake_source_volume'
SM_DEST_VSERVER = 'fake_destination_vserver'
SM_DEST_VOLUME = 'fake_destination_volume'
+IGROUP_NAME = 'openstack-d9b4194f-5f65-4952-fake-26c911f1e4b2'
+LUN_NAME_PATH = '/vol/volume-fake/lun-path-fake-1234'
CLUSTER_PEER_GET_ITER_RESPONSE = etree.XML("""
<results status="passed">
@@ -1581,3 +1584,1477 @@ GET_FILE_COPY_STATUS_RESPONSE = etree.XML("""
DESTROY_FILE_COPY_RESPONSE = etree.XML("""
<results status="passed" />
""")
+
+VOLUME_GET_ITER_SSC_RESPONSE_STR_FLEXGROUP_REST = {
+ "uuid": "2407b637-119c-11ec-a4fb",
+ "language": "c.utf_8",
+ "name": VOLUME_NAMES[0],
+ "style": "flexgroup",
+ "is_svm_root": False,
+ "type": "rw",
+ "aggregates": [
+ {
+ "name": VOLUME_AGGREGATE_NAMES[0]
+ }
+ ],
+ "error_state": {
+ "is_inconsistent": False
+ },
+ "nas": {
+ "path": '/' + VOLUME_NAMES[0]
+ },
+ "snapshot_policy": {
+ "name": "default",
+ "uuid": "e7b0f455-fc15-11ea-b64a"
+ },
+ "svm": {
+ "name": VOLUME_VSERVER_NAME
+ },
+ "space": {
+ "size": 12345,
+ "snapshot": {
+ "reserve_percent": 5
+ }
+ },
+ "qos": {
+ "policy": {
+ "name": "fake_qos_policy_group_name"
+ }
+ },
+ "guarantee": {
+ "type": "none",
+ "honored": True
+ },
+ "_links": {
+ "self": {
+ "href": "/api/storage/volumes/2407b637-119c-11ec-a4fb"
+ }
+ }
+}
+
+VOLUME_GET_ITER_SSC_RESPONSE_FLEXGROUP_REST = {
+ "records": [
+ VOLUME_GET_ITER_SSC_RESPONSE_STR_FLEXGROUP_REST,
+ ],
+ "num_records": 1,
+ "_links": {
+ "self": {
+ "href": "/api/storage/volumes"
+ }
+ }
+}
+
+VOLUME_GET_ITER_SSC_RESPONSE_STR_REST = {
+ "uuid": "2407b637-119c-11ec-a4fb",
+ "language": "c.utf_8",
+ "name": VOLUME_NAMES[0],
+ "style": "flexvol",
+ "is_svm_root": False,
+ "type": "rw",
+ "aggregates": [
+ {
+ "name": VOLUME_AGGREGATE_NAMES[0]
+ }
+ ],
+ "error_state": {
+ "is_inconsistent": False
+ },
+ "nas": {
+ "path": '/' + VOLUME_NAMES[0]
+ },
+ "snapshot_policy": {
+ "name": "default",
+ "uuid": "e7b0f455-fc15-11ea-b64a"
+ },
+ "svm": {
+ "name": VOLUME_VSERVER_NAME
+ },
+ "space": {
+ "size": 12345,
+ "snapshot": {
+ "reserve_percent": 5
+ }
+ },
+ "qos": {
+ "policy": {
+ "name": "fake_qos_policy_group_name"
+ }
+ },
+ "guarantee": {
+ "type": "none",
+ "honored": True
+ },
+ "efficiency": {
+ "compression": "none",
+ "dedupe": "none",
+ "cross_volume_dedupe": "none",
+ "compaction": "none",
+ "schedule": "-",
+ "volume_path": "/vol/" + VOLUME_NAMES[0],
+ "state": "disabled",
+ "policy": {
+ "name": "-"
+ }
+ },
+ "_links": {
+ "self": {
+ "href": "/api/storage/volumes/2407b637-119c-11ec-a4fb"
+ }
+ }
+}
+
+VOLUME_GET_ITER_SSC_RESPONSE_REST = {
+ "records": [
+ VOLUME_GET_ITER_SSC_RESPONSE_STR_REST,
+ ],
+ "num_records": 1,
+ "_links": {
+ "self": {
+ "href": "/api/storage/volumes"
+ }
+ }
+}
+
+VOLUME_GET_ITER_RESPONSE_LIST_REST = [
+ {
+ "uuid": "2407b637-119c-11ec-a4fb-00a0b89c9a78",
+ "name": VOLUME_NAMES[0],
+ "state": "online",
+ "style": "flexvol",
+ "is_svm_root": False,
+ "type": "rw",
+ "error_state": {
+ "is_inconsistent": False
+ },
+ "_links": {
+ "self": {
+ "href": "/api/storage/volumes/2407b637-119c-11ec-a4fb"
+ }
+ }
+ },
+ {
+ "uuid": "2c190609-d51c-11eb-b83a",
+ "name": VOLUME_NAMES[1],
+ "state": "online",
+ "style": "flexvol",
+ "is_svm_root": False,
+ "type": "rw",
+ "error_state": {
+ "is_inconsistent": False
+ },
+ "_links": {
+ "self": {
+ "href": "/api/storage/volumes/2c190609-d51c-11eb-b83a"
+ }
+ }
+ }
+]
+
+VOLUME_GET_ITER_LIST_RESPONSE_REST = {
+ "records": [
+ VOLUME_GET_ITER_RESPONSE_LIST_REST[0],
+ VOLUME_GET_ITER_RESPONSE_LIST_REST[1],
+ ],
+ "num_records": 2,
+ "_links": {
+ "self": {
+ "href": "/api/storage/volumes"
+ }
+ }
+}
+
+VOLUME_ITEM_SIMPLE_RESPONSE_REST = {
+ "uuid": "2407b637-119c-11ec-a4fb-00a0b89c9a78",
+ "name": VOLUME_NAMES[0],
+ "style": 'flexvol',
+ "_links": {
+ "self": {
+ "href": "/api/storage/volumes/2407b637-119c-11ec-a4fb-00a0b89c9a78"
+ }
+ }
+}
+
+VOLUME_LIST_SIMPLE_RESPONSE_REST = {
+ "records": [
+ VOLUME_ITEM_SIMPLE_RESPONSE_REST
+ ],
+ "num_records": 1,
+ "_links": {
+ "self": {
+ "href": "/api/storage/volumes"
+ }
+ }
+}
+
+NO_RECORDS_RESPONSE_REST = {
+ "records": [],
+ "num_records": 0,
+}
+
+VOLUME_GET_ITER_RESPONSE_REST_PAGE = {
+ "records": [
+ VOLUME_GET_ITER_RESPONSE_LIST_REST[0],
+ VOLUME_GET_ITER_RESPONSE_LIST_REST[0],
+ VOLUME_GET_ITER_RESPONSE_LIST_REST[0],
+ VOLUME_GET_ITER_RESPONSE_LIST_REST[0],
+ VOLUME_GET_ITER_RESPONSE_LIST_REST[0],
+ VOLUME_GET_ITER_RESPONSE_LIST_REST[0],
+ VOLUME_GET_ITER_RESPONSE_LIST_REST[0],
+ VOLUME_GET_ITER_RESPONSE_LIST_REST[0],
+ VOLUME_GET_ITER_RESPONSE_LIST_REST[0],
+ VOLUME_GET_ITER_RESPONSE_LIST_REST[0],
+ ],
+ "num_records": 10,
+ "_links": {
+ "self": {
+ "href": "/api/storage/volumes?fields=name&max_records=2"
+ },
+ "next": {
+ "href": "/api/storage/volumes?"
+ f"start.uuid={VOLUME_GET_ITER_RESPONSE_LIST_REST[0]['uuid']}"
+ "&fields=name&max_records=2"
+ }
+ }
+}
+
+VOLUME_GET_ITER_RESPONSE_REST_LAST_PAGE = {
+ "records": [
+ VOLUME_GET_ITER_RESPONSE_LIST_REST[0],
+ VOLUME_GET_ITER_RESPONSE_LIST_REST[0],
+ VOLUME_GET_ITER_RESPONSE_LIST_REST[0],
+ VOLUME_GET_ITER_RESPONSE_LIST_REST[0],
+ VOLUME_GET_ITER_RESPONSE_LIST_REST[0],
+ VOLUME_GET_ITER_RESPONSE_LIST_REST[0],
+ VOLUME_GET_ITER_RESPONSE_LIST_REST[0],
+ VOLUME_GET_ITER_RESPONSE_LIST_REST[0],
+ ],
+ "num_records": 8,
+}
+
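[Editor's note] The paged fakes above carry a _links.next.href, mirroring how chained ONTAP REST responses are represented; a client keeps requesting the next href until it disappears. A generic sketch of that walk (send_request stands in for the real REST call):

    def get_all_records(send_request, first_href):
        href, records = first_href, []
        while href:
            page = send_request(href)
            records.extend(page.get('records', []))
            href = page.get('_links', {}).get('next', {}).get('href')
        return records


    pages = {
        '/api/storage/volumes?fields=name': {
            'records': [{'name': 'vol1'}],
            '_links': {'next': {'href': '/api/storage/volumes?page=2'}},
        },
        '/api/storage/volumes?page=2': {'records': [{'name': 'vol2'}]},
    }
    names = [r['name'] for r in
             get_all_records(pages.get, '/api/storage/volumes?fields=name')]
    assert names == ['vol1', 'vol2']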
+INVALID_GET_ITER_RESPONSE_NO_RECORDS_REST = {
+ "num_records": 1,
+}
+
+INVALID_GET_ITER_RESPONSE_NO_NUM_RECORDS_REST = {
+ "records": [],
+}
+
+VOLUME_GET_ITER_STYLE_RESPONSE_REST = {
+ "records": [
+ {
+ "style": "flexgroup",
+ },
+ ],
+ "num_records": 1,
+}
+
+VOLUME_FLEXGROUP_STYLE_REST = \
+ VOLUME_GET_ITER_STYLE_RESPONSE_REST["records"][0]
+
+VOLUME_GET_ITER_SAME_STYLE_RESPONSE_REST = {
+ "records": [
+ {
+ "style": "flexvol",
+ },
+ {
+ "style": "flexvol",
+ },
+ {
+ "style": "flexvol",
+ },
+ ],
+ "num_records": 3,
+}
+
+GET_NUM_RECORDS_RESPONSE_REST = {
+ "num_records": 1,
+}
+
+AGGR_GET_ITER_RESPONSE_REST = {
+ "records": [
+ {
+ "uuid": "6aad2b76-a069-47e9-93ee-e501ebf2cdd2",
+ "name": VOLUME_AGGREGATE_NAMES[1],
+ "node": {
+ "uuid": "2ac8f13a-fc16-11ea-8799-52540006bba9",
+ "name": NODE_NAME
+ },
+ "home_node": {
+ "uuid": "2ac8f13a-fc16-11ea-8799-52540006bba9",
+ "name": NODE_NAME
+ },
+ "snapshot": {
+ "files_total": 0,
+ "files_used": 0,
+ "max_files_available": 0,
+ "max_files_used": 0
+ },
+ "space": {
+ "footprint": 58491584512,
+ "footprint_percent": 4,
+ "block_storage": {
+ "size": AGGR_SIZE_TOTAL,
+ "available": AGGR_SIZE_AVAILABLE,
+ "used": AGGR_SIZE_USED,
+ "inactive_user_data": 0,
+ "inactive_user_data_percent": 0,
+ "full_threshold_percent": 98,
+ "physical_used": 7706808320,
+ "physical_used_percent": 1,
+ "aggregate_metadata": 397373440,
+ "aggregate_metadata_percent": 0,
+ "used_including_snapshot_reserve": 58888957952,
+ "used_including_snapshot_reserve_percent": 4,
+ "data_compacted_count": 0,
+ "data_compaction_space_saved": 0,
+ "data_compaction_space_saved_percent": 0,
+ "volume_deduplication_shared_count": 0,
+ "volume_deduplication_space_saved": 0,
+ "volume_deduplication_space_saved_percent": 0
+ },
+ "snapshot": {
+ "used_percent": 0,
+ "available": 0,
+ "total": 0,
+ "used": 0,
+ "reserve_percent": 0
+ },
+ "cloud_storage": {
+ "used": 0
+ },
+ "efficiency": {
+ "savings": 0,
+ "ratio": 1,
+ "logical_used": 117510144
+ },
+ "efficiency_without_snapshots": {
+ "savings": 0,
+ "ratio": 1,
+ "logical_used": 9617408
+ },
+ "efficiency_without_snapshots_flexclones": {
+ "savings": 0,
+ "ratio": 1,
+ "logical_used": 9617408
+ }
+ },
+ "state": "online",
+ "snaplock_type": "non_snaplock",
+ "create_time": "2020-09-21T14:45:11+00:00",
+ "data_encryption": {
+ "software_encryption_enabled": False,
+ "drive_protection_enabled": False
+ },
+ "block_storage": {
+ "primary": {
+ "disk_count": 1,
+ "disk_class": "virtual",
+ "raid_type": "raid0",
+ "raid_size": 8,
+ "checksum_style": "advanced_zoned",
+ "disk_type": "vm_disk"
+ },
+ "hybrid_cache": {
+ "enabled": False
+ },
+ "mirror": {
+ "enabled": False,
+ "state": "unmirrored"
+ },
+ "plexes": [
+ {
+ "name": "plex0"
+ }
+ ],
+ "storage_type": "hdd"
+ },
+ "cloud_storage": {
+ "attach_eligible": True
+ },
+ "inactive_data_reporting": {
+ "enabled": False
+ },
+ "metric": {
+ "timestamp": "2021-12-21T13:25:15Z",
+ "duration": "PT15S",
+ "status": "ok",
+ "throughput": {
+ "read": 0,
+ "write": 13107,
+ "other": 0,
+ "total": 13107
+ },
+ "latency": {
+ "read": 0,
+ "write": 2659,
+ "other": 0,
+ "total": 2659
+ },
+ "iops": {
+ "read": 0,
+ "write": 0,
+ "other": 0,
+ "total": 0
+ }
+ },
+ "statistics": {
+ "timestamp": "2021-12-21T13:25:21Z",
+ "status": "ok",
+ "throughput_raw": {
+ "read": 3699994624,
+ "write": 111813349376,
+ "other": 0,
+ "total": 115513344000
+ },
+ "latency_raw": {
+ "read": 1884163936,
+ "write": 9308463160,
+ "other": 0,
+ "total": 11192627096
+ },
+ "iops_raw": {
+ "read": 242498,
+ "write": 4871034,
+ "other": 0,
+ "total": 5113532
+ }
+ }
+ },
+ {
+ "uuid": "ad20dafb-1dcb-483a-b457-012ae9225062",
+ "name": VOLUME_AGGREGATE_NAMES[0],
+ "node": {
+ "uuid": "2ac8f13a-fc16-11ea-8799-52540006bba9",
+ "name": NODE_NAME
+ },
+ "home_node": {
+ "uuid": "2ac8f13a-fc16-11ea-8799-52540006bba9",
+ "name": NODE_NAME
+ },
+ "snapshot": {
+ "files_total": 0,
+ "files_used": 0,
+ "max_files_available": 0,
+ "max_files_used": 0
+ },
+ "space": {
+ "footprint": 172316893184,
+ "footprint_percent": 14,
+ "block_storage": {
+ "size": 1271819509760,
+ "available": 1099709939712,
+ "used": 172109570048,
+ "inactive_user_data": 0,
+ "inactive_user_data_percent": 0,
+ "full_threshold_percent": 98,
+ "physical_used": 27038863360,
+ "physical_used_percent": 2,
+ "aggregate_metadata": 0,
+ "aggregate_metadata_percent": 0,
+ "used_including_snapshot_reserve": 172109570048,
+ "used_including_snapshot_reserve_percent": 14,
+ "data_compacted_count": 0,
+ "data_compaction_space_saved": 0,
+ "data_compaction_space_saved_percent": 0,
+ "volume_deduplication_shared_count": 0,
+ "volume_deduplication_space_saved": 0,
+ "volume_deduplication_space_saved_percent": 0
+ },
+ "snapshot": {
+ "used_percent": 0,
+ "available": 0,
+ "total": 0,
+ "used": 0,
+ "reserve_percent": 0
+ },
+ "cloud_storage": {
+ "used": 0
+ },
+ "efficiency": {
+ "savings": 74937720832,
+ "ratio": 9.238858947247071,
+ "logical_used": 84033363968
+ },
+ "efficiency_without_snapshots": {
+ "savings": 0,
+ "ratio": 1,
+ "logical_used": 7005036544
+ },
+ "efficiency_without_snapshots_flexclones": {
+ "savings": 0,
+ "ratio": 1,
+ "logical_used": 7005036544
+ }
+ },
+ "state": "online",
+ "snaplock_type": "non_snaplock",
+ "create_time": "2020-09-21T14:44:51+00:00",
+ "data_encryption": {
+ "software_encryption_enabled": False,
+ "drive_protection_enabled": False
+ },
+ "block_storage": {
+ "primary": {
+ "disk_count": 1,
+ "disk_class": "virtual",
+ "raid_type": "raid0",
+ "raid_size": 8,
+ "checksum_style": "advanced_zoned",
+ "disk_type": "vm_disk"
+ },
+ "hybrid_cache": {
+ "enabled": False
+ },
+ "mirror": {
+ "enabled": False,
+ "state": "unmirrored"
+ },
+ "plexes": [
+ {
+ "name": "plex0"
+ }
+ ],
+ "storage_type": "hdd"
+ },
+ "cloud_storage": {
+ "attach_eligible": True
+ },
+ "inactive_data_reporting": {
+ "enabled": False
+ },
+ "metric": {
+ "timestamp": "2021-12-21T13:25:15Z",
+ "duration": "PT15S",
+ "status": "ok",
+ "throughput": {
+ "read": 0,
+ "write": 27033,
+ "other": 0,
+ "total": 27033
+ },
+ "latency": {
+ "read": 0,
+ "write": 1173,
+ "other": 0,
+ "total": 1173
+ },
+ "iops": {
+ "read": 0,
+ "write": 0,
+ "other": 0,
+ "total": 0
+ }
+ },
+ "statistics": {
+ "timestamp": "2021-12-21T13:25:21Z",
+ "status": "ok",
+ "throughput_raw": {
+ "read": 5740912640,
+ "write": 132358234112,
+ "other": 0,
+ "total": 138099146752
+ },
+ "latency_raw": {
+ "read": 15095876198,
+ "write": 12140289450,
+ "other": 0,
+ "total": 27236165648
+ },
+ "iops_raw": {
+ "read": 535930,
+ "write": 6011240,
+ "other": 0,
+ "total": 6547170
+ }
+ }
+ }
+ ],
+ "num_records": 2
+}
+
+LUN_GET_ITER_REST = {
+ "records": [
+ {
+ "uuid": "bd6baab3-4842-45b6-b627-45b305ed2e84",
+ "svm": {
+ "uuid": "fake-uuid",
+ "name": "vserver-name",
+ },
+ "name": "/vol/nahim_dev_vol01/volume-fake-uuid",
+ "location": {
+ "logical_unit": "volume-fake-uuid",
+ "node": {
+ "name": "node-name",
+ "uuid": "fake-uuid",
+ },
+ "volume": {
+ "uuid": "fake-uuid",
+ "name": "nahim_dev_vol01",
+ }
+ },
+ "auto_delete": False,
+ "class": "regular",
+ "create_time": "2021-12-09T14:07:31+00:00",
+ "enabled": True,
+ "lun_maps": [
+ {
+ "logical_unit_number": 0,
+ "igroup": {
+ "uuid": "fake-uuid",
+ "name": "openstack-fake-uuid",
+ },
+ }
+ ],
+ "os_type": "linux",
+ "serial_number": "ZlAFA?QMnBdX",
+ "space": {
+ "scsi_thin_provisioning_support_enabled": False,
+ "size": 10737418240,
+ "used": 3474366464,
+ "guarantee": {
+ "requested": False,
+ "reserved": False
+ }
+ },
+ "status": {
+ "container_state": "online",
+ "mapped": True,
+ "read_only": False,
+ "state": "online"
+ },
+ "vvol": {
+ "is_bound": False
+ },
+ "metric": {
+ "timestamp": "2021-12-23T20:36:00Z",
+ "duration": "PT15S",
+ "status": "ok",
+ "throughput": {
+ "read": 0,
+ "write": 0,
+ "other": 0,
+ "total": 0
+ },
+ "iops": {
+ "read": 0,
+ "write": 0,
+ "other": 0,
+ "total": 0
+ },
+ "latency": {
+ "read": 0,
+ "write": 0,
+ "other": 0,
+ "total": 0
+ }
+ },
+ "statistics": {
+ "timestamp": "2021-12-23T20:36:02Z",
+ "status": "ok",
+ "throughput_raw": {
+ "read": 1078230528,
+ "write": 3294724096,
+ "other": 0,
+ "total": 4372954624
+ },
+ "iops_raw": {
+ "read": 16641,
+ "write": 51257,
+ "other": 59,
+ "total": 67957
+ },
+ "latency_raw": {
+ "read": 2011655,
+ "write": 1235068755,
+ "other": 1402,
+ "total": 1237081812
+ }
+ },
+ },
+ {
+ "uuid": "dff549b8-fabe-466b-8608-871a6493b492",
+ "svm": {
+ "uuid": "fake-uuid",
+ "name": "vserver-name",
+ "_links": {
+ "self": {
+ "href": "/api/svm/svms/fake-uuid"
+ }
+ }
+ },
+ "name": "/vol/nahim_dev_vol01/volume-fake-uuid",
+ "location": {
+ "logical_unit": "volume-fake-uuid",
+ "node": {
+ "name": "node-name",
+ "uuid": "fake-uuid",
+ "_links": {
+ "self": {
+ "href": "/api/cluster/nodes/fake-uuid"
+ }
+ }
+ },
+ "volume": {
+ "uuid": "fake-uuid",
+ "name": "nahim_dev_vol01",
+ "_links": {
+ "self": {
+ "href": "/api/storage/volumes/fake-uuid"
+ }
+ }
+ }
+ },
+ "auto_delete": False,
+ "class": "regular",
+ "create_time": "2021-12-14T18:12:38+00:00",
+ "enabled": True,
+ "os_type": "linux",
+ "serial_number": "ZlAFA?QMnBdf",
+ "space": {
+ "scsi_thin_provisioning_support_enabled": False,
+ "size": 5368709120,
+ "used": 0,
+ "guarantee": {
+ "requested": False,
+ "reserved": False
+ }
+ },
+ "status": {
+ "container_state": "online",
+ "mapped": False,
+ "read_only": False,
+ "state": "online"
+ },
+ "vvol": {
+ "is_bound": False
+ },
+ }
+ ],
+ "num_records": 2,
+}
+
+LUN_GET_ITER_RESULT = [
+ {
+ 'Vserver': LUN_GET_ITER_REST['records'][0]['svm']['name'],
+ 'Volume':
+ LUN_GET_ITER_REST['records'][0]['location']['volume']['name'],
+ 'Size': LUN_GET_ITER_REST['records'][0]['space']['size'],
+ 'Qtree': (LUN_GET_ITER_REST['records'][0]['location']
+ .get('qtree', {}).get('name', '')),
+ 'Path': LUN_GET_ITER_REST['records'][0]['name'],
+ 'OsType': LUN_GET_ITER_REST['records'][0]['os_type'],
+ 'SpaceReserved':
+ LUN_GET_ITER_REST['records'][0]['space']['guarantee']['requested'],
+ 'UUID': LUN_GET_ITER_REST['records'][0]['uuid'],
+ },
+ {
+ 'Vserver': LUN_GET_ITER_REST['records'][1]['svm']['name'],
+ 'Volume':
+ LUN_GET_ITER_REST['records'][1]['location']['volume']['name'],
+ 'Size': LUN_GET_ITER_REST['records'][1]['space']['size'],
+ 'Qtree': (LUN_GET_ITER_REST['records'][1]['location']
+ .get('qtree', {}).get('name', '')),
+ 'Path': LUN_GET_ITER_REST['records'][1]['name'],
+ 'OsType': LUN_GET_ITER_REST['records'][1]['os_type'],
+ 'SpaceReserved':
+ LUN_GET_ITER_REST['records'][1]['space']['guarantee']['requested'],
+ 'UUID': LUN_GET_ITER_REST['records'][1]['uuid'],
+ },
+]
+
+FILE_DIRECTORY_GET_ITER_REST = {
+ "_links": {
+ "next": {
+ "href": "/api/resourcelink"
+ },
+ "self": {
+ "href": "/api/resourcelink"
+ }
+ },
+ "num_records": 2,
+ "records": [
+ {
+ "_links": {
+ "metadata": {
+ "href": "/api/resourcelink"
+ },
+ "self": {
+ "href": "/api/resourcelink"
+ }
+ },
+ "name": "test_file",
+ "path": "d1/d2/d3",
+ "size": 200,
+ "type": "file"
+ },
+ {
+ "_links": {
+ "metadata": {
+ "href": "/api/resourcelink"
+ },
+ "self": {
+ "href": "/api/resourcelink"
+ }
+ },
+ "name": "test_file_2",
+ "path": "d1/d2/d3",
+ "size": 250,
+ "type": "file"
+ }
+ ]
+}
+
+FILE_DIRECTORY_GET_ITER_RESULT_REST = [
+ {
+ 'name': FILE_DIRECTORY_GET_ITER_REST['records'][0]['name'],
+ 'file-size': float(FILE_DIRECTORY_GET_ITER_REST['records'][0]['size'])
+ },
+ {
+ 'name': FILE_DIRECTORY_GET_ITER_REST['records'][1]['name'],
+ 'file-size': float(FILE_DIRECTORY_GET_ITER_REST['records'][1]['size'])
+ }
+]
+
+LUN_GET_MOVEMENT_REST = {
+ "_links": {
+ "self": {
+ "href": "/api/resourcelink"
+ }
+ },
+ "name": "/vol/volume1/qtree1/lun1",
+ "uuid": "1cd8a442-86d1-11e0-ae1c-123478563412",
+ "movement": {
+ "progress": {
+ "elapsed": 0,
+ "failure": {
+ "arguments": [
+ {
+ "code": "string",
+ "message": "string"
+ }
+ ],
+ "code": "4",
+ "message": "entry doesn't exist",
+ "target": "uuid"
+ },
+ "percent_complete": 0,
+ "state": "preparing",
+ "volume_snapshot_blocked": True
+ }
+ }
+}
+
+LUN_GET_COPY_REST = {
+ "_links": {
+ "self": {
+ "href": "/api/resourcelink"
+ }
+ },
+ "name": "/vol/volume1/qtree1/lun1",
+ "uuid": "1cd8a442-86d1-11e0-ae1c-123478563412",
+ "copy": {
+ "source": {
+ "_links": {
+ "self": {
+ "href": "/api/resourcelink"
+ }
+ },
+ "progress": {
+ "elapsed": 0,
+ "failure": {
+ "arguments": [
+ {
+ "code": "string",
+ "message": "string"
+ }
+ ],
+ "code": "4",
+ "message": "entry doesn't exist",
+ "target": "uuid"
+ },
+ "percent_complete": 0,
+ "state": "preparing",
+ "volume_snapshot_blocked": True
+ },
+ }
+ },
+}
+
+VOLUME_GET_ITER_STATE_RESPONSE_REST = {
+ "records": [
+ {
+ "uuid": "c19aef05-ac60-4211-9fe4-3ef8c8816c83",
+ "name": "fake_volume",
+ "state": VOLUME_STATE_ONLINE,
+ "style": "flexvol",
+ "nas": {
+ "path": "/fake/vol"
+ },
+ }
+ ],
+ "num_records": 1,
+}
+
+GET_OPERATIONAL_LIF_ADDRESSES_RESPONSE_REST = {
+ 'records': [
+ {
+ 'uuid': 'fake_uuid_1',
+ 'name': 'vserver_name',
+ 'ip': {'address': '1.2.3.4'},
+ 'state': 'up'
+ },
+ {
+ 'uuid': 'fake_uuid_2',
+ 'name': 'vserver_name',
+ 'ip': {'address': '99.98.97.96'},
+ 'state': 'up'
+ }
+ ],
+ 'num_records': 2
+}
+
+ERROR_RESPONSE_REST = {
+ "error": {
+ "code": 1100,
+ "message": "fake error",
+ }
+}
+
+FAKE_ACTION_ENDPOINT = '/fake_endpoint'
+FAKE_BASE_ENDPOINT = '/fake_api'
+FAKE_HEADERS = {'header': 'fake_header'}
+FAKE_BODY = {'body': 'fake_body'}
+FAKE_HTTP_QUERY = {'type': 'fake_type'}
+FAKE_FORMATTED_HTTP_QUERY = '?type=fake_type'
+
+JOB_RESPONSE_REST = {
+ "job": {
+ "uuid": FAKE_UUID,
+ "_links": {
+ "self": {
+ "href": f"/api/cluster/jobs/{FAKE_UUID}"
+ }
+ }
+ }
+}
+
+VSERVER_DATA_LIST_RESPONSE_REST = {
+ 'records': [
+ {
+ 'name': VSERVER_NAME
+ },
+ {
+ 'name': VSERVER_NAME_2
+ }
+ ],
+ 'num_records': 2,
+}
+
+PERF_COUNTER_LIST_INFO_WAFL_RESPONSE_REST = {
+ 'name': 'wafl',
+ 'counter_schemas': [
+ {
+ 'name': 'cp_phase_times',
+ 'description': 'Array of percentage time spent in different phases'
+ + ' of Consistency Point (CP).',
+ 'type': 'percent',
+ 'unit': 'percent',
+ 'denominator': {
+ 'name': 'total_cp_msecs'
+ }
+ }
+ ],
+}
+
+PERF_COUNTER_TOTAL_CP_MSECS_LABELS_REST = [
+ 'cp_setup', 'cp_pre_p0', 'cp_p0_snap_del', 'cp_p1_clean', 'cp_p1_quota',
+ 'cp_ipu_disk_add', 'cp_p2v_inofile', 'cp_p2v_ino_pub', 'cp_p2v_ino_pri',
+ 'cp_p2v_fsinfo', 'cp_p2v_dlog1', 'cp_p2v_dlog2', 'cp_p2v_refcount',
+ 'cp_p2v_topaa', 'cp_p2v_df_scores_sub', 'cp_p2v_bm', 'cp_p2v_snap',
+ 'cp_p2v_df_scores', 'cp_p2v_volinfo', 'cp_p2v_cont', 'cp_p2a_inofile',
+ 'cp_p2a_ino', 'cp_p2a_dlog1', 'cp_p2a_hya', 'cp_p2a_dlog2',
+ 'cp_p2a_fsinfo', 'cp_p2a_ipu_bitmap_grow', 'cp_p2a_refcount',
+ 'cp_p2a_topaa', 'cp_p2a_hyabc', 'cp_p2a_bm', 'cp_p2a_snap',
+ 'cp_p2a_volinfo', 'cp_p2_flush', 'cp_p2_finish', 'cp_p3_wait',
+ 'cp_p3v_volinfo', 'cp_p3a_volinfo', 'cp_p3_finish', 'cp_p4_finish',
+ 'cp_p5_finish',
+]
+
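+# The driver is assumed to report these labels without the leading 'cp_'
+# prefix, hence the [3:] slice below.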
+PERF_COUNTER_TOTAL_CP_MSECS_LABELS_RESULT = [
+ label[3:] for label in PERF_COUNTER_TOTAL_CP_MSECS_LABELS_REST
+]
+
+PERF_COUNTER_TOTAL_CP_MSECS_VALUES_REST = [
+ 0, 3112, 3, 0, 0, 3, 757, 0, 99, 0, 26, 0, 22, 1, 0, 194, 4, 224, 359, 222,
+ 0, 0, 0, 0, 0, 0, 82, 0, 0, 0, 0, 0, 0, 62, 0, 133, 16, 35, 334219, 43,
+ 2218, 20, 0,
+]
+
+PERF_COUNTER_TABLE_ROWS_WAFL = {
+ 'records': [
+ {
+ 'id': NODE_NAME + ':wafl',
+ 'counters': [
+ {
+ 'name': 'cp_phase_times',
+ 'values': PERF_COUNTER_TOTAL_CP_MSECS_VALUES_REST,
+ 'labels': PERF_COUNTER_TOTAL_CP_MSECS_LABELS_REST
+ }
+ ],
+ }
+ ],
+ 'num_records': 1,
+}
+
+PERF_COUNTER_DOMAIN_BUSY_LABELS = [
+ 'exempt', 'ha', 'host_os', 'idle', 'kahuna', 'kahuna_legacy', 'none',
+ 'nwk_exempt', 'network', 'protocol', 'raid', 'raid_exempt', 'sm_exempt',
+ 'ssan_exempt', 'storage', 'target', 'unclassified', 'wafl_exempt',
+ 'wafl_mpcleaner', 'xor_exempt', 'ssan_exempt2', 'exempt_ise', 'zombie',
+]
+
+PERF_COUNTER_DOMAIN_BUSY_VALUES_1 = [
+ 83071627197, 1334877, 19459898, 588539096, 11516887, 14878622, 18,
+ 647698, 20, 229232646, 4310322, 441035, 12946782, 57837913, 38765442,
+ 1111004351701, 1497335, 949657, 109890, 768027, 21, 14, 13
+]
+
+PERF_COUNTER_DOMAIN_BUSY_VALUES_2 = [
+ 1191129018056, 135991, 22842513, 591213798, 9449562, 15345460, 0,
+ 751656, 0, 162605694, 3927323, 511160, 7644403, 29696759, 21787992,
+ 3585552592, 1058902, 957296, 87811, 499766, 0, 0, 0
+]
+
+PERF_COUNTER_ELAPSED_TIME_1 = 1199265469753
+PERF_COUNTER_ELAPSED_TIME_2 = 1199265469755
+
+PERF_GET_INSTANCES_PROCESSOR_RESPONSE_REST = {
+ 'records': [
+ {
+ 'counter_table': {
+ 'name': 'processor'
+ },
+ 'id': NODE_NAME + ':processor0',
+ 'counters': [
+ {
+ 'name': 'domain_busy_percent',
+ 'values': PERF_COUNTER_DOMAIN_BUSY_VALUES_1,
+ 'labels': PERF_COUNTER_DOMAIN_BUSY_LABELS
+ },
+ {
+ 'name': 'elapsed_time',
+ 'value': PERF_COUNTER_ELAPSED_TIME_1,
+ }
+ ],
+ },
+ {
+ 'counter_table': {
+ 'name': 'processor'
+ },
+ 'id': NODE_NAME + ':processor1',
+ 'counters': [
+ {
+ 'name': 'domain_busy_percent',
+ 'values': PERF_COUNTER_DOMAIN_BUSY_VALUES_2,
+ 'labels': PERF_COUNTER_DOMAIN_BUSY_LABELS
+ },
+ {
+ 'name': 'elapsed_time',
+ 'value': PERF_COUNTER_ELAPSED_TIME_2,
+ }
+ ],
+ }
+ ],
+ 'num_records': 2,
+}
+
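+# Expected flattened counters: array counters such as domain_busy are
+# serialized to a comma-separated string, while scalar counters such as
+# elapsed_time are passed through unchanged.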
+PERF_COUNTERS_PROCESSOR_EXPECTED = [
+ {
+ 'instance-name': 'processor',
+ 'instance-uuid': NODE_NAME + ':processor0',
+ 'node-name': NODE_NAME,
+ 'timestamp': mock.ANY,
+ 'domain_busy':
+ ','.join([str(v) for v in PERF_COUNTER_DOMAIN_BUSY_VALUES_1])
+ },
+ {
+ 'instance-name': 'processor',
+ 'instance-uuid': NODE_NAME + ':processor0',
+ 'node-name': NODE_NAME,
+ 'timestamp': mock.ANY,
+ 'processor_elapsed_time': PERF_COUNTER_ELAPSED_TIME_1
+ },
+ {
+ 'instance-name': 'processor',
+ 'instance-uuid': NODE_NAME + ':processor1',
+ 'node-name': NODE_NAME,
+ 'timestamp': mock.ANY,
+ 'domain_busy':
+ ','.join([str(v) for v in PERF_COUNTER_DOMAIN_BUSY_VALUES_2])
+ },
+ {
+ 'instance-name': 'processor',
+ 'instance-uuid': NODE_NAME + ':processor1',
+ 'node-name': NODE_NAME,
+ 'timestamp': mock.ANY,
+ 'processor_elapsed_time': PERF_COUNTER_ELAPSED_TIME_2
+ },
+]
+
+SINGLE_IGROUP_REST = {
+ "svm": {
+ "uuid": FAKE_UUID,
+ "name": VOLUME_VSERVER_NAME,
+ },
+ "uuid": FAKE_UUID,
+ "name": "openstack-e6bf1584-bfb3-4cdb-950d-525bf6f26b53",
+ "protocol": "iscsi",
+ "os_type": "linux",
+ "initiators": [
+ {
+ "name": "iqn.1993-08.org.fake:01:5b67769f5c5e",
+ }
+ ],
+}
+
+IGROUP_GET_ITER_REST = {
+ "records": [
+ SINGLE_IGROUP_REST
+ ],
+ "num_records": 1,
+}
+
+IGROUP_GET_ITER_MULT_REST = {
+ "records": [
+ SINGLE_IGROUP_REST,
+ SINGLE_IGROUP_REST
+ ],
+ "num_records": 2,
+}
+
+IGROUP_GET_ITER_INITS_REST = {
+ "records": [
+ {
+ "svm": {
+ "uuid": FAKE_UUID,
+ "name": VOLUME_VSERVER_NAME,
+ },
+ "uuid": FAKE_UUID,
+ "name": "openstack-e6bf1584-bfb3-4cdb-950d-525bf6f26b53",
+ "protocol": "iscsi",
+ "os_type": "linux",
+ "initiators": [
+ {
+ "name": "iqn.1993-08.org.fake:01:5b67769f5c5e",
+ },
+ {
+ "name": "iqn.1993-08.org.fake:02:5b67769f5c5e",
+ }
+ ],
+ }
+ ],
+ "num_records": 1,
+}
+
+GET_LUN_MAP_REST = {
+ "records": [
+ {
+ "svm": {
+ "uuid": FAKE_UUID,
+ "name": VSERVER_NAME,
+ },
+ "lun": {
+ "uuid": "6c2969dc-b022-434c-b7cd-9240bs975187",
+ "name": LUN_NAME_PATH,
+ },
+ "igroup": {
+ "uuid": "08088517-a6f5-11ec-82cc-00a0b89c9a78",
+ "name": IGROUP_NAME,
+ },
+ "logical_unit_number": 0,
+ }
+ ],
+ "num_records": 1,
+}
+
+FC_INTERFACE_REST = {
+ "records": [
+ {
+ "data_protocol": "fcp",
+ "location": {
+ "port": {
+ "name": "0a",
+ "uuid": FAKE_UUID,
+ "node": {
+ "name": "node1"
+ }
+ },
+ "node": {
+ "name": "node1",
+ "uuid": FAKE_UUID,
+ }
+ },
+ "wwpn": "20:00:00:50:56:b4:13:a8",
+ "name": "lif1",
+ "uuid": FAKE_UUID,
+ "state": "up",
+ "port_address": "5060F",
+ "wwnn": "20:00:00:50:56:b4:13:01",
+ "comment": "string",
+ "svm": {
+ "name": VOLUME_VSERVER_NAME,
+ "uuid": FAKE_UUID,
+ },
+ "enabled": True
+ }
+ ],
+ "num_records": 1
+}
+
+GET_LUN_MAPS = {
+ "records": [
+ {
+ "svm": {
+ "uuid": "77deec3a-38ea-11ec-aca8-00a0b89c9a78",
+ "name": VOLUME_NAME,
+ },
+ "uuid": "99809170-a92c-11ec-82cc-0aa0b89c9a78",
+ "name": "openstack-626d20dc-c420-4a5a-929c-59178d64f2c5",
+ "initiators": [
+ {
+ "name": "iqn.2005-03.org.open-iscsi:49ebe8a87d1",
+ }
+ ],
+ "lun_maps": [
+ {
+ "logical_unit_number": 0,
+ "lun": {
+ "name": LUN_NAME_PATH,
+ "uuid": "91e83a0a-72c3-4278-9a24-f2f8135aa5db",
+ "node": {
+ "name": CLUSTER_NAME,
+ "uuid": "9eff6c76-fc13-11ea-8799-525a0006bba9",
+ },
+ },
+ }
+ ],
+ }
+ ],
+ "num_records": 1,
+}
+
+SNAPMIRROR_GET_ITER_RESPONSE_REST = {
+ "records": [
+ {
+ "uuid": FAKE_UUID,
+ "source": {
+ "path": SM_SOURCE_VSERVER + ':' + SM_SOURCE_VOLUME,
+ "svm": {
+ "name": SM_SOURCE_VSERVER
+ }
+ },
+ "destination": {
+ "path": SM_DEST_VSERVER + ':' + SM_DEST_VOLUME,
+ "svm": {
+ "name": SM_DEST_VSERVER
+ }
+ },
+ "policy": {
+ "type": "async"
+ },
+ "state": "snapmirrored",
+ "healthy": True
+ }
+ ],
+ "num_records": 1,
+}
+
+GET_LUN_MAPS_NO_MAPS = {
+ "records": [
+ {
+ "svm": {
+ "uuid": "77deec3a-38ea-11ec-aca8-00a0b89c9a78",
+ "name": VOLUME_NAME,
+ },
+ "uuid": "99809170-a92c-11ec-82cc-0aa0b89c9a78",
+ "name": "openstack-626d20dc-c420-4a5a-929c-59178d64f2c5",
+ "initiators": [
+ {
+ "name": "iqn.2005-03.org.open-iscsi:49ebe8a87d1",
+ }
+ ],
+ }
+ ],
+ "num_records": 1,
+}
+
+GET_ISCSI_SERVICE_DETAILS_REST = {
+ "records": [
+ {
+ "svm": {
+ "uuid": FAKE_UUID,
+ "name": VOLUME_VSERVER_NAME,
+ },
+ "target": {
+ "name": INITIATOR_IQN
+ },
+ }
+ ],
+ "num_records": 1,
+}
+
+CHECK_ISCSI_INITIATOR_REST = {
+ "records": [
+ {
+ "svm": {
+ "uuid": FAKE_UUID,
+ "name": VOLUME_VSERVER_NAME,
+ },
+ "initiator": INITIATOR_IQN,
+ }
+ ],
+ "num_records": 1,
+}
+
+GET_ISCSI_TARGET_DETAILS_REST = {
+ "records": [
+ {
+ "uuid": FAKE_UUID,
+ "name": VOLUME_VSERVER_NAME,
+ "ip": {
+ "address": "192.168.1.254"
+ },
+ "enabled": True,
+ "services": [
+ "data_core",
+ "data_iscsi"
+ ],
+ }
+ ],
+ "num_records": 1,
+}
+
+VOLUME_GET_ITER_CAPACITY_RESPONSE_REST = {
+ "records": [
+ {
+ "uuid": FAKE_UUID,
+ "name": VOLUME_NAME,
+ "space": {
+ "available": VOLUME_SIZE_AVAILABLE,
+ "afs_total": VOLUME_SIZE_TOTAL
+ },
+ }
+ ],
+ "num_records": 1,
+}
+
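+# ZAPI-style view of SNAPMIRROR_GET_ITER_RESPONSE_REST above, presumably
+# the parsed result returned by the client's get_snapmirrors() helper.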
+REST_GET_SNAPMIRRORS_RESPONSE = [{
+ 'destination-volume': SM_DEST_VOLUME,
+ 'destination-vserver': SM_DEST_VSERVER,
+ 'is-healthy': True,
+ 'lag-time': None,
+ 'last-transfer-end-timestamp': None,
+ 'mirror-state': 'snapmirrored',
+ 'relationship-status': 'snapmirrored',
+ 'source-volume': SM_SOURCE_VOLUME,
+ 'source-vserver': SM_SOURCE_VSERVER,
+ 'uuid': FAKE_UUID,
+}]
+
+TRANSFERS_GET_ITER_REST = {
+ "records": [
+ {
+ "uuid": FAKE_UUID,
+ "state": "transferring"
+ },
+ {
+ "uuid": FAKE_UUID,
+ "state": "failed"
+ }
+ ],
+ "num_records": 2,
+}
+
+JOB_SUCCESSFUL_REST = {
+ "uuid": FAKE_UUID,
+ "description": "Fake description",
+ "state": "success",
+ "message": "success",
+ "code": 0,
+ "start_time": "2022-02-18T20:08:03+00:00",
+ "end_time": "2022-02-18T20:08:04+00:00",
+}
+
+JOB_ERROR_REST = {
+ "uuid": FAKE_UUID,
+ "description": "Fake description",
+ "state": "failure",
+ "message": "failure",
+ "code": -1,
+ "start_time": "2022-02-18T20:08:03+00:00",
+ "end_time": "2022-02-18T20:08:04+00:00",
+}
+
+GET_CLUSTER_NAME_RESPONSE_REST = {
+ "name": CLUSTER_NAME,
+ "uuid": "fake-cluster-uuid"
+}
+
+GET_VSERVER_PEERS_RECORDS_REST = [
+ {
+ "_links": {
+ "self": {
+ "href": "/api/resourcelink"
+ }
+ },
+ "applications": [
+ "snapmirror",
+ "lun_copy"
+ ],
+ "name": CLUSTER_NAME,
+ "peer": {
+ "cluster": {
+ "_links": {
+ "self": {
+ "href": "/api/resourcelink"
+ }
+ },
+ "name": REMOTE_CLUSTER_NAME,
+ "uuid": "fake-cluster-uuid-2"
+ },
+ "svm": {
+ "_links": {
+ "self": {
+ "href": "/api/resourcelink"
+ }
+ },
+ "name": VSERVER_NAME_2,
+ "uuid": "fake-svm-uuid-2"
+ }
+ },
+ "state": "peered",
+ "svm": {
+ "_links": {
+ "self": {
+ "href": "/api/resourcelink"
+ }
+ },
+ "name": VSERVER_NAME,
+ "uuid": "fake-svm-uuid"
+ },
+ "uuid": "fake-cluster-uuid"
+ }
+]
+
+GET_VSERVER_PEERS_RESPONSE_REST = {
+ "_links": {
+ "next": {
+ "href": "/api/resourcelink"
+ },
+ "self": {
+ "href": "/api/resourcelink"
+ }
+ },
+ "num_records": 1,
+ "records": GET_VSERVER_PEERS_RECORDS_REST
+}
diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_api.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_api.py
index a2f4c3d70..c8f723183 100644
--- a/cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_api.py
+++ b/cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_api.py
@@ -21,8 +21,11 @@ from unittest import mock
import ddt
from lxml import etree
+from oslo_serialization import jsonutils
from oslo_utils import netutils
import paramiko
+import requests
+from requests import auth
import six
from six.moves import urllib
@@ -565,3 +568,321 @@ class SSHUtilTests(test.TestCase):
stderr = mock.Mock()
stderr.channel = mock.Mock(channel)
return stdin, stdout, stderr
+
+
+@ddt.ddt
+class NetAppRestApiServerTests(test.TestCase):
+ """Test case for NetApp REST API server methods."""
+ def setUp(self):
+ self.rest_client = netapp_api.RestNaServer('127.0.0.1')
+ super(NetAppRestApiServerTests, self).setUp()
+
+ @ddt.data(None, 'my_cert')
+ def test__init__ssl_verify(self, ssl_cert_path):
+ client = netapp_api.RestNaServer('127.0.0.1',
+ ssl_cert_path=ssl_cert_path)
+
+ if ssl_cert_path:
+ self.assertEqual(ssl_cert_path, client._ssl_verify)
+ else:
+ self.assertTrue(client._ssl_verify)
+
+ @ddt.data(None, 'ftp')
+ def test_set_transport_type_value_error(self, transport_type):
+ self.assertRaises(ValueError, self.rest_client.set_transport_type,
+ transport_type)
+
+ @ddt.data('http', 'https')
+ def test_set_transport_type_valid(self, transport_type):
+ """Tests setting a valid transport type"""
+ self.rest_client.set_transport_type(transport_type)
+ self.assertEqual(self.rest_client._protocol, transport_type)
+
+ @ddt.data('!&', '80na', '')
+ def test_set_port__value_error(self, port):
+ self.assertRaises(ValueError, self.rest_client.set_port, port)
+
+ @ddt.data(
+ {'port': None, 'protocol': 'http', 'expected_port': '80'},
+ {'port': None, 'protocol': 'https', 'expected_port': '443'},
+ {'port': '111', 'protocol': None, 'expected_port': '111'}
+ )
+ @ddt.unpack
+ def test_set_port(self, port, protocol, expected_port):
+ self.rest_client._protocol = protocol
+
+ self.rest_client.set_port(port=port)
+
+ self.assertEqual(expected_port, self.rest_client._port)
+
+ @ddt.data('!&', '80na', '')
+ def test_set_timeout_value_error(self, timeout):
+ self.assertRaises(ValueError, self.rest_client.set_timeout, timeout)
+
+ @ddt.data({'params': {'major': 1, 'minor': '20a'}},
+ {'params': {'major': '20a', 'minor': 1}},
+ {'params': {'major': '!*', 'minor': '20a'}})
+ @ddt.unpack
+ def test_set_api_version_value_error(self, params):
+ self.assertRaises(ValueError, self.rest_client.set_api_version,
+ **params)
+
+ def test_set_api_version_valid(self):
+ args = {'major': '20', 'minor': 1}
+
+ self.rest_client.set_api_version(**args)
+
+ self.assertEqual(self.rest_client._api_major_version, 20)
+ self.assertEqual(self.rest_client._api_minor_version, 1)
+ self.assertEqual(self.rest_client._api_version, "20.1")
+
+ def test_invoke_successfully_naapi_error(self):
+ self.mock_object(self.rest_client, '_build_headers', return_value={})
+ self.mock_object(self.rest_client, '_get_base_url', return_value='')
+ self.mock_object(self.rest_client, 'send_http_request',
+ return_value=(10, zapi_fakes.ERROR_RESPONSE_REST))
+
+ self.assertRaises(netapp_api.NaApiError,
+ self.rest_client.invoke_successfully,
+ zapi_fakes.FAKE_ACTION_ENDPOINT, 'get')
+
+ @ddt.data(None, {'fields': 'fake_fields'})
+ def test_invoke_successfully(self, query):
+ mock_build_header = self.mock_object(
+ self.rest_client, '_build_headers',
+ return_value=zapi_fakes.FAKE_HEADERS)
+ mock_base = self.mock_object(
+ self.rest_client, '_get_base_url',
+ return_value=zapi_fakes.FAKE_BASE_ENDPOINT)
+ mock_add_query = self.mock_object(
+ self.rest_client, '_add_query_params_to_url',
+ return_value=zapi_fakes.FAKE_ACTION_ENDPOINT)
+ http_code = 200
+ mock_send_http = self.mock_object(
+ self.rest_client, 'send_http_request',
+ return_value=(http_code, zapi_fakes.NO_RECORDS_RESPONSE_REST))
+
+ code, response = self.rest_client.invoke_successfully(
+ zapi_fakes.FAKE_ACTION_ENDPOINT, 'get', body=zapi_fakes.FAKE_BODY,
+ query=query, enable_tunneling=True)
+
+ self.assertEqual(response, zapi_fakes.NO_RECORDS_RESPONSE_REST)
+ self.assertEqual(code, http_code)
+ mock_build_header.assert_called_once_with(True)
+ mock_base.assert_called_once_with()
+ self.assertEqual(bool(query), mock_add_query.called)
+ mock_send_http.assert_called_once_with(
+ 'get',
+ zapi_fakes.FAKE_BASE_ENDPOINT + zapi_fakes.FAKE_ACTION_ENDPOINT,
+ zapi_fakes.FAKE_BODY, zapi_fakes.FAKE_HEADERS)
+
+ @ddt.data(
+ {'error': requests.HTTPError(), 'raised': netapp_api.NaApiError},
+ {'error': Exception, 'raised': netapp_api.NaApiError})
+ @ddt.unpack
+ def test_send_http_request_http_error(self, error, raised):
+ self.mock_object(netapp_api, 'LOG')
+ self.mock_object(self.rest_client, '_build_session')
+ self.rest_client._session = mock.Mock()
+ self.mock_object(
+ self.rest_client, '_get_request_method', mock.Mock(
+ return_value=mock.Mock(side_effect=error)))
+
+ self.assertRaises(raised, self.rest_client.send_http_request,
+ 'get', zapi_fakes.FAKE_ACTION_ENDPOINT,
+ zapi_fakes.FAKE_BODY, zapi_fakes.FAKE_HEADERS)
+
+ @ddt.data(
+ {
+ 'resp_content': zapi_fakes.NO_RECORDS_RESPONSE_REST,
+ 'body': zapi_fakes.FAKE_BODY,
+ 'timeout': 10,
+ },
+ {
+ 'resp_content': zapi_fakes.NO_RECORDS_RESPONSE_REST,
+ 'body': zapi_fakes.FAKE_BODY,
+ 'timeout': None,
+ },
+ {
+ 'resp_content': zapi_fakes.NO_RECORDS_RESPONSE_REST,
+ 'body': None,
+ 'timeout': None,
+ },
+ {
+ 'resp_content': None,
+ 'body': None,
+ 'timeout': None,
+ }
+ )
+ @ddt.unpack
+ def test_send_http_request(self, resp_content, body, timeout):
+ if timeout:
+ self.rest_client._timeout = timeout
+ self.mock_object(netapp_api, 'LOG')
+ mock_json_dumps = self.mock_object(
+ jsonutils, 'dumps', mock.Mock(return_value='fake_dump_body'))
+ mock_build_session = self.mock_object(
+ self.rest_client, '_build_session')
+ _mock_session = mock.Mock()
+ self.rest_client._session = _mock_session
+ response = mock.Mock()
+ response.content = resp_content
+ response.status_code = 10
+ mock_post = mock.Mock(return_value=response)
+ mock_get_request_method = self.mock_object(
+ self.rest_client, '_get_request_method', mock.Mock(
+ return_value=mock_post))
+ mock_json_loads = self.mock_object(
+ jsonutils, 'loads',
+ mock.Mock(return_value='fake_loads_response'))
+
+ code, res = self.rest_client.send_http_request(
+ 'post', zapi_fakes.FAKE_ACTION_ENDPOINT,
+ body, zapi_fakes.FAKE_HEADERS)
+
+ expected_res = 'fake_loads_response' if resp_content else {}
+ self.assertEqual(expected_res, res)
+ self.assertEqual(10, code)
+ self.assertEqual(bool(body), mock_json_dumps.called)
+ self.assertEqual(bool(resp_content), mock_json_loads.called)
+ mock_build_session.assert_called_once_with(zapi_fakes.FAKE_HEADERS)
+ mock_get_request_method.assert_called_once_with('post', _mock_session)
+ expected_data = 'fake_dump_body' if body else {}
+ if timeout:
+ mock_post.assert_called_once_with(
+ zapi_fakes.FAKE_ACTION_ENDPOINT, data=expected_data,
+ timeout=timeout)
+ else:
+ mock_post.assert_called_once_with(zapi_fakes.FAKE_ACTION_ENDPOINT,
+ data=expected_data)
+
+ @ddt.data(
+ {'host': '192.168.1.0', 'port': '80', 'protocol': 'http'},
+ {'host': '0.0.0.0', 'port': '443', 'protocol': 'https'},
+ {'host': '::ffff:8', 'port': '80', 'protocol': 'http'},
+ {'host': 'fdf8:f53b:82e4::53', 'port': '443', 'protocol': 'https'})
+ @ddt.unpack
+ def test__get_base_url(self, host, port, protocol):
+ client = netapp_api.RestNaServer(host, port=port,
+ transport_type=protocol)
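+        # IPv6 literal hosts must be wrapped in brackets when embedded in
+        # a URL, hence the expected_host adjustment below.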
+ expected_host = f'[{host}]' if ':' in host else host
+ expected_url = '%s://%s:%s/api/' % (protocol, expected_host, port)
+
+ url = client._get_base_url()
+
+ self.assertEqual(expected_url, url)
+
+ def test__add_query_params_to_url(self):
+ formatted_url = self.rest_client._add_query_params_to_url(
+ zapi_fakes.FAKE_ACTION_ENDPOINT, zapi_fakes.FAKE_HTTP_QUERY)
+
+ expected_formatted_url = zapi_fakes.FAKE_ACTION_ENDPOINT
+ expected_formatted_url += zapi_fakes.FAKE_FORMATTED_HTTP_QUERY
+ self.assertEqual(expected_formatted_url, formatted_url)
+
+ @ddt.data('post', 'get', 'put', 'delete', 'patch')
+ def test_get_request_method(self, method):
+ _mock_session = mock.Mock()
+ _mock_session.post = mock.Mock()
+ _mock_session.get = mock.Mock()
+ _mock_session.put = mock.Mock()
+ _mock_session.delete = mock.Mock()
+ _mock_session.patch = mock.Mock()
+
+ res = self.rest_client._get_request_method(method, _mock_session)
+
+ expected_method = getattr(_mock_session, method)
+ self.assertEqual(expected_method, res)
+
+ def test__str__(self):
+ fake_host = 'fake_host'
+ client = netapp_api.RestNaServer(fake_host)
+
+ expected_str = "server: %s" % fake_host
+ self.assertEqual(expected_str, str(client))
+
+ def test_get_transport_type(self):
+ expected_protocol = 'fake_protocol'
+ self.rest_client._protocol = expected_protocol
+
+ res = self.rest_client.get_transport_type()
+
+ self.assertEqual(expected_protocol, res)
+
+ @ddt.data(None, ('1', '0'))
+ def test_get_api_version(self, api_version):
+ if api_version:
+ self.rest_client._api_version = str(api_version)
+ (self.rest_client._api_major_version, _) = api_version
+ (_, self.rest_client._api_minor_version) = api_version
+
+ res = self.rest_client.get_api_version()
+
+ self.assertEqual(api_version, res)
+
+ @ddt.data(None, '9.10')
+ def test_get_ontap_version(self, ontap_version):
+ if ontap_version:
+ self.rest_client._ontap_version = ontap_version
+
+ res = self.rest_client.get_ontap_version()
+
+ self.assertEqual(ontap_version, res)
+
+ def test_set_vserver(self):
+ expected_vserver = 'fake_vserver'
+ self.rest_client.set_vserver(expected_vserver)
+
+ self.assertEqual(expected_vserver, self.rest_client._vserver)
+
+ def test_get_vserver(self):
+ expected_vserver = 'fake_vserver'
+ self.rest_client._vserver = expected_vserver
+
+ res = self.rest_client.get_vserver()
+
+ self.assertEqual(expected_vserver, res)
+
+ def test__build_session(self):
+ fake_session = mock.Mock()
+ mock_requests_session = self.mock_object(
+ requests, 'Session', mock.Mock(return_value=fake_session))
+ mock_auth = self.mock_object(
+ self.rest_client, '_create_basic_auth_handler',
+ mock.Mock(return_value='fake_auth'))
+ self.rest_client._ssl_verify = 'fake_ssl'
+
+ self.rest_client._build_session(zapi_fakes.FAKE_HEADERS)
+
+ self.assertEqual(fake_session, self.rest_client._session)
+ self.assertEqual('fake_auth', self.rest_client._session.auth)
+ self.assertEqual('fake_ssl', self.rest_client._session.verify)
+ self.assertEqual(zapi_fakes.FAKE_HEADERS,
+ self.rest_client._session.headers)
+ mock_requests_session.assert_called_once_with()
+ mock_auth.assert_called_once_with()
+
+ @ddt.data(True, False)
+ def test__build_headers(self, enable_tunneling):
+ self.rest_client._vserver = zapi_fakes.VSERVER_NAME
+
+ res = self.rest_client._build_headers(enable_tunneling)
+
+ expected = {
+ "Accept": "application/json",
+ "Content-Type": "application/json"
+ }
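+        # Tunneling is assumed to scope the request to the configured SVM
+        # via the X-Dot-SVM-Name header.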
+ if enable_tunneling:
+ expected["X-Dot-SVM-Name"] = zapi_fakes.VSERVER_NAME
+ self.assertEqual(expected, res)
+
+ def test__create_basic_auth_handler(self):
+ username = 'fake_username'
+ password = 'fake_password'
+ client = netapp_api.RestNaServer('10.1.1.1', username=username,
+ password=password)
+
+ res = client._create_basic_auth_handler()
+
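+        # Compare attribute dicts so the check does not depend on
+        # HTTPBasicAuth defining equality semantics.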
+ expected = auth.HTTPBasicAuth(username, password)
+ self.assertEqual(expected.__dict__, res.__dict__)
diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_client_cmode.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_client_cmode.py
index 16a517916..e5ed24de6 100644
--- a/cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_client_cmode.py
+++ b/cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_client_cmode.py
@@ -1441,6 +1441,7 @@ class NetAppCmodeClientTestCase(test.TestCase):
<num-records>1</num-records>
<attributes-list>
<net-interface-info>
+ <vserver>fake_vserver</vserver>
</net-interface-info>
</attributes-list>
</results>"""))
@@ -4097,7 +4098,7 @@ class NetAppCmodeClientTestCase(test.TestCase):
'aggregate': 'fake_aggr1',
'compression_enabled': False,
'dedupe_enabled': True,
- 'language': 'en_US',
+ 'language': 'c.utf_8',
'size': 1,
'snapshot_policy': 'default',
'snapshot_reserve': '5',
@@ -4494,3 +4495,46 @@ class NetAppCmodeClientTestCase(test.TestCase):
}
self.client.connection.send_request.assert_called_once_with(
'file-rename-file', api_args)
+
+ def test_check_api_permissions(self):
+
+ mock_log = self.mock_object(client_cmode.LOG, 'warning')
+ self.mock_object(self.client, 'check_cluster_api', return_value=True)
+
+ self.client.check_api_permissions()
+
+ self.client.check_cluster_api.assert_has_calls(
+ [mock.call(*key) for key in client_cmode.SSC_API_MAP.keys()])
+ self.assertEqual(0, mock_log.call_count)
+
+ def test_check_api_permissions_failed_ssc_apis(self):
+
+ def check_cluster_api(object_name, operation_name, api):
+ if api != 'volume-get-iter':
+ return False
+ return True
+
+ self.mock_object(self.client, 'check_cluster_api',
+ side_effect=check_cluster_api)
+
+ mock_log = self.mock_object(client_cmode.LOG, 'warning')
+
+ self.client.check_api_permissions()
+
+ self.assertEqual(1, mock_log.call_count)
+
+ def test_check_api_permissions_failed_volume_api(self):
+
+ def check_cluster_api(object_name, operation_name, api):
+ if api == 'volume-get-iter':
+ return False
+ return True
+
+ self.mock_object(self.client, 'check_cluster_api',
+ side_effect=check_cluster_api)
+ mock_log = self.mock_object(client_cmode.LOG, 'warning')
+
+ self.assertRaises(exception.VolumeBackendAPIException,
+ self.client.check_api_permissions)
+
+ self.assertEqual(0, mock_log.call_count)
diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_client_cmode_rest.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_client_cmode_rest.py
new file mode 100644
index 000000000..66578c79a
--- /dev/null
+++ b/cinder/tests/unit/volume/drivers/netapp/dataontap/client/test_client_cmode_rest.py
@@ -0,0 +1,3709 @@
+# Copyright (c) 2014 Alex Meade. All rights reserved.
+# Copyright (c) 2015 Dustin Schoenbrun. All rights reserved.
+# Copyright (c) 2015 Tom Barron. All rights reserved.
+# Copyright (c) 2016 Mike Rooney. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+from unittest import mock
+import uuid
+
+import ddt
+from oslo_utils import units
+import six
+
+from cinder import exception
+from cinder.tests.unit import test
+from cinder.tests.unit.volume.drivers.netapp.dataontap.client import (
+ fakes as fake_client)
+from cinder.tests.unit.volume.drivers.netapp.dataontap import fakes as fake
+from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
+from cinder.volume.drivers.netapp.dataontap.client import client_base
+from cinder.volume.drivers.netapp.dataontap.client import client_cmode
+from cinder.volume.drivers.netapp.dataontap.client import client_cmode_rest
+from cinder.volume.drivers.netapp import utils as netapp_utils
+
+
+CONNECTION_INFO = {'hostname': 'hostname',
+ 'transport_type': 'https',
+ 'port': 443,
+ 'username': 'admin',
+ 'password': 'passw0rd',
+ 'vserver': 'fake_vserver',
+ 'ssl_cert_path': 'fake_ca',
+ 'api_trace_pattern': 'fake_regex'}
+
+
+@ddt.ddt
+class NetAppRestCmodeClientTestCase(test.TestCase):
+
+ def setUp(self):
+ super(NetAppRestCmodeClientTestCase, self).setUp()
+
+ # Setup Client mocks
+ self.mock_object(client_cmode.Client, '_init_ssh_client')
+ # store the original reference so we can call it later in
+ # test__get_cluster_nodes_info
+ self.original_get_cluster_nodes_info = (
+ client_cmode.Client._get_cluster_nodes_info)
+ self.mock_object(client_cmode.Client, '_get_cluster_nodes_info',
+ return_value=fake.HYBRID_SYSTEM_NODES_INFO)
+ self.mock_object(client_cmode.Client, 'get_ontap_version',
+ return_value=(9, 11, 1))
+ self.mock_object(client_cmode.Client,
+ 'get_ontapi_version',
+ return_value=(1, 20))
+
+ # Setup RestClient mocks
+ self.mock_object(client_cmode_rest.RestClient, '_init_ssh_client')
+ # store the original reference so we can call it later in
+ # test__get_cluster_nodes_info
+ self.original_get_cluster_nodes_info = (
+ client_cmode_rest.RestClient._get_cluster_nodes_info)
+
+        # Temporary fix while the function is still under implementation
+ if not hasattr(client_cmode_rest.RestClient,
+ '_get_cluster_nodes_info'):
+ setattr(client_cmode_rest.RestClient,
+ '_get_cluster_nodes_info',
+ None)
+ self.original_get_cluster_nodes_info = (
+ client_cmode_rest.RestClient._get_cluster_nodes_info)
+
+ self.mock_object(client_cmode_rest.RestClient,
+ '_get_cluster_nodes_info',
+ return_value=fake.HYBRID_SYSTEM_NODES_INFO)
+ self.mock_object(client_cmode_rest.RestClient, 'get_ontap_version',
+ return_value=(9, 11, 1))
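+        # The RestClient constructor is assumed to query the ONTAP
+        # version, so it is also patched for the instantiation below.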
+ with mock.patch.object(client_cmode_rest.RestClient,
+ 'get_ontap_version',
+ return_value=(9, 11, 1)):
+ self.client = client_cmode_rest.RestClient(**CONNECTION_INFO)
+
+ self.client.ssh_client = mock.MagicMock()
+ self.client.connection = mock.MagicMock()
+ self.connection = self.client.connection
+
+ self.vserver = CONNECTION_INFO['vserver']
+ self.fake_volume = six.text_type(uuid.uuid4())
+ self.fake_lun = six.text_type(uuid.uuid4())
+        # this line interferes with test__get_cluster_nodes_info
+ # self.mock_send_request = self.mock_object(
+ # self.client, 'send_request')
+
+ def _mock_api_error(self, code='fake'):
+ return mock.Mock(side_effect=netapp_api.NaApiError(code=code))
+
+ def test_send_request(self):
+ expected = 'fake_response'
+ mock_get_records = self.mock_object(
+ self.client, 'get_records',
+ mock.Mock(return_value=expected))
+
+ res = self.client.send_request(
+ fake_client.FAKE_ACTION_ENDPOINT, 'get',
+ body=fake_client.FAKE_BODY,
+ query=fake_client.FAKE_HTTP_QUERY, enable_tunneling=False)
+
+ self.assertEqual(expected, res)
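+        # The trailing 10000 is assumed to match the client's default
+        # page length (client_cmode_rest.DEFAULT_MAX_PAGE_LENGTH).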
+ mock_get_records.assert_called_once_with(
+ fake_client.FAKE_ACTION_ENDPOINT,
+ fake_client.FAKE_HTTP_QUERY, False, 10000)
+
+ def test_send_request_post(self):
+ expected = (201, 'fake_response')
+ mock_invoke = self.mock_object(
+ self.client.connection, 'invoke_successfully',
+ mock.Mock(return_value=expected))
+
+ res = self.client.send_request(
+ fake_client.FAKE_ACTION_ENDPOINT, 'post',
+ body=fake_client.FAKE_BODY,
+ query=fake_client.FAKE_HTTP_QUERY, enable_tunneling=False)
+
+ self.assertEqual(expected[1], res)
+ mock_invoke.assert_called_once_with(
+ fake_client.FAKE_ACTION_ENDPOINT, 'post',
+ body=fake_client.FAKE_BODY,
+ query=fake_client.FAKE_HTTP_QUERY, enable_tunneling=False)
+
+ def test_send_request_wait(self):
+ expected = (202, fake_client.JOB_RESPONSE_REST)
+ mock_invoke = self.mock_object(
+ self.client.connection, 'invoke_successfully',
+ mock.Mock(return_value=expected))
+
+ mock_wait = self.mock_object(
+ self.client, '_wait_job_result',
+ mock.Mock(return_value=expected[1]))
+
+ res = self.client.send_request(
+ fake_client.FAKE_ACTION_ENDPOINT, 'post',
+ body=fake_client.FAKE_BODY,
+ query=fake_client.FAKE_HTTP_QUERY, enable_tunneling=False)
+
+ self.assertEqual(expected[1], res)
+ mock_invoke.assert_called_once_with(
+ fake_client.FAKE_ACTION_ENDPOINT, 'post',
+ body=fake_client.FAKE_BODY,
+ query=fake_client.FAKE_HTTP_QUERY, enable_tunneling=False)
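+        # The [4:] slice is assumed to strip the leading '/api' from the
+        # job href, since the connection prepends its own base URL.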
+ mock_wait.assert_called_once_with(
+ expected[1]['job']['_links']['self']['href'][4:])
+
+ @ddt.data(True, False)
+ def test_get_records(self, enable_tunneling):
+ api_responses = [
+ (200, fake_client.VOLUME_GET_ITER_RESPONSE_REST_PAGE),
+ (200, fake_client.VOLUME_GET_ITER_RESPONSE_REST_PAGE),
+ (200, fake_client.VOLUME_GET_ITER_RESPONSE_REST_LAST_PAGE),
+ ]
+
+ mock_invoke = self.mock_object(
+ self.client.connection, 'invoke_successfully',
+ side_effect=copy.deepcopy(api_responses))
+
+ query = {
+ 'fields': 'name'
+ }
+
+ result = self.client.get_records(
+ '/storage/volumes/', query=query,
+ enable_tunneling=enable_tunneling,
+ max_page_length=10)
+
+ num_records = result['num_records']
+ self.assertEqual(28, num_records)
+ self.assertEqual(28, len(result['records']))
+
+ expected_records = []
+ expected_records.extend(api_responses[0][1]['records'])
+ expected_records.extend(api_responses[1][1]['records'])
+ expected_records.extend(api_responses[2][1]['records'])
+
+ self.assertEqual(expected_records, result['records'])
+
+ next_tag = result.get('next')
+ self.assertIsNone(next_tag)
+
+ expected_query = copy.deepcopy(query)
+ expected_query['max_records'] = 10
+
+ next_url_1 = api_responses[0][1]['_links']['next']['href'][4:]
+ next_url_2 = api_responses[1][1]['_links']['next']['href'][4:]
+
+ mock_invoke.assert_has_calls([
+ mock.call('/storage/volumes/', 'get', query=expected_query,
+ enable_tunneling=enable_tunneling),
+ mock.call(next_url_1, 'get', query=None,
+ enable_tunneling=enable_tunneling),
+ mock.call(next_url_2, 'get', query=None,
+ enable_tunneling=enable_tunneling),
+ ])
+
+ def test_get_records_single_page(self):
+
+ api_response = (
+ 200, fake_client.VOLUME_GET_ITER_RESPONSE_REST_LAST_PAGE)
+ mock_invoke = self.mock_object(self.client.connection,
+ 'invoke_successfully',
+ return_value=api_response)
+
+ query = {
+ 'fields': 'name'
+ }
+
+ result = self.client.get_records(
+ '/storage/volumes/', query=query, max_page_length=10)
+
+ num_records = result['num_records']
+ self.assertEqual(8, num_records)
+ self.assertEqual(8, len(result['records']))
+
+ next_tag = result.get('next')
+ self.assertIsNone(next_tag)
+
+ args = copy.deepcopy(query)
+ args['max_records'] = 10
+
+ mock_invoke.assert_has_calls([
+ mock.call('/storage/volumes/', 'get', query=args,
+ enable_tunneling=True),
+ ])
+
+ def test_get_records_not_found(self):
+
+ api_response = (200, fake_client.NO_RECORDS_RESPONSE_REST)
+ mock_invoke = self.mock_object(self.client.connection,
+ 'invoke_successfully',
+ return_value=api_response)
+
+ result = self.client.get_records('/storage/volumes/')
+
+ num_records = result['num_records']
+ self.assertEqual(0, num_records)
+ self.assertEqual(0, len(result['records']))
+
+ args = {
+ 'max_records': client_cmode_rest.DEFAULT_MAX_PAGE_LENGTH
+ }
+
+ mock_invoke.assert_has_calls([
+ mock.call('/storage/volumes/', 'get', query=args,
+ enable_tunneling=True),
+ ])
+
+ def test_get_records_timeout(self):
+        # To simulate a timeout, max_records is set to 30, but the API
+        # returns fewer records and fills in the 'next' URL pointing to
+        # the next page.
+ max_records = 30
+ api_responses = [
+ (200, fake_client.VOLUME_GET_ITER_RESPONSE_REST_PAGE),
+ (200, fake_client.VOLUME_GET_ITER_RESPONSE_REST_PAGE),
+ (200, fake_client.VOLUME_GET_ITER_RESPONSE_REST_LAST_PAGE),
+ ]
+
+ mock_invoke = self.mock_object(
+ self.client.connection, 'invoke_successfully',
+ side_effect=copy.deepcopy(api_responses))
+
+ query = {
+ 'fields': 'name'
+ }
+
+ result = self.client.get_records(
+ '/storage/volumes/', query=query, max_page_length=max_records)
+
+ num_records = result['num_records']
+ self.assertEqual(28, num_records)
+ self.assertEqual(28, len(result['records']))
+
+ expected_records = []
+ expected_records.extend(api_responses[0][1]['records'])
+ expected_records.extend(api_responses[1][1]['records'])
+ expected_records.extend(api_responses[2][1]['records'])
+
+ self.assertEqual(expected_records, result['records'])
+
+ next_tag = result.get('next', None)
+ self.assertIsNone(next_tag)
+
+ args1 = copy.deepcopy(query)
+ args1['max_records'] = max_records
+
+ next_url_1 = api_responses[0][1]['_links']['next']['href'][4:]
+ next_url_2 = api_responses[1][1]['_links']['next']['href'][4:]
+
+ mock_invoke.assert_has_calls([
+ mock.call('/storage/volumes/', 'get', query=args1,
+ enable_tunneling=True),
+ mock.call(next_url_1, 'get', query=None, enable_tunneling=True),
+ mock.call(next_url_2, 'get', query=None, enable_tunneling=True),
+ ])
+
+ def test__get_unique_volume(self):
+ api_response = fake_client.VOLUME_GET_ITER_STYLE_RESPONSE_REST
+
+ result = self.client._get_unique_volume(api_response["records"])
+
+ expected = fake_client.VOLUME_FLEXGROUP_STYLE_REST
+ self.assertEqual(expected, result)
+
+ def test__get_unique_volume_raise_exception(self):
+ api_response = fake_client.VOLUME_GET_ITER_SAME_STYLE_RESPONSE_REST
+
+ self.assertRaises(exception.VolumeBackendAPIException,
+ self.client._get_unique_volume,
+ api_response["records"])
+
+ @ddt.data(fake.REST_FIELDS, None)
+ def test__get_volume_by_args(self, fields):
+ mock_get_unique_vol = self.mock_object(
+ self.client, '_get_unique_volume',
+ return_value=fake_client.VOLUME_GET_ITER_SSC_RESPONSE_STR_REST)
+ mock_send_request = self.mock_object(
+ self.client, 'send_request',
+ return_value=fake_client.VOLUME_GET_ITER_SSC_RESPONSE_REST)
+
+ volume = self.client._get_volume_by_args(
+ vol_name=fake.VOLUME_NAME, vol_path=fake.VOLUME_PATH,
+ vserver=fake.VSERVER_NAME, fields=fields)
+
+ self.assertEqual(fake_client.VOLUME_GET_ITER_SSC_RESPONSE_STR_REST,
+ volume)
+ mock_get_unique_vol.assert_called_once_with(
+ fake_client.VOLUME_GET_ITER_SSC_RESPONSE_REST['records'])
+ expected_query = {
+ 'type': 'rw',
+ 'style': 'flex*',
+ 'is_svm_root': 'false',
+ 'error_state.is_inconsistent': 'false',
+ 'state': 'online',
+ 'name': fake.VOLUME_NAME,
+ 'nas.path': fake.VOLUME_PATH,
+ 'svm.name': fake.VSERVER_NAME,
+ 'fields': 'name,style' if not fields else fields,
+ }
+ mock_send_request.assert_called_once_with('/storage/volumes/', 'get',
+ query=expected_query)
+
+ @ddt.data(False, True)
+ def test_get_flexvol(self, is_flexgroup):
+
+ if is_flexgroup:
+ api_response = \
+ fake_client.VOLUME_GET_ITER_SSC_RESPONSE_FLEXGROUP_REST
+ volume_response = \
+ fake_client.VOLUME_GET_ITER_SSC_RESPONSE_STR_FLEXGROUP_REST
+ else:
+ api_response = fake_client.VOLUME_GET_ITER_SSC_RESPONSE_REST
+ volume_response = \
+ fake_client.VOLUME_GET_ITER_SSC_RESPONSE_STR_REST
+
+ self.mock_object(self.client,
+ 'send_request',
+ return_value=api_response)
+
+ mock_get_unique_vol = self.mock_object(
+ self.client, '_get_volume_by_args', return_value=volume_response)
+
+ result = self.client.get_flexvol(
+ flexvol_name=fake_client.VOLUME_NAMES[0],
+ flexvol_path='/%s' % fake_client.VOLUME_NAMES[0])
+
+ fields = ('aggregates.name,name,svm.name,nas.path,'
+ 'type,guarantee.honored,guarantee.type,'
+ 'space.snapshot.reserve_percent,space.size,'
+ 'qos.policy.name,snapshot_policy,language,style')
+ mock_get_unique_vol.assert_called_once_with(
+ vol_name=fake_client.VOLUME_NAMES[0],
+ vol_path='/%s' % fake_client.VOLUME_NAMES[0], fields=fields)
+
+ if is_flexgroup:
+ self.assertEqual(fake_client.VOLUME_INFO_SSC_FLEXGROUP, result)
+ else:
+ self.assertEqual(fake_client.VOLUME_INFO_SSC, result)
+
+ def test_list_flexvols(self):
+ api_response = fake_client.VOLUME_GET_ITER_LIST_RESPONSE_REST
+ self.mock_object(self.client,
+ 'send_request',
+ return_value=api_response)
+
+ result = self.client.list_flexvols()
+
+ query = {
+ 'type': 'rw',
+ 'style': 'flex*', # Match both 'flexvol' and 'flexgroup'
+ 'is_svm_root': 'false',
+ 'error_state.is_inconsistent': 'false',
+ # 'is-invalid': 'false',
+ 'state': 'online',
+ 'fields': 'name'
+ }
+
+ self.client.send_request.assert_called_once_with(
+ '/storage/volumes/', 'get', query=query)
+ self.assertEqual(list(fake_client.VOLUME_NAMES), result)
+
+ def test_list_flexvols_not_found(self):
+ api_response = fake_client.NO_RECORDS_RESPONSE_REST
+ self.mock_object(self.client,
+ 'send_request',
+ return_value=api_response)
+
+ result = self.client.list_flexvols()
+ self.assertEqual([], result)
+
+ def test_is_flexvol_mirrored(self):
+
+ api_response = fake_client.GET_NUM_RECORDS_RESPONSE_REST
+ self.mock_object(self.client,
+ 'send_request',
+ return_value=api_response)
+
+ result = self.client.is_flexvol_mirrored(
+ fake_client.VOLUME_NAMES[0], fake_client.VOLUME_VSERVER_NAME)
+
+ query = {
+ 'source.path': fake_client.VOLUME_VSERVER_NAME +
+ ':' + fake_client.VOLUME_NAMES[0],
+ 'state': 'snapmirrored',
+ 'return_records': 'false',
+ }
+
+ self.client.send_request.assert_called_once_with(
+ '/snapmirror/relationships/', 'get', query=query)
+ self.assertTrue(result)
+
+ def test_is_flexvol_mirrored_not_mirrored(self):
+
+ api_response = fake_client.NO_RECORDS_RESPONSE_REST
+ self.mock_object(self.client,
+ 'send_request',
+ return_value=api_response)
+
+ result = self.client.is_flexvol_mirrored(
+ fake_client.VOLUME_NAMES[0], fake_client.VOLUME_VSERVER_NAME)
+
+ self.assertFalse(result)
+
+ def test_is_flexvol_mirrored_api_error(self):
+
+ self.mock_object(self.client,
+ 'send_request',
+ side_effect=self._mock_api_error())
+
+ result = self.client.is_flexvol_mirrored(
+ fake_client.VOLUME_NAMES[0], fake_client.VOLUME_VSERVER_NAME)
+
+ self.assertFalse(result)
+
+ def test_is_flexvol_encrypted(self):
+
+ api_response = fake_client.GET_NUM_RECORDS_RESPONSE_REST
+ self.client.features.add_feature('FLEXVOL_ENCRYPTION')
+ self.mock_object(self.client,
+ 'send_request',
+ return_value=api_response)
+
+ result = self.client.is_flexvol_encrypted(
+ fake_client.VOLUME_NAME, fake_client.VOLUME_VSERVER_NAME)
+
+ query = {
+ 'encryption.enabled': 'true',
+ 'name': fake_client.VOLUME_NAME,
+ 'svm.name': fake_client.VOLUME_VSERVER_NAME,
+ 'return_records': 'false',
+ }
+
+ self.client.send_request.assert_called_once_with(
+ '/storage/volumes/', 'get', query=query)
+
+ self.assertTrue(result)
+
+ def test_is_flexvol_encrypted_unsupported_version(self):
+
+ self.client.features.add_feature('FLEXVOL_ENCRYPTION', supported=False)
+ result = self.client.is_flexvol_encrypted(
+ fake_client.VOLUME_NAMES[0], fake_client.VOLUME_VSERVER_NAME)
+
+ self.assertFalse(result)
+
+ def test_is_flexvol_encrypted_no_records_found(self):
+
+ api_response = fake_client.NO_RECORDS_RESPONSE_REST
+ self.mock_object(self.client,
+ 'send_request',
+ return_value=api_response)
+
+ result = self.client.is_flexvol_encrypted(
+ fake_client.VOLUME_NAMES[0], fake_client.VOLUME_VSERVER_NAME)
+
+ self.assertFalse(result)
+
+ def test_is_flexvol_encrypted_api_error(self):
+
+ self.mock_object(self.client,
+ 'send_request',
+ side_effect=self._mock_api_error())
+
+ result = self.client.is_flexvol_encrypted(
+ fake_client.VOLUME_NAMES[0], fake_client.VOLUME_VSERVER_NAME)
+
+ self.assertFalse(result)
+
+ @ddt.data({'types': {'FCAL'}, 'expected': ['FCAL']},
+ {'types': {'SATA', 'SSD'}, 'expected': ['SATA', 'SSD']},)
+ @ddt.unpack
+ def test_get_aggregate_disk_types(self, types, expected):
+
+ mock_get_aggregate_disk_types = self.mock_object(
+ self.client, '_get_aggregate_disk_types', return_value=types)
+
+ result = self.client.get_aggregate_disk_types(
+ fake_client.VOLUME_AGGREGATE_NAME)
+
+ self.assertCountEqual(expected, result)
+ mock_get_aggregate_disk_types.assert_called_once_with(
+ fake_client.VOLUME_AGGREGATE_NAME)
+
+ def test_get_aggregate_disk_types_not_found(self):
+
+ mock_get_aggregate_disk_types = self.mock_object(
+ self.client, '_get_aggregate_disk_types', return_value=set())
+
+ result = self.client.get_aggregate_disk_types(
+ fake_client.VOLUME_AGGREGATE_NAME)
+
+ self.assertIsNone(result)
+ mock_get_aggregate_disk_types.assert_called_once_with(
+ fake_client.VOLUME_AGGREGATE_NAME)
+
+ def test_get_aggregate_disk_types_api_not_found(self):
+
+ api_error = netapp_api.NaApiError()
+ self.mock_object(self.client,
+ 'send_request',
+ side_effect=api_error)
+
+ result = self.client.get_aggregate_disk_types(
+ fake_client.VOLUME_AGGREGATE_NAME)
+
+ self.assertIsNone(result)
+
+ def test__get_aggregates(self):
+
+ api_response = fake_client.AGGR_GET_ITER_RESPONSE_REST
+ mock_send_request = self.mock_object(self.client,
+ 'send_request',
+ return_value=api_response)
+
+ result = self.client._get_aggregates()
+
+ mock_send_request.assert_has_calls(
+ [mock.call('/storage/aggregates', 'get', query={},
+ enable_tunneling=False)])
+ self.assertEqual(result, api_response['records'])
+
+ def test__get_aggregates_with_filters(self):
+
+ api_response = fake_client.AGGR_GET_ITER_RESPONSE_REST
+ mock_send_request = self.mock_object(self.client,
+ 'send_request',
+ return_value=api_response)
+ query = {
+ 'fields': 'space.block_storage.size,space.block_storage.available',
+ 'name': ','.join(fake_client.VOLUME_AGGREGATE_NAMES),
+ }
+
+ result = self.client._get_aggregates(
+ aggregate_names=fake_client.VOLUME_AGGREGATE_NAMES,
+ fields=query['fields'])
+
+ mock_send_request.assert_has_calls([
+ mock.call('/storage/aggregates', 'get', query=query,
+ enable_tunneling=False)])
+ self.assertEqual(result, api_response['records'])
+
+ def test__get_aggregates_not_found(self):
+
+ api_response = fake_client.NO_RECORDS_RESPONSE_REST
+ mock_send_request = self.mock_object(self.client,
+ 'send_request',
+ return_value=api_response)
+
+ result = self.client._get_aggregates()
+
+ mock_send_request.assert_has_calls([
+ mock.call('/storage/aggregates', 'get', query={},
+ enable_tunneling=False)])
+ self.assertEqual([], result)
+
+ def test_get_aggregate_none_specified(self):
+
+ result = self.client.get_aggregate('')
+
+ self.assertEqual({}, result)
+
+ def test_get_aggregate(self):
+
+ api_response = [fake_client.AGGR_GET_ITER_RESPONSE_REST['records'][1]]
+
+ mock__get_aggregates = self.mock_object(self.client,
+ '_get_aggregates',
+ return_value=api_response)
+
+ response = self.client.get_aggregate(fake_client.VOLUME_AGGREGATE_NAME)
+
+ fields = ('name,block_storage.primary.raid_type,'
+ 'block_storage.storage_type,home_node.name')
+ mock__get_aggregates.assert_has_calls([
+ mock.call(
+ aggregate_names=[fake_client.VOLUME_AGGREGATE_NAME],
+ fields=fields)])
+
+ expected = {
+ 'name': fake_client.VOLUME_AGGREGATE_NAME,
+ 'raid-type': 'raid0',
+ 'is-hybrid': False,
+ 'node-name': fake_client.NODE_NAME,
+ }
+ self.assertEqual(expected, response)
+
+ def test_get_aggregate_not_found(self):
+
+ api_response = fake_client.NO_RECORDS_RESPONSE_REST
+ self.mock_object(self.client,
+ 'send_request',
+ return_value=api_response)
+
+ result = self.client.get_aggregate(fake_client.VOLUME_AGGREGATE_NAME)
+
+ self.assertEqual({}, result)
+
+ def test_get_aggregate_api_error(self):
+
+ self.mock_object(self.client,
+ 'send_request',
+ side_effect=self._mock_api_error())
+
+ result = self.client.get_aggregate(fake_client.VOLUME_AGGREGATE_NAME)
+
+ self.assertEqual({}, result)
+
+ def test_get_aggregate_api_not_found(self):
+
+ api_error = netapp_api.NaApiError(code=netapp_api.REST_API_NOT_FOUND)
+
+ self.mock_object(self.client,
+ 'send_request',
+ side_effect=api_error)
+
+ result = self.client.get_aggregate(fake_client.VOLUME_AGGREGATE_NAME)
+
+ self.assertEqual({}, result)
+
+ @ddt.data(True, False)
+ def test_is_qos_min_supported(self, supported):
+ self.client.features.add_feature('test', supported=supported)
+ mock_name = self.mock_object(netapp_utils,
+ 'qos_min_feature_name',
+ return_value='test')
+ result = self.client.is_qos_min_supported(True, 'node')
+
+ mock_name.assert_called_once_with(True, 'node')
+ self.assertEqual(result, supported)
+
+ def test_is_qos_min_supported_invalid_node(self):
+ mock_name = self.mock_object(netapp_utils,
+ 'qos_min_feature_name',
+ return_value='invalid_feature')
+ result = self.client.is_qos_min_supported(True, 'node')
+
+ mock_name.assert_called_once_with(True, 'node')
+ self.assertFalse(result)
+
+ def test_is_qos_min_supported_none_node(self):
+ result = self.client.is_qos_min_supported(True, None)
+
+ self.assertFalse(result)
+
+ def test_get_flexvol_dedupe_info(self):
+
+ api_response = fake_client.VOLUME_GET_ITER_SSC_RESPONSE_REST
+ mock_send_request = self.mock_object(self.client,
+ 'send_request',
+ return_value=api_response)
+
+ result = self.client.get_flexvol_dedupe_info(
+ fake_client.VOLUME_NAMES[0])
+
+ query = {
+ 'efficiency.volume_path': '/vol/%s' % fake_client.VOLUME_NAMES[0],
+ 'fields': 'efficiency.state,efficiency.compression'
+ }
+
+ mock_send_request.assert_called_once_with(
+ '/storage/volumes', 'get', query=query)
+ self.assertEqual(
+ fake_client.VOLUME_DEDUPE_INFO_SSC_NO_LOGICAL_DATA, result)
+
+ def test_get_flexvol_dedupe_info_no_logical_data_values(self):
+
+ api_response = fake_client.VOLUME_GET_ITER_SSC_RESPONSE_REST
+ self.mock_object(self.client,
+ 'send_request',
+ return_value=api_response)
+
+ result = self.client.get_flexvol_dedupe_info(
+ fake_client.VOLUME_NAMES[0])
+
+ self.assertEqual(fake_client.VOLUME_DEDUPE_INFO_SSC_NO_LOGICAL_DATA,
+ result)
+
+ def test_get_flexvol_dedupe_info_not_found(self):
+
+ api_response = fake_client.NO_RECORDS_RESPONSE_REST
+ self.mock_object(self.client,
+ 'send_request',
+ return_value=api_response)
+
+ result = self.client.get_flexvol_dedupe_info(
+ fake_client.VOLUME_NAMES[0])
+
+ self.assertEqual(fake_client.VOLUME_DEDUPE_INFO_SSC_NO_LOGICAL_DATA,
+ result)
+
+ def test_get_flexvol_dedupe_info_api_error(self):
+
+ self.mock_object(self.client,
+ 'send_request',
+ side_effect=self._mock_api_error())
+
+ result = self.client.get_flexvol_dedupe_info(
+ fake_client.VOLUME_NAMES[0])
+
+ self.assertEqual(fake_client.VOLUME_DEDUPE_INFO_SSC_NO_LOGICAL_DATA,
+ result)
+
+ def test_get_flexvol_dedupe_info_api_insufficient_privileges(self):
+
+ api_error = netapp_api.NaApiError(code=netapp_api.EAPIPRIVILEGE)
+ self.mock_object(self.client,
+ 'send_request',
+ side_effect=api_error)
+
+ result = self.client.get_flexvol_dedupe_info(
+ fake_client.VOLUME_NAMES[0])
+
+ self.assertEqual(fake_client.VOLUME_DEDUPE_INFO_SSC_NO_LOGICAL_DATA,
+ result)
+
+ def test_get_lun_list(self):
+ response = fake_client.LUN_GET_ITER_REST
+ self.mock_object(self.client,
+ 'send_request',
+ return_value=response)
+
+ expected_result = fake_client.LUN_GET_ITER_RESULT
+ luns = self.client.get_lun_list()
+
+ self.assertEqual(expected_result, luns)
+ self.assertEqual(2, len(luns))
+
+ def test_get_lun_list_no_records(self):
+ response = fake_client.NO_RECORDS_RESPONSE_REST
+ self.mock_object(self.client,
+ 'send_request',
+ return_value=response)
+
+ luns = self.client.get_lun_list()
+
+ self.assertEqual([], luns)
+
+ def test_get_lun_sizes_by_volume(self):
+ volume_name = fake_client.VOLUME_NAME
+ query = {
+ 'location.volume.name': volume_name,
+ 'fields': 'space.size,name'
+ }
+ response = fake_client.LUN_GET_ITER_REST
+ expected_result = []
+ for lun in fake_client.LUN_GET_ITER_RESULT:
+ expected_result.append({
+ 'size': lun['Size'],
+ 'path': lun['Path'],
+ })
+
+ self.mock_object(self.client,
+ 'send_request',
+ return_value=response)
+
+ luns = self.client.get_lun_sizes_by_volume(volume_name)
+
+ self.assertEqual(expected_result, luns)
+ self.assertEqual(2, len(luns))
+ self.client.send_request.assert_called_once_with(
+ '/storage/luns/', 'get', query=query)
+
+ def test_get_lun_sizes_by_volume_no_records(self):
+ volume_name = fake_client.VOLUME_NAME
+ query = {
+ 'location.volume.name': volume_name,
+ 'fields': 'space.size,name'
+ }
+ response = fake_client.NO_RECORDS_RESPONSE_REST
+
+ self.mock_object(self.client,
+ 'send_request',
+ return_value=response)
+
+ luns = self.client.get_lun_sizes_by_volume(volume_name)
+
+ self.assertEqual([], luns)
+ self.client.send_request.assert_called_once_with(
+ '/storage/luns/', 'get', query=query)
+
+ def test_get_lun_by_args(self):
+ response = fake_client.LUN_GET_ITER_REST
+ mock_send_request = self.mock_object(
+ self.client, 'send_request', return_value=response)
+
+ lun_info_args = {
+ 'vserver': fake.VSERVER_NAME,
+ 'path': fake.LUN_PATH,
+ 'uuid': fake.UUID1,
+ }
+
+ luns = self.client.get_lun_by_args(**lun_info_args)
+
+ query = {
+ 'svm.name': fake.VSERVER_NAME,
+ 'name': fake.LUN_PATH,
+ 'uuid': fake.UUID1,
+ 'fields': 'svm.name,location.volume.name,space.size,'
+ 'location.qtree.name,name,os_type,'
+ 'space.guarantee.requested,uuid'
+ }
+
+ mock_send_request.assert_called_once_with(
+ '/storage/luns/', 'get', query=query)
+
+ self.assertEqual(2, len(luns))
+
+ def test_get_lun_by_args_no_lun_found(self):
+ response = fake_client.NO_RECORDS_RESPONSE_REST
+ self.mock_object(self.client,
+ 'send_request',
+ return_value=response)
+
+ luns = self.client.get_lun_by_args()
+
+ self.assertEqual([], luns)
+
+ def test_get_lun_by_args_with_one_arg(self):
+ path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun)
+ response = fake_client.LUN_GET_ITER_REST
+ mock_send_request = self.mock_object(
+ self.client, 'send_request', return_value=response)
+
+ luns = self.client.get_lun_by_args(path=path)
+
+ query = {
+ 'name': path,
+ 'fields': 'svm.name,location.volume.name,space.size,'
+ 'location.qtree.name,name,os_type,'
+ 'space.guarantee.requested,uuid'
+ }
+
+ mock_send_request.assert_called_once_with(
+ '/storage/luns/', 'get', query=query)
+
+ self.assertEqual(2, len(luns))
+
+ def test_get_file_sizes_by_dir(self):
+ volume = fake_client.VOLUME_ITEM_SIMPLE_RESPONSE_REST
+ query = {
+ 'type': 'file',
+ 'fields': 'size,name'
+ }
+ response = fake_client.FILE_DIRECTORY_GET_ITER_REST
+ expected_result = fake_client.FILE_DIRECTORY_GET_ITER_RESULT_REST
+
+ self.mock_object(self.client,
+ '_get_volume_by_args',
+ return_value=volume)
+ self.mock_object(self.client,
+ 'send_request',
+ return_value=response)
+
+ files = self.client.get_file_sizes_by_dir(volume['name'])
+
+ self.assertEqual(expected_result, files)
+ self.assertEqual(2, len(files))
+ self.client.send_request.assert_called_once_with(
+ f'/storage/volumes/{volume["uuid"]}/files',
+ 'get', query=query)
+
+ def test_get_file_sizes_by_dir_no_records(self):
+ volume = fake_client.VOLUME_ITEM_SIMPLE_RESPONSE_REST
+ query = {
+ 'type': 'file',
+ 'fields': 'size,name'
+ }
+
+ api_error = netapp_api.NaApiError(code=netapp_api.REST_NO_SUCH_FILE)
+
+ self.mock_object(self.client,
+ '_get_volume_by_args',
+ return_value=volume)
+ self.mock_object(self.client,
+ 'send_request',
+ side_effect=api_error)
+
+ files = self.client.get_file_sizes_by_dir(volume['name'])
+
+ self.assertEqual([], files)
+ self.assertEqual(0, len(files))
+ self.client.send_request.assert_called_once_with(
+ f'/storage/volumes/{volume["uuid"]}/files',
+ 'get', query=query)
+
+ def test_get_file_sizes_by_dir_exception(self):
+ volume = fake_client.VOLUME_ITEM_SIMPLE_RESPONSE_REST
+ api_error = netapp_api.NaApiError(code=0)
+
+ self.mock_object(self.client,
+ '_get_volume_by_args',
+ return_value=volume)
+ self.mock_object(self.client,
+ 'send_request',
+ side_effect=api_error)
+ self.assertRaises(netapp_api.NaApiError,
+ self.client.get_file_sizes_by_dir,
+ volume['name'])
+
+ @ddt.data({'junction_path': '/fake/vol'},
+ {'name': 'fake_volume'},
+ {'junction_path': '/fake/vol', 'name': 'fake_volume'})
+ def test_get_volume_state(self, kwargs):
+ query_args = {}
+ query_args['fields'] = 'state'
+
+ if 'name' in kwargs:
+ query_args['name'] = kwargs['name']
+ if 'junction_path' in kwargs:
+ query_args['nas.path'] = kwargs['junction_path']
+
+ response = fake_client.VOLUME_GET_ITER_STATE_RESPONSE_REST
+ mock_send_request = self.mock_object(
+ self.client, 'send_request', return_value=response)
+
+ state = self.client.get_volume_state(**kwargs)
+
+ mock_send_request.assert_called_once_with(
+ '/storage/volumes/', 'get', query=query_args)
+
+ self.assertEqual(fake_client.VOLUME_STATE_ONLINE, state)
+
+ def test_delete_snapshot(self):
+ volume = fake_client.VOLUME_GET_ITER_SSC_RESPONSE_STR_REST
+ self.mock_object(
+ self.client, '_get_volume_by_args',
+ return_value=volume)
+ snap_name = fake.SNAPSHOT["name"]
+ self.mock_object(self.client, 'send_request')
+
+ self.client.delete_snapshot(volume["name"], snap_name)
+
+ self.client._get_volume_by_args.assert_called_once_with(
+ vol_name=volume["name"])
+ self.client.send_request.assert_called_once_with(
+ f'/storage/volumes/{volume["uuid"]}/snapshots'
+ f'?name={snap_name}', 'delete')
+
+ def test_get_operational_lif_addresses(self):
+ expected_result = ['1.2.3.4', '99.98.97.96']
+ api_response = fake_client.GET_OPERATIONAL_LIF_ADDRESSES_RESPONSE_REST
+
+ mock_send_request = self.mock_object(self.client,
+ 'send_request',
+ return_value=api_response)
+ address_list = self.client.get_operational_lif_addresses()
+
+ query = {
+ 'state': 'up',
+ 'fields': 'ip.address',
+ }
+
+ mock_send_request.assert_called_once_with(
+ '/network/ip/interfaces/', 'get', query=query)
+
+ self.assertEqual(expected_result, address_list)
+
+ def test__list_vservers(self):
+ api_response = fake_client.VSERVER_DATA_LIST_RESPONSE_REST
+ self.mock_object(self.client,
+ 'send_request',
+ return_value=api_response)
+ result = self.client._list_vservers()
+ query = {
+ 'fields': 'name',
+ }
+ self.client.send_request.assert_has_calls([
+ mock.call('/svm/svms', 'get', query=query,
+ enable_tunneling=False)])
+ self.assertListEqual(
+ [fake_client.VSERVER_NAME, fake_client.VSERVER_NAME_2], result)
+
+ def test_list_vservers_not_found(self):
+ api_response = fake_client.NO_RECORDS_RESPONSE_REST
+ self.mock_object(self.client,
+ 'send_request',
+ return_value=api_response)
+ result = self.client._list_vservers()
+ self.assertListEqual([], result)
+
+ def test_get_ems_log_destination_vserver(self):
+ mock_list_vservers = self.mock_object(
+ self.client,
+ '_list_vservers',
+ return_value=[fake_client.VSERVER_NAME])
+ result = self.client._get_ems_log_destination_vserver()
+ mock_list_vservers.assert_called_once_with()
+ self.assertEqual(fake_client.VSERVER_NAME, result)
+
+ def test_get_ems_log_destination_vserver_not_found(self):
+ mock_list_vservers = self.mock_object(
+ self.client,
+ '_list_vservers',
+ return_value=[])
+
+ self.assertRaises(exception.NotFound,
+ self.client._get_ems_log_destination_vserver)
+
+ mock_list_vservers.assert_called_once_with()
+
+ def test_send_ems_log_message(self):
+
+ message_dict = {
+ 'computer-name': '25-dev-vm',
+ 'event-source': 'Cinder driver NetApp_iSCSI_Cluster_direct',
+ 'app-version': '20.1.0.dev|vendor|Linux-5.4.0-120-generic-x86_64',
+ 'category': 'provisioning',
+ 'log-level': '5',
+ 'auto-support': 'false',
+ 'event-id': '1',
+ 'event-description':
+ '{"pools": {"vserver": "vserver_name",'
+ + '"aggregates": [], "flexvols": ["flexvol_01"]}}'
+ }
+
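+        # Expected REST body: hyphenated ZAPI-style keys become
+        # snake_case, and log-level 5 is assumed to map to the 'notice'
+        # severity.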
+ body = {
+ 'computer_name': message_dict['computer-name'],
+ 'event_source': message_dict['event-source'],
+ 'app_version': message_dict['app-version'],
+ 'category': message_dict['category'],
+ 'severity': 'notice',
+ 'autosupport_required': message_dict['auto-support'] == 'true',
+ 'event_id': message_dict['event-id'],
+ 'event_description': message_dict['event-description'],
+ }
+
+ self.mock_object(self.client, '_get_ems_log_destination_vserver',
+ return_value='vserver_name')
+ self.mock_object(self.client, 'send_request')
+
+ self.client.send_ems_log_message(message_dict)
+
+ self.client.send_request.assert_called_once_with(
+ '/support/ems/application-logs', 'post', body=body)
+
+ @ddt.data('cp_phase_times', 'domain_busy')
+ def test_get_performance_counter_info(self, counter_name):
+
+ response1 = fake_client.PERF_COUNTER_LIST_INFO_WAFL_RESPONSE_REST
+ response2 = fake_client.PERF_COUNTER_TABLE_ROWS_WAFL
+
+ object_name = 'wafl'
+
+ mock_send_request = self.mock_object(
+ self.client, 'send_request',
+ side_effect=[response1, response2])
+
+ result = self.client.get_performance_counter_info(object_name,
+ counter_name)
+
+ expected = {
+ 'name': 'cp_phase_times',
+ 'base-counter': 'total_cp_msecs',
+ 'labels': fake_client.PERF_COUNTER_TOTAL_CP_MSECS_LABELS_RESULT,
+ }
+
+ query1 = {
+ 'counter_schemas.name': counter_name,
+ 'fields': 'counter_schemas.*'
+ }
+
+ query2 = {
+ 'counters.name': counter_name,
+ 'fields': 'counters.*'
+ }
+
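+ # For the legacy 'domain_busy' counter the REST client is expected to
+ # query the 'domain_busy_percent' counter instead.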
+ if counter_name == 'domain_busy':
+ expected['name'] = 'domain_busy'
+ expected['labels'] = (
+ fake_client.PERF_COUNTER_TOTAL_CP_MSECS_LABELS_REST)
+ query1['counter_schemas.name'] = 'domain_busy_percent'
+ query2['counters.name'] = 'domain_busy_percent'
+
+ self.assertEqual(expected, result)
+
+ mock_send_request.assert_has_calls([
+ mock.call(f'/cluster/counter/tables/{object_name}',
+ 'get', query=query1, enable_tunneling=False),
+ mock.call(f'/cluster/counter/tables/{object_name}/rows',
+ 'get', query=query2, enable_tunneling=False),
+ ])
+
+ def test_get_performance_counter_info_not_found_rows(self):
+ response1 = fake_client.PERF_COUNTER_LIST_INFO_WAFL_RESPONSE_REST
+ response2 = fake_client.NO_RECORDS_RESPONSE_REST
+
+ object_name = 'wafl'
+ counter_name = 'cp_phase_times'
+
+ self.mock_object(
+ self.client, 'send_request',
+ side_effect=[response1, response2])
+
+ result = self.client.get_performance_counter_info(object_name,
+ counter_name)
+
+ expected = {
+ 'name': 'cp_phase_times',
+ 'base-counter': 'total_cp_msecs',
+ 'labels': [],
+ }
+ self.assertEqual(expected, result)
+
+ def test_get_performance_instance_uuids(self):
+ response = fake_client.PERF_COUNTER_TABLE_ROWS_WAFL
+
+ mock_send_request = self.mock_object(
+ self.client, 'send_request',
+ return_value=response)
+
+ object_name = 'wafl'
+ result = self.client.get_performance_instance_uuids(
+ object_name, fake_client.NODE_NAME)
+
+ expected = [fake_client.NODE_NAME + ':wafl']
+ self.assertEqual(expected, result)
+
+ query = {
+ 'id': fake_client.NODE_NAME + ':*',
+ }
+ mock_send_request.assert_called_once_with(
+ f'/cluster/counter/tables/{object_name}/rows',
+ 'get', query=query, enable_tunneling=False)
+
+ def test_get_performance_counters(self):
+ response = fake_client.PERF_GET_INSTANCES_PROCESSOR_RESPONSE_REST
+
+ mock_send_request = self.mock_object(
+ self.client, 'send_request',
+ return_value=response)
+
+ instance_uuids = [
+ fake_client.NODE_NAME + ':processor0',
+ fake_client.NODE_NAME + ':processor1',
+ ]
+ object_name = 'processor'
+ counter_names = ['domain_busy', 'processor_elapsed_time']
+ rest_counter_names = ['domain_busy_percent', 'elapsed_time']
+ result = self.client.get_performance_counters(object_name,
+ instance_uuids,
+ counter_names)
+
+ expected = fake_client.PERF_COUNTERS_PROCESSOR_EXPECTED
+ self.assertEqual(expected, result)
+
+ query = {
+ 'id': '|'.join(instance_uuids),
+ 'counters.name': '|'.join(rest_counter_names),
+ 'fields': 'id,counter_table.name,counters.*',
+ }
+
+ mock_send_request.assert_called_once_with(
+ f'/cluster/counter/tables/{object_name}/rows',
+ 'get', query=query, enable_tunneling=False)
+
+ def test_get_aggregate_capacities(self):
+ aggr1_capacities = {
+ 'percent-used': 50,
+ 'size-available': 100.0,
+ 'size-total': 200.0,
+ }
+ aggr2_capacities = {
+ 'percent-used': 75,
+ 'size-available': 125.0,
+ 'size-total': 500.0,
+ }
+ mock_get_aggregate_capacity = self.mock_object(
+ self.client, '_get_aggregate_capacity',
+ side_effect=[aggr1_capacities, aggr2_capacities])
+
+ result = self.client.get_aggregate_capacities(['aggr1', 'aggr2'])
+
+ expected = {
+ 'aggr1': aggr1_capacities,
+ 'aggr2': aggr2_capacities,
+ }
+ self.assertEqual(expected, result)
+ mock_get_aggregate_capacity.assert_has_calls([
+ mock.call('aggr1'),
+ mock.call('aggr2'),
+ ])
+
+ def test_get_aggregate_capacities_not_found(self):
+ mock_get_aggregate_capacity = self.mock_object(
+ self.client, '_get_aggregate_capacity', side_effect=[{}, {}])
+
+ result = self.client.get_aggregate_capacities(['aggr1', 'aggr2'])
+
+ expected = {
+ 'aggr1': {},
+ 'aggr2': {},
+ }
+ self.assertEqual(expected, result)
+ mock_get_aggregate_capacity.assert_has_calls([
+ mock.call('aggr1'),
+ mock.call('aggr2'),
+ ])
+
+ def test_get_aggregate_capacities_not_list(self):
+ result = self.client.get_aggregate_capacities('aggr1')
+ self.assertEqual({}, result)
+
+ def test__get_aggregate_capacity(self):
+ api_response = fake_client.AGGR_GET_ITER_RESPONSE_REST['records']
+ mock_get_aggregates = self.mock_object(self.client,
+ '_get_aggregates',
+ return_value=api_response)
+
+ result = self.client._get_aggregate_capacity(
+ fake_client.VOLUME_AGGREGATE_NAME)
+
+ fields = ('space.block_storage.available,space.block_storage.size,'
+ 'space.block_storage.used')
+ mock_get_aggregates.assert_has_calls([
+ mock.call(aggregate_names=[fake_client.VOLUME_AGGREGATE_NAME],
+ fields=fields)])
+
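+ # Capacity math mirrors the client: percent-used is the integer floor
+ # of used / total * 100.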
+ available = float(fake_client.AGGR_SIZE_AVAILABLE)
+ total = float(fake_client.AGGR_SIZE_TOTAL)
+ used = float(fake_client.AGGR_SIZE_USED)
+ percent_used = int((used * 100) // total)
+
+ expected = {
+ 'percent-used': percent_used,
+ 'size-available': available,
+ 'size-total': total,
+ }
+ self.assertEqual(expected, result)
+
+ def test__get_aggregate_capacity_not_found(self):
+
+ api_response = fake_client.NO_RECORDS_RESPONSE_REST
+ self.mock_object(self.client,
+ 'send_request',
+ return_value=api_response)
+
+ result = self.client._get_aggregate_capacity(
+ fake_client.VOLUME_AGGREGATE_NAME)
+
+ self.assertEqual({}, result)
+
+ def test__get_aggregate_capacity_api_error(self):
+
+ self.mock_object(self.client,
+ 'send_request',
+ side_effect=self._mock_api_error())
+
+ result = self.client._get_aggregate_capacity(
+ fake_client.VOLUME_AGGREGATE_NAME)
+
+ self.assertEqual({}, result)
+
+ def test__get_aggregate_capacity_api_not_found(self):
+
+ api_error = netapp_api.NaApiError(code=netapp_api.REST_API_NOT_FOUND)
+ self.mock_object(
+ self.client, 'send_request', side_effect=api_error)
+
+ result = self.client._get_aggregate_capacity(
+ fake_client.VOLUME_AGGREGATE_NAME)
+
+ self.assertEqual({}, result)
+
+ def test_get_node_for_aggregate(self):
+
+ api_response = fake_client.AGGR_GET_ITER_RESPONSE_REST['records']
+ mock_get_aggregates = self.mock_object(self.client,
+ '_get_aggregates',
+ return_value=api_response)
+
+ result = self.client.get_node_for_aggregate(
+ fake_client.VOLUME_AGGREGATE_NAME)
+
+ fields = 'home_node.name'
+ mock_get_aggregates.assert_has_calls([
+ mock.call(
+ aggregate_names=[fake_client.VOLUME_AGGREGATE_NAME],
+ fields=fields)])
+
+ self.assertEqual(fake_client.NODE_NAME, result)
+
+ def test_get_node_for_aggregate_none_requested(self):
+ result = self.client.get_node_for_aggregate(None)
+ self.assertIsNone(result)
+
+ def test_get_node_for_aggregate_api_not_found(self):
+ api_error = netapp_api.NaApiError(code=netapp_api.REST_API_NOT_FOUND)
+ self.mock_object(self.client,
+ 'send_request',
+ side_effect=api_error)
+
+ result = self.client.get_node_for_aggregate(
+ fake_client.VOLUME_AGGREGATE_NAME)
+
+ self.assertIsNone(result)
+
+ def test_get_node_for_aggregate_api_error(self):
+
+ self.mock_object(self.client,
+ 'send_request',
+ self._mock_api_error())
+
+ self.assertRaises(netapp_api.NaApiError,
+ self.client.get_node_for_aggregate,
+ fake_client.VOLUME_AGGREGATE_NAME)
+
+ def test_get_node_for_aggregate_not_found(self):
+
+ api_response = fake_client.NO_RECORDS_RESPONSE_REST
+ self.mock_object(self.client,
+ 'send_request',
+ return_value=api_response)
+
+ result = self.client.get_node_for_aggregate(
+ fake_client.VOLUME_AGGREGATE_NAME)
+
+ self.assertIsNone(result)
+
+ @ddt.data(None, {'legacy': 'fake'}, {})
+ def test_provision_qos_policy_group_invalid_policy_info(self, policy_info):
+ self.mock_object(self.client, '_validate_qos_policy_group')
+ self.mock_object(self.client, '_get_qos_first_policy_group_by_name')
+ self.mock_object(self.client, '_create_qos_policy_group')
+ self.mock_object(self.client, '_modify_qos_policy_group')
+
+ self.client.provision_qos_policy_group(policy_info, False)
+
+ self.client._validate_qos_policy_group.assert_not_called()
+ self.client._get_qos_first_policy_group_by_name.assert_not_called()
+ self.client._create_qos_policy_group.assert_not_called()
+ self.client._modify_qos_policy_group.assert_not_called()
+
+ @ddt.data(True, False)
+ def test_provision_qos_policy_group_qos_policy_create(self, is_adaptive):
+ policy_info = fake.QOS_POLICY_GROUP_INFO
+ policy_spec = fake.QOS_POLICY_GROUP_SPEC
+ if is_adaptive:
+ policy_info = fake.ADAPTIVE_QOS_POLICY_GROUP_INFO
+ policy_spec = fake.ADAPTIVE_QOS_SPEC
+
+ self.mock_object(self.client, '_validate_qos_policy_group')
+ self.mock_object(self.client, '_get_qos_first_policy_group_by_name',
+ return_value=None)
+ self.mock_object(self.client, '_create_qos_policy_group')
+ self.mock_object(self.client, '_modify_qos_policy_group')
+
+ self.client.provision_qos_policy_group(policy_info, True)
+
+ self.client._validate_qos_policy_group.assert_called_once_with(
+ is_adaptive, spec=policy_spec, qos_min_support=True)
+ (self.client._get_qos_first_policy_group_by_name.
+ assert_called_once_with(policy_spec['policy_name']))
+ self.client._create_qos_policy_group.assert_called_once_with(
+ policy_spec, is_adaptive)
+ self.client._modify_qos_policy_group.assert_not_called()
+
+ @ddt.data(True, False)
+ def test_provision_qos_policy_group_qos_policy_modify(self, is_adaptive):
+ policy_rest_item = fake.QOS_POLICY_BY_NAME_RESPONSE_REST['records'][0]
+ policy_info = fake.QOS_POLICY_GROUP_INFO
+ policy_spec = fake.QOS_POLICY_GROUP_SPEC
+ if is_adaptive:
+ policy_info = fake.ADAPTIVE_QOS_POLICY_GROUP_INFO
+ policy_spec = fake.ADAPTIVE_QOS_SPEC
+
+ self.mock_object(self.client, '_validate_qos_policy_group')
+ self.mock_object(self.client, '_get_qos_first_policy_group_by_name',
+ return_value=policy_rest_item)
+ self.mock_object(self.client, '_create_qos_policy_group')
+ self.mock_object(self.client, '_modify_qos_policy_group')
+
+ self.client.provision_qos_policy_group(policy_info, True)
+
+ self.client._validate_qos_policy_group.assert_called_once_with(
+ is_adaptive, spec=policy_spec, qos_min_support=True)
+ (self.client._get_qos_first_policy_group_by_name.
+ assert_called_once_with(policy_spec['policy_name']))
+ self.client._create_qos_policy_group.assert_not_called()
+ self.client._modify_qos_policy_group.assert_called_once_with(
+ policy_spec, is_adaptive, policy_rest_item)
+
+ @ddt.data(True, False)
+ def test__get_qos_first_policy_group_by_name(self, is_empty):
+ qos_rest_records = []
+ qos_item = fake.QOS_POLICY_BY_NAME_RESPONSE_REST['records'][0]
+ if not is_empty:
+ qos_rest_records = fake.QOS_POLICY_BY_NAME_RESPONSE_REST['records']
+
+ self.mock_object(self.client, '_get_qos_policy_group_by_name',
+ return_value=qos_rest_records)
+
+ result = self.client._get_qos_first_policy_group_by_name(
+ qos_item['name'])
+
+ self.client._get_qos_policy_group_by_name.assert_called_once_with(
+ qos_item['name']
+ )
+ if not is_empty:
+ self.assertEqual(qos_item, result)
+ else:
+ self.assertIsNone(result)
+
+ @ddt.data(True, False)
+ def test__get_qos_policy_group_by_name(self, is_empty):
+ qos_rest_response = {}
+ qos_rest_records = []
+ qos_name = fake.QOS_POLICY_BY_NAME_RESPONSE_REST['records'][0]['name']
+ if not is_empty:
+ qos_rest_response = fake.QOS_POLICY_BY_NAME_RESPONSE_REST
+ qos_rest_records = qos_rest_response['records']
+
+ self.mock_object(self.client, 'send_request',
+ return_value=qos_rest_response)
+
+ result = self.client._get_qos_policy_group_by_name(qos_name)
+
+ self.client.send_request.assert_called_once_with(
+ '/storage/qos/policies/', 'get', query={'name': qos_name})
+ self.assertEqual(qos_rest_records, result)
+
+ @ddt.data(True, False)
+ def test__qos_spec_to_api_args(self, is_adaptive):
+ policy_spec = copy.deepcopy(fake.QOS_POLICY_GROUP_SPEC)
+ expected_args = fake.QOS_POLICY_GROUP_API_ARGS_REST
+ if is_adaptive:
+ policy_spec = fake.ADAPTIVE_QOS_SPEC
+ expected_args = fake.ADAPTIVE_QOS_API_ARGS_REST
+
+ result = self.client._qos_spec_to_api_args(
+ policy_spec, is_adaptive, vserver=fake.VSERVER_NAME)
+
+ self.assertEqual(expected_args, result)
+
+ def test__qos_spec_to_api_args_bps(self):
+ policy_spec = copy.deepcopy(fake.QOS_POLICY_GROUP_SPEC_BPS)
+ expected_args = fake.QOS_POLICY_GROUP_API_ARGS_REST_BPS
+
+ result = self.client._qos_spec_to_api_args(
+ policy_spec, False, vserver=fake.VSERVER_NAME)
+
+ self.assertEqual(expected_args, result)
+
+ @ddt.data('100IOPS', '100iops', '100B/s', '100b/s')
+ def test__sanitize_qos_spec_value(self, value):
+ result = self.client._sanitize_qos_spec_value(value)
+
+ self.assertEqual(100, result)
+
+ @ddt.data(True, False)
+ def test__create_qos_policy_group(self, is_adaptive):
+ self.client.vserver = fake.VSERVER_NAME
+ policy_spec = fake.QOS_POLICY_GROUP_SPEC
+ body_args = fake.QOS_POLICY_GROUP_API_ARGS_REST
+ if is_adaptive:
+ policy_spec = fake.ADAPTIVE_QOS_SPEC
+ body_args = fake.ADAPTIVE_QOS_API_ARGS_REST
+
+ self.mock_object(self.client, '_qos_spec_to_api_args',
+ return_value=body_args)
+ self.mock_object(self.client, 'send_request')
+
+ self.client._create_qos_policy_group(policy_spec, is_adaptive)
+
+ self.client._qos_spec_to_api_args.assert_called_once_with(
+ policy_spec, is_adaptive, vserver=fake.VSERVER_NAME)
+ self.client.send_request.assert_called_once_with(
+ '/storage/qos/policies/', 'post', body=body_args,
+ enable_tunneling=False)
+
+ @ddt.data((False, False), (False, True), (True, False), (True, True))
+ @ddt.unpack
+ def test__modify_qos_policy_group(self, is_adaptive, same_name):
+ self.client.vserver = fake.VSERVER_NAME
+ policy_spec = fake.QOS_POLICY_GROUP_SPEC
+ body_args = copy.deepcopy(fake.QOS_POLICY_GROUP_API_ARGS_REST)
+ if is_adaptive:
+ policy_spec = fake.ADAPTIVE_QOS_SPEC
+ body_args = copy.deepcopy(fake.ADAPTIVE_QOS_API_ARGS_REST)
+
+ expected_body_args = copy.deepcopy(body_args)
+ qos_group_item = copy.deepcopy(
+ fake.QOS_POLICY_BY_NAME_RESPONSE_REST['records'][0])
+ if same_name:
+ qos_group_item['name'] = policy_spec['policy_name']
+ expected_body_args.pop('name')
+
+ self.mock_object(self.client, '_qos_spec_to_api_args',
+ return_value=body_args)
+ self.mock_object(self.client, 'send_request')
+
+ self.client._modify_qos_policy_group(
+ policy_spec, is_adaptive, qos_group_item)
+
+ self.client._qos_spec_to_api_args.assert_called_once_with(
+ policy_spec, is_adaptive)
+ self.client.send_request.assert_called_once_with(
+ f'/storage/qos/policies/{qos_group_item["uuid"]}', 'patch',
+ body=expected_body_args, enable_tunneling=False)
+
+ def test_get_vol_by_junc_vserver(self):
+ api_response = fake_client.VOLUME_LIST_SIMPLE_RESPONSE_REST
+ volume_response = fake_client.VOLUME_ITEM_SIMPLE_RESPONSE_REST
+ file_path = f'/vol/{fake_client.VOLUME_NAMES[0]}/cinder-vol'
+
+ self.mock_object(self.client, 'send_request',
+ return_value=api_response)
+ self.mock_object(self.client, '_get_unique_volume',
+ return_value=volume_response)
+
+ result = self.client.get_vol_by_junc_vserver(
+ fake_client.VOLUME_VSERVER_NAME, file_path)
+
+ query = {
+ 'type': 'rw',
+ 'style': 'flex*',
+ 'is_svm_root': 'false',
+ 'error_state.is_inconsistent': 'false',
+ 'state': 'online',
+ 'nas.path': file_path,
+ 'svm.name': fake_client.VOLUME_VSERVER_NAME,
+ 'fields': 'name,style'
+ }
+
+ self.client.send_request.assert_called_once_with(
+ '/storage/volumes/', 'get', query=query)
+ self.client._get_unique_volume.assert_called_once_with(
+ api_response["records"])
+
+ self.assertEqual(volume_response['name'], result)
+
+ def test_file_assign_qos(self):
+ volume = fake_client.VOLUME_GET_ITER_SSC_RESPONSE_STR_REST
+ self.mock_object(
+ self.client, '_get_volume_by_args',
+ return_value=volume)
+ self.mock_object(self.client, 'send_request')
+
+ self.client.file_assign_qos(
+ volume['name'], fake.QOS_POLICY_GROUP_NAME, True, fake.VOLUME_NAME)
+
+ self.client._get_volume_by_args.assert_called_once_with(volume['name'])
+ body = {'qos_policy.name': fake.QOS_POLICY_GROUP_NAME}
+ self.client.send_request.assert_called_once_with(
+ f'/storage/volumes/{volume["uuid"]}/files/{fake.VOLUME_NAME}',
+ 'patch', body=body, enable_tunneling=False)
+
+ @ddt.data(None, {})
+ def test_mark_qos_policy_group_for_deletion_invalid_policy(self,
+ policy_info):
+ self.mock_object(self.client, '_rename_qos_policy_group')
+ self.mock_object(self.client, 'remove_unused_qos_policy_groups')
+
+ self.client.mark_qos_policy_group_for_deletion(policy_info, False)
+
+ self.client._rename_qos_policy_group.assert_not_called()
+ if policy_info is None:
+ self.client.remove_unused_qos_policy_groups.assert_not_called()
+ else:
+ (self.client.remove_unused_qos_policy_groups
+ .assert_called_once_with())
+
+ @ddt.data((False, False), (False, True), (True, False), (True, True))
+ @ddt.unpack
+ def test_mark_qos_policy_group_for_deletion(self, is_adaptive, has_error):
+ policy_info = fake.QOS_POLICY_GROUP_INFO
+ if is_adaptive:
+ policy_info = fake.ADAPTIVE_QOS_POLICY_GROUP_INFO
+ current_name = policy_info['spec']['policy_name']
+ deleted_name = client_base.DELETED_PREFIX + current_name
+
+ self.mock_object(self.client, 'remove_unused_qos_policy_groups')
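+ # A failure while renaming the policy group is expected to be
+ # swallowed; the unused policy group cleanup must still run.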
+ if has_error:
+ self.mock_object(self.client, '_rename_qos_policy_group',
+ side_effect=self._mock_api_error())
+ else:
+ self.mock_object(self.client, '_rename_qos_policy_group')
+
+ self.client.mark_qos_policy_group_for_deletion(
+ policy_info, is_adaptive)
+
+ self.client._rename_qos_policy_group.assert_called_once_with(
+ current_name, deleted_name)
+ self.client.remove_unused_qos_policy_groups.assert_called_once_with()
+
+ def test__rename_qos_policy_group(self):
+ self.mock_object(self.client, 'send_request')
+ new_policy_name = 'fake_new_policy'
+
+ self.client._rename_qos_policy_group(fake.QOS_POLICY_GROUP_NAME,
+ new_policy_name)
+
+ body = {'name': new_policy_name}
+ query = {'name': fake.QOS_POLICY_GROUP_NAME}
+ self.client.send_request.assert_called_once_with(
+ '/storage/qos/policies/', 'patch', body=body, query=query,
+ enable_tunneling=False)
+
+ def test_remove_unused_qos_policy_groups(self):
+ deleted_prefix = f'{client_base.DELETED_PREFIX}*'
+
+ self.mock_object(self.client, 'send_request')
+
+ self.client.remove_unused_qos_policy_groups()
+
+ query = {'name': deleted_prefix}
+ self.client.send_request.assert_called_once_with(
+ '/storage/qos/policies', 'delete', query=query)
+
+ def test_create_lun(self):
+ metadata = copy.deepcopy(fake_client.LUN_GET_ITER_RESULT[0])
+ path = f'/vol/{fake.VOLUME_NAME}/{fake.LUN_NAME}'
+ size = 2048
+ initial_size = size
+ qos_policy_group_is_adaptive = False
+
+ self.mock_object(self.client, '_validate_qos_policy_group')
+ self.mock_object(self.client, 'send_request')
+
+ body = {
+ 'name': path,
+ 'space.size': str(initial_size),
+ 'os_type': metadata['OsType'],
+ 'space.guarantee.requested': metadata['SpaceReserved'],
+ 'qos_policy.name': fake.QOS_POLICY_GROUP_NAME
+ }
+
+ self.client.create_lun(
+ fake.VOLUME_NAME, fake.LUN_NAME, size, metadata,
+ qos_policy_group_name=fake.QOS_POLICY_GROUP_NAME,
+ qos_policy_group_is_adaptive=qos_policy_group_is_adaptive)
+
+ self.client._validate_qos_policy_group.assert_called_once_with(
+ qos_policy_group_is_adaptive)
+ self.client.send_request.assert_called_once_with(
+ '/storage/luns', 'post', body=body)
+
+ def test_do_direct_resize(self):
+ lun_path = f'/vol/{fake_client.VOLUME_NAMES[0]}/cinder-lun'
+ new_size_bytes = '1073741824'
+ body = {'name': lun_path, 'space.size': new_size_bytes}
+
+ self.mock_object(self.client, '_lun_update_by_path')
+
+ self.client.do_direct_resize(lun_path, new_size_bytes)
+
+ self.client._lun_update_by_path.assert_called_once_with(lun_path, body)
+
+ @ddt.data(True, False)
+ def test__get_lun_by_path(self, is_empty):
+ lun_path = f'/vol/{fake_client.VOLUME_NAMES[0]}/cinder-lun'
+ lun_response = fake_client.LUN_GET_ITER_REST
+ lun_records = fake_client.LUN_GET_ITER_REST['records']
+ if is_empty:
+ lun_response = {}
+ lun_records = []
+
+ self.mock_object(self.client, 'send_request',
+ return_value=lun_response)
+
+ result = self.client._get_lun_by_path(lun_path)
+
+ query = {'name': lun_path}
+ self.client.send_request.assert_called_once_with(
+ '/storage/luns', 'get', query=query)
+ self.assertEqual(result, lun_records)
+
+ @ddt.data(True, False)
+ def test__get_first_lun_by_path(self, is_empty):
+ lun_path = f'/vol/{fake_client.VOLUME_NAMES[0]}/cinder-lun'
+ lun_records = fake_client.LUN_GET_ITER_REST['records']
+ lun_item = lun_records[0]
+ if is_empty:
+ lun_records = []
+
+ self.mock_object(self.client, '_get_lun_by_path',
+ return_value=lun_records)
+
+ result = self.client._get_first_lun_by_path(lun_path)
+
+ self.client._get_lun_by_path.assert_called_once_with(
+ lun_path, fields=None)
+ if is_empty:
+ self.assertIsNone(result)
+ else:
+ self.assertEqual(result, lun_item)
+
+ def test__lun_update_by_path(self):
+ lun_path = f'/vol/{fake_client.VOLUME_NAMES[0]}/cinder-lun'
+ lun_item = fake_client.LUN_GET_ITER_REST['records'][0]
+ new_size_bytes = '1073741824'
+ body = {
+ 'name': lun_path,
+ 'space.guarantee.requested': 'True',
+ 'space.size': new_size_bytes
+ }
+
+ self.mock_object(self.client, '_get_first_lun_by_path',
+ return_value=lun_item)
+ self.mock_object(self.client, 'send_request')
+
+ self.client._lun_update_by_path(lun_path, body)
+
+ self.client._get_first_lun_by_path.assert_called_once_with(lun_path)
+ self.client.send_request.assert_called_once_with(
+ f'/storage/luns/{lun_item["uuid"]}', 'patch', body=body)
+
+ def test__lun_update_by_path_not_found(self):
+ lun_path = f'/vol/{fake_client.VOLUME_NAMES[0]}/cinder-lun'
+ lun_item = None
+ new_size_bytes = '1073741824'
+ body = {
+ 'name': lun_path,
+ 'space.guarantee.requested': 'True',
+ 'space.size': new_size_bytes
+ }
+
+ self.mock_object(self.client, '_get_first_lun_by_path',
+ return_value=lun_item)
+ self.mock_object(self.client, 'send_request')
+
+ self.assertRaises(
+ netapp_api.NaApiError,
+ self.client._lun_update_by_path,
+ lun_path,
+ body
+ )
+
+ self.client._get_first_lun_by_path.assert_called_once_with(lun_path)
+ self.client.send_request.assert_not_called()
+
+ def test__validate_qos_policy_group_unsupported_qos(self):
+ is_adaptive = True
+ self.client.features.ADAPTIVE_QOS = False
+
+ self.assertRaises(
+ netapp_utils.NetAppDriverException,
+ self.client._validate_qos_policy_group,
+ is_adaptive
+ )
+
+ def test__validate_qos_policy_group_no_spec(self):
+ is_adaptive = True
+ self.client.features.ADAPTIVE_QOS = True
+
+ result = self.client._validate_qos_policy_group(is_adaptive)
+
+ self.assertIsNone(result)
+
+ def test__validate_qos_policy_group_unsupported_feature(self):
+ is_adaptive = True
+ self.client.features.ADAPTIVE_QOS = True
+ spec = {
+ 'min_throughput': fake.MIN_IOPS_REST
+ }
+
+ self.assertRaises(
+ netapp_utils.NetAppDriverException,
+ self.client._validate_qos_policy_group,
+ is_adaptive,
+ spec=spec,
+ qos_min_support=False
+ )
+
+ @ddt.data(True, False)
+ def test__validate_qos_policy_group(self, is_adaptive):
+ self.client.features.ADAPTIVE_QOS = True
+ spec = {
+ 'max_throughput': fake.MAX_IOPS_REST,
+ 'min_throughput': fake.MIN_IOPS_REST
+ }
+
+ self.client._validate_qos_policy_group(
+ is_adaptive, spec=spec, qos_min_support=True)
+
+ def test_delete_file(self):
+ """Delete file at path."""
+ path_to_file = fake.VOLUME_PATH
+ volume_response = fake_client.VOLUME_LIST_SIMPLE_RESPONSE_REST
+ volume_item = fake_client.VOLUME_ITEM_SIMPLE_RESPONSE_REST
+
+ volume_name = path_to_file.split('/')[2]
+ relative_path = '/'.join(path_to_file.split('/')[3:])
+
+ query = {
+ 'type': 'rw',
+ 'style': 'flex*', # Match both 'flexvol' and 'flexgroup'
+ 'is_svm_root': 'false',
+ 'error_state.is_inconsistent': 'false',
+ 'state': 'online',
+ 'name': volume_name,
+ 'fields': 'name,style'
+ }
+ self.mock_object(self.client, 'send_request',
+ return_value=volume_response)
+ self.mock_object(self.client, '_get_unique_volume',
+ return_value=volume_item)
+ self.client.delete_file(path_to_file)
+
+ relative_path = relative_path.replace('/', '%2F').replace('.', '%2E')
+
+ self.client.send_request.assert_has_calls([
+ mock.call('/storage/volumes/', 'get', query=query),
+ mock.call(f'/storage/volumes/{volume_item["uuid"]}'
+ + f'/files/{relative_path}', 'delete')
+ ])
+
+ self.client._get_unique_volume.assert_called_once_with(
+ volume_response['records'])
+
+ def test_get_igroup_by_initiators_none_found(self):
+ initiator = 'initiator'
+ expected_response = fake_client.NO_RECORDS_RESPONSE_REST
+
+ self.mock_object(self.client, 'send_request',
+ return_value=expected_response)
+
+ igroup_list = self.client.get_igroup_by_initiators([initiator])
+
+ self.assertEqual([], igroup_list)
+
+ def test_get_igroup_by_initiators(self):
+ initiators = ['iqn.1993-08.org.fake:01:5b67769f5c5e']
+ expected_igroup = [{
+ 'initiator-group-os-type': 'linux',
+ 'initiator-group-type': 'iscsi',
+ 'initiator-group-name':
+ 'openstack-e6bf1584-bfb3-4cdb-950d-525bf6f26b53'
+ }]
+
+ expected_query = {
+ 'svm.name': fake_client.VOLUME_VSERVER_NAME,
+ 'initiators.name': ' '.join(initiators),
+ 'fields': 'name,protocol,os_type'
+ }
+
+ self.mock_object(self.client, 'send_request',
+ return_value=fake_client.IGROUP_GET_ITER_REST)
+
+ igroup_list = self.client.get_igroup_by_initiators(initiators)
+ self.client.send_request.assert_called_once_with(
+ '/protocols/san/igroups', 'get', query=expected_query)
+ self.assertEqual(expected_igroup, igroup_list)
+
+ def test_get_igroup_by_initiators_multiple(self):
+ initiators = ['iqn.1993-08.org.fake:01:5b67769f5c5e',
+ 'iqn.1993-08.org.fake:02:5b67769f5c5e']
+
+ expected_igroup = [{
+ 'initiator-group-os-type': 'linux',
+ 'initiator-group-type': 'iscsi',
+ 'initiator-group-name':
+ 'openstack-e6bf1584-bfb3-4cdb-950d-525bf6f26b53'
+ }]
+
+ expected_query = {
+ 'svm.name': fake_client.VOLUME_VSERVER_NAME,
+ 'initiators.name': ' '.join(initiators),
+ 'fields': 'name,protocol,os_type'
+ }
+
+ self.mock_object(self.client, 'send_request',
+ return_value=fake_client.IGROUP_GET_ITER_INITS_REST)
+
+ igroup_list = self.client.get_igroup_by_initiators(initiators)
+ self.client.send_request.assert_called_once_with(
+ '/protocols/san/igroups', 'get', query=expected_query)
+ self.assertEqual(expected_igroup, igroup_list)
+
+ def test_get_igroup_by_initiators_multiple_records(self):
+ initiators = ['iqn.1993-08.org.fake:01:5b67769f5c5e']
+ expected_element = {
+ 'initiator-group-os-type': 'linux',
+ 'initiator-group-type': 'iscsi',
+ 'initiator-group-name':
+ 'openstack-e6bf1584-bfb3-4cdb-950d-525bf6f26b53'
+ }
+ expected_igroup = [expected_element, expected_element]
+
+ self.mock_object(self.client, 'send_request',
+ return_value=fake_client.IGROUP_GET_ITER_MULT_REST)
+
+ igroup_list = self.client.get_igroup_by_initiators(initiators)
+ self.assertEqual(expected_igroup, igroup_list)
+
+ def test_add_igroup_initiator(self):
+ igroup = 'fake_igroup'
+ initiator = 'fake_initiator'
+
+ mock_return = fake_client.IGROUP_GET_ITER_REST
+ expected_uuid = fake_client.IGROUP_GET_ITER_REST['records'][0]['uuid']
+ mock_send_request = self.mock_object(self.client, 'send_request',
+ return_value=mock_return)
+
+ self.client.add_igroup_initiator(igroup, initiator)
+
+ expected_body = {
+ 'name': initiator
+ }
+ mock_send_request.assert_has_calls([
+ mock.call('/protocols/san/igroups/' +
+ expected_uuid + '/initiators',
+ 'post', body=expected_body)])
+
+ def test_create_igroup(self):
+ igroup = 'fake_igroup'
+ igroup_type = 'fake_type'
+ os_type = 'fake_os'
+
+ body = {
+ 'name': igroup,
+ 'protocol': igroup_type,
+ 'os_type': os_type,
+ }
+
+ self.mock_object(self.client, 'send_request')
+ self.client.create_igroup(igroup, igroup_type, os_type)
+ self.client.send_request.assert_called_once_with(
+ '/protocols/san/igroups', 'post', body=body)
+
+ @ddt.data(None, 0, 4095)
+ def test_map_lun(self, lun_id):
+ fake_record = fake_client.GET_LUN_MAP_REST['records'][0]
+ path = fake_record['lun']['name']
+ igroup_name = fake_record['igroup']['name']
+
+ mock_send_request = self.mock_object(
+ self.client, 'send_request',
+ return_value=fake_client.GET_LUN_MAP_REST)
+
+ result = self.client.map_lun(path, igroup_name, lun_id)
+
+ self.assertEqual(0, result)
+ expected_body = {
+ 'lun.name': path,
+ 'igroup.name': igroup_name,
+ }
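+ # When no LUN ID is requested, the body must omit logical_unit_number
+ # (the backend assigns one).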
+ if lun_id is not None:
+ expected_body['logical_unit_number'] = lun_id
+
+ mock_send_request.assert_has_calls([
+ mock.call('/protocols/san/lun-maps', 'post',
+ body=expected_body, query={'return_records': 'true'})])
+
+ def test_get_lun_map(self):
+ fake_record = fake_client.GET_LUN_MAP_REST['records'][0]
+ path = fake_record['lun']['name']
+
+ expected_lun_map = [{
+ 'initiator-group': fake_record['igroup']['name'],
+ 'lun-id': fake_record['logical_unit_number'],
+ 'vserver': fake_record['svm']['name'],
+ }]
+
+ expected_query = {
+ 'lun.name': path,
+ 'fields': 'igroup.name,logical_unit_number,svm.name',
+ }
+
+ self.mock_object(self.client, 'send_request',
+ return_value=fake_client.GET_LUN_MAP_REST)
+
+ lun_map = self.client.get_lun_map(path)
+ self.assertEqual(observed=lun_map, expected=expected_lun_map)
+ self.client.send_request.assert_called_once_with(
+ '/protocols/san/lun-maps', 'get', query=expected_query)
+
+ def test_get_lun_map_no_luns_mapped(self):
+ fake_record = fake_client.GET_LUN_MAP_REST['records'][0]
+ path = fake_record['lun']['name']
+
+ expected_lun_map = []
+ expected_query = {
+ 'lun.name': path,
+ 'fields': 'igroup.name,logical_unit_number,svm.name',
+ }
+
+ self.mock_object(self.client, 'send_request',
+ return_value=fake_client.NO_RECORDS_RESPONSE_REST)
+
+ lun_map = self.client.get_lun_map(path)
+ self.assertEqual(observed=lun_map, expected=expected_lun_map)
+ self.client.send_request.assert_called_once_with(
+ '/protocols/san/lun-maps', 'get', query=expected_query)
+
+ def test_get_fc_target_wwpns(self):
+ fake_record = fake_client.FC_INTERFACE_REST['records'][0]
+ expected_wwpns = [fake_record['wwpn']]
+ expected_query = {
+ 'fields': 'wwpn'
+ }
+ self.mock_object(self.client, 'send_request',
+ return_value=fake_client.FC_INTERFACE_REST)
+ wwpns = self.client.get_fc_target_wwpns()
+ self.assertEqual(observed=wwpns, expected=expected_wwpns)
+ self.client.send_request.assert_called_once_with(
+ '/network/fc/interfaces', 'get', query=expected_query)
+
+ def test_get_fc_target_wwpns_not_found(self):
+ expected_wwpns = []
+ expected_query = {
+ 'fields': 'wwpn'
+ }
+ self.mock_object(self.client, 'send_request',
+ return_value=fake_client.NO_RECORDS_RESPONSE_REST)
+ wwpns = self.client.get_fc_target_wwpns()
+ self.assertEqual(observed=wwpns, expected=expected_wwpns)
+ self.client.send_request.assert_called_once_with(
+ '/network/fc/interfaces', 'get', query=expected_query)
+
+ def test_unmap_lun(self):
+ get_uuid_response = fake_client.GET_LUN_MAP_REST
+ mock_send_request = self.mock_object(
+ self.client, 'send_request',
+ side_effect=[get_uuid_response, None])
+
+ self.client.unmap_lun(fake_client.LUN_NAME_PATH,
+ fake_client.IGROUP_NAME)
+
+ query_uuid = {
+ 'igroup.name': fake_client.IGROUP_NAME,
+ 'lun.name': fake_client.LUN_NAME_PATH,
+ 'fields': 'lun.uuid,igroup.uuid'
+ }
+
+ lun_uuid = get_uuid_response['records'][0]['lun']['uuid']
+ igroup_uuid = get_uuid_response['records'][0]['igroup']['uuid']
+
+ mock_send_request.assert_has_calls([
+ mock.call('/protocols/san/lun-maps', 'get', query=query_uuid),
+ mock.call(f'/protocols/san/lun-maps/{lun_uuid}/{igroup_uuid}',
+ 'delete'),
+ ])
+
+ def test_unmap_lun_with_api_error(self):
+ get_uuid_response = fake_client.GET_LUN_MAP_REST
+ mock_send_request = self.mock_object(
+ self.client, 'send_request',
+ side_effect=[get_uuid_response, netapp_api.NaApiError()])
+
+ self.assertRaises(netapp_api.NaApiError,
+ self.client.unmap_lun,
+ fake_client.LUN_NAME_PATH,
+ fake_client.IGROUP_NAME)
+
+ query_uuid = {
+ 'igroup.name': fake_client.IGROUP_NAME,
+ 'lun.name': fake_client.LUN_NAME_PATH,
+ 'fields': 'lun.uuid,igroup.uuid'
+ }
+
+ lun_uuid = get_uuid_response['records'][0]['lun']['uuid']
+ igroup_uuid = get_uuid_response['records'][0]['igroup']['uuid']
+
+ mock_send_request.assert_has_calls([
+ mock.call('/protocols/san/lun-maps', 'get', query=query_uuid),
+ mock.call(f'/protocols/san/lun-maps/{lun_uuid}/{igroup_uuid}',
+ 'delete'),
+ ])
+
+ def test_unmap_lun_invalid_input(self):
+ get_uuid_response = fake_client.NO_RECORDS_RESPONSE_REST
+ mock_send_request = self.mock_object(
+ self.client, 'send_request',
+ side_effect=[get_uuid_response,
+ None])
+
+ self.client.unmap_lun(fake_client.LUN_NAME_PATH,
+ fake_client.IGROUP_NAME)
+
+ query_uuid = {
+ 'igroup.name': fake_client.IGROUP_NAME,
+ 'lun.name': fake_client.LUN_NAME_PATH,
+ 'fields': 'lun.uuid,igroup.uuid'
+ }
+
+ mock_send_request.assert_called_once_with(
+ '/protocols/san/lun-maps', 'get', query=query_uuid)
+
+ def test_unmap_lun_not_mapped_in_group(self):
+ get_uuid_response = fake_client.GET_LUN_MAP_REST
+
+ # Exception REST_NO_SUCH_LUN_MAP is handled inside the function
+ # and should not be re-raised
+ mock_send_request = self.mock_object(
+ self.client, 'send_request',
+ side_effect=[
+ get_uuid_response,
+ netapp_api.NaApiError(
+ code=netapp_api.REST_NO_SUCH_LUN_MAP)])
+
+ self.client.unmap_lun(fake_client.LUN_NAME_PATH,
+ fake_client.IGROUP_NAME)
+
+ query_uuid = {
+ 'igroup.name': fake_client.IGROUP_NAME,
+ 'lun.name': fake_client.LUN_NAME_PATH,
+ 'fields': 'lun.uuid,igroup.uuid'
+ }
+
+ lun_uuid = get_uuid_response['records'][0]['lun']['uuid']
+ igroup_uuid = get_uuid_response['records'][0]['igroup']['uuid']
+
+ mock_send_request.assert_has_calls([
+ mock.call('/protocols/san/lun-maps', 'get', query=query_uuid),
+ mock.call(f'/protocols/san/lun-maps/{lun_uuid}/{igroup_uuid}',
+ 'delete'),
+ ])
+
+ def test_has_luns_mapped_to_initiators(self):
+ initiators = ['iqn.2005-03.org.open-iscsi:49ebe8a87d1']
+ api_response = fake_client.GET_LUN_MAPS
+ mock_send_request = self.mock_object(
+ self.client, 'send_request', return_value=api_response)
+
+ self.assertTrue(self.client.has_luns_mapped_to_initiators(initiators))
+
+ query = {
+ 'initiators.name': ' '.join(initiators),
+ 'fields': 'lun_maps'
+ }
+
+ mock_send_request.assert_called_once_with(
+ '/protocols/san/igroups', 'get', query=query)
+
+ def test_has_luns_mapped_to_initiators_no_records(self):
+ initiators = ['iqn.2005-03.org.open-iscsi:49ebe8a87d1']
+ api_response = fake_client.NO_RECORDS_RESPONSE_REST
+ mock_send_request = self.mock_object(
+ self.client, 'send_request', return_value=api_response)
+
+ self.assertFalse(self.client.has_luns_mapped_to_initiators(initiators))
+
+ query = {
+ 'initiators.name': ' '.join(initiators),
+ 'fields': 'lun_maps'
+ }
+
+ mock_send_request.assert_called_once_with(
+ '/protocols/san/igroups', 'get', query=query)
+
+ def test_has_luns_mapped_to_initiators_not_mapped(self):
+ initiators = ['iqn.2005-03.org.open-iscsi:49ebe8a87d1']
+ api_response = fake_client.GET_LUN_MAPS_NO_MAPS
+ mock_send_request = self.mock_object(
+ self.client, 'send_request', return_value=api_response)
+
+ self.assertFalse(self.client.has_luns_mapped_to_initiators(initiators))
+
+ query = {
+ 'initiators.name': ' '.join(initiators),
+ 'fields': 'lun_maps'
+ }
+
+ mock_send_request.assert_called_once_with(
+ '/protocols/san/igroups', 'get', query=query)
+
+ def test_iscsi_service_details(self):
+ fake_record = fake_client.GET_ISCSI_SERVICE_DETAILS_REST['records'][0]
+ expected_iqn = fake_record['target']['name']
+ expected_query = {
+ 'fields': 'target.name'
+ }
+ mock_send_request = self.mock_object(
+ self.client, 'send_request',
+ return_value=fake_client.GET_ISCSI_SERVICE_DETAILS_REST)
+ iqn = self.client.get_iscsi_service_details()
+ self.assertEqual(expected_iqn, iqn)
+ mock_send_request.assert_called_once_with(
+ '/protocols/san/iscsi/services', 'get', query=expected_query)
+
+ def test_iscsi_service_details_not_found(self):
+ expected_iqn = None
+ expected_query = {
+ 'fields': 'target.name'
+ }
+ mock_send_request = self.mock_object(
+ self.client, 'send_request',
+ return_value=fake_client.NO_RECORDS_RESPONSE_REST)
+ iqn = self.client.get_iscsi_service_details()
+ self.assertEqual(expected_iqn, iqn)
+ mock_send_request.assert_called_once_with(
+ '/protocols/san/iscsi/services', 'get', query=expected_query)
+
+ def test_check_iscsi_initiator_exists(self):
+ fake_record = fake_client.CHECK_ISCSI_INITIATOR_REST['records'][0]
+ iqn = fake_record['initiator']
+ expected_query = {
+ 'initiator': iqn
+ }
+ mock_send_request = self.mock_object(
+ self.client, 'send_request',
+ return_value=fake_client.CHECK_ISCSI_INITIATOR_REST)
+ initiator_exists = self.client.check_iscsi_initiator_exists(iqn)
+ self.assertEqual(expected=True, observed=initiator_exists)
+ mock_send_request.assert_called_once_with(
+ '/protocols/san/iscsi/credentials', 'get',
+ query=expected_query)
+
+ def test_check_iscsi_initiator_exists_not_found(self):
+ fake_record = fake_client.CHECK_ISCSI_INITIATOR_REST['records'][0]
+ iqn = fake_record['initiator']
+ expected_query = {
+ 'initiator': iqn
+ }
+ mock_send_request = self.mock_object(
+ self.client, 'send_request',
+ return_value=fake_client.NO_RECORDS_RESPONSE_REST)
+ initiator_exists = self.client.check_iscsi_initiator_exists(iqn)
+ self.assertEqual(expected=False, observed=initiator_exists)
+ mock_send_request.assert_called_once_with(
+ '/protocols/san/iscsi/credentials', 'get',
+ query=expected_query)
+
+ def test_get_iscsi_target_details(self):
+ fake_record = fake_client.GET_ISCSI_TARGET_DETAILS_REST['records'][0]
+ expected_details = [{
+ 'address': fake_record['ip']['address'],
+ 'port': 3260,
+ 'tpgroup-tag': None,
+ 'interface-enabled': fake_record['enabled'],
+ }]
+ expected_query = {
+ 'services': 'data_iscsi',
+ 'fields': 'ip.address,enabled'
+ }
+ mock_send_request = self.mock_object(
+ self.client, 'send_request',
+ return_value=fake_client.GET_ISCSI_TARGET_DETAILS_REST)
+ details = self.client.get_iscsi_target_details()
+ self.assertEqual(expected_details, details)
+ mock_send_request.assert_called_once_with('/network/ip/interfaces',
+ 'get', query=expected_query)
+
+ def test_get_iscsi_target_details_no_details(self):
+ expected_details = []
+ expected_query = {
+ 'services': 'data_iscsi',
+ 'fields': 'ip.address,enabled'
+ }
+ mock_send_request = self.mock_object(
+ self.client, 'send_request',
+ return_value=fake_client.NO_RECORDS_RESPONSE_REST)
+ details = self.client.get_iscsi_target_details()
+ self.assertEqual(expected_details, details)
+ mock_send_request.assert_called_once_with('/network/ip/interfaces',
+ 'get', query=expected_query)
+
+ def test_move_lun(self):
+ fake_cur_path = '/vol/fake_vol/fake_lun_cur'
+ fake_new_path = '/vol/fake_vol/fake_lun_new'
+ expected_query = {
+ 'svm.name': self.vserver,
+ 'name': fake_cur_path,
+ }
+ expected_body = {
+ 'name': fake_new_path,
+ }
+ mock_send_request = self.mock_object(self.client, 'send_request')
+ self.client.move_lun(fake_cur_path, fake_new_path)
+ mock_send_request.assert_called_once_with(
+ '/storage/luns/', 'patch', query=expected_query,
+ body=expected_body)
+
+ @ddt.data(True, False)
+ def test_clone_file_snapshot(self, overwrite_dest):
+ fake_volume = fake_client.VOLUME_ITEM_SIMPLE_RESPONSE_REST
+ self.client.features.BACKUP_CLONE_PARAM = True
+
+ fake_name = fake.NFS_VOLUME['name']
+ fake_new_name = fake.SNAPSHOT_NAME
+ api_version = (1, 19)
+
+ expected_body = {
+ 'volume': {
+ 'uuid': fake_volume['uuid'],
+ 'name': fake_volume['name']
+ },
+ 'source_path': fake_name,
+ 'destination_path': fake_new_name,
+ 'is_backup': True
+ }
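+ # Overwriting an existing destination is exercised with API version
+ # 1.20, which adds the overwrite_destination flag to the request body.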
+ if overwrite_dest:
+ api_version = (1, 20)
+ expected_body['overwrite_destination'] = True
+
+ self.mock_object(self.client, 'send_request')
+ self.mock_object(self.client, '_get_volume_by_args',
+ return_value=fake_volume)
+ self.mock_object(self.client.connection, 'get_api_version',
+ return_value=api_version)
+
+ self.client.clone_file(
+ fake_volume['name'], fake_name, fake_new_name, fake.VSERVER_NAME,
+ is_snapshot=True, dest_exists=overwrite_dest)
+
+ self.client.send_request.assert_has_calls([
+ mock.call('/storage/file/clone', 'post', body=expected_body),
+ ])
+
+ def test_clone_lun(self):
+ self.client.vserver = fake.VSERVER_NAME
+
+ expected_body = {
+ 'svm': {
+ 'name': fake.VSERVER_NAME
+ },
+ 'name': f'/vol/{fake.VOLUME_NAME}/{fake.SNAPSHOT_NAME}',
+ 'clone': {
+ 'source': {
+ 'name': f'/vol/{fake.VOLUME_NAME}/{fake.LUN_NAME}',
+ }
+ },
+ 'space': {
+ 'guarantee': {
+ 'requested': True,
+ }
+ },
+ 'qos_policy': {
+ 'name': fake.QOS_POLICY_GROUP_NAME,
+ }
+ }
+
+ mock_send_request = self.mock_object(
+ self.client, 'send_request', return_value=None)
+ mock_validate_policy = self.mock_object(
+ self.client, '_validate_qos_policy_group')
+
+ self.client.clone_lun(
+ volume=fake.VOLUME_NAME, name=fake.LUN_NAME,
+ new_name=fake.SNAPSHOT_NAME,
+ qos_policy_group_name=fake.QOS_POLICY_GROUP_NAME,
+ is_snapshot=True)
+
+ mock_validate_policy.assert_called_once_with(False)
+ mock_send_request.assert_called_once_with(
+ '/storage/luns', 'post', body=expected_body)
+
+ @ddt.data(True, False)
+ def test_destroy_lun(self, force):
+ path = f'/vol/{fake_client.VOLUME_NAME}/{fake_client.FILE_NAME}'
+
+ query = {}
+ query['name'] = path
+ query['svm'] = fake_client.VOLUME_VSERVER_NAME
+ if force:
+ query['allow_delete_while_mapped'] = 'true'
+
+ self.mock_object(self.client, 'send_request')
+
+ self.client.destroy_lun(path, force)
+
+ self.client.send_request.assert_called_once_with('/storage/luns/',
+ 'delete', query=query)
+
+ def test_get_flexvol_capacity(self):
+
+ api_response = fake_client.VOLUME_GET_ITER_CAPACITY_RESPONSE_REST
+ volume_response = api_response['records'][0]
+ mock_get_unique_vol = self.mock_object(
+ self.client, '_get_volume_by_args', return_value=volume_response)
+
+ capacity = self.client.get_flexvol_capacity(
+ flexvol_path=fake.VOLUME_PATH, flexvol_name=fake.VOLUME_NAME)
+
+ mock_get_unique_vol.assert_called_once_with(
+ vol_name=fake.VOLUME_NAME, vol_path=fake.VOLUME_PATH,
+ fields='name,space.available,space.afs_total')
+ self.assertEqual(float(fake_client.VOLUME_SIZE_TOTAL),
+ capacity['size-total'])
+ self.assertEqual(float(fake_client.VOLUME_SIZE_AVAILABLE),
+ capacity['size-available'])
+
+ def test_get_flexvol_capacity_not_found(self):
+
+ self.mock_object(
+ self.client, '_get_volume_by_args',
+ side_effect=exception.VolumeBackendAPIException(data="fake"))
+
+ self.assertRaises(netapp_utils.NetAppDriverException,
+ self.client.get_flexvol_capacity,
+ flexvol_path='fake_path')
+
+ def test_check_api_permissions(self):
+
+ mock_log = self.mock_object(client_cmode_rest.LOG, 'warning')
+ self.mock_object(self.client, 'check_cluster_api', return_value=True)
+
+ self.client.check_api_permissions()
+
+ self.client.check_cluster_api.assert_has_calls(
+ [mock.call(key) for key in client_cmode_rest.SSC_API_MAP.keys()])
+ self.assertEqual(0, mock_log.call_count)
+
+ def test_check_api_permissions_failed_ssc_apis(self):
+
+ def check_cluster_api(api):
+ if api != '/storage/volumes':
+ return False
+ return True
+
+ self.mock_object(self.client, 'check_cluster_api',
+ side_effect=check_cluster_api)
+
+ mock_log = self.mock_object(client_cmode_rest.LOG, 'warning')
+
+ self.client.check_api_permissions()
+
+ self.assertEqual(1, mock_log.call_count)
+
+ def test_check_api_permissions_failed_volume_api(self):
+
+ def check_cluster_api(api):
+ if api == '/storage/volumes':
+ return False
+ return True
+
+ self.mock_object(self.client, 'check_cluster_api',
+ side_effect=check_cluster_api)
+
+ mock_log = self.mock_object(client_cmode_rest.LOG, 'warning')
+
+ self.assertRaises(exception.VolumeBackendAPIException,
+ self.client.check_api_permissions)
+
+ self.assertEqual(0, mock_log.call_count)
+
+ def test_check_cluster_api(self):
+
+ endpoint_api = '/storage/volumes'
+ endpoint_request = '/storage/volumes?return_records=false'
+ mock_send_request = self.mock_object(self.client,
+ 'send_request',
+ return_value=True)
+
+ result = self.client.check_cluster_api(endpoint_api)
+
+ mock_send_request.assert_has_calls([mock.call(endpoint_request, 'get',
+ enable_tunneling=False)])
+ self.assertTrue(result)
+
+ def test_check_cluster_api_error(self):
+
+ endpoint_api = '/storage/volumes'
+ api_error = netapp_api.NaApiError(code=netapp_api.REST_UNAUTHORIZED)
+
+ self.mock_object(self.client, 'send_request',
+ side_effect=[api_error])
+
+ result = self.client.check_cluster_api(endpoint_api)
+
+ self.assertFalse(result)
+
+ def test_get_provisioning_options_from_flexvol(self):
+
+ self.mock_object(self.client, 'get_flexvol',
+ return_value=fake_client.VOLUME_INFO_SSC)
+ self.mock_object(self.client, 'get_flexvol_dedupe_info',
+ return_value=fake_client.VOLUME_DEDUPE_INFO_SSC)
+
+ expected_prov_opts = {
+ 'aggregate': 'fake_aggr1',
+ 'compression_enabled': False,
+ 'dedupe_enabled': True,
+ 'language': 'c.utf_8',
+ 'size': 1,
+ 'snapshot_policy': 'default',
+ 'snapshot_reserve': '5',
+ 'space_guarantee_type': 'none',
+ 'volume_type': 'rw',
+ 'is_flexgroup': False,
+ }
+
+ actual_prov_opts = self.client.get_provisioning_options_from_flexvol(
+ fake_client.VOLUME_NAME)
+
+ self.assertEqual(expected_prov_opts, actual_prov_opts)
+
+ def test_flexvol_exists(self):
+
+ api_response = fake_client.GET_NUM_RECORDS_RESPONSE_REST
+ mock_send_request = self.mock_object(self.client,
+ 'send_request',
+ return_value=api_response)
+
+ result = self.client.flexvol_exists(fake_client.VOLUME_NAME)
+
+ query = {
+ 'name': fake_client.VOLUME_NAME,
+ 'return_records': 'false'
+ }
+
+ mock_send_request.assert_has_calls([
+ mock.call('/storage/volumes/', 'get', query=query)])
+ self.assertTrue(result)
+
+ def test_flexvol_exists_not_found(self):
+
+ api_response = fake_client.NO_RECORDS_RESPONSE_REST
+ self.mock_object(self.client,
+ 'send_request',
+ return_value=api_response)
+
+ self.assertFalse(self.client.flexvol_exists(fake_client.VOLUME_NAME))
+
+ @ddt.data(fake_client.VOLUME_AGGREGATE_NAME,
+ [fake_client.VOLUME_AGGREGATE_NAME],
+ [fake_client.VOLUME_AGGREGATE_NAMES[0],
+ fake_client.VOLUME_AGGREGATE_NAMES[1]])
+ def test_create_volume_async(self, aggregates):
+ self.mock_object(self.client, 'send_request')
+
+ self.client.create_volume_async(
+ fake_client.VOLUME_NAME, aggregates, 100, volume_type='dp')
+
+ body = {
+ 'name': fake_client.VOLUME_NAME,
+ 'size': 100 * units.Gi,
+ 'type': 'dp'
+ }
+
+ if isinstance(aggregates, list):
+ body['style'] = 'flexgroup'
+ body['aggregates'] = [{'name': aggr} for aggr in aggregates]
+ else:
+ body['style'] = 'flexvol'
+ body['aggregates'] = [{'name': aggregates}]
+
+ self.client.send_request.assert_called_once_with(
+ '/storage/volumes/', 'post', body=body, wait_on_accepted=False)
+
+ @ddt.data('dp', 'rw', None)
+ def test_create_volume_async_with_extra_specs(self, volume_type):
+ self.mock_object(self.client, 'send_request')
+
+ aggregates = [fake_client.VOLUME_AGGREGATE_NAME]
+ snapshot_policy = 'default'
+ size = 100
+ space_guarantee_type = 'volume'
+ language = 'en-US'
+ snapshot_reserve = 15
+
+ self.client.create_volume_async(
+ fake_client.VOLUME_NAME, aggregates, size,
+ space_guarantee_type=space_guarantee_type, language=language,
+ snapshot_policy=snapshot_policy, snapshot_reserve=snapshot_reserve,
+ volume_type=volume_type)
+
+ body = {
+ 'name': fake_client.VOLUME_NAME,
+ 'size': size * units.Gi,
+ 'type': volume_type,
+ 'guarantee': {'type': space_guarantee_type},
+ 'space': {'snapshot': {'reserve_percent': str(snapshot_reserve)}},
+ 'language': language,
+ }
+
+ if isinstance(aggregates, list):
+ body['style'] = 'flexgroup'
+ body['aggregates'] = [{'name': aggr} for aggr in aggregates]
+ else:
+ body['style'] = 'flexvol'
+ body['aggregates'] = [{'name': aggregates}]
+
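+ # DP volumes get neither a junction path nor a snapshot policy; rw
+ # volumes are mounted at /<volume name>.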
+ if volume_type == 'dp':
+ snapshot_policy = None
+ else:
+ body['nas'] = {'path': '/%s' % fake_client.VOLUME_NAME}
+
+ if snapshot_policy is not None:
+ body['snapshot_policy'] = {'name': snapshot_policy}
+
+ self.client.send_request.assert_called_once_with(
+ '/storage/volumes/', 'post', body=body, wait_on_accepted=False)
+
+ def test_create_flexvol(self):
+ aggregates = [fake_client.VOLUME_AGGREGATE_NAME]
+ size = 100
+
+ mock_response = {
+ 'job': {
+ 'uuid': fake.JOB_UUID,
+ }
+ }
+
+ self.mock_object(self.client, 'send_request',
+ return_value=mock_response)
+
+ expected_response = {
+ 'status': None,
+ 'jobid': fake.JOB_UUID,
+ 'error-code': None,
+ 'error-message': None
+ }
+
+ response = self.client.create_volume_async(fake_client.VOLUME_NAME,
+ aggregates, size_gb=size)
+ self.assertEqual(expected_response, response)
+
+ def test_enable_volume_dedupe_async(self):
+ query = {
+ 'name': fake_client.VOLUME_NAME,
+ 'fields': 'uuid,style',
+ }
+
+ # This is needed because the first call to send_request inside
+ # enable_volume_dedupe_async must return a valid uuid for the given
+ # volume name.
+ mock_response = {
+ 'records': [
+ {
+ 'uuid': fake.JOB_UUID,
+ 'name': fake_client.VOLUME_NAME,
+ "style": 'flexgroup',
+ }
+ ],
+ "num_records": 1,
+ }
+
+ body = {
+ 'efficiency': {'dedupe': 'background'}
+ }
+
+ mock_send_request = self.mock_object(self.client, 'send_request',
+ return_value=mock_response)
+
+ call_list = [mock.call('/storage/volumes/',
+ 'patch', body=body, query=query,
+ wait_on_accepted=False)]
+
+ self.client.enable_volume_dedupe_async(fake_client.VOLUME_NAME)
+ mock_send_request.assert_has_calls(call_list)
+
+ def test_enable_volume_compression_async(self):
+ query = {
+ 'name': fake_client.VOLUME_NAME,
+ }
+
+ # This is needed because the first call to send_request inside
+ # enable_volume_compression_async must return a valid uuid for the
+ # given volume name.
+ mock_response = {
+ 'records': [
+ {
+ 'uuid': fake.JOB_UUID,
+ 'name': fake_client.VOLUME_NAME,
+ "style": 'flexgroup',
+ }
+ ],
+ "num_records": 1,
+ }
+
+ body = {
+ 'efficiency': {'compression': 'background'}
+ }
+
+ mock_send_request = self.mock_object(self.client, 'send_request',
+ return_value=mock_response)
+
+ call_list = [mock.call('/storage/volumes/',
+ 'patch', body=body, query=query,
+ wait_on_accepted=False)]
+
+ self.client.enable_volume_compression_async(fake_client.VOLUME_NAME)
+ mock_send_request.assert_has_calls(call_list)
+
+ def test__get_snapmirrors(self):
+
+ api_response = fake_client.SNAPMIRROR_GET_ITER_RESPONSE_REST
+ mock_send_request = self.mock_object(self.client,
+ 'send_request',
+ return_value=api_response)
+
+ result = self.client._get_snapmirrors(
+ fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME,
+ fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME)
+
+ query = {
+ 'source.path': (fake_client.SM_SOURCE_VSERVER + ':' +
+ fake_client.SM_SOURCE_VOLUME),
+ 'destination.path': (fake_client.SM_DEST_VSERVER +
+ ':' + fake_client.SM_DEST_VOLUME),
+ 'fields': 'state,source.svm.name,source.path,destination.svm.name,'
+ 'destination.path,transfer.end_time,lag_time,healthy,'
+ 'uuid'
+ }
+
+ mock_send_request.assert_called_once_with('/snapmirror/relationships',
+ 'get', query=query)
+ self.assertEqual(1, len(result))
+
+ def test__get_snapmirrors_not_found(self):
+
+ api_response = fake_client.NO_RECORDS_RESPONSE_REST
+ mock_send_request = self.mock_object(self.client,
+ 'send_request',
+ return_value=api_response)
+
+ result = self.client._get_snapmirrors(
+ fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME,
+ fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME)
+
+ query = {
+ 'source.path': (fake_client.SM_SOURCE_VSERVER + ':' +
+ fake_client.SM_SOURCE_VOLUME),
+ 'destination.path': (fake_client.SM_DEST_VSERVER +
+ ':' + fake_client.SM_DEST_VOLUME),
+ 'fields': 'state,source.svm.name,source.path,destination.svm.name,'
+ 'destination.path,transfer.end_time,lag_time,healthy,'
+ 'uuid'
+ }
+
+ mock_send_request.assert_called_once_with('/snapmirror/relationships',
+ 'get', query=query)
+ self.assertEqual([], result)
+
+ def test_get_snapmirrors(self):
+
+ api_response = fake_client.SNAPMIRROR_GET_ITER_RESPONSE_REST
+ mock_send_request = self.mock_object(self.client,
+ 'send_request',
+ return_value=api_response)
+
+ result = self.client.get_snapmirrors(
+ fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME,
+ fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME)
+
+ expected = fake_client.REST_GET_SNAPMIRRORS_RESPONSE
+
+ query = {
+ 'source.path': (fake_client.SM_SOURCE_VSERVER + ':' +
+ fake_client.SM_SOURCE_VOLUME),
+ 'destination.path': (fake_client.SM_DEST_VSERVER +
+ ':' + fake_client.SM_DEST_VOLUME),
+ 'fields': 'state,source.svm.name,source.path,destination.svm.name,'
+ 'destination.path,transfer.end_time,lag_time,healthy,'
+ 'uuid'
+ }
+
+ mock_send_request.assert_called_once_with('/snapmirror/relationships',
+ 'get', query=query)
+ self.assertEqual(expected, result)
+
+ @ddt.data({'policy': 'fake_policy'},
+ {'policy': None})
+ @ddt.unpack
+ def test_create_snapmirror(self, policy):
+ api_responses = [
+ {
+ "job": {
+ "uuid": fake_client.FAKE_UUID,
+ },
+ },
+ ]
+ self.mock_object(self.client, 'send_request',
+ side_effect=copy.deepcopy(api_responses))
+ self.client.create_snapmirror(
+ fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME,
+ fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME,
+ policy=policy)
+
+ body = {
+ 'source': {
+ 'path': (fake_client.SM_SOURCE_VSERVER + ':' +
+ fake_client.SM_SOURCE_VOLUME),
+ },
+ 'destination': {
+ 'path': (fake_client.SM_DEST_VSERVER + ':' +
+ fake_client.SM_DEST_VOLUME)
+ }
+ }
+
+ if policy:
+ body['policy'] = {'name': policy}
+ self.client.send_request.assert_has_calls([
+ mock.call('/snapmirror/relationships/', 'post', body=body)])
+
+ def test_create_snapmirror_already_exists(self):
+ api_responses = netapp_api.NaApiError(
+ code=netapp_api.REST_ERELATION_EXISTS)
+ self.mock_object(self.client, 'send_request',
+ side_effect=api_responses)
+
+ response = self.client.create_snapmirror(
+ fake_client.SM_SOURCE_VSERVER,
+ fake_client.SM_SOURCE_VOLUME,
+ fake_client.SM_DEST_VSERVER,
+ fake_client.SM_DEST_VOLUME,
+ schedule=None,
+ policy=None,
+ relationship_type='data_protection')
+ self.assertIsNone(response)
+ self.assertTrue(self.client.send_request.called)
+
+ def test_create_snapmirror_error(self):
+ self.mock_object(self.client, 'send_request',
+ side_effect=netapp_api.NaApiError(code=123))
+
+ self.assertRaises(netapp_api.NaApiError,
+ self.client.create_snapmirror,
+ fake_client.SM_SOURCE_VSERVER,
+ fake_client.SM_SOURCE_VOLUME,
+ fake_client.SM_DEST_VSERVER,
+ fake_client.SM_DEST_VOLUME,
+ schedule=None,
+ policy=None,
+ relationship_type='data_protection')
+ self.assertTrue(self.client.send_request.called)
+
+ def test__set_snapmirror_state(self):
+
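+ # The client first looks up the relationship to obtain its UUID, then
+ # patches the desired state on that relationship.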
+ api_responses = [
+ fake_client.SNAPMIRROR_GET_ITER_RESPONSE_REST,
+ {
+ "job":
+ {
+ "uuid": fake_client.FAKE_UUID
+ },
+ "num_records": 1
+ }
+ ]
+
+ expected_body = {'state': 'snapmirrored'}
+ self.mock_object(self.client,
+ 'send_request',
+ side_effect=copy.deepcopy(api_responses))
+
+ result = self.client._set_snapmirror_state(
+ 'snapmirrored',
+ fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME,
+ fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME)
+
+ self.client.send_request.assert_has_calls([
+ mock.call('/snapmirror/relationships/' + fake_client.FAKE_UUID,
+ 'patch', body=expected_body, wait_on_accepted=True)])
+
+ expected = {
+ 'operation-id': None,
+ 'status': None,
+ 'jobid': fake_client.FAKE_UUID,
+ 'error-code': None,
+ 'error-message': None,
+ 'relationship-uuid': fake_client.FAKE_UUID
+ }
+ self.assertEqual(expected, result)
+
+ def test_initialize_snapmirror(self):
+
+ expected_job = {
+ 'operation-id': None,
+ 'status': None,
+ 'jobid': fake_client.FAKE_UUID,
+ 'error-code': None,
+ 'error-message': None,
+ }
+
+ mock_set_snapmirror_state = self.mock_object(
+ self.client,
+ '_set_snapmirror_state',
+ return_value=expected_job)
+
+ result = self.client.initialize_snapmirror(
+ fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME,
+ fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME)
+
+ mock_set_snapmirror_state.assert_called_once_with(
+ 'snapmirrored',
+ fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME,
+ fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME,
+ wait_result=False)
+
+ self.assertEqual(expected_job, result)
+
+ @ddt.data(True, False)
+ def test_abort_snapmirror(self, clear_checkpoint):
+
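+ # clear_checkpoint maps to the 'hard_aborted' transfer state;
+ # otherwise a plain 'aborted' is requested.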
+ self.mock_object(
+ self.client, 'get_snapmirrors',
+ return_value=fake_client.REST_GET_SNAPMIRRORS_RESPONSE)
+ responses = [fake_client.TRANSFERS_GET_ITER_REST, None, None]
+ self.mock_object(self.client, 'send_request',
+ side_effect=copy.deepcopy(responses))
+
+ self.client.abort_snapmirror(
+ fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME,
+ fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME,
+ clear_checkpoint=clear_checkpoint)
+
+ body = {'state': 'hard_aborted' if clear_checkpoint else 'aborted'}
+ query = {'state': 'transferring'}
+ self.client.send_request.assert_has_calls([
+ mock.call('/snapmirror/relationships/' +
+ fake_client.FAKE_UUID + '/transfers/', 'get',
+ query=query),
+ mock.call('/snapmirror/relationships/' +
+ fake_client.FAKE_UUID + '/transfers/' +
+ fake_client.FAKE_UUID, 'patch', body=body)])
+ self.client.get_snapmirrors.assert_called_once_with(
+ fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME,
+ fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME)
+
+ def test_abort_snapmirror_no_transfer_in_progress(self):
+
+ self.mock_object(self.client, 'send_request',
+ return_value=fake_client.NO_RECORDS_RESPONSE_REST)
+ self.mock_object(
+ self.client, 'get_snapmirrors',
+ return_value=fake_client.REST_GET_SNAPMIRRORS_RESPONSE)
+
+ self.assertRaises(netapp_api.NaApiError,
+ self.client.abort_snapmirror,
+ fake_client.SM_SOURCE_VSERVER,
+ fake_client.SM_SOURCE_VOLUME,
+ fake_client.SM_DEST_VSERVER,
+ fake_client.SM_DEST_VOLUME,
+ clear_checkpoint=True)
+
+ query = {'state': 'transferring'}
+ self.client.send_request.assert_has_calls([
+ mock.call('/snapmirror/relationships/' + fake_client.FAKE_UUID +
+ '/transfers/', 'get', query=query)])
+
+ def test_delete_snapmirror(self):
+
+ response_list = [fake_client.SNAPMIRROR_GET_ITER_RESPONSE_REST,
+ fake_client.JOB_RESPONSE_REST,
+ fake_client.JOB_SUCCESSFUL_REST]
+
+ self.mock_object(self.client, 'send_request',
+ side_effect=copy.deepcopy(response_list))
+
+ self.client.delete_snapmirror(
+ fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME,
+ fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME)
+
+ query_uuid = {}
+ query_uuid['source.path'] = (fake_client.SM_SOURCE_VSERVER + ':' +
+ fake_client.SM_SOURCE_VOLUME)
+ query_uuid['destination.path'] = (fake_client.SM_DEST_VSERVER + ':' +
+ fake_client.SM_DEST_VOLUME)
+ query_uuid['fields'] = 'uuid'
+
+ query_delete = {"destination_only": "true"}
+ self.client.send_request.assert_has_calls([
+ mock.call('/snapmirror/relationships/', 'get', query=query_uuid),
+ mock.call('/snapmirror/relationships/' + fake_client.FAKE_UUID,
+ 'delete', query=query_delete)])
+
+ def test_delete_snapmirror_timeout(self):
+ # when a timeout happens, send_request raises an exception
+ api_error = netapp_api.NaRetryableError()
+ self.mock_object(self.client, 'send_request',
+ side_effect=api_error)
+
+ self.assertRaises(netapp_api.NaRetryableError,
+ self.client.delete_snapmirror,
+ fake_client.SM_SOURCE_VSERVER,
+ fake_client.SM_SOURCE_VOLUME,
+ fake_client.SM_DEST_VSERVER,
+ fake_client.SM_DEST_VOLUME)
+
+ @ddt.data('async', 'sync')
+ def test_resume_snapmirror(self, snapmirror_policy):
+ snapmirror_response = copy.deepcopy(
+ fake_client.SNAPMIRROR_GET_ITER_RESPONSE_REST)
+ snapmirror_response['records'][0]['policy'] = {
+ 'type': snapmirror_policy}
+
+ if snapmirror_policy == 'async':
+ snapmirror_response['state'] = 'snapmirrored'
+ elif snapmirror_policy == 'sync':
+ snapmirror_response['state'] = 'in_sync'
+
+ response_list = [snapmirror_response,
+ fake_client.JOB_RESPONSE_REST,
+ snapmirror_response]
+
+ self.mock_object(self.client, 'send_request',
+ side_effect=copy.deepcopy(response_list))
+
+ self.client.resume_snapmirror(
+ fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME,
+ fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME)
+
+ query_uuid = {}
+ query_uuid['source.path'] = (fake_client.SM_SOURCE_VSERVER + ':' +
+ fake_client.SM_SOURCE_VOLUME)
+ query_uuid['destination.path'] = (fake_client.SM_DEST_VSERVER + ':' +
+ fake_client.SM_DEST_VOLUME)
+ query_uuid['fields'] = 'uuid,policy.type'
+
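+ # the expected PATCH body depends on the policy type: async
+ # relationships go back to 'snapmirrored', sync ones to 'in_sync'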
+ body_resync = {}
+ if snapmirror_policy == 'async':
+ body_resync['state'] = 'snapmirrored'
+ elif snapmirror_policy == 'sync':
+ body_resync['state'] = 'in_sync'
+
+ self.client.send_request.assert_has_calls([
+ mock.call('/snapmirror/relationships/', 'get', query=query_uuid),
+ mock.call('/snapmirror/relationships/' + fake_client.FAKE_UUID,
+ 'patch', body=body_resync)])
+
+ def test_resume_snapmirror_not_found(self):
+ query_uuid = {}
+ query_uuid['source.path'] = (fake_client.SM_SOURCE_VSERVER + ':' +
+ fake_client.SM_SOURCE_VOLUME)
+ query_uuid['destination.path'] = (fake_client.SM_DEST_VSERVER + ':' +
+ fake_client.SM_DEST_VOLUME)
+ query_uuid['fields'] = 'uuid,policy.type'
+
+ self.mock_object(
+ self.client, 'send_request',
+ return_value={'records': []})
+
+ self.assertRaises(
+ netapp_api.NaApiError,
+ self.client.resume_snapmirror,
+ fake_client.SM_SOURCE_VSERVER,
+ fake_client.SM_SOURCE_VOLUME,
+ fake_client.SM_DEST_VSERVER,
+ fake_client.SM_DEST_VOLUME)
+
+ self.client.send_request.assert_called_once_with(
+ '/snapmirror/relationships/', 'get', query=query_uuid)
+
+ def test_resume_snapmirror_api_error(self):
+ query_resume = {}
+ query_resume['source.path'] = (fake_client.SM_SOURCE_VSERVER + ':' +
+ fake_client.SM_SOURCE_VOLUME)
+ query_resume['destination.path'] = (fake_client.SM_DEST_VSERVER + ':' +
+ fake_client.SM_DEST_VOLUME)
+
+ query_uuid = copy.deepcopy(query_resume)
+ query_uuid['fields'] = 'uuid,policy.type'
+
+ api_error = netapp_api.NaApiError(code=0)
+ self.mock_object(
+ self.client, 'send_request',
+ side_effect=[fake_client.SNAPMIRROR_GET_ITER_RESPONSE_REST,
+ api_error])
+
+ self.assertRaises(netapp_api.NaApiError,
+ self.client.resume_snapmirror,
+ fake_client.SM_SOURCE_VSERVER,
+ fake_client.SM_SOURCE_VOLUME,
+ fake_client.SM_DEST_VSERVER,
+ fake_client.SM_DEST_VOLUME)
+
+ @ddt.data(True, False)
+ def test_release_snapmirror(self, relationship_info_only):
+
+ response_list = [fake_client.SNAPMIRROR_GET_ITER_RESPONSE_REST,
+ fake_client.JOB_RESPONSE_REST,
+ fake_client.JOB_SUCCESSFUL_REST]
+
+ self.mock_object(self.client, 'send_request',
+ side_effect=copy.deepcopy(response_list))
+
+ self.client.release_snapmirror(
+ fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME,
+ fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME,
+ relationship_info_only)
+
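+ # the relationship uuid is looked up with a destination-only listing
+ # before the source-side DELETE asserted below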
+ query_uuid = {}
+ query_uuid['list_destinations_only'] = 'true'
+ query_uuid['source.path'] = (fake_client.SM_SOURCE_VSERVER + ':' +
+ fake_client.SM_SOURCE_VOLUME)
+ query_uuid['destination.path'] = (fake_client.SM_DEST_VSERVER + ':' +
+ fake_client.SM_DEST_VOLUME)
+ query_uuid['fields'] = 'uuid'
+
+ query_release = {}
+ if relationship_info_only:
+ # release WITHOUT removing related snapshots
+ query_release['source_info_only'] = 'true'
+ else:
+ # release AND remove all related snapshots
+ query_release['source_only'] = 'true'
+
+ self.client.send_request.assert_has_calls([
+ mock.call('/snapmirror/relationships/', 'get', query=query_uuid),
+ mock.call('/snapmirror/relationships/' + fake_client.FAKE_UUID,
+ 'delete', query=query_release)])
+
+ def test_release_snapmirror_timeout(self):
+ # when a timeout happens, send_request raises an exception
+ api_error = netapp_api.NaRetryableError()
+ self.mock_object(self.client, 'send_request',
+ side_effect=api_error)
+
+ self.assertRaises(netapp_api.NaRetryableError,
+ self.client.release_snapmirror,
+ fake_client.SM_SOURCE_VSERVER,
+ fake_client.SM_SOURCE_VOLUME,
+ fake_client.SM_DEST_VSERVER,
+ fake_client.SM_DEST_VOLUME)
+
+ @ddt.data('async', 'sync')
+ def test_resync_snapmirror(self, snapmirror_policy):
+
+ snapmirror_response = copy.deepcopy(
+ fake_client.SNAPMIRROR_GET_ITER_RESPONSE_REST)
+ snapmirror_response['records'][0]['policy'] = {
+ 'type': snapmirror_policy}
+
+ if snapmirror_policy == 'async':
+ snapmirror_response['state'] = 'snapmirrored'
+ elif snapmirror_policy == 'sync':
+ snapmirror_response['state'] = 'in_sync'
+
+ response_list = [snapmirror_response,
+ fake_client.JOB_RESPONSE_REST,
+ snapmirror_response]
+
+ self.mock_object(self.client, 'send_request',
+ side_effect=copy.deepcopy(response_list))
+
+ self.client.resync_snapmirror(
+ fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME,
+ fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME)
+
+ query_uuid = {}
+ query_uuid['source.path'] = (fake_client.SM_SOURCE_VSERVER + ':' +
+ fake_client.SM_SOURCE_VOLUME)
+ query_uuid['destination.path'] = (fake_client.SM_DEST_VSERVER + ':' +
+ fake_client.SM_DEST_VOLUME)
+ query_uuid['fields'] = 'uuid,policy.type'
+
+ body_resync = {}
+ if snapmirror_policy == 'async':
+ body_resync['state'] = 'snapmirrored'
+ elif snapmirror_policy == 'sync':
+ body_resync['state'] = 'in_sync'
+
+ self.client.send_request.assert_has_calls([
+ mock.call('/snapmirror/relationships/', 'get', query=query_uuid),
+ mock.call('/snapmirror/relationships/' + fake_client.FAKE_UUID,
+ 'patch', body=body_resync)])
+
+ def test_resync_snapmirror_timeout(self):
+ api_error = netapp_api.NaRetryableError()
+ self.mock_object(self.client, 'resume_snapmirror',
+ side_effect=api_error)
+
+ self.assertRaises(netapp_api.NaRetryableError,
+ self.client.resync_snapmirror,
+ fake_client.SM_SOURCE_VSERVER,
+ fake_client.SM_SOURCE_VOLUME,
+ fake_client.SM_DEST_VSERVER,
+ fake_client.SM_DEST_VOLUME)
+
+ def test_quiesce_snapmirror(self):
+
+ expected_job = {
+ 'operation-id': None,
+ 'status': None,
+ 'jobid': fake_client.FAKE_UUID,
+ 'error-code': None,
+ 'error-message': None,
+ 'relationship-uuid': fake_client.FAKE_UUID,
+ }
+
+ mock_set_snapmirror_state = self.mock_object(
+ self.client,
+ '_set_snapmirror_state',
+ return_value=expected_job)
+
+ result = self.client.quiesce_snapmirror(
+ fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME,
+ fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME)
+
+ mock_set_snapmirror_state.assert_called_once_with(
+ 'paused',
+ fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME,
+ fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME)
+
+ self.assertEqual(expected_job, result)
+
+ def test_break_snapmirror(self):
+ snapmirror_response = copy.deepcopy(
+ fake_client.SNAPMIRROR_GET_ITER_RESPONSE_REST)
+
+ snapmirror_response['state'] = 'broken_off'
+ response_list = [snapmirror_response]
+
+ self.mock_object(self.client, 'send_request',
+ side_effect=copy.deepcopy(response_list))
+
+ expected_job = {
+ 'operation-id': None,
+ 'status': None,
+ 'jobid': fake_client.FAKE_UUID,
+ 'error-code': None,
+ 'error-message': None,
+ 'relationship-uuid': fake_client.FAKE_UUID,
+ }
+
+ mock_set_snapmirror_state = self.mock_object(
+ self.client,
+ '_set_snapmirror_state',
+ return_value=expected_job)
+
+ self.client.break_snapmirror(
+ fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME,
+ fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME)
+
+ mock_set_snapmirror_state.assert_called_once_with(
+ 'broken-off',
+ fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME,
+ fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME)
+
+ def test_break_snapmirror_not_found(self):
+ self.mock_object(
+ self.client, 'send_request',
+ return_value={'records': []})
+
+ self.assertRaises(
+ netapp_utils.NetAppDriverException,
+ self.client.break_snapmirror,
+ fake_client.SM_SOURCE_VSERVER,
+ fake_client.SM_SOURCE_VOLUME,
+ fake_client.SM_DEST_VSERVER,
+ fake_client.SM_DEST_VOLUME)
+
+ def test_break_snapmirror_timeout(self):
+ # when a timeout happens, send_request raises an exception
+ api_error = netapp_api.NaRetryableError()
+ self.mock_object(self.client, 'send_request',
+ side_effect=api_error)
+
+ self.assertRaises(netapp_api.NaRetryableError,
+ self.client.break_snapmirror,
+ fake_client.SM_SOURCE_VSERVER,
+ fake_client.SM_SOURCE_VOLUME,
+ fake_client.SM_DEST_VSERVER,
+ fake_client.SM_DEST_VOLUME)
+
+ def test_update_snapmirror(self):
+
+ snapmirrors = fake_client.REST_GET_SNAPMIRRORS_RESPONSE
+ self.mock_object(self.client, 'send_request')
+ self.mock_object(self.client, 'get_snapmirrors',
+ return_value=snapmirrors)
+
+ self.client.update_snapmirror(
+ fake_client.SM_SOURCE_VSERVER, fake_client.SM_SOURCE_VOLUME,
+ fake_client.SM_DEST_VSERVER, fake_client.SM_DEST_VOLUME)
+
+ self.client.send_request.assert_has_calls([
+ mock.call('/snapmirror/relationships/' +
+ snapmirrors[0]['uuid'] + '/transfers/', 'post',
+ wait_on_accepted=False)])
+
+ def test_update_snapmirror_no_records(self):
+
+ self.mock_object(self.client, 'send_request')
+ self.mock_object(self.client, 'get_snapmirrors',
+ return_value=[])
+
+ self.assertRaises(netapp_utils.NetAppDriverException,
+ self.client.update_snapmirror,
+ fake_client.SM_SOURCE_VSERVER,
+ fake_client.SM_SOURCE_VOLUME,
+ fake_client.SM_DEST_VSERVER,
+ fake_client.SM_DEST_VOLUME)
+
+ self.client.send_request.assert_not_called()
+
+ def test_update_snapmirror_exception(self):
+
+ snapmirrors = fake_client.REST_GET_SNAPMIRRORS_RESPONSE
+ api_error = netapp_api.NaApiError(
+ code=netapp_api.REST_UPDATE_SNAPMIRROR_FAILED)
+ self.mock_object(self.client, 'send_request',
+ side_effect=api_error)
+ self.mock_object(self.client, 'get_snapmirrors',
+ return_value=snapmirrors)
+
+ self.assertRaises(netapp_api.NaApiError,
+ self.client.update_snapmirror,
+ fake_client.SM_SOURCE_VSERVER,
+ fake_client.SM_SOURCE_VOLUME,
+ fake_client.SM_DEST_VSERVER,
+ fake_client.SM_DEST_VOLUME)
+
+ self.client.send_request.assert_has_calls([
+ mock.call('/snapmirror/relationships/' +
+ snapmirrors[0]['uuid'] + '/transfers/', 'post',
+ wait_on_accepted=False)])
+
+ def test_mount_flexvol(self):
+ volumes = fake_client.VOLUME_GET_ITER_SSC_RESPONSE_REST
+ self.mock_object(self.client, 'send_request',
+ side_effect=[volumes, None])
+
+ fake_path = '/fake_path'
+ fake_vol_name = volumes['records'][0]['name']
+
+ body = {
+ 'nas.path': fake_path
+ }
+ query = {
+ 'name': fake_vol_name
+ }
+
+ self.client.mount_flexvol(fake_client.VOLUME_NAME,
+ junction_path=fake_path)
+
+ self.client.send_request.assert_has_calls([
+ mock.call('/storage/volumes', 'patch', body=body, query=query)])
+
+ def test_mount_flexvol_default_junction_path(self):
+ volumes = fake_client.VOLUME_GET_ITER_SSC_RESPONSE_REST
+ self.mock_object(self.client, 'send_request',
+ side_effect=[volumes, None])
+
+ fake_vol_name = volumes['records'][0]['name']
+ body = {
+ 'nas.path': '/' + fake_client.VOLUME_NAME
+ }
+ query = {
+ 'name': fake_vol_name
+ }
+
+ self.client.mount_flexvol(fake_client.VOLUME_NAME)
+
+ self.client.send_request.assert_has_calls([
+ mock.call('/storage/volumes', 'patch', body=body, query=query)])
+
+ def test_get_cluster_name(self):
+ query = {'fields': 'name'}
+
+ self.mock_object(
+ self.client, 'send_request',
+ return_value=fake_client.GET_CLUSTER_NAME_RESPONSE_REST)
+
+ result = self.client.get_cluster_name()
+
+ self.client.send_request.assert_called_once_with(
+ '/cluster', 'get', query=query, enable_tunneling=False)
+ self.assertEqual(
+ fake_client.GET_CLUSTER_NAME_RESPONSE_REST['name'], result)
+
+ @ddt.data(
+ (fake_client.VSERVER_NAME, fake_client.VSERVER_NAME_2),
+ (fake_client.VSERVER_NAME, None),
+ (None, fake_client.VSERVER_NAME_2),
+ (None, None))
+ @ddt.unpack
+ def test_get_vserver_peers(self, svm_name, peer_svm_name):
+ query = {
+ 'fields': 'svm.name,state,peer.svm.name,peer.cluster.name,'
+ 'applications'
+ }
+ if peer_svm_name:
+ query['name'] = peer_svm_name
+ if svm_name:
+ query['svm.name'] = svm_name
+
+ vserver_info = fake_client.GET_VSERVER_PEERS_RECORDS_REST[0]
+
+ expected_result = [{
+ 'vserver': vserver_info['svm']['name'],
+ 'peer-vserver': vserver_info['peer']['svm']['name'],
+ 'peer-state': vserver_info['state'],
+ 'peer-cluster': vserver_info['peer']['cluster']['name'],
+ 'applications': vserver_info['applications'],
+ }]
+
+ self.mock_object(
+ self.client, 'send_request',
+ return_value=fake_client.GET_VSERVER_PEERS_RESPONSE_REST)
+
+ result = self.client.get_vserver_peers(
+ vserver_name=svm_name, peer_vserver_name=peer_svm_name)
+
+ self.client.send_request.assert_called_once_with(
+ '/svm/peers', 'get', query=query, enable_tunneling=False)
+ self.assertEqual(expected_result, result)
+
+ def test_get_vserver_peers_empty(self):
+ vserver_peers_response = copy.deepcopy(
+ fake_client.GET_VSERVER_PEERS_RESPONSE_REST)
+ vserver_peers_response['records'] = []
+ vserver_peers_response['num_records'] = 0
+ query = {
+ 'fields': 'svm.name,state,peer.svm.name,peer.cluster.name,'
+ 'applications'
+ }
+ self.mock_object(
+ self.client, 'send_request', return_value=vserver_peers_response)
+
+ result = self.client.get_vserver_peers()
+
+ self.client.send_request.assert_called_once_with(
+ '/svm/peers', 'get', query=query, enable_tunneling=False)
+ self.assertEqual([], result)
+
+ @ddt.data(['snapmirror', 'lun_copy'], None)
+ def test_create_vserver_peer(self, applications):
+ body = {
+ 'svm.name': fake_client.VSERVER_NAME,
+ 'name': fake_client.VSERVER_NAME_2,
+ 'applications': applications if applications else ['snapmirror']
+ }
+
+ self.mock_object(self.client, 'send_request')
+
+ self.client.create_vserver_peer(
+ fake_client.VSERVER_NAME, fake_client.VSERVER_NAME_2,
+ vserver_peer_application=applications)
+
+ self.client.send_request.assert_called_once_with(
+ '/svm/peers', 'post', body=body, enable_tunneling=False)
+
+ @ddt.data(
+ (fake.VOLUME_NAME, fake.LUN_NAME),
+ (None, fake.LUN_NAME),
+ (fake.VOLUME_NAME, None),
+ (None, None)
+ )
+ @ddt.unpack
+ def test_start_lun_move(self, src_vol, dest_lun):
+ src_lun = f'src-lun-{fake.LUN_NAME}'
+ dest_vol = f'dest-vol-{fake.VOLUME_NAME}'
+
+ src_path = f'/vol/{src_vol if src_vol else dest_vol}/{src_lun}'
+ dest_path = f'/vol/{dest_vol}/{dest_lun if dest_lun else src_lun}'
+ body = {'name': dest_path}
+
+ self.mock_object(self.client, '_lun_update_by_path')
+
+ result = self.client.start_lun_move(
+ src_lun, dest_vol, src_ontap_volume=src_vol,
+ dest_lun_name=dest_lun)
+
+ self.client._lun_update_by_path.assert_called_once_with(
+ src_path, body)
+ self.assertEqual(dest_path, result)
+
+ @ddt.data(fake_client.LUN_GET_MOVEMENT_REST, None)
+ def test_get_lun_move_status(self, lun_moved):
+ dest_path = f'/vol/{fake.VOLUME_NAME}/{fake.LUN_NAME}'
+ move_status = None
+ if lun_moved:
+ move_progress = lun_moved['movement']['progress']
+ move_status = {
+ 'job-status': move_progress['state'],
+ 'last-failure-reason': move_progress['failure']['message']
+ }
+
+ self.mock_object(self.client, '_get_first_lun_by_path',
+ return_value=lun_moved)
+
+ result = self.client.get_lun_move_status(dest_path)
+
+ self.client._get_first_lun_by_path.assert_called_once_with(
+ dest_path, fields='movement.progress')
+ self.assertEqual(move_status, result)
+
+ @ddt.data(
+ (fake.VOLUME_NAME, fake.LUN_NAME),
+ (None, fake.LUN_NAME),
+ (fake.VOLUME_NAME, None),
+ (None, None)
+ )
+ @ddt.unpack
+ def test_start_lun_copy(self, src_vol, dest_lun):
+ src_lun = f'src-lun-{fake.LUN_NAME}'
+ dest_vol = f'dest-vol-{fake.VOLUME_NAME}'
+ dest_vserver = f'dest-vserver-{fake.VSERVER_NAME}'
+
+ src_path = f'/vol/{src_vol if src_vol else dest_vol}/{src_lun}'
+ dest_path = f'/vol/{dest_vol}/{dest_lun if dest_lun else src_lun}'
+ body = {
+ 'name': dest_path,
+ 'copy.source.name': src_path,
+ 'svm.name': dest_vserver
+ }
+
+ self.mock_object(self.client, 'send_request')
+
+ result = self.client.start_lun_copy(
+ src_lun, dest_vol, dest_vserver,
+ src_ontap_volume=src_vol, src_vserver=fake_client.VSERVER_NAME,
+ dest_lun_name=dest_lun)
+
+ self.client.send_request.assert_called_once_with(
+ '/storage/luns', 'post', body=body, enable_tunneling=False)
+ self.assertEqual(dest_path, result)
+
+ @ddt.data(fake_client.LUN_GET_COPY_REST, None)
+ def test_get_lun_copy_status(self, lun_copied):
+ dest_path = f'/vol/{fake.VOLUME_NAME}/{fake.LUN_NAME}'
+ copy_status = None
+ if lun_copied:
+ copy_progress = lun_copied['copy']['source']['progress']
+ copy_status = {
+ 'job-status': copy_progress['state'],
+ 'last-failure-reason': copy_progress['failure']['message']
+ }
+
+ self.mock_object(self.client, '_get_first_lun_by_path',
+ return_value=lun_copied)
+
+ result = self.client.get_lun_copy_status(dest_path)
+
+ self.client._get_first_lun_by_path.assert_called_once_with(
+ dest_path, fields='copy.source.progress')
+ self.assertEqual(copy_status, result)
+
+ def test_cancel_lun_copy(self):
+ dest_path = f'/vol/{fake_client.VOLUME_NAME}/{fake_client.FILE_NAME}'
+
+ query = {
+ 'name': dest_path,
+ 'svm.name': fake_client.VSERVER_NAME
+ }
+
+ self.mock_object(self.client, 'send_request')
+
+ self.client.cancel_lun_copy(dest_path)
+
+ self.client.send_request.assert_called_once_with('/storage/luns/',
+ 'delete', query=query)
+
+ def test_cancel_lun_copy_exception(self):
+ dest_path = f'/vol/{fake_client.VOLUME_NAME}/{fake_client.FILE_NAME}'
+ query = {
+ 'name': dest_path,
+ 'svm.name': fake_client.VSERVER_NAME
+ }
+
+ self.mock_object(self.client, 'send_request',
+ side_effect=self._mock_api_error())
+
+ self.assertRaises(
+ netapp_utils.NetAppDriverException,
+ self.client.cancel_lun_copy,
+ dest_path)
+ self.client.send_request.assert_called_once_with('/storage/luns/',
+ 'delete', query=query)
+
+ # TODO(rfluisa): Add ddt data with None values for optional parameters to
+ # improve coverage.
+ def test_start_file_copy(self):
+ volume = fake_client.VOLUME_ITEM_SIMPLE_RESPONSE_REST
+ file_name = fake_client.FILE_NAME
+ dest_ontap_volume = fake_client.VOLUME_NAME
+ src_ontap_volume = dest_ontap_volume
+ dest_file_name = file_name
+ response = {'job': {'uuid': 'fake-uuid'}}
+
+ body = {
+ 'files_to_copy': [
+ {
+ 'source': {
+ 'path': f'{src_ontap_volume}/{file_name}',
+ 'volume': {
+ 'uuid': volume['uuid']
+ }
+ },
+ 'destination': {
+ 'path': f'{dest_ontap_volume}/{dest_file_name}',
+ 'volume': {
+ 'uuid': volume['uuid']
+ }
+ }
+ }
+ ]
+ }
+
+ self.mock_object(self.client, '_get_volume_by_args',
+ return_value=volume)
+ self.mock_object(self.client, 'send_request',
+ return_value=response)
+
+ result = self.client.start_file_copy(
+ file_name, dest_ontap_volume, src_ontap_volume=src_ontap_volume,
+ dest_file_name=dest_file_name)
+
+ self.client.send_request.assert_called_once_with(
+ '/storage/file/copy', 'post', body=body, enable_tunneling=False)
+ self.assertEqual(response['job']['uuid'], result)
+
+ # TODO(rfluisa): Add ddt data with None values for possible api responses
+ # to improve coverage.
+ def test_get_file_copy_status(self):
+ job_uuid = fake_client.FAKE_UUID
+ query = {}
+ query['fields'] = '*'
+ response = {
+ 'state': 'fake-state',
+ 'error': {
+ 'message': 'fake-error-message'
+ }
+ }
+ expected_result = {
+ 'job-status': response['state'],
+ 'last-failure-reason': response['error']['message']
+ }
+
+ self.mock_object(self.client, 'send_request', return_value=response)
+ result = self.client.get_file_copy_status(job_uuid)
+
+ self.client.send_request.assert_called_once_with(
+ f'/cluster/jobs/{job_uuid}', 'get', query=query,
+ enable_tunneling=False)
+ self.assertEqual(expected_result, result)
+
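+ # REST job states are mapped to the values the driver presumably
+ # already consumes: 'success' -> 'complete', 'failure' -> 'destroyed'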
+ @ddt.data(('success', 'complete'), ('failure', 'destroyed'))
+ @ddt.unpack
+ def test_get_file_copy_status_translate_state(self, from_state, to_state):
+ job_uuid = fake_client.FAKE_UUID
+ query = {}
+ query['fields'] = '*'
+ response = {
+ 'state': from_state,
+ 'error': {
+ 'message': 'fake-error-message'
+ }
+ }
+ expected_result = {
+ 'job-status': to_state,
+ 'last-failure-reason': response['error']['message']
+ }
+
+ self.mock_object(self.client, 'send_request', return_value=response)
+ result = self.client.get_file_copy_status(job_uuid)
+
+ self.client.send_request.assert_called_once_with(
+ f'/cluster/jobs/{job_uuid}', 'get', query=query,
+ enable_tunneling=False)
+ self.assertEqual(expected_result, result)
+
+ def test_rename_file(self):
+ volume = fake_client.VOLUME_ITEM_SIMPLE_RESPONSE_REST
+ orig_file_name = f'/vol/{fake_client.VOLUME_NAMES[0]}/cinder-vol'
+ new_file_name = f'/vol/{fake_client.VOLUME_NAMES[0]}/new-cinder-vol'
+ body = {'path': new_file_name.split('/')[3]}
+
+ self.mock_object(self.client, 'send_request')
+ self.mock_object(self.client, '_get_volume_by_args',
+ return_value=volume)
+
+ self.client.rename_file(orig_file_name, new_file_name)
+
+ orig_file_name = orig_file_name.split('/')[3]
+ self.client.send_request.assert_called_once_with(
+ f'/storage/volumes/{volume["uuid"]}/files/{orig_file_name}',
+ 'patch', body=body)
+ self.client._get_volume_by_args.assert_called_once_with(
+ vol_name=fake_client.VOLUME_NAMES[0])
diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/fakes.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/fakes.py
index 950e92283..2b3d07265 100644
--- a/cinder/tests/unit/volume/drivers/netapp/dataontap/fakes.py
+++ b/cinder/tests/unit/volume/drivers/netapp/dataontap/fakes.py
@@ -275,8 +275,9 @@ IGROUP1 = {'initiator-group-os-type': 'linux',
QOS_SPECS = {}
EXTRA_SPECS = {}
MAX_THROUGHPUT = '21734278B/s'
-MIN_IOPS = '256IOPS'
-MAX_IOPS = '512IOPS'
+MIN_IOPS = '256iops'
+MAX_IOPS = '512iops'
+MAX_BPS = '1000000B/s'
QOS_POLICY_GROUP_NAME = 'fake_qos_policy_group_name'
QOS_POLICY_GROUP_INFO_LEGACY = {
@@ -290,6 +291,11 @@ QOS_POLICY_GROUP_SPEC = {
'policy_name': QOS_POLICY_GROUP_NAME,
}
+QOS_POLICY_GROUP_SPEC_BPS = {
+ 'max_throughput': MAX_BPS,
+ 'policy_name': QOS_POLICY_GROUP_NAME,
+}
+
QOS_POLICY_GROUP_SPEC_MAX = {
'max_throughput': MAX_THROUGHPUT,
'policy_name': QOS_POLICY_GROUP_NAME,
@@ -417,6 +423,19 @@ FAKE_LUN = netapp_api.NaElement.create_node_with_children(
'volume': 'fakeLUN',
'vserver': 'fake_vserver'})
+FAKE_LUN_GET_ITER_RESULT = [
+ {
+ 'Vserver': 'fake_vserver',
+ 'Volume': 'fake_volume',
+ 'Size': 123,
+ 'Qtree': 'fake_qtree',
+ 'Path': 'fake_path',
+ 'OsType': 'fake_os',
+ 'SpaceReserved': 'true',
+ 'UUID': 'fake-uuid',
+ },
+]
+
CG_VOLUME_NAME = 'fake_cg_volume'
CG_GROUP_NAME = 'fake_consistency_group'
CG_POOL_NAME = 'cdot'
@@ -740,12 +759,219 @@ def get_fake_net_interface_get_iter_response():
def get_fake_ifs():
- list_of_ifs = [
- etree.XML("""<net-interface-info>
- <address>FAKE_IP</address></net-interface-info>"""),
- etree.XML("""<net-interface-info>
- <address>FAKE_IP2</address></net-interface-info>"""),
- etree.XML("""<net-interface-info>
- <address>FAKE_IP3</address></net-interface-info>"""),
- ]
- return [netapp_api.NaElement(el) for el in list_of_ifs]
+ return [{'vserver': VSERVER_NAME}]
+
+
+AFF_SYSTEM_NODE_GET_ITER_RESPONSE_REST = {
+ "records": [
+ {
+ "uuid": "9eff6c76-fc13-11ea-8799-525400",
+ "name": "aff-node1",
+ "model": "AFFA400",
+ "is_all_flash_optimized": True,
+ "is_all_flash_select_optimized": False,
+ "_links": {
+ "self": {
+ "href": "/api/cluster/nodes/9eff6c76-fc13-11ea-8799-525400"
+ }
+ }
+ },
+ {
+ "uuid": "9eff6c76-fc13-11ea-8799-52540006bba9",
+ "name": "aff-node2",
+ "model": "AFFA400",
+ "is_all_flash_optimized": True,
+ "is_all_flash_select_optimized": False,
+ "_links": {
+ "self": {
+ "href": "/api/cluster/nodes/9eff6c76-fc13-11ea-8799-525400"
+ }
+ }
+ }
+ ],
+ "num_records": 2,
+ "_links": {
+ "self": {
+ "href": "/api/cluster/nodes?fields=model,name,"
+ "is_all_flash_optimized,is_all_flash_select_optimized"
+ }
+ }
+}
+
+FAS_SYSTEM_NODE_GET_ITER_RESPONSE_REST = {
+ "records": [
+ {
+ "uuid": "9eff6c76-fc13-11ea-8799-52540006bba9",
+ "name": "fas-node1",
+ "model": "FAS2554",
+ "is_all_flash_optimized": False,
+ "is_all_flash_select_optimized": False,
+ "_links": {
+ "self": {
+ "href": "/api/cluster/nodes/9eff6c76-fc13-11ea-8799-525400"
+ }
+ }
+ },
+ {
+ "uuid": "9eff6c76-fc13-11ea-8799-52540006bba9",
+ "name": "fas-node2",
+ "model": "FAS2554",
+ "is_all_flash_optimized": False,
+ "is_all_flash_select_optimized": False,
+ "_links": {
+ "self": {
+ "href": "/api/cluster/nodes/9eff6c76-fc13-11ea-8799-525400"
+ }
+ }
+ }
+ ],
+ "num_records": 2,
+ "_links": {
+ "self": {
+ "href": "/api/cluster/nodes?fields=model,name,"
+ "is_all_flash_optimized,is_all_flash_select_optimized"
+ }
+ }
+}
+
+HYBRID_SYSTEM_NODE_GET_ITER_RESPONSE_REST = {
+ "records": [
+ {
+ "uuid": "9eff6c76-fc13-11ea-8799-52540006bba9",
+ "name": "select-node",
+ "model": "FDvM300",
+ "is_all_flash_optimized": False,
+ "is_all_flash_select_optimized": True,
+ "_links": {
+ "self": {
+ "href": "/api/cluster/nodes/9eff6c76-fc13-11ea-8799-525400"
+ }
+ }
+ },
+ {
+ "uuid": "9eff6c76-fc13-11ea-8799-52540006bba9",
+ "name": "c190-node",
+ "model": "AFF-C190",
+ "is_all_flash_optimized": True,
+ "is_all_flash_select_optimized": False,
+ "_links": {
+ "self": {
+ "href": "/api/cluster/nodes/9eff6c76-fc13-11ea-8799-525400"
+ }
+ }
+ }
+ ],
+ "num_records": 2,
+ "_links": {
+ "self": {
+ "href": "/api/cluster/nodes?fields=model,name,"
+ "is_all_flash_optimized,is_all_flash_select_optimized"
+ }
+ }
+}
+
+QOS_POLICY_BY_NAME_RESPONSE_REST = {
+ "records": [
+ {
+ "uuid": "9eff6c76-fc13-11ea-8799-52540006bba9",
+ "name": "openstack-cd-uuid",
+ "_links": {
+ "self": {
+ "href": "/api/storage/qos/policies/"
+ "9eff6c76-fc13-11ea-8799-52540006bba9"
+ }
+ }
+ }
+ ],
+ "num_records": 1,
+ "_links": {
+ "self": {
+ "href": "/api/storage/qos/policies?fields=name"
+ }
+ }
+}
+
+QOS_SPECS_REST = {}
+MAX_THROUGHPUT_REST = '21734278'
+MIN_IOPS_REST = '256'
+MAX_IOPS_REST = '512'
+MAX_BPS_REST = '1'
+
+QOS_POLICY_GROUP_INFO_LEGACY_REST = {
+ 'legacy': 'legacy-' + QOS_POLICY_GROUP_NAME,
+ 'spec': None,
+}
+
+QOS_POLICY_GROUP_SPEC_REST = {
+ 'min_throughput': MIN_IOPS_REST,
+ 'max_throughput': MAX_IOPS_REST,
+ 'policy_name': QOS_POLICY_GROUP_NAME,
+}
+
+QOS_POLICY_GROUP_API_ARGS_REST = {
+ 'name': QOS_POLICY_GROUP_NAME,
+ 'svm': {
+ 'name': VSERVER_NAME
+ },
+ 'fixed': {
+ 'max_throughput_iops': int(MAX_IOPS_REST),
+ 'min_throughput_iops': int(MIN_IOPS_REST)
+ }
+}
+
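+# Expected REST body when only a max throughput in B/s is requested;
+# MAX_BPS_REST ('1') presumably reflects MAX_BPS ('1000000B/s')
+# normalized to whole MB/s by the client.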
+QOS_POLICY_GROUP_API_ARGS_REST_BPS = {
+ 'name': QOS_POLICY_GROUP_NAME,
+ 'svm': {
+ 'name': VSERVER_NAME
+ },
+ 'fixed': {
+ 'max_throughput_mbps': int(MAX_BPS_REST),
+ }
+}
+
+QOS_POLICY_GROUP_SPEC_MAX_REST = {
+ 'max_throughput': MAX_THROUGHPUT_REST,
+ 'policy_name': QOS_POLICY_GROUP_NAME,
+}
+
+EXPECTED_IOPS_PER_GB_REST = '128'
+PEAK_IOPS_PER_GB_REST = '512'
+PEAK_IOPS_ALLOCATION_REST = 'used-space'
+EXPECTED_IOPS_ALLOCATION_REST = 'used-space'
+ABSOLUTE_MIN_IOPS_REST = '75'
+BLOCK_SIZE_REST = 'ANY'
+ADAPTIVE_QOS_SPEC_REST = {
+ 'policy_name': QOS_POLICY_GROUP_NAME,
+ 'expected_iops': EXPECTED_IOPS_PER_GB_REST,
+ 'expected_iops_allocation': EXPECTED_IOPS_ALLOCATION_REST,
+ 'peak_iops': PEAK_IOPS_PER_GB_REST,
+ 'peak_iops_allocation': PEAK_IOPS_ALLOCATION_REST,
+ 'absolute_min_iops': ABSOLUTE_MIN_IOPS_REST,
+ 'block_size': BLOCK_SIZE_REST,
+}
+
+ADAPTIVE_QOS_API_ARGS_REST = {
+ 'name': QOS_POLICY_GROUP_NAME,
+ 'svm': {
+ 'name': VSERVER_NAME
+ },
+ 'adaptive': {
+ 'absolute_min_iops': int(ABSOLUTE_MIN_IOPS_REST),
+ 'expected_iops': int(EXPECTED_IOPS_PER_GB_REST),
+ 'expected_iops_allocation': EXPECTED_IOPS_ALLOCATION_REST,
+ 'peak_iops': int(PEAK_IOPS_PER_GB_REST),
+ 'peak_iops_allocation': PEAK_IOPS_ALLOCATION_REST,
+ 'block_size': BLOCK_SIZE_REST,
+ }
+}
+
+QOS_POLICY_GROUP_INFO_REST = {
+ 'legacy': None, 'spec': QOS_POLICY_GROUP_SPEC_REST}
+QOS_POLICY_GROUP_INFO_MAX_REST = {
+ 'legacy': None, 'spec': QOS_POLICY_GROUP_SPEC_MAX_REST}
+ADAPTIVE_QOS_POLICY_GROUP_INFO_REST = {
+ 'legacy': None,
+ 'spec': ADAPTIVE_QOS_SPEC_REST,
+}
+
+REST_FIELDS = 'uuid,name,style'
diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_cmode.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_cmode.py
index 8598cedc9..13a8b77d5 100644
--- a/cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_cmode.py
+++ b/cinder/tests/unit/volume/drivers/netapp/dataontap/test_block_cmode.py
@@ -19,7 +19,6 @@
from unittest import mock
import ddt
-import six
from cinder import exception
from cinder.objects import fields
@@ -280,10 +279,8 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase):
self.library._get_lun_attr = mock.Mock(return_value={'Volume':
'fakeLUN'})
self.library.zapi_client = mock.Mock()
- self.library.zapi_client.get_lun_by_args.return_value = [
- mock.Mock(spec=netapp_api.NaElement)]
- lun = fake.FAKE_LUN
- self.library._get_lun_by_args = mock.Mock(return_value=[lun])
+ lun = fake.FAKE_LUN_GET_ITER_RESULT
+ self.library.zapi_client.get_lun_by_args.return_value = lun
self.library._add_lun_to_table = mock.Mock()
self.library._clone_lun('fakeLUN', 'newFakeLUN', 'false')
@@ -303,10 +300,8 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase):
self.library._get_lun_attr = mock.Mock(return_value={'Volume':
'fakeLUN'})
self.library.zapi_client = mock.Mock()
- self.library.zapi_client.get_lun_by_args.return_value = [
- mock.Mock(spec=netapp_api.NaElement)]
- lun = fake.FAKE_LUN
- self.library._get_lun_by_args = mock.Mock(return_value=[lun])
+ lun = fake.FAKE_LUN_GET_ITER_RESULT
+ self.library.zapi_client.get_lun_by_args.return_value = lun
self.library._add_lun_to_table = mock.Mock()
self.library._clone_lun('fakeLUN', 'newFakeLUN', 'false',
@@ -327,10 +322,8 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase):
'fakeLUN'})
self.library.zapi_client = mock.Mock()
self.library.lun_space_reservation = 'false'
- self.library.zapi_client.get_lun_by_args.return_value = [
- mock.Mock(spec=netapp_api.NaElement)]
- lun = fake.FAKE_LUN
- self.library._get_lun_by_args = mock.Mock(return_value=[lun])
+ lun = fake.FAKE_LUN_GET_ITER_RESULT
+ self.library.zapi_client.get_lun_by_args.return_value = lun
self.library._add_lun_to_table = mock.Mock()
self.library._clone_lun('fakeLUN', 'newFakeLUN', is_snapshot=True)
@@ -1542,27 +1535,22 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase):
fake.LUN_WITH_METADATA['metadata'])
new_snap_name = 'new-%s' % fake.SNAPSHOT['name']
snapshot_path = lun_obj.metadata['Path']
- flexvol_name = lun_obj.metadata['Volume']
block_count = 40960
mock__get_lun_from_table = self.mock_object(
self.library, '_get_lun_from_table', return_value=lun_obj)
mock__get_lun_block_count = self.mock_object(
self.library, '_get_lun_block_count', return_value=block_count)
- mock_create_lun = self.mock_object(self.library.zapi_client,
- 'create_lun')
mock__clone_lun = self.mock_object(self.library, '_clone_lun')
self.library._clone_snapshot(fake.SNAPSHOT['name'])
mock__get_lun_from_table.assert_called_once_with(fake.SNAPSHOT['name'])
mock__get_lun_block_count.assert_called_once_with(snapshot_path)
- mock_create_lun.assert_called_once_with(flexvol_name, new_snap_name,
- six.text_type(lun_obj.size),
- lun_obj.metadata)
mock__clone_lun.assert_called_once_with(fake.SNAPSHOT['name'],
new_snap_name,
- block_count=block_count)
+ space_reserved='false',
+ is_snapshot=True)
def test__clone_snapshot_invalid_block_count(self):
lun_obj = block_base.NetAppLun(fake.LUN_WITH_METADATA['handle'],
@@ -1594,8 +1582,6 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase):
self.library, '_get_lun_from_table', return_value=lun_obj)
mock__get_lun_block_count = self.mock_object(
self.library, '_get_lun_block_count', return_value=block_count)
- mock_create_lun = self.mock_object(self.library.zapi_client,
- 'create_lun')
side_effect = exception.VolumeBackendAPIException(data='data')
mock__clone_lun = self.mock_object(self.library, '_clone_lun',
side_effect=side_effect)
@@ -1608,12 +1594,10 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase):
mock__get_lun_from_table.assert_called_once_with(fake.SNAPSHOT['name'])
mock__get_lun_block_count.assert_called_once_with(snapshot_path)
- mock_create_lun.assert_called_once_with(flexvol_name, new_snap_name,
- six.text_type(lun_obj.size),
- lun_obj.metadata)
mock__clone_lun.assert_called_once_with(fake.SNAPSHOT['name'],
new_snap_name,
- block_count=block_count)
+ space_reserved='false',
+ is_snapshot=True)
mock_destroy_lun.assert_called_once_with(new_lun_path)
def test__swap_luns(self):
diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/test_nfs_cmode.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/test_nfs_cmode.py
index d443234c5..d08d8f2dd 100644
--- a/cinder/tests/unit/volume/drivers/netapp/dataontap/test_nfs_cmode.py
+++ b/cinder/tests/unit/volume/drivers/netapp/dataontap/test_nfs_cmode.py
@@ -453,7 +453,7 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
vserver = self.driver._get_vserver_for_ip('FAKE_IP')
- self.assertIsNone(vserver)
+ self.assertEqual(fake.VSERVER_NAME, vserver)
def test_check_for_setup_error(self):
mock_add_looping_tasks = self.mock_object(
@@ -892,9 +892,8 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
is_snapshot=is_snapshot)
def test__clone_backing_file_for_volume(self):
- body = fake.get_fake_net_interface_get_iter_response()
self.driver.zapi_client.get_if_info_by_ip = mock.Mock(
- return_value=[netapp_api.NaElement(body)])
+ return_value=[{'ip': 'fake_ip'}])
self.driver.zapi_client.get_vol_by_junc_vserver = mock.Mock(
return_value='nfsvol')
self.mock_object(self.driver, '_get_export_ip_path',
@@ -924,7 +923,8 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
drv._construct_image_nfs_url = mock.Mock(return_value=["nfs://1"])
drv._check_get_nfs_path_segs = mock.Mock(
return_value=("test:test", "dr"))
- drv._get_ip_verify_on_cluster = mock.Mock(return_value="192.128.1.1")
+ drv._get_ip_verify_on_cluster = mock.Mock(return_value=("192.128.1.1",
+ "vserver"))
drv._get_mount_point_for_share = mock.Mock(return_value='mnt_point')
drv._check_share_can_hold_size = mock.Mock()
# Raise error as if the copyoffload file can not be found
@@ -937,9 +937,9 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
drv._discover_file_till_timeout.assert_not_called()
- @mock.patch.object(image_utils, 'qemu_img_info')
+ @ddt.data(True, False)
def test_copy_from_img_service_raw_copyoffload_workflow_success(
- self, mock_qemu_img_info):
+ self, use_tool):
drv = self.driver
volume = {'id': 'vol_id', 'name': 'name', 'size': 1,
'host': 'openstack@nfscmode#ip1:/mnt_point'}
@@ -952,16 +952,18 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
drv._check_get_nfs_path_segs =\
mock.Mock(return_value=('ip1', '/openstack'))
- drv._get_ip_verify_on_cluster = mock.Mock(return_value='ip1')
+ drv._get_ip_verify_on_cluster = mock.Mock(return_value=('ip1',
+ 'vserver'))
drv._get_host_ip = mock.Mock(return_value='ip2')
drv._get_export_path = mock.Mock(return_value='/exp_path')
drv._get_provider_location = mock.Mock(return_value='share')
drv._execute = mock.Mock()
+ drv._copy_file = mock.Mock()
drv._get_mount_point_for_share = mock.Mock(return_value='mnt_point')
drv._discover_file_till_timeout = mock.Mock(return_value=True)
img_inf = mock.Mock()
img_inf.file_format = 'raw'
- mock_qemu_img_info.return_value = img_inf
+ image_utils.qemu_img_info.return_value = img_inf
drv._check_share_can_hold_size = mock.Mock()
drv._move_nfs_file = mock.Mock(return_value=True)
drv._delete_file_at_path = mock.Mock()
@@ -969,13 +971,19 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
drv._post_clone_image = mock.Mock()
retval = drv._copy_from_img_service(
- context, volume, image_service, image_id)
+ context, volume, image_service, image_id,
+ use_copyoffload_tool=use_tool)
self.assertTrue(retval)
drv._get_ip_verify_on_cluster.assert_any_call('ip1')
drv._check_share_can_hold_size.assert_called_with(
'ip1:/mnt_point', 1)
- self.assertEqual(1, drv._execute.call_count)
+ if use_tool:
+ self.assertEqual(1, drv._execute.call_count)
+ self.assertEqual(0, drv._copy_file.call_count)
+ else:
+ self.assertEqual(1, drv._copy_file.call_count)
+ self.assertEqual(0, drv._execute.call_count)
@mock.patch.object(image_utils, 'convert_image')
@mock.patch.object(image_utils, 'qemu_img_info')
@@ -1007,7 +1015,8 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
mock.Mock(return_value=('203.0.113.122', '/openstack'))
)
- drv._get_ip_verify_on_cluster = mock.Mock(return_value='203.0.113.122')
+ drv._get_ip_verify_on_cluster = mock.Mock(
+ return_value=('203.0.113.122', 'vserver'))
drv._execute = mock.Mock()
drv._execute_as_root = False
drv._get_mount_point_for_share = mock.Mock(
@@ -1054,7 +1063,8 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
'host': 'openstack@nfscmode#192.128.1.1:/exp_path'}
image_id = 'image_id'
cache_result = [('ip1:/openstack', 'img-cache-imgid')]
- drv._get_ip_verify_on_cluster = mock.Mock(return_value='ip1')
+ drv._get_ip_verify_on_cluster = mock.Mock(return_value=('ip1',
+ 'vserver'))
drv._execute = mock.Mock()
drv._register_image_in_cache = mock.Mock()
drv._post_clone_image = mock.Mock()
@@ -1241,25 +1251,29 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
@ddt.unpack
def test_get_source_ip_and_path(self, share, ip):
self.driver._get_ip_verify_on_cluster = mock.Mock(
- return_value=ip)
+ return_value=(ip, fake.VSERVER_NAME))
- src_ip, src_path = self.driver._get_source_ip_and_path(
- share, fake.IMAGE_FILE_ID)
+ src_ip, src_vserver, src_share, src_path = (
+ self.driver._get_source_ip_and_path(
+ share, fake.IMAGE_FILE_ID))
self.assertEqual(ip, src_ip)
+ self.assertEqual(fake.VSERVER_NAME, src_vserver)
+ self.assertEqual(fake.EXPORT_PATH, src_share)
assert_path = fake.EXPORT_PATH + '/' + fake.IMAGE_FILE_ID
self.assertEqual(assert_path, src_path)
self.driver._get_ip_verify_on_cluster.assert_called_once_with(ip)
def test_get_destination_ip_and_path(self):
self.driver._get_ip_verify_on_cluster = mock.Mock(
- return_value=fake.SHARE_IP)
+ return_value=(fake.SHARE_IP, fake.VSERVER_NAME))
mock_extract_host = self.mock_object(volume_utils, 'extract_host')
mock_extract_host.return_value = fake.NFS_SHARE
- dest_ip, dest_path = self.driver._get_destination_ip_and_path(
- fake.VOLUME)
+ dest_ip, dest_vserver, dest_path = (
+ self.driver._get_destination_ip_and_path(fake.VOLUME))
+ self.assertEqual(fake.VSERVER_NAME, dest_vserver)
self.assertEqual(fake.SHARE_IP, dest_ip)
assert_path = fake.EXPORT_PATH + '/' + fake.LUN_NAME
self.assertEqual(assert_path, dest_path)
@@ -1309,7 +1323,8 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
self.driver._is_flexgroup.assert_called_once_with(host=volume['host'])
mock_clone_file.assert_called_once_with()
- def test_clone_image_copyoffload_from_img_service(self):
+ @ddt.data(True, False)
+ def test_clone_image_from_img_service(self, use_tool):
drv = self.driver
context = object()
volume = {'id': 'vol_id', 'name': 'name',
@@ -1330,6 +1345,8 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
drv._copy_from_img_service = mock.Mock(return_value=True)
drv._is_flexgroup = mock.Mock(return_value=False)
drv._is_flexgroup_clone_file_supported = mock.Mock(return_value=True)
+ if not use_tool:
+ drv.configuration.netapp_copyoffload_tool_path = None
retval = drv.clone_image(
context, volume, image_location, image_meta, image_service)
@@ -1338,7 +1355,8 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
{'provider_location': '192.128.1.1:/mnt_point',
'bootable': True}, True))
drv._copy_from_img_service.assert_called_once_with(
- context, volume, image_service, image_id)
+ context, volume, image_service, image_id,
+ use_copyoffload_tool=use_tool)
def test_clone_image_copyoffload_failure(self):
mock_log = self.mock_object(nfs_cmode, 'LOG')
@@ -1365,35 +1383,57 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
self.assertEqual(retval, ({'bootable': False,
'provider_location': None}, False))
drv._copy_from_img_service.assert_called_once_with(
- context, volume, image_service, image_id)
+ context, volume, image_service, image_id,
+ use_copyoffload_tool=True)
mock_log.info.assert_not_called()
- def test_copy_from_remote_cache(self):
+ @ddt.data(True, False)
+ def test_copy_from_remote_cache(self, use_tool):
source_ip = '192.0.1.1'
source_path = '/openstack/img-cache-imgid'
+ source_vserver = 'fake_vserver'
+ source_share = 'vol_fake'
cache_copy = ('192.0.1.1:/openstack', fake.IMAGE_FILE_ID)
+ dest_vserver = 'fake_dest_vserver'
dest_path = fake.EXPORT_PATH + '/' + fake.VOLUME['name']
self.driver._execute = mock.Mock()
+ self.driver._copy_file = mock.Mock()
self.driver._get_source_ip_and_path = mock.Mock(
- return_value=(source_ip, source_path))
+ return_value=(
+ source_ip, source_vserver, source_share, source_path))
self.driver._get_destination_ip_and_path = mock.Mock(
- return_value=(fake.SHARE_IP, dest_path))
+ return_value=(fake.SHARE_IP, dest_vserver, dest_path))
self.driver._register_image_in_cache = mock.Mock()
+ ctxt = mock.Mock()
+ vol_fields = {'id': fake.VOLUME_ID, 'name': fake.VOLUME_NAME}
+ fake_vol = fake_volume.fake_volume_obj(ctxt, **vol_fields)
self.driver._copy_from_remote_cache(
- fake.VOLUME, fake.IMAGE_FILE_ID, cache_copy)
-
- self.driver._execute.assert_called_once_with(
- 'copyoffload_tool_path', source_ip, fake.SHARE_IP,
- source_path, dest_path, run_as_root=False, check_exit_code=0)
+ fake_vol, fake.IMAGE_FILE_ID, cache_copy,
+ use_copyoffload_tool=use_tool)
+
+ if use_tool:
+ self.driver._execute.assert_called_once_with(
+ 'copyoffload_tool_path', source_ip, fake.SHARE_IP,
+ source_path, dest_path, run_as_root=False, check_exit_code=0)
+ self.driver._copy_file.assert_not_called()
+ else:
+ dest_share_path = dest_path.rsplit("/", 1)[0]
+ self.driver._copy_file.assert_called_once_with(
+ fake.IMAGE_FILE_ID, fake.IMAGE_FILE_ID, source_share,
+ source_vserver, dest_share_path, dest_vserver,
+ dest_backend_name=self.driver.backend_name,
+ dest_file_name=fake_vol.name)
+ self.driver._execute.assert_not_called()
self.driver._get_source_ip_and_path.assert_called_once_with(
cache_copy[0], fake.IMAGE_FILE_ID)
self.driver._get_destination_ip_and_path.assert_called_once_with(
- fake.VOLUME)
+ fake_vol)
self.driver._register_image_in_cache.assert_called_once_with(
- fake.VOLUME, fake.IMAGE_FILE_ID)
+ fake_vol, fake.IMAGE_FILE_ID)
- def test_copy_from_cache_workflow_remote_location(self):
+ @ddt.data(True, False)
+ def test_copy_from_cache_workflow_remote_location(self, use_tool):
cache_result = [('ip1:/openstack', fake.IMAGE_FILE_ID),
('ip2:/openstack', fake.IMAGE_FILE_ID),
('ip3:/openstack', fake.IMAGE_FILE_ID)]
@@ -1401,29 +1441,20 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
cache_result[0], False])
self.driver._copy_from_remote_cache = mock.Mock()
self.driver._post_clone_image = mock.Mock()
+ if not use_tool:
+ self.driver.configuration.netapp_copyoffload_tool_path = None
copied = self.driver._copy_from_cache(
fake.VOLUME, fake.IMAGE_FILE_ID, cache_result)
self.assertTrue(copied)
- self.driver._copy_from_remote_cache.assert_called_once_with(
- fake.VOLUME, fake.IMAGE_FILE_ID, cache_result[0])
-
- def test_copy_from_cache_workflow_remote_location_no_copyoffload(self):
- cache_result = [('ip1:/openstack', fake.IMAGE_FILE_ID),
- ('ip2:/openstack', fake.IMAGE_FILE_ID),
- ('ip3:/openstack', fake.IMAGE_FILE_ID)]
- self.driver._find_image_location = mock.Mock(return_value=[
- cache_result[0], False])
- self.driver._copy_from_remote_cache = mock.Mock()
- self.driver._post_clone_image = mock.Mock()
- self.driver.configuration.netapp_copyoffload_tool_path = None
-
- copied = self.driver._copy_from_cache(
- fake.VOLUME, fake.IMAGE_FILE_ID, cache_result)
-
- self.assertFalse(copied)
- self.driver._copy_from_remote_cache.assert_not_called()
+ if use_tool:
+ self.driver._copy_from_remote_cache.assert_called_once_with(
+ fake.VOLUME, fake.IMAGE_FILE_ID, cache_result[0])
+ else:
+ self.driver._copy_from_remote_cache.assert_called_once_with(
+ fake.VOLUME, fake.IMAGE_FILE_ID, cache_result[0],
+ use_copyoffload_tool=False)
def test_copy_from_cache_workflow_local_location(self):
local_share = '/share'
@@ -1456,20 +1487,28 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
self.assertFalse(copied)
- def test_copy_from_cache_workflow_exception(self):
+ @ddt.data(True, False)
+ def test_copy_from_cache_workflow_exception(self, use_tool):
cache_result = [('ip1:/openstack', fake.IMAGE_FILE_ID)]
self.driver._find_image_location = mock.Mock(return_value=[
cache_result[0], False])
self.driver._copy_from_remote_cache = mock.Mock(
side_effect=Exception)
self.driver._post_clone_image = mock.Mock()
+ if not use_tool:
+ self.driver.configuration.netapp_copyoffload_tool_path = None
copied = self.driver._copy_from_cache(
fake.VOLUME, fake.IMAGE_FILE_ID, cache_result)
self.assertFalse(copied)
- self.driver._copy_from_remote_cache.assert_called_once_with(
- fake.VOLUME, fake.IMAGE_FILE_ID, cache_result[0])
+ if use_tool:
+ self.driver._copy_from_remote_cache.assert_called_once_with(
+ fake.VOLUME, fake.IMAGE_FILE_ID, cache_result[0])
+ else:
+ self.driver._copy_from_remote_cache.assert_called_once_with(
+ fake.VOLUME, fake.IMAGE_FILE_ID, cache_result[0],
+ use_copyoffload_tool=False)
self.assertFalse(self.driver._post_clone_image.called)
@ddt.data({'secondary_id': 'dev0', 'configured_targets': ['dev1']},
@@ -1902,21 +1941,15 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
return_value=fake_job_status)
mock_cancel_file_copy = self.mock_object(
self.driver, '_cancel_file_copy')
- ctxt = mock.Mock()
- vol_fields = {
- 'id': fake.VOLUME_ID,
- 'name': fake.VOLUME_NAME,
- 'status': fields.VolumeStatus.AVAILABLE
- }
- fake_vol = fake_volume.fake_volume_obj(ctxt, **vol_fields)
result = self.driver._copy_file(
- fake_vol, fake.POOL_NAME, fake.VSERVER_NAME, fake.DEST_POOL_NAME,
- fake.DEST_VSERVER_NAME, dest_file_name=fake.VOLUME_NAME,
+ fake.VOLUME_NAME, fake.VOLUME_ID, fake.POOL_NAME,
+ fake.VSERVER_NAME, fake.DEST_POOL_NAME, fake.DEST_VSERVER_NAME,
+ dest_file_name=fake.VOLUME_NAME,
dest_backend_name=fake.DEST_BACKEND_NAME, cancel_on_error=True)
mock_start_file_copy.assert_called_with(
- fake_vol.name, fake.DEST_POOL_NAME,
+ fake.VOLUME_NAME, fake.DEST_POOL_NAME,
src_ontap_volume=fake.POOL_NAME,
dest_file_name=fake.VOLUME_NAME)
mock_get_file_copy_status.assert_called_with(fake.JOB_UUID)
@@ -1943,29 +1976,23 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
return_value=fake_job_status)
mock_cancel_file_copy = self.mock_object(
self.driver, '_cancel_file_copy')
- ctxt = mock.Mock()
- vol_fields = {
- 'id': fake.VOLUME_ID,
- 'name': fake.VOLUME_NAME,
- 'status': fields.VolumeStatus.AVAILABLE
- }
- fake_vol = fake_volume.fake_volume_obj(ctxt, **vol_fields)
self.assertRaises(copy_exception,
self.driver._copy_file,
- fake_vol, fake.POOL_NAME, fake.VSERVER_NAME,
- fake.DEST_POOL_NAME, fake.DEST_VSERVER_NAME,
+ fake.VOLUME_NAME, fake.VOLUME_ID, fake.POOL_NAME,
+ fake.VSERVER_NAME, fake.DEST_POOL_NAME,
+ fake.DEST_VSERVER_NAME,
dest_file_name=fake.VOLUME_NAME,
dest_backend_name=fake.DEST_BACKEND_NAME,
cancel_on_error=True)
mock_start_file_copy.assert_called_with(
- fake_vol.name, fake.DEST_POOL_NAME,
+ fake.VOLUME_NAME, fake.DEST_POOL_NAME,
src_ontap_volume=fake.POOL_NAME,
dest_file_name=fake.VOLUME_NAME)
mock_get_file_copy_status.assert_called_with(fake.JOB_UUID)
mock_cancel_file_copy.assert_called_once_with(
- fake.JOB_UUID, fake_vol, fake.DEST_POOL_NAME,
+ fake.JOB_UUID, fake.VOLUME_NAME, fake.DEST_POOL_NAME,
dest_backend_name=fake.DEST_BACKEND_NAME)
def test_migrate_volume_to_vserver(self):
@@ -1984,9 +2011,9 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
fake.DEST_VSERVER_NAME, fake.DEST_BACKEND_NAME)
mock_copy_file.assert_called_once_with(
- fake_vol, fake.EXPORT_PATH[1:], fake.VSERVER_NAME,
- fake.DEST_EXPORT_PATH[1:], fake.DEST_VSERVER_NAME,
- dest_backend_name=fake.DEST_BACKEND_NAME,
+ fake_vol.name, fake_vol.id, fake.EXPORT_PATH[1:],
+ fake.VSERVER_NAME, fake.DEST_EXPORT_PATH[1:],
+ fake.DEST_VSERVER_NAME, dest_backend_name=fake.DEST_BACKEND_NAME,
cancel_on_error=True)
mock_create_vserver_peer.assert_called_once_with(
fake.VSERVER_NAME, fake.BACKEND_NAME, fake.DEST_VSERVER_NAME,
@@ -2051,9 +2078,9 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
fake.VSERVER_NAME, fake.BACKEND_NAME, fake.DEST_VSERVER_NAME,
['file_copy'])
mock_copy_file.assert_called_once_with(
- fake_vol, fake.EXPORT_PATH[1:], fake.VSERVER_NAME,
- fake.DEST_EXPORT_PATH[1:], fake.DEST_VSERVER_NAME,
- dest_backend_name=fake.DEST_BACKEND_NAME,
+ fake_vol.name, fake_vol.id, fake.EXPORT_PATH[1:],
+ fake.VSERVER_NAME, fake.DEST_EXPORT_PATH[1:],
+ fake.DEST_VSERVER_NAME, dest_backend_name=fake.DEST_BACKEND_NAME,
cancel_on_error=True)
mock_finish_volume_migration.assert_not_called()
@@ -2084,9 +2111,9 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
fake.VSERVER_NAME, fake.BACKEND_NAME, fake.DEST_VSERVER_NAME,
['file_copy'])
mock_copy_file.assert_called_once_with(
- fake_vol, fake.EXPORT_PATH[1:], fake.VSERVER_NAME,
- fake.DEST_EXPORT_PATH[1:], fake.DEST_VSERVER_NAME,
- dest_backend_name=fake.DEST_BACKEND_NAME,
+ fake_vol.name, fake_vol.id, fake.EXPORT_PATH[1:],
+ fake.VSERVER_NAME, fake.DEST_EXPORT_PATH[1:],
+ fake.DEST_VSERVER_NAME, dest_backend_name=fake.DEST_BACKEND_NAME,
cancel_on_error=True)
mock_finish_volume_migration.assert_not_called()
@@ -2105,8 +2132,8 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
fake.DEST_BACKEND_NAME)
mock_copy_file.assert_called_once_with(
- fake_vol, fake.EXPORT_PATH[1:], fake.VSERVER_NAME,
- fake.DEST_EXPORT_PATH[1:], fake.VSERVER_NAME,
+ fake_vol.name, fake_vol.id, fake.EXPORT_PATH[1:],
+ fake.VSERVER_NAME, fake.DEST_EXPORT_PATH[1:], fake.VSERVER_NAME,
dest_backend_name=fake.DEST_BACKEND_NAME,
cancel_on_error=True)
mock_finish_volume_migration.assert_called_once_with(
@@ -2133,8 +2160,8 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
fake.DEST_BACKEND_NAME)
mock_copy_file.assert_called_once_with(
- fake_vol, fake.EXPORT_PATH[1:], fake.VSERVER_NAME,
- fake.DEST_EXPORT_PATH[1:], fake.VSERVER_NAME,
+ fake_vol.name, fake_vol.id, fake.EXPORT_PATH[1:],
+ fake.VSERVER_NAME, fake.DEST_EXPORT_PATH[1:], fake.VSERVER_NAME,
dest_backend_name=fake.DEST_BACKEND_NAME,
cancel_on_error=True)
mock_finish_volume_migration.assert_not_called()
@@ -2159,8 +2186,8 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
fake.DEST_BACKEND_NAME)
mock_copy_file.assert_called_once_with(
- fake_vol, fake.EXPORT_PATH[1:], fake.VSERVER_NAME,
- fake.DEST_EXPORT_PATH[1:], fake.VSERVER_NAME,
+ fake_vol.name, fake_vol.id, fake.EXPORT_PATH[1:],
+ fake.VSERVER_NAME, fake.DEST_EXPORT_PATH[1:], fake.VSERVER_NAME,
dest_backend_name=fake.DEST_BACKEND_NAME,
cancel_on_error=True)
mock_finish_volume_migration.assert_not_called()
diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/utils/test_capabilities.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/utils/test_capabilities.py
index b6858c9b0..3468d2e4f 100644
--- a/cinder/tests/unit/volume/drivers/netapp/dataontap/utils/test_capabilities.py
+++ b/cinder/tests/unit/volume/drivers/netapp/dataontap/utils/test_capabilities.py
@@ -20,7 +20,6 @@ from unittest import mock
import ddt
import six
-from cinder import exception
from cinder.tests.unit import test
from cinder.tests.unit.volume.drivers.netapp.dataontap.client import (
fakes as fake_client)
@@ -46,45 +45,6 @@ class CapabilitiesLibraryTestCase(test.TestCase):
config.volume_backend_name = 'fake_backend'
return config
- def test_check_api_permissions(self):
-
- mock_log = self.mock_object(capabilities.LOG, 'warning')
-
- self.ssc_library.check_api_permissions()
-
- self.zapi_client.check_cluster_api.assert_has_calls(
- [mock.call(*key) for key in capabilities.SSC_API_MAP.keys()])
- self.assertEqual(0, mock_log.call_count)
-
- def test_check_api_permissions_failed_ssc_apis(self):
-
- def check_cluster_api(object_name, operation_name, api):
- if api != 'volume-get-iter':
- return False
- return True
-
- self.zapi_client.check_cluster_api.side_effect = check_cluster_api
- mock_log = self.mock_object(capabilities.LOG, 'warning')
-
- self.ssc_library.check_api_permissions()
-
- self.assertEqual(1, mock_log.call_count)
-
- def test_check_api_permissions_failed_volume_api(self):
-
- def check_cluster_api(object_name, operation_name, api):
- if api == 'volume-get-iter':
- return False
- return True
-
- self.zapi_client.check_cluster_api.side_effect = check_cluster_api
- mock_log = self.mock_object(capabilities.LOG, 'warning')
-
- self.assertRaises(exception.VolumeBackendAPIException,
- self.ssc_library.check_api_permissions)
-
- self.assertEqual(0, mock_log.call_count)
-
def test_get_ssc(self):
result = self.ssc_library.get_ssc()
diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/utils/test_data_motion.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/utils/test_data_motion.py
index 09e6f9dcb..19a0bd14e 100644
--- a/cinder/tests/unit/volume/drivers/netapp/dataontap/utils/test_data_motion.py
+++ b/cinder/tests/unit/volume/drivers/netapp/dataontap/utils/test_data_motion.py
@@ -675,6 +675,12 @@ class NetAppCDOTDataMotionMixinTestCase(test.TestCase):
'is_flexgroup': is_flexgroup})
self.mock_object(self.dm_mixin, '_get_replication_aggregate_map',
return_value=aggr_map)
+ self.mock_object(self.dm_mixin,
+ '_get_replication_volume_online_timeout',
+ return_value=2)
+ self.mock_object(self.mock_dest_client,
+ 'get_volume_state',
+ return_value='online')
mock_client_call = self.mock_object(
self.mock_dest_client, 'create_flexvol')
@@ -766,18 +772,18 @@ class NetAppCDOTDataMotionMixinTestCase(test.TestCase):
return_value=False)
self.mock_object(self.dm_mixin, '_get_replication_aggregate_map',
return_value=aggr_map)
+ self.mock_object(self.dm_mixin,
+ '_get_replication_volume_online_timeout',
+ return_value=2)
+ mock_volume_state = self.mock_object(self.mock_dest_client,
+ 'get_volume_state',
+ return_value='online')
pool_is_flexgroup = False
if volume_style == 'flexgroup':
pool_is_flexgroup = True
- self.mock_object(self.dm_mixin,
- '_get_replication_volume_online_timeout',
- return_value=2)
mock_create_volume_async = self.mock_object(self.mock_dest_client,
'create_volume_async')
- mock_volume_state = self.mock_object(self.mock_dest_client,
- 'get_volume_state',
- return_value='online')
mock_dedupe_enabled = self.mock_object(
self.mock_dest_client, 'enable_volume_dedupe_async')
mock_compression_enabled = self.mock_object(
@@ -840,6 +846,12 @@ class NetAppCDOTDataMotionMixinTestCase(test.TestCase):
return_value=True)
self.mock_object(self.dm_mixin, '_get_replication_aggregate_map',
return_value=aggr_map)
+ self.mock_object(self.dm_mixin,
+ '_get_replication_volume_online_timeout',
+ return_value=2)
+ self.mock_object(self.mock_dest_client,
+ 'get_volume_state',
+ return_value='online')
mock_client_call = self.mock_object(
self.mock_dest_client, 'create_flexvol')
diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/utils/test_utils.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/utils/test_utils.py
index 2ee30fbec..80ec2e994 100644
--- a/cinder/tests/unit/volume/drivers/netapp/dataontap/utils/test_utils.py
+++ b/cinder/tests/unit/volume/drivers/netapp/dataontap/utils/test_utils.py
@@ -21,6 +21,7 @@ from cinder import exception
from cinder.tests.unit import test
from cinder.tests.unit.volume.drivers.netapp.dataontap.utils import fakes
from cinder.volume.drivers.netapp.dataontap.client import client_cmode
+from cinder.volume.drivers.netapp.dataontap.client import client_cmode_rest
from cinder.volume.drivers.netapp.dataontap.utils import utils
CONF = cfg.CONF
@@ -33,6 +34,8 @@ class NetAppCDOTDataMotionTestCase(test.TestCase):
super(NetAppCDOTDataMotionTestCase, self).setUp()
self.backend = 'backend1'
self.mock_cmode_client = self.mock_object(client_cmode, 'Client')
+ self.mock_cmode_rest_client = self.mock_object(
+ client_cmode_rest, 'RestClient')
self.config = fakes.get_fake_cmode_config(self.backend)
CONF.set_override('volume_backend_name', self.backend,
group=self.backend)
@@ -48,6 +51,8 @@ class NetAppCDOTDataMotionTestCase(test.TestCase):
group=self.backend)
CONF.set_override('netapp_api_trace_pattern', "fake_regex",
group=self.backend)
+ CONF.set_override('netapp_ssl_cert_path', 'fake_ca',
+ group=self.backend)
def test_get_backend_configuration(self):
self.mock_object(utils, 'CONF')
@@ -81,18 +86,31 @@ class NetAppCDOTDataMotionTestCase(test.TestCase):
utils.get_backend_configuration,
self.backend)
- def test_get_client_for_backend(self):
+ @ddt.data(True, False)
+ def test_get_client_for_backend(self, use_legacy):
+ self.config.netapp_use_legacy_client = use_legacy
self.mock_object(utils, 'get_backend_configuration',
return_value=self.config)
utils.get_client_for_backend(self.backend)
- self.mock_cmode_client.assert_called_once_with(
- hostname='fake_hostname', password='fake_password',
- username='fake_user', transport_type='https', port=8866,
- trace=mock.ANY, vserver=None, api_trace_pattern="fake_regex")
-
- def test_get_client_for_backend_with_vserver(self):
+ if use_legacy:
+ self.mock_cmode_client.assert_called_once_with(
+ hostname='fake_hostname', password='fake_password',
+ username='fake_user', transport_type='https', port=8866,
+ trace=mock.ANY, vserver=None, api_trace_pattern="fake_regex")
+ self.mock_cmode_rest_client.assert_not_called()
+ else:
+ self.mock_cmode_rest_client.assert_called_once_with(
+ hostname='fake_hostname', password='fake_password',
+ username='fake_user', transport_type='https', port=8866,
+ trace=mock.ANY, vserver=None, api_trace_pattern="fake_regex",
+ ssl_cert_path='fake_ca', async_rest_timeout=60)
+ self.mock_cmode_client.assert_not_called()
+
+ @ddt.data(True, False)
+ def test_get_client_for_backend_with_vserver(self, use_legacy):
+ self.config.netapp_use_legacy_client = use_legacy
self.mock_object(utils, 'get_backend_configuration',
return_value=self.config)
@@ -101,11 +119,21 @@ class NetAppCDOTDataMotionTestCase(test.TestCase):
utils.get_client_for_backend(self.backend)
- self.mock_cmode_client.assert_called_once_with(
- hostname='fake_hostname', password='fake_password',
- username='fake_user', transport_type='https', port=8866,
- trace=mock.ANY, vserver='fake_vserver',
- api_trace_pattern="fake_regex")
+ if use_legacy:
+ self.mock_cmode_client.assert_called_once_with(
+ hostname='fake_hostname', password='fake_password',
+ username='fake_user', transport_type='https', port=8866,
+ trace=mock.ANY, vserver='fake_vserver',
+ api_trace_pattern="fake_regex")
+ self.mock_cmode_rest_client.assert_not_called()
+ else:
+ self.mock_cmode_rest_client.assert_called_once_with(
+ hostname='fake_hostname', password='fake_password',
+ username='fake_user', transport_type='https', port=8866,
+ trace=mock.ANY, vserver='fake_vserver',
+ api_trace_pattern="fake_regex", ssl_cert_path='fake_ca',
+ async_rest_timeout=60)
+ self.mock_cmode_client.assert_not_called()
@ddt.ddt
diff --git a/cinder/tests/unit/volume/drivers/netapp/test_utils.py b/cinder/tests/unit/volume/drivers/netapp/test_utils.py
index f600794f3..2d0ef346a 100644
--- a/cinder/tests/unit/volume/drivers/netapp/test_utils.py
+++ b/cinder/tests/unit/volume/drivers/netapp/test_utils.py
@@ -138,11 +138,10 @@ class NetAppDriverUtilsTestCase(test.TestCase):
[fake.ISCSI_FAKE_PORT, fake.ISCSI_FAKE_PORT])
actual_properties_mapped = actual_properties['data']
- expected = fake.ISCSI_MP_TARGET_INFO_DICT.copy()
+ expected = copy.deepcopy(fake.ISCSI_MP_TARGET_INFO_DICT)
expected['target_iqns'][1] = expected['target_iqns'][0]
- self.assertDictEqual(actual_properties_mapped,
- fake.ISCSI_MP_TARGET_INFO_DICT)
+ self.assertDictEqual(expected, actual_properties_mapped)
def test_iscsi_connection_lun_id_type_str(self):
FAKE_LUN_ID = '1'
diff --git a/cinder/tests/unit/volume/drivers/test_infinidat.py b/cinder/tests/unit/volume/drivers/test_infinidat.py
index e49183374..964f4033b 100644
--- a/cinder/tests/unit/volume/drivers/test_infinidat.py
+++ b/cinder/tests/unit/volume/drivers/test_infinidat.py
@@ -1,4 +1,4 @@
-# Copyright 2016 Infinidat Ltd.
+# Copyright 2022 Infinidat Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -14,36 +14,58 @@
# under the License.
"""Unit tests for INFINIDAT InfiniBox volume driver."""
+import copy
import functools
import platform
import socket
from unittest import mock
+import ddt
from oslo_utils import units
from cinder import exception
+from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import test
from cinder import version
from cinder.volume import configuration
from cinder.volume.drivers import infinidat
+TEST_LUN = 1
TEST_WWN_1 = '00:11:22:33:44:55:66:77'
TEST_WWN_2 = '11:11:22:33:44:55:66:77'
-
-TEST_IP_ADDRESS = '1.1.1.1'
-TEST_IQN = 'iqn.2012-07.org.fake:01'
-TEST_ISCSI_TCP_PORT = 3260
-
-TEST_TARGET_PORTAL = '{}:{}'.format(TEST_IP_ADDRESS, TEST_ISCSI_TCP_PORT)
-
-test_volume = mock.Mock(id=1, size=1, volume_type_id=1)
-test_snapshot = mock.Mock(id=2, volume=test_volume, volume_id='1')
-test_clone = mock.Mock(id=3, size=1)
-test_group = mock.Mock(id=4)
-test_snapgroup = mock.Mock(id=5, group=test_group)
+TEST_IP_ADDRESS1 = '1.1.1.1'
+TEST_IP_ADDRESS2 = '2.2.2.2'
+TEST_IP_ADDRESS3 = '3.3.3.3'
+TEST_IP_ADDRESS4 = '4.4.4.4'
+TEST_INITIATOR_IQN = 'iqn.2012-07.org.initiator:01'
+TEST_TARGET_IQN = 'iqn.2012-07.org.target:01'
+TEST_ISCSI_TCP_PORT1 = 3261
+TEST_ISCSI_TCP_PORT2 = 3262
+TEST_ISCSI_NAMESPACE1 = 'netspace1'
+TEST_ISCSI_NAMESPACE2 = 'netspace2'
+TEST_TARGET_PORTAL1 = '{}:{}'.format(TEST_IP_ADDRESS1, TEST_ISCSI_TCP_PORT1)
+TEST_TARGET_PORTAL2 = '{}:{}'.format(TEST_IP_ADDRESS2, TEST_ISCSI_TCP_PORT1)
+TEST_TARGET_PORTAL3 = '{}:{}'.format(TEST_IP_ADDRESS3, TEST_ISCSI_TCP_PORT2)
+TEST_TARGET_PORTAL4 = '{}:{}'.format(TEST_IP_ADDRESS4, TEST_ISCSI_TCP_PORT2)
+TEST_FC_PROTOCOL = 'fc'
+TEST_ISCSI_PROTOCOL = 'iscsi'
+TEST_VOLUME_SOURCE_NAME = 'test-volume'
+TEST_VOLUME_SOURCE_ID = 12345
+TEST_VOLUME_METADATA = {'cinder_id': fake.VOLUME_ID}
+TEST_SNAPSHOT_SOURCE_NAME = 'test-snapshot'
+TEST_SNAPSHOT_SOURCE_ID = 67890
+TEST_SNAPSHOT_METADATA = {'cinder_id': fake.SNAPSHOT_ID}
+
+test_volume = mock.Mock(id=fake.VOLUME_ID, size=1,
+ volume_type_id=fake.VOLUME_TYPE_ID)
+test_snapshot = mock.Mock(id=fake.SNAPSHOT_ID, volume=test_volume,
+ volume_id=test_volume.id)
+test_clone = mock.Mock(id=fake.VOLUME4_ID, size=1)
+test_group = mock.Mock(id=fake.GROUP_ID)
+test_snapgroup = mock.Mock(id=fake.GROUP_SNAPSHOT_ID, group=test_group)
test_connector = dict(wwpns=[TEST_WWN_1],
- initiator=TEST_IQN)
+ initiator=TEST_INITIATOR_IQN)
def skip_driver_setup(func):
@@ -67,25 +89,15 @@ class InfiniboxDriverTestCaseBase(test.TestCase):
def setUp(self):
super(InfiniboxDriverTestCaseBase, self).setUp()
- # create mock configuration
- self.configuration = mock.Mock(spec=configuration.Configuration)
- self.configuration.infinidat_storage_protocol = 'fc'
- self.configuration.san_ip = 'mockbox'
- self.configuration.infinidat_pool_name = 'mockpool'
- self.configuration.san_thin_provision = True
- self.configuration.san_login = 'user'
- self.configuration.san_password = 'pass'
- self.configuration.volume_backend_name = 'mock'
- self.configuration.volume_dd_blocksize = '1M'
- self.configuration.use_multipath_for_image_xfer = False
- self.configuration.enforce_multipath_for_image_xfer = False
- self.configuration.num_volume_device_scan_tries = 1
- self.configuration.san_is_local = False
- self.configuration.chap_username = None
- self.configuration.chap_password = None
- self.configuration.infinidat_use_compression = None
- self.configuration.max_over_subscription_ratio = 10.0
-
+ self.configuration = configuration.Configuration(None)
+ self.configuration.append_config_values(infinidat.infinidat_opts)
+ self.override_config('san_ip', 'infinibox',
+ configuration.SHARED_CONF_GROUP)
+ self.override_config('san_login', 'user',
+ configuration.SHARED_CONF_GROUP)
+ self.override_config('san_password', 'password',
+ configuration.SHARED_CONF_GROUP)
+ self.override_config('infinidat_pool_name', 'pool')
self.driver = infinidat.InfiniboxVolumeDriver(
configuration=self.configuration)
self._system = self._infinibox_mock()
@@ -107,21 +119,36 @@ class InfiniboxDriverTestCaseBase(test.TestCase):
def _infinibox_mock(self):
result = mock.Mock()
self._mock_volume = mock.Mock()
+ self._mock_new_volume = mock.Mock()
+ self._mock_volume.get_id.return_value = TEST_VOLUME_SOURCE_ID
+ self._mock_volume.get_name.return_value = TEST_VOLUME_SOURCE_NAME
+ self._mock_volume.get_type.return_value = 'MASTER'
+ self._mock_volume.get_pool_name.return_value = (
+ self.configuration.infinidat_pool_name)
self._mock_volume.get_size.return_value = 1 * units.Gi
self._mock_volume.has_children.return_value = False
+ self._mock_volume.get_qos_policy.return_value = None
self._mock_volume.get_logical_units.return_value = []
+ self._mock_volume.get_all_metadata.return_value = {}
self._mock_volume.create_snapshot.return_value = self._mock_volume
+ self._mock_snapshot = mock.Mock()
self._mock_host = mock.Mock()
self._mock_host.get_luns.return_value = []
- self._mock_host.map_volume().get_lun.return_value = 1
+ self._mock_host.map_volume().get_lun.return_value = TEST_LUN
self._mock_pool = mock.Mock()
self._mock_pool.get_free_physical_capacity.return_value = units.Gi
self._mock_pool.get_physical_capacity.return_value = units.Gi
- self._mock_ns = mock.Mock()
- self._mock_ns.get_ips.return_value = [
- mock.Mock(ip_address=TEST_IP_ADDRESS, enabled=True)]
- self._mock_ns.get_properties.return_value = mock.Mock(
- iscsi_iqn=TEST_IQN, iscsi_tcp_port=TEST_ISCSI_TCP_PORT)
+ self._mock_pool.get_volumes.return_value = [self._mock_volume]
+ self._mock_name_space1 = mock.Mock()
+ self._mock_name_space2 = mock.Mock()
+ self._mock_name_space1.get_ips.return_value = [
+ mock.Mock(ip_address=TEST_IP_ADDRESS1, enabled=True)]
+ self._mock_name_space2.get_ips.return_value = [
+ mock.Mock(ip_address=TEST_IP_ADDRESS3, enabled=True)]
+ self._mock_name_space1.get_properties.return_value = mock.Mock(
+ iscsi_iqn=TEST_TARGET_IQN, iscsi_tcp_port=TEST_ISCSI_TCP_PORT1)
+ self._mock_name_space2.get_properties.return_value = mock.Mock(
+ iscsi_iqn=TEST_TARGET_IQN, iscsi_tcp_port=TEST_ISCSI_TCP_PORT2)
self._mock_group = mock.Mock()
self._mock_qos_policy = mock.Mock()
result.volumes.safe_get.return_value = self._mock_volume
@@ -131,7 +158,7 @@ class InfiniboxDriverTestCaseBase(test.TestCase):
result.cons_groups.safe_get.return_value = self._mock_group
result.cons_groups.create.return_value = self._mock_group
result.hosts.create.return_value = self._mock_host
- result.network_spaces.safe_get.return_value = self._mock_ns
+ result.network_spaces.safe_get.return_value = self._mock_name_space1
result.components.nodes.get_all.return_value = []
result.qos_policies.create.return_value = self._mock_qos_policy
result.qos_policies.safe_get.return_value = None
@@ -141,6 +168,7 @@ class InfiniboxDriverTestCaseBase(test.TestCase):
raise FakeInfinisdkException()
+@ddt.ddt
class InfiniboxDriverTestCase(InfiniboxDriverTestCaseBase):
def _generate_mock_object_metadata(self, cinder_object):
return {"system": "openstack",
@@ -179,6 +207,16 @@ class InfiniboxDriverTestCase(InfiniboxDriverTestCaseBase):
infinidat._INFINIDAT_CINDER_IDENTIFIER)
self._system.login.assert_called_once()
+ @mock.patch('cinder.volume.drivers.infinidat.infinisdk.InfiniBox')
+ @ddt.data(True, False)
+ def test_ssl_options(self, use_ssl, infinibox):
+ auth = (self.configuration.san_login,
+ self.configuration.san_password)
+ self.override_config('driver_use_ssl', use_ssl)
+ self.driver.do_setup(None)
+ infinibox.assert_called_once_with(self.configuration.san_ip,
+ auth=auth, use_ssl=use_ssl)
+
def test_initialize_connection(self):
self._system.hosts.safe_get.return_value = None
result = self.driver.initialize_connection(test_volume, test_connector)
@@ -198,7 +236,7 @@ class InfiniboxDriverTestCase(InfiniboxDriverTestCaseBase):
def test_initialize_connection_volume_doesnt_exist(self):
self._system.volumes.safe_get.return_value = None
- self.assertRaises(exception.InvalidVolume,
+ self.assertRaises(exception.VolumeNotFound,
self.driver.initialize_connection,
test_volume, test_connector)
@@ -233,7 +271,7 @@ class InfiniboxDriverTestCase(InfiniboxDriverTestCaseBase):
def test_terminate_connection_volume_doesnt_exist(self):
self._system.volumes.safe_get.return_value = None
- self.assertRaises(exception.InvalidVolume,
+ self.assertRaises(exception.VolumeNotFound,
self.driver.terminate_connection,
test_volume, test_connector)
@@ -261,9 +299,10 @@ class InfiniboxDriverTestCase(InfiniboxDriverTestCaseBase):
self.driver.get_volume_stats)
def test_get_volume_stats_max_over_subscription_ratio(self):
+ self.override_config('san_thin_provision', True)
+ self.override_config('max_over_subscription_ratio', 10.0)
result = self.driver.get_volume_stats()
- # check the defaults defined in setUp
- self.assertEqual(10.0, result['max_over_subscription_ratio'])
+ self.assertEqual('10.0', result['max_over_subscription_ratio'])
self.assertTrue(result['thin_provisioning_support'])
self.assertFalse(result['thick_provisioning_support'])
@@ -288,7 +327,7 @@ class InfiniboxDriverTestCase(InfiniboxDriverTestCaseBase):
@mock.patch("cinder.volume.volume_types.get_volume_type_qos_specs")
def test_create_volume_compression_enabled(self, *mocks):
- self.configuration.infinidat_use_compression = True
+ self.override_config('infinidat_use_compression', True)
self.driver.create_volume(test_volume)
self.assertTrue(
self._system.volumes.create.call_args[1]["compression_enabled"]
@@ -296,7 +335,7 @@ class InfiniboxDriverTestCase(InfiniboxDriverTestCaseBase):
@mock.patch("cinder.volume.volume_types.get_volume_type_qos_specs")
def test_create_volume_compression_not_enabled(self, *mocks):
- self.configuration.infinidat_use_compression = False
+ self.override_config('infinidat_use_compression', False)
self.driver.create_volume(test_volume)
self.assertFalse(
self._system.volumes.create.call_args[1]["compression_enabled"]
@@ -343,7 +382,7 @@ class InfiniboxDriverTestCase(InfiniboxDriverTestCaseBase):
def test_create_snapshot_volume_doesnt_exist(self):
self._system.volumes.safe_get.return_value = None
- self.assertRaises(exception.InvalidVolume,
+ self.assertRaises(exception.VolumeNotFound,
self.driver.create_snapshot, test_snapshot)
def test_create_snapshot_api_fail(self):
@@ -361,7 +400,7 @@ class InfiniboxDriverTestCase(InfiniboxDriverTestCaseBase):
def test_create_volume_from_snapshot_doesnt_exist(self):
self._system.volumes.safe_get.return_value = None
- self.assertRaises(exception.InvalidSnapshot,
+ self.assertRaises(exception.SnapshotNotFound,
self.driver.create_volume_from_snapshot,
test_clone, test_snapshot)
@@ -581,6 +620,244 @@ class InfiniboxDriverTestCase(InfiniboxDriverTestCaseBase):
self._mock_host.unmap_volume.assert_called_once()
self._mock_host.safe_delete.assert_called_once()
+ def test_snapshot_revert_use_temp_snapshot(self):
+ result = self.driver.snapshot_revert_use_temp_snapshot()
+ self.assertFalse(result)
+
+ @ddt.data((1, 1), (1, 2))
+ @ddt.unpack
+ def test_revert_to_snapshot_resize(self, volume_size, snapshot_size):
+ volume = copy.deepcopy(test_volume)
+ snapshot = copy.deepcopy(test_snapshot)
+ snapshot.volume.size = snapshot_size
+ self._system.volumes.safe_get.side_effect = [self._mock_snapshot,
+ self._mock_volume,
+ self._mock_volume]
+ self._mock_volume.get_size.side_effect = [volume_size * units.Gi,
+ volume_size * units.Gi]
+ self.driver.revert_to_snapshot(None, volume, snapshot)
+ self._mock_volume.restore.assert_called_once_with(self._mock_snapshot)
+ if volume_size == snapshot_size:
+ self._mock_volume.resize.assert_not_called()
+ else:
+ delta = (snapshot_size - volume_size) * units.Gi
+ self._mock_volume.resize.assert_called_with(delta)
+
+ @mock.patch('cinder.volume.volume_types.get_volume_type_qos_specs')
+ def test_manage_existing_by_source_name(self, *mocks):
+ existing_ref = {'source-name': TEST_VOLUME_SOURCE_NAME}
+ self.driver.manage_existing(test_volume, existing_ref)
+
+ @mock.patch('cinder.volume.volume_types.get_volume_type_qos_specs')
+ def test_manage_existing_by_source_id(self, *mocks):
+ existing_ref = {'source-id': TEST_VOLUME_SOURCE_ID}
+ self.driver.manage_existing(test_volume, existing_ref)
+
+ @mock.patch('cinder.volume.volume_types.get_volume_type_qos_specs')
+ def test_manage_existing_by_invalid_source(self, *mocks):
+ existing_ref = {'source-path': None}
+ self.assertRaises(exception.ManageExistingInvalidReference,
+ self.driver.manage_existing,
+ test_volume, existing_ref)
+
+ @mock.patch('cinder.volume.volume_types.get_volume_type_qos_specs')
+ @mock.patch('cinder.volume.volume_utils.check_already_managed_volume',
+ return_value=False)
+ def test_manage_existing_not_managed(self, *mocks):
+ self._mock_volume.get_all_metadata.return_value = (
+ TEST_VOLUME_METADATA)
+ existing_ref = {'source-name': TEST_VOLUME_SOURCE_NAME}
+ self.driver.manage_existing(test_volume, existing_ref)
+
+ @mock.patch('cinder.volume.volume_types.get_volume_type_qos_specs')
+ @mock.patch('cinder.volume.volume_utils.check_already_managed_volume',
+ return_value=True)
+ def test_manage_existing_already_managed(self, *mocks):
+ self._mock_volume.get_all_metadata.return_value = (
+ TEST_VOLUME_METADATA)
+ existing_ref = {'source-name': TEST_VOLUME_SOURCE_NAME}
+ self.assertRaises(exception.ManageExistingAlreadyManaged,
+ self.driver.manage_existing,
+ test_volume, existing_ref)
+
+ @mock.patch('cinder.volume.volume_types.get_volume_type_qos_specs')
+ def test_manage_existing_invalid_pool(self, *mocks):
+ existing_ref = {'source-name': TEST_VOLUME_SOURCE_NAME}
+ self._mock_volume.get_pool_name.return_value = 'invalid'
+ self.assertRaises(exception.InvalidConfigurationValue,
+ self.driver.manage_existing,
+ test_volume, existing_ref)
+
+ def test_manage_existing_get_size(self):
+ existing_ref = {'source-name': TEST_VOLUME_SOURCE_NAME}
+ size = self.driver.manage_existing_get_size(test_volume, existing_ref)
+ self.assertEqual(test_volume.size, size)
+
+ def test_get_manageable_volumes(self):
+ cinder_volumes = [test_volume]
+ self._mock_volume.is_snapshot.return_value = False
+ self._mock_volume.get_all_metadata.return_value = {
+ 'cinder_id': fake.VOLUME2_ID
+ }
+ self.driver.get_manageable_volumes(cinder_volumes, None,
+ 1, 0, [], [])
+
+ def test_get_manageable_volumes_already_managed(self):
+ cinder_volumes = [test_volume]
+ self._mock_volume.get_id.return_value = TEST_VOLUME_SOURCE_ID
+ self._mock_volume.get_all_metadata.return_value = (
+ TEST_VOLUME_METADATA)
+ self._mock_volume.is_snapshot.return_value = False
+ self.driver.get_manageable_volumes(cinder_volumes, None,
+ 1, 0, [], [])
+
+ def test_get_manageable_volumes_but_snapshots(self):
+ cinder_volumes = [test_volume]
+ self._mock_volume.is_snapshot.return_value = True
+ self.driver.get_manageable_volumes(cinder_volumes, None,
+ 1, 0, [], [])
+
+ def test_get_manageable_volumes_has_mappings(self):
+ cinder_volumes = [test_volume]
+ self._mock_volume.is_snapshot.return_value = False
+ self._mock_volume.get_all_metadata.return_value = {
+ 'cinder_id': fake.VOLUME2_ID
+ }
+ lun = mock.Mock()
+ self._mock_volume.get_logical_units.return_value = [lun]
+ self.driver.get_manageable_volumes(cinder_volumes, None,
+ 1, 0, [], [])
+
+ def test_get_manageable_volumes_has_snapshots(self):
+ cinder_volumes = [test_volume]
+ self._mock_volume.is_snapshot.return_value = False
+ self._mock_volume.has_children.return_value = True
+ self._mock_volume.get_all_metadata.return_value = {
+ 'cinder_id': fake.VOLUME2_ID
+ }
+ self.driver.get_manageable_volumes(cinder_volumes, None,
+ 1, 0, [], [])
+
+ def test_unmanage(self):
+ self.driver.unmanage(test_volume)
+
+ @mock.patch('cinder.objects.Snapshot.exists', return_value=True)
+ def test__check_already_managed_snapshot(self, *mocks):
+ self.driver._check_already_managed_snapshot(test_snapshot.id)
+
+ @mock.patch('cinder.volume.volume_types.get_volume_type_qos_specs')
+ def test_manage_existing_snapshot_by_source_name(self, *mocks):
+ existing_ref = {'source-name': TEST_SNAPSHOT_SOURCE_NAME}
+ self.driver.manage_existing_snapshot(test_snapshot, existing_ref)
+
+ @mock.patch('cinder.volume.volume_types.get_volume_type_qos_specs')
+ def test_manage_existing_snapshot_by_source_id(self, *mocks):
+ existing_ref = {'source-id': TEST_SNAPSHOT_SOURCE_ID}
+ self.driver.manage_existing_snapshot(test_snapshot, existing_ref)
+
+ @mock.patch('cinder.volume.volume_types.get_volume_type_qos_specs')
+ def test_manage_existing_snapshot_but_volume(self, *mocks):
+ existing_ref = {'source-id': TEST_SNAPSHOT_SOURCE_ID}
+ self._mock_volume.is_snapshot.return_value = False
+ self.assertRaises(exception.InvalidSnapshot,
+ self.driver.manage_existing_snapshot,
+ test_snapshot, existing_ref)
+
+ @mock.patch('cinder.volume.volume_types.get_volume_type_qos_specs')
+ def test_manage_existing_snapshot_by_invalid_source(self, *mocks):
+ existing_ref = {'source-path': None}
+ self.assertRaises(exception.ManageExistingInvalidReference,
+ self.driver.manage_existing_snapshot,
+ test_snapshot, existing_ref)
+
+ @mock.patch('cinder.volume.volume_types.get_volume_type_qos_specs')
+ def test_manage_existing_snapshot_by_non_cinder_id(self, *mocks):
+ self._mock_volume.get_all_metadata.return_value = {'cinder_id': 'x'}
+ existing_ref = {'source-id': TEST_SNAPSHOT_SOURCE_ID}
+ self.driver.manage_existing_snapshot(test_snapshot, existing_ref)
+
+ @mock.patch('cinder.volume.volume_types.get_volume_type_qos_specs')
+ @mock.patch('cinder.volume.drivers.infinidat.InfiniboxVolumeDriver.'
+ '_check_already_managed_snapshot', return_value=False)
+ def test_manage_existing_snapshot_not_managed(self, *mocks):
+ self._mock_volume.get_all_metadata.return_value = (
+ TEST_SNAPSHOT_METADATA)
+ existing_ref = {'source-name': TEST_SNAPSHOT_SOURCE_NAME}
+ self.driver.manage_existing_snapshot(test_snapshot, existing_ref)
+
+ @mock.patch('cinder.volume.volume_types.get_volume_type_qos_specs')
+ @mock.patch('cinder.volume.drivers.infinidat.InfiniboxVolumeDriver.'
+ '_check_already_managed_snapshot', return_value=True)
+ def test_manage_existing_snapshot_already_managed(self, *mocks):
+ self._mock_volume.get_all_metadata.return_value = (
+ TEST_SNAPSHOT_METADATA)
+ existing_ref = {'source-name': TEST_SNAPSHOT_SOURCE_NAME}
+ self.assertRaises(exception.ManageExistingAlreadyManaged,
+ self.driver.manage_existing_snapshot,
+ test_snapshot, existing_ref)
+
+ @mock.patch('cinder.volume.volume_types.get_volume_type_qos_specs')
+ def test_manage_existing_snapshot_invalid_pool(self, *mocks):
+ existing_ref = {'source-name': TEST_SNAPSHOT_SOURCE_NAME}
+ self._mock_volume.get_pool_name.return_value = 'invalid'
+ self.assertRaises(exception.InvalidConfigurationValue,
+ self.driver.manage_existing_snapshot,
+ test_snapshot, existing_ref)
+
+ def test_manage_existing_snapshot_get_size(self):
+ existing_ref = {'source-name': TEST_SNAPSHOT_SOURCE_NAME}
+ size = self.driver.manage_existing_snapshot_get_size(test_volume,
+ existing_ref)
+ self.assertEqual(test_snapshot.volume.size, size)
+
+ def test_get_manageable_snapshots(self):
+ cinder_snapshots = [test_snapshot]
+ self._mock_volume.is_snapshot.return_value = True
+ self._mock_volume.get_all_metadata.return_value = {
+ 'cinder_id': fake.SNAPSHOT2_ID
+ }
+ self.driver.get_manageable_snapshots(cinder_snapshots,
+ None, 1, 0, [], [])
+
+ def test_get_manageable_snapshots_already_managed(self):
+ cinder_snapshots = [test_snapshot]
+ self._mock_volume.get_id.return_value = TEST_SNAPSHOT_SOURCE_ID
+ self._mock_volume.get_all_metadata.return_value = (
+ TEST_SNAPSHOT_METADATA)
+ self._mock_volume.is_snapshot.return_value = True
+ self.driver.get_manageable_snapshots(cinder_snapshots,
+ None, 1, 0, [], [])
+
+ def test_get_manageable_snapshots_but_volumes(self):
+ cinder_snapshots = [test_snapshot]
+ self._mock_volume.is_snapshot.return_value = False
+ self.driver.get_manageable_snapshots(cinder_snapshots,
+ None, 1, 0, [], [])
+
+ def test_get_manageable_snapshots_has_mappings(self):
+ cinder_snapshots = [test_snapshot]
+ self._mock_volume.is_snapshot.return_value = True
+ self._mock_volume.get_all_metadata.return_value = {
+ 'cinder_id': fake.SNAPSHOT2_ID
+ }
+ lun = mock.Mock()
+ self._mock_volume.get_logical_units.return_value = [lun]
+ self.driver.get_manageable_snapshots(cinder_snapshots,
+ None, 1, 0, [], [])
+
+ def test_get_manageable_snapshots_has_clones(self):
+ cinder_snapshots = [test_snapshot]
+ self._mock_volume.is_snapshot.return_value = True
+ self._mock_volume.has_children.return_value = True
+ self._mock_volume.get_all_metadata.return_value = {
+ 'cinder_id': fake.SNAPSHOT2_ID
+ }
+ self.driver.get_manageable_snapshots(cinder_snapshots,
+ None, 1, 0, [], [])
+
+ def test_unmanage_snapshot(self):
+ self.driver.unmanage_snapshot(test_snapshot)
+
class InfiniboxDriverTestCaseFC(InfiniboxDriverTestCaseBase):
def test_initialize_connection_multiple_wwpns(self):
@@ -590,7 +867,7 @@ class InfiniboxDriverTestCaseFC(InfiniboxDriverTestCaseBase):
def test_validate_connector(self):
fc_connector = {'wwpns': [TEST_WWN_1, TEST_WWN_2]}
- iscsi_connector = {'initiator': TEST_IQN}
+ iscsi_connector = {'initiator': TEST_INITIATOR_IQN}
self.driver.validate_connector(fc_connector)
self.assertRaises(exception.InvalidConnectorException,
self.driver.validate_connector, iscsi_connector)
@@ -599,33 +876,39 @@ class InfiniboxDriverTestCaseFC(InfiniboxDriverTestCaseBase):
class InfiniboxDriverTestCaseISCSI(InfiniboxDriverTestCaseBase):
def setUp(self):
super(InfiniboxDriverTestCaseISCSI, self).setUp()
- self.configuration.infinidat_storage_protocol = 'iscsi'
- self.configuration.infinidat_iscsi_netspaces = ['netspace1']
- self.configuration.use_chap_auth = False
+ self.override_config('infinidat_storage_protocol',
+ TEST_ISCSI_PROTOCOL)
+ self.override_config('infinidat_iscsi_netspaces',
+ [TEST_ISCSI_NAMESPACE1])
+ self.override_config('use_chap_auth', False)
self.driver.do_setup(None)
def test_setup_without_netspaces_configured(self):
- self.configuration.infinidat_iscsi_netspaces = []
+ self.override_config('infinidat_iscsi_netspaces', [])
self.assertRaises(exception.VolumeDriverException,
self.driver.do_setup, None)
- def _assert_plurals(self, result, expected_length):
- self.assertEqual(expected_length, len(result['data']['target_luns']))
- self.assertEqual(expected_length, len(result['data']['target_iqns']))
- self.assertEqual(expected_length,
- len(result['data']['target_portals']))
- self.assertTrue(all(lun == 1 for lun in result['data']['target_luns']))
- self.assertTrue(
- all(iqn == test_connector['initiator'] for
- iqn in result['data']['target_iqns']))
-
- self.assertTrue(all(target_portal == TEST_TARGET_PORTAL for
- target_portal in result['data']['target_portals']))
-
def test_initialize_connection(self):
result = self.driver.initialize_connection(test_volume, test_connector)
- self.assertEqual(1, result['data']['target_lun'])
- self._assert_plurals(result, 1)
+ expected = {
+ 'driver_volume_type': TEST_ISCSI_PROTOCOL,
+ 'data': {
+ 'target_discovered': True,
+ 'target_portal': TEST_TARGET_PORTAL1,
+ 'target_iqn': TEST_TARGET_IQN,
+ 'target_lun': TEST_LUN,
+ 'target_portals': [
+ TEST_TARGET_PORTAL1
+ ],
+ 'target_iqns': [
+ TEST_TARGET_IQN
+ ],
+ 'target_luns': [
+ TEST_LUN
+ ]
+ }
+ }
+ self.assertEqual(expected, result)
def test_initialize_netspace_does_not_exist(self):
self._system.network_spaces.safe_get.return_value = None
@@ -634,13 +917,13 @@ class InfiniboxDriverTestCaseISCSI(InfiniboxDriverTestCaseBase):
test_volume, test_connector)
def test_initialize_netspace_has_no_ips(self):
- self._mock_ns.get_ips.return_value = []
+ self._mock_name_space1.get_ips.return_value = []
self.assertRaises(exception.VolumeDriverException,
self.driver.initialize_connection,
test_volume, test_connector)
def test_initialize_connection_with_chap(self):
- self.configuration.use_chap_auth = True
+ self.override_config('use_chap_auth', True)
result = self.driver.initialize_connection(test_volume, test_connector)
self.assertEqual(1, result['data']['target_lun'])
self.assertEqual('CHAP', result['data']['auth_method'])
@@ -648,22 +931,136 @@ class InfiniboxDriverTestCaseISCSI(InfiniboxDriverTestCaseBase):
self.assertIn('auth_password', result['data'])
def test_initialize_connection_multiple_netspaces(self):
- self.configuration.infinidat_iscsi_netspaces = ['netspace1',
- 'netspace2']
+ self.override_config('infinidat_iscsi_netspaces',
+ [TEST_ISCSI_NAMESPACE1, TEST_ISCSI_NAMESPACE2])
+ self._system.network_spaces.safe_get.side_effect = [
+ self._mock_name_space1, self._mock_name_space2]
result = self.driver.initialize_connection(test_volume, test_connector)
- self.assertEqual(1, result['data']['target_lun'])
- self._assert_plurals(result, 2)
-
- def test_initialize_connection_plurals(self):
+ expected = {
+ 'driver_volume_type': TEST_ISCSI_PROTOCOL,
+ 'data': {
+ 'target_discovered': True,
+ 'target_portal': TEST_TARGET_PORTAL1,
+ 'target_iqn': TEST_TARGET_IQN,
+ 'target_lun': TEST_LUN,
+ 'target_portals': [
+ TEST_TARGET_PORTAL1,
+ TEST_TARGET_PORTAL3
+ ],
+ 'target_iqns': [
+ TEST_TARGET_IQN,
+ TEST_TARGET_IQN
+ ],
+ 'target_luns': [
+ TEST_LUN,
+ TEST_LUN
+ ]
+ }
+ }
+ self.assertEqual(expected, result)
+
+ def test_initialize_connection_multiple_netspaces_multipath(self):
+ self.override_config('infinidat_iscsi_netspaces',
+ [TEST_ISCSI_NAMESPACE1, TEST_ISCSI_NAMESPACE2])
+ self._system.network_spaces.safe_get.side_effect = [
+ self._mock_name_space1, self._mock_name_space2]
+ self._mock_name_space1.get_ips.return_value = [
+ mock.Mock(ip_address=TEST_IP_ADDRESS1, enabled=True),
+ mock.Mock(ip_address=TEST_IP_ADDRESS2, enabled=True)]
+ self._mock_name_space2.get_ips.return_value = [
+ mock.Mock(ip_address=TEST_IP_ADDRESS3, enabled=True),
+ mock.Mock(ip_address=TEST_IP_ADDRESS4, enabled=True)]
+ result = self.driver.initialize_connection(test_volume, test_connector)
+ expected = {
+ 'driver_volume_type': TEST_ISCSI_PROTOCOL,
+ 'data': {
+ 'target_discovered': True,
+ 'target_portal': TEST_TARGET_PORTAL1,
+ 'target_iqn': TEST_TARGET_IQN,
+ 'target_lun': TEST_LUN,
+ 'target_portals': [
+ TEST_TARGET_PORTAL1,
+ TEST_TARGET_PORTAL2,
+ TEST_TARGET_PORTAL3,
+ TEST_TARGET_PORTAL4
+ ],
+ 'target_iqns': [
+ TEST_TARGET_IQN,
+ TEST_TARGET_IQN,
+ TEST_TARGET_IQN,
+ TEST_TARGET_IQN
+ ],
+ 'target_luns': [
+ TEST_LUN,
+ TEST_LUN,
+ TEST_LUN,
+ TEST_LUN
+ ]
+ }
+ }
+ self.assertEqual(expected, result)
+
+ def test_initialize_connection_disabled_interface(self):
+ self._mock_name_space1.get_ips.return_value = [
+ mock.Mock(ip_address=TEST_IP_ADDRESS1, enabled=False),
+ mock.Mock(ip_address=TEST_IP_ADDRESS2, enabled=True)]
+ result = self.driver.initialize_connection(test_volume, test_connector)
+ expected = {
+ 'driver_volume_type': TEST_ISCSI_PROTOCOL,
+ 'data': {
+ 'target_discovered': True,
+ 'target_portal': TEST_TARGET_PORTAL2,
+ 'target_iqn': TEST_TARGET_IQN,
+ 'target_lun': TEST_LUN,
+ 'target_portals': [
+ TEST_TARGET_PORTAL2
+ ],
+ 'target_iqns': [
+ TEST_TARGET_IQN
+ ],
+ 'target_luns': [
+ TEST_LUN
+ ]
+ }
+ }
+ self.assertEqual(expected, result)
+
+ def test_initialize_connection_multiple_interfaces(self):
+ self._mock_name_space1.get_ips.return_value = [
+ mock.Mock(ip_address=TEST_IP_ADDRESS1, enabled=True),
+ mock.Mock(ip_address=TEST_IP_ADDRESS2, enabled=True)]
+ self._mock_name_space1.get_properties.return_value = mock.Mock(
+ iscsi_iqn=TEST_TARGET_IQN, iscsi_tcp_port=TEST_ISCSI_TCP_PORT1)
result = self.driver.initialize_connection(test_volume, test_connector)
- self._assert_plurals(result, 1)
+ expected = {
+ 'driver_volume_type': TEST_ISCSI_PROTOCOL,
+ 'data': {
+ 'target_discovered': True,
+ 'target_portal': TEST_TARGET_PORTAL1,
+ 'target_iqn': TEST_TARGET_IQN,
+ 'target_lun': TEST_LUN,
+ 'target_portals': [
+ TEST_TARGET_PORTAL1,
+ TEST_TARGET_PORTAL2
+ ],
+ 'target_iqns': [
+ TEST_TARGET_IQN,
+ TEST_TARGET_IQN
+ ],
+ 'target_luns': [
+ TEST_LUN,
+ TEST_LUN
+ ]
+ }
+ }
+ self.assertEqual(expected, result)
def test_terminate_connection(self):
self.driver.terminate_connection(test_volume, test_connector)
def test_validate_connector(self):
fc_connector = {'wwpns': [TEST_WWN_1, TEST_WWN_2]}
- iscsi_connector = {'initiator': TEST_IQN}
+ iscsi_connector = {'initiator': TEST_INITIATOR_IQN}
self.driver.validate_connector(iscsi_connector)
self.assertRaises(exception.InvalidConnectorException,
self.driver.validate_connector, fc_connector)
diff --git a/cinder/tests/unit/volume/drivers/test_linstordrv.py b/cinder/tests/unit/volume/drivers/test_linstordrv.py
index 51b874025..007e06b79 100644
--- a/cinder/tests/unit/volume/drivers/test_linstordrv.py
+++ b/cinder/tests/unit/volume/drivers/test_linstordrv.py
@@ -13,6 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+import copy
from unittest import mock
from oslo_utils import timeutils
@@ -1110,13 +1111,15 @@ class LinstorIscsiDriverTestCase(test.TestCase):
@mock.patch(DRIVER + 'LinstorIscsiDriver._get_volume_stats')
def test_iscsi_get_volume_stats(self, m_vol_stats, m_rsc_list):
- m_vol_stats.return_value = VOLUME_STATS_RESP
+ m_vol_stats.return_value = copy.deepcopy(VOLUME_STATS_RESP)
m_rsc_list.return_value = RESOURCE_LIST
val = self.driver.get_volume_stats()
- expected = VOLUME_STATS_RESP
+ expected = copy.deepcopy(VOLUME_STATS_RESP)
expected["storage_protocol"] = 'iSCSI'
+ expected["pools"][0]['location_info'] = (
+ 'LinstorIscsiDriver:' + expected["pools"][0]['location_info'])
self.assertEqual(expected, val)
@mock.patch(DRIVER + 'linstor')
@@ -1178,11 +1181,13 @@ class LinstorDrbdDriverTestCase(test.TestCase):
@mock.patch(DRIVER + 'LinstorDrbdDriver._get_volume_stats')
def test_drbd_get_volume_stats(self, m_vol_stats):
- m_vol_stats.return_value = VOLUME_STATS_RESP
+ m_vol_stats.return_value = copy.deepcopy(VOLUME_STATS_RESP)
val = self.driver.get_volume_stats()
- expected = VOLUME_STATS_RESP
+ expected = copy.deepcopy(VOLUME_STATS_RESP)
expected["storage_protocol"] = 'DRBD'
+ expected["pools"][0]['location_info'] = (
+ 'LinstorDrbdDriver:' + expected["pools"][0]['location_info'])
self.assertEqual(expected, val)
@mock.patch(DRIVER + 'linstor')
diff --git a/cinder/tests/unit/volume/drivers/test_nfs.py b/cinder/tests/unit/volume/drivers/test_nfs.py
index ad80fc11c..7aab7eff2 100644
--- a/cinder/tests/unit/volume/drivers/test_nfs.py
+++ b/cinder/tests/unit/volume/drivers/test_nfs.py
@@ -50,13 +50,14 @@ class RemoteFsDriverTestCase(test.TestCase):
def setUp(self):
super(RemoteFsDriverTestCase, self).setUp()
self._driver = remotefs.RemoteFSDriver()
- self.configuration = mock.Mock(conf.Configuration)
- self.configuration.append_config_values(mock.ANY)
- self.configuration.nas_secure_file_permissions = 'false'
- self.configuration.nas_secure_file_operations = 'false'
- self.configuration.nfs_snapshot_support = True
- self.configuration.max_over_subscription_ratio = 1.0
- self.configuration.reserved_percentage = 5
+ self.configuration = conf.Configuration(None)
+ self.configuration.append_config_values(nfs.nfs_opts)
+ self.configuration.append_config_values(remotefs.nas_opts)
+ self.override_config('nas_secure_file_permissions', 'false')
+ self.override_config('nas_secure_file_operations', 'false')
+ self.override_config('nfs_snapshot_support', True)
+ self.override_config('max_over_subscription_ratio', 1.0)
+ self.override_config('reserved_percentage', 5)
self._driver = remotefs.RemoteFSDriver(
configuration=self.configuration)
mock_exc = mock.patch.object(self._driver, '_execute')
@@ -92,14 +93,14 @@ class RemoteFsDriverTestCase(test.TestCase):
@mock.patch.object(remotefs, 'LOG')
def test_set_rw_permissions_with_secure_file_permissions(self, LOG):
self._driver._mounted_shares = [self.TEST_EXPORT]
- self.configuration.nas_secure_file_permissions = 'true'
+ self.override_config('nas_secure_file_permissions', 'true')
self._driver._set_rw_permissions(self.TEST_FILE_NAME)
self.assertFalse(LOG.warning.called)
@mock.patch.object(remotefs, 'LOG')
def test_set_rw_permissions_without_secure_file_permissions(self, LOG):
- self.configuration.nas_secure_file_permissions = 'false'
+ self.override_config('nas_secure_file_permissions', 'false')
self._driver._set_rw_permissions(self.TEST_FILE_NAME)
self.assertTrue(LOG.warning.called)
@@ -290,7 +291,7 @@ class RemoteFsDriverTestCase(test.TestCase):
operations. This test verifies the settings when secure.
"""
drv = self._driver
- self.configuration.nas_secure_file_operations = 'true'
+ self.override_config('nas_secure_file_operations', 'true')
ret_flag = drv.secure_file_operations_enabled()
self.assertTrue(ret_flag)
@@ -301,7 +302,7 @@ class RemoteFsDriverTestCase(test.TestCase):
operations. This test verifies the settings when not secure.
"""
drv = self._driver
- self.configuration.nas_secure_file_operations = 'false'
+ self.override_config('nas_secure_file_operations', 'false')
ret_flag = drv.secure_file_operations_enabled()
self.assertFalse(ret_flag)
@@ -454,23 +455,24 @@ class NfsDriverTestCase(test.TestCase):
def setUp(self):
super(NfsDriverTestCase, self).setUp()
- self.configuration = mock.Mock(conf.Configuration)
- self.configuration.append_config_values(mock.ANY)
- self.configuration.max_over_subscription_ratio = 1.0
- self.configuration.reserved_percentage = 5
- self.configuration.nfs_shares_config = None
- self.configuration.nfs_sparsed_volumes = True
- self.configuration.nfs_reserved_percentage = 5.0
- self.configuration.nfs_mount_point_base = self.TEST_MNT_POINT_BASE
- self.configuration.nfs_mount_options = None
- self.configuration.nfs_mount_attempts = 3
- self.configuration.nfs_qcow2_volumes = False
- self.configuration.nas_secure_file_permissions = 'false'
- self.configuration.nas_secure_file_operations = 'false'
- self.configuration.nas_host = None
- self.configuration.nas_share_path = None
- self.configuration.nas_mount_options = None
- self.configuration.volume_dd_blocksize = '1M'
+ self.configuration = conf.Configuration(None)
+ self.configuration.append_config_values(nfs.nfs_opts)
+ self.configuration.append_config_values(remotefs.nas_opts)
+ self.override_config('max_over_subscription_ratio', 1.0)
+ self.override_config('reserved_percentage', 5)
+ self.override_config('nfs_shares_config', None)
+ self.override_config('nfs_sparsed_volumes', True)
+ self.override_config('reserved_percentage', 5.0)
+ self.override_config('nfs_mount_point_base', self.TEST_MNT_POINT_BASE)
+ self.override_config('nfs_mount_options', None)
+ self.override_config('nfs_mount_attempts', 3)
+ self.override_config('nfs_qcow2_volumes', False)
+ self.override_config('nas_secure_file_permissions', 'false')
+ self.override_config('nas_secure_file_operations', 'false')
+ self.override_config('nas_host', None)
+ self.override_config('nas_share_path', None)
+ self.override_config('nas_mount_options', None)
+ self.override_config('volume_dd_blocksize', '1M')
self.mock_object(volume_utils, 'get_max_over_subscription_ratio',
return_value=1)
@@ -491,7 +493,7 @@ class NfsDriverTestCase(test.TestCase):
@ddt.data(NFS_CONFIG1, NFS_CONFIG2, NFS_CONFIG3, NFS_CONFIG4)
def test_local_path(self, nfs_config):
"""local_path common use case."""
- self.configuration.nfs_mount_point_base = self.TEST_MNT_POINT_BASE
+ self.override_config('nfs_mount_point_base', self.TEST_MNT_POINT_BASE)
self._set_driver(extra_confs=nfs_config)
drv = self._driver
@@ -535,7 +537,7 @@ class NfsDriverTestCase(test.TestCase):
self._set_driver()
drv = self._driver
- self.configuration.nfs_mount_point_base = self.TEST_MNT_POINT_BASE
+ self.override_config('nfs_mount_point_base', self.TEST_MNT_POINT_BASE)
self.assertEqual('/mnt/test/2f4f60214cf43c595666dd815f0360a4',
drv._get_mount_point_for_share(self.TEST_NFS_EXPORT1))
@@ -543,8 +545,8 @@ class NfsDriverTestCase(test.TestCase):
def test_get_mount_point_for_share_given_extra_slash_in_state_path(self):
"""_get_mount_point_for_share should calculate correct value."""
# This test gets called with the extra slash
- self.configuration.nfs_mount_point_base = (
- self.TEST_MNT_POINT_BASE_EXTRA_SLASH)
+ self.override_config('nfs_mount_point_base',
+ self.TEST_MNT_POINT_BASE_EXTRA_SLASH)
# The driver gets called with the correct configuration and removes
# the extra slash
@@ -796,10 +798,8 @@ class NfsDriverTestCase(test.TestCase):
def test_create_nonsparsed_volume(self, mock_save):
self._set_driver()
drv = self._driver
- self.configuration.nfs_sparsed_volumes = False
- volume = self._simple_volume()
-
self.override_config('nfs_sparsed_volumes', False)
+ volume = self._simple_volume()
with mock.patch.object(
drv, '_create_regular_file') as mock_create_regular_file:
@@ -916,7 +916,7 @@ class NfsDriverTestCase(test.TestCase):
def test_get_volume_stats_with_non_zero_reserved_percentage(self):
"""get_volume_stats must fill the correct values."""
- self.configuration.reserved_percentage = 10.0
+ self.override_config('reserved_percentage', 10.0)
drv = nfs.NfsDriver(configuration=self.configuration)
drv._mounted_shares = [self.TEST_NFS_EXPORT1, self.TEST_NFS_EXPORT2]
@@ -1508,7 +1508,7 @@ class NfsDriverTestCase(test.TestCase):
self._set_driver()
drv = self._driver
volume = self._simple_volume()
- self.configuration.nfs_snapshot_support = True
+ self.override_config('nfs_snapshot_support', True)
fake_snap = fake_snapshot.fake_snapshot_obj(self.context)
fake_snap.volume = volume
vol_dir = os.path.join(self.TEST_MNT_POINT_BASE,
diff --git a/cinder/tests/unit/volume/drivers/test_pure.py b/cinder/tests/unit/volume/drivers/test_pure.py
index d6909d112..40fbc79f3 100644
--- a/cinder/tests/unit/volume/drivers/test_pure.py
+++ b/cinder/tests/unit/volume/drivers/test_pure.py
@@ -87,6 +87,10 @@ FC_PORT_NAMES = ["ct0.fc2", "ct0.fc3", "ct1.fc2", "ct1.fc3"]
NVME_IPS = ["10.0.0." + str(i + 1) for i in range(len(NVME_PORT_NAMES))]
NVME_IPS += ["[2001:db8::" + str(i + 1) + "]"
for i in range(len(NVME_PORT_NAMES))]
+AC_NVME_IPS = ["10.0.0." + str(i + 1 + len(NVME_PORT_NAMES))
+ for i in range(len(NVME_PORT_NAMES))]
+AC_NVME_IPS += ["[2001:db8::1:" + str(i + 1) + "]"
+ for i in range(len(NVME_PORT_NAMES))]
NVME_CIDR = "0.0.0.0/0"
NVME_CIDR_V6 = "::/0"
NVME_PORT = 4420
@@ -131,6 +135,7 @@ NVME_CONNECTOR = {"nqn": INITIATOR_NQN, "host": HOSTNAME}
ISCSI_CONNECTOR = {"initiator": INITIATOR_IQN, "host": HOSTNAME}
FC_CONNECTOR = {"wwpns": {INITIATOR_WWN}, "host": HOSTNAME}
TARGET_NQN = "nqn.2010-06.com.purestorage:flasharray.12345abc"
+AC_TARGET_NQN = "nqn.2010-06.com.purestorage:flasharray.67890def"
TARGET_IQN = "iqn.2010-06.com.purestorage:flasharray.12345abc"
AC_TARGET_IQN = "iqn.2018-06.com.purestorage:flasharray.67890def"
TARGET_WWN = "21000024ff59fe94"
@@ -166,6 +171,12 @@ NVME_PORTS = [{"name": name,
"portal": ip + ":" + TARGET_ROCE_PORT,
"wwn": None,
} for name, ip in zip(NVME_PORT_NAMES * 2, NVME_IPS)]
+AC_NVME_PORTS = [{"name": name,
+ "nqn": AC_TARGET_NQN,
+ "iqn": None,
+ "portal": ip + ":" + TARGET_ROCE_PORT,
+ "wwn": None,
+ } for name, ip in zip(NVME_PORT_NAMES * 2, AC_NVME_IPS)]
ISCSI_PORTS = [{"name": name,
"iqn": TARGET_IQN,
"portal": ip + ":" + TARGET_PORT,
@@ -340,7 +351,55 @@ NVME_CONNECTION_INFO_V6 = {
"volume_nguid": "0009714b5cb916324a9374c470002b2c8",
},
}
-
+NVME_CONNECTION_INFO_AC = {
+ "driver_volume_type": "nvmeof",
+ "data": {
+ "target_nqn": TARGET_NQN,
+ "discard": True,
+ "portals": [
+ (NVME_IPS[0], NVME_PORT, "rdma"),
+ (NVME_IPS[1], NVME_PORT, "rdma"),
+ (NVME_IPS[2], NVME_PORT, "rdma"),
+ (NVME_IPS[3], NVME_PORT, "rdma"),
+ (AC_NVME_IPS[0], NVME_PORT, "rdma"),
+ (AC_NVME_IPS[1], NVME_PORT, "rdma"),
+ (AC_NVME_IPS[2], NVME_PORT, "rdma"),
+ (AC_NVME_IPS[3], NVME_PORT, "rdma")],
+ "volume_nguid": "0009714b5cb916324a9374c470002b2c8",
+ },
+}
+NVME_CONNECTION_INFO_AC_FILTERED = {
+ "driver_volume_type": "nvmeof",
+ "data": {
+ "target_nqn": TARGET_NQN,
+ "discard": True,
+ # Final entry filtered by NVME_CIDR_FILTERED
+ "portals": [
+ (NVME_IPS[0], NVME_PORT, "rdma"),
+ (NVME_IPS[1], NVME_PORT, "rdma"),
+ (NVME_IPS[2], NVME_PORT, "rdma"),
+ (NVME_IPS[3], NVME_PORT, "rdma"),
+ (AC_NVME_IPS[0], NVME_PORT, "rdma"),
+ (AC_NVME_IPS[1], NVME_PORT, "rdma"),
+ (AC_NVME_IPS[2], NVME_PORT, "rdma")],
+ "volume_nguid": "0009714b5cb916324a9374c470002b2c8",
+ },
+}
+NVME_CONNECTION_INFO_AC_FILTERED_LIST = {
+ "driver_volume_type": "nvmeof",
+ "data": {
+ "target_nqn": TARGET_NQN,
+ "discard": True,
+ # Entries filtered by NVME_CIDRS_FILTERED
+ "portals": [
+ (NVME_IPS[1], NVME_PORT, "rdma"),
+ (NVME_IPS[2], NVME_PORT, "rdma"),
+ (AC_NVME_IPS[5].strip("[]"), NVME_PORT, "rdma"), # IPv6
+ (AC_NVME_IPS[6].strip("[]"), NVME_PORT, "rdma"), # IPv6
+ ],
+ "volume_nguid": "0009714b5cb916324a9374c470002b2c8",
+ },
+}
FC_CONNECTION_INFO = {
"driver_volume_type": "fibre_channel",
"data": {
@@ -978,37 +1037,6 @@ class PureBaseVolumeDriverTestCase(PureBaseSharedDriverTestCase):
mock.call(self.array, [mock_sync_target], 'cinder-pod')
])
- @mock.patch(BASE_DRIVER_OBJ + '._setup_replicated_pods')
- @mock.patch(BASE_DRIVER_OBJ + '._generate_replication_retention')
- @mock.patch(BASE_DRIVER_OBJ + '._setup_replicated_pgroups')
- def test_do_setup_replicated_sync_rep_bad_driver(
- self,
- mock_setup_repl_pgroups,
- mock_generate_replication_retention,
- mock_setup_pods):
- retention = mock.MagicMock()
- mock_generate_replication_retention.return_value = retention
- self._setup_mocks_for_replication()
-
- self.mock_config.safe_get.return_value = [
- {
- "backend_id": "foo",
- "managed_backend_name": None,
- "san_ip": "1.2.3.4",
- "api_token": "abc123",
- "type": "sync",
- }
- ]
- mock_sync_target = mock.MagicMock()
- mock_sync_target.get.return_value = GET_ARRAY_SECONDARY
- self.array.get.return_value = GET_ARRAY_PRIMARY
- self.driver._storage_protocol = 'NVMe-RoCE'
- self.purestorage_module.FlashArray.side_effect = [self.array,
- mock_sync_target]
- self.assertRaises(pure.PureDriverException,
- self.driver.do_setup,
- None)
-
def test_update_provider_info_update_all(self):
test_vols = [
self.new_fake_vol(spec={'id': fake.VOLUME_ID},
@@ -4117,8 +4145,8 @@ class PureVolumeUpdateStatsTestCase(PureBaseSharedDriverTestCase):
config_ratio,
expected_ratio,
auto):
- volume_utils.get_max_over_subscription_ratio = mock.Mock(
- return_value=expected_ratio)
+ self.mock_object(volume_utils, 'get_max_over_subscription_ratio',
+ return_value=expected_ratio)
self.mock_config.pure_automatic_max_oversubscription_ratio = auto
self.mock_config.max_over_subscription_ratio = config_ratio
actual_ratio = self.driver._get_thin_provisioning(provisioned, used)
@@ -4524,6 +4552,155 @@ class PureNVMEDriverTestCase(PureBaseSharedDriverTestCase):
@mock.patch(NVME_DRIVER_OBJ + "._get_wwn")
@mock.patch(NVME_DRIVER_OBJ + "._connect")
@mock.patch(NVME_DRIVER_OBJ + "._get_target_nvme_ports")
+ def test_initialize_connection_uniform_ac(
+ self, mock_get_nvme_ports, mock_connection, mock_get_wwn,
+ mock_get_nguid
+ ):
+ repl_extra_specs = {
+ "replication_type": "<in> sync",
+ "replication_enabled": "<is> true",
+ }
+ vol, vol_name = self.new_fake_vol(type_extra_specs=repl_extra_specs)
+ mock_get_nvme_ports.side_effect = [NVME_PORTS, AC_NVME_PORTS]
+ mock_get_wwn.return_value = "3624a93709714b5cb91634c470002b2c8"
+ mock_get_nguid.return_value = "0009714b5cb916324a9374c470002b2c8"
+ mock_connection.side_effect = [
+ {
+ "vol": vol_name,
+ "lun": 1,
+ },
+ {
+ "vol": vol_name,
+ "lun": 5,
+ },
+ ]
+ result = deepcopy(NVME_CONNECTION_INFO_AC)
+
+ self.driver._is_active_cluster_enabled = True
+ mock_secondary = mock.MagicMock()
+ self.driver._uniform_active_cluster_target_arrays = [mock_secondary]
+
+ real_result = self.driver.initialize_connection(vol, NVME_CONNECTOR)
+ self.assertDictEqual(result, real_result)
+ mock_get_nvme_ports.assert_has_calls(
+ [
+ mock.call(self.array),
+ mock.call(mock_secondary),
+ ]
+ )
+ mock_connection.assert_has_calls(
+ [
+ mock.call(self.array, vol_name, NVME_CONNECTOR),
+ mock.call(
+ mock_secondary, vol_name, NVME_CONNECTOR),
+ ]
+ )
+
+ @mock.patch(NVME_DRIVER_OBJ + "._get_nguid")
+ @mock.patch(NVME_DRIVER_OBJ + "._get_wwn")
+ @mock.patch(NVME_DRIVER_OBJ + "._connect")
+ @mock.patch(NVME_DRIVER_OBJ + "._get_target_nvme_ports")
+ def test_initialize_connection_uniform_ac_cidr(
+ self, mock_get_nvme_ports, mock_connection, mock_get_wwn,
+ mock_get_nguid
+ ):
+ repl_extra_specs = {
+ "replication_type": "<in> sync",
+ "replication_enabled": "<is> true",
+ }
+ vol, vol_name = self.new_fake_vol(type_extra_specs=repl_extra_specs)
+ mock_get_nvme_ports.side_effect = [NVME_PORTS, AC_NVME_PORTS]
+ mock_get_wwn.return_value = "3624a93709714b5cb91634c470002b2c8"
+ mock_get_nguid.return_value = "0009714b5cb916324a9374c470002b2c8"
+ mock_connection.side_effect = [
+ {
+ "vol": vol_name,
+ "lun": 1,
+ },
+ {
+ "vol": vol_name,
+ "lun": 5,
+ },
+ ]
+ result = deepcopy(NVME_CONNECTION_INFO_AC_FILTERED)
+ self.driver._is_active_cluster_enabled = True
+ # Set up a CIDR to block: this will block only one of the AC
+ # addresses, so we should check that we get four+three results back
+ self.driver.configuration.pure_nvme_cidr = NVME_CIDR_FILTERED
+ mock_secondary = mock.MagicMock()
+ self.driver._uniform_active_cluster_target_arrays = [mock_secondary]
+
+ real_result = self.driver.initialize_connection(vol, NVME_CONNECTOR)
+ self.assertDictEqual(result, real_result)
+ mock_get_nvme_ports.assert_has_calls(
+ [
+ mock.call(self.array),
+ mock.call(mock_secondary),
+ ]
+ )
+ mock_connection.assert_has_calls(
+ [
+ mock.call(self.array, vol_name, NVME_CONNECTOR),
+ mock.call(mock_secondary, vol_name, NVME_CONNECTOR),
+ ]
+ )
+
+ @mock.patch(NVME_DRIVER_OBJ + "._get_nguid")
+ @mock.patch(NVME_DRIVER_OBJ + "._get_wwn")
+ @mock.patch(NVME_DRIVER_OBJ + "._connect")
+ @mock.patch(NVME_DRIVER_OBJ + "._get_target_nvme_ports")
+ def test_initialize_connection_uniform_ac_cidrs(
+ self, mock_get_nvme_ports, mock_connection, mock_get_wwn,
+ mock_get_nguid
+ ):
+ repl_extra_specs = {
+ "replication_type": "<in> sync",
+ "replication_enabled": "<is> true",
+ }
+ vol, vol_name = self.new_fake_vol(type_extra_specs=repl_extra_specs)
+ mock_get_nvme_ports.side_effect = [NVME_PORTS, AC_NVME_PORTS]
+ mock_get_wwn.return_value = "3624a93709714b5cb91634c470002b2c8"
+ mock_get_nguid.return_value = "0009714b5cb916324a9374c470002b2c8"
+ mock_connection.side_effect = [
+ {
+ "vol": vol_name,
+ "lun": 1,
+ },
+ {
+ "vol": vol_name,
+ "lun": 5,
+ },
+ ]
+ result = deepcopy(NVME_CONNECTION_INFO_AC_FILTERED_LIST)
+
+ self.driver._is_active_cluster_enabled = True
+ # Set up some CIDRs to block: this will allow only 2 addresses from
+ # each host of the ActiveCluster, so we should check that we only
+ # get two+two results back
+ self.driver.configuration.pure_nvme_cidr = NVME_CIDR
+ self.driver.configuration.pure_nvme_cidr_list = NVME_CIDRS_FILTERED
+ mock_secondary = mock.MagicMock()
+ self.driver._uniform_active_cluster_target_arrays = [mock_secondary]
+
+ real_result = self.driver.initialize_connection(vol, NVME_CONNECTOR)
+ self.assertDictEqual(result, real_result)
+ mock_get_nvme_ports.assert_has_calls(
+ [
+ mock.call(self.array),
+ mock.call(mock_secondary),
+ ]
+ )
+ mock_connection.assert_has_calls(
+ [
+ mock.call(self.array, vol_name, NVME_CONNECTOR),
+ mock.call(mock_secondary, vol_name, NVME_CONNECTOR),
+ ]
+ )
+
+ @mock.patch(NVME_DRIVER_OBJ + "._get_nguid")
+ @mock.patch(NVME_DRIVER_OBJ + "._get_wwn")
+ @mock.patch(NVME_DRIVER_OBJ + "._connect")
+ @mock.patch(NVME_DRIVER_OBJ + "._get_target_nvme_ports")
def test_initialize_connection_multipath(
self, mock_get_nvme_ports, mock_connection, mock_get_wwn,
mock_get_nguid
diff --git a/cinder/tests/unit/volume/drivers/test_quobyte.py b/cinder/tests/unit/volume/drivers/test_quobyte.py
index 94b82e8c2..767afd97e 100644
--- a/cinder/tests/unit/volume/drivers/test_quobyte.py
+++ b/cinder/tests/unit/volume/drivers/test_quobyte.py
@@ -949,8 +949,8 @@ class QuobyteDriverTestCase(test.TestCase):
img_info = imageutils.QemuImgInfo(qemu_img_info_output)
- image_utils.qemu_img_info = mock.Mock(return_value=img_info)
- image_utils.resize_image = mock.Mock()
+ self.mock_object(image_utils, 'qemu_img_info', return_value=img_info)
+ self.mock_object(image_utils, 'resize_image')
mock_remote_attached.return_value = is_attached
@@ -996,11 +996,11 @@ class QuobyteDriverTestCase(test.TestCase):
img_info = imageutils.QemuImgInfo(qemu_img_output)
# mocking and testing starts here
- image_utils.convert_image = mock.Mock()
+ mock_convert = self.mock_object(image_utils, 'convert_image')
drv._read_info_file = mock.Mock(return_value=
{'active': snap_file,
snapshot['id']: snap_file})
- image_utils.qemu_img_info = mock.Mock(return_value=img_info)
+ self.mock_object(image_utils, 'qemu_img_info', return_value=img_info)
drv._set_rw_permissions = mock.Mock()
drv._copy_volume_from_snapshot(snapshot, dest_volume, size)
@@ -1009,7 +1009,7 @@ class QuobyteDriverTestCase(test.TestCase):
image_utils.qemu_img_info.assert_called_once_with(snap_path,
force_share=True,
run_as_root=False)
- (image_utils.convert_image.
+ (mock_convert.
assert_called_once_with(src_vol_path,
dest_vol_path,
'raw',
@@ -1052,11 +1052,11 @@ class QuobyteDriverTestCase(test.TestCase):
img_info = imageutils.QemuImgInfo(qemu_img_output)
# mocking and testing starts here
- image_utils.convert_image = mock.Mock()
+ mock_convert = self.mock_object(image_utils, 'convert_image')
drv._read_info_file = mock.Mock(return_value=
{'active': snap_file,
snapshot['id']: snap_file})
- image_utils.qemu_img_info = mock.Mock(return_value=img_info)
+ self.mock_object(image_utils, 'qemu_img_info', return_value=img_info)
drv._set_rw_permissions = mock.Mock()
shutil.copyfile = mock.Mock()
@@ -1066,7 +1066,7 @@ class QuobyteDriverTestCase(test.TestCase):
image_utils.qemu_img_info.assert_called_once_with(snap_path,
force_share=True,
run_as_root=False)
- self.assertFalse(image_utils.convert_image.called,
+ self.assertFalse(mock_convert.called,
("_convert_image was called but should not have been")
)
os_ac_mock.assert_called_once_with(
@@ -1113,11 +1113,11 @@ class QuobyteDriverTestCase(test.TestCase):
img_info = imageutils.QemuImgInfo(qemu_img_output)
# mocking and testing starts here
- image_utils.convert_image = mock.Mock()
+ mock_convert = self.mock_object(image_utils, 'convert_image')
drv._read_info_file = mock.Mock(return_value=
{'active': snap_file,
snapshot['id']: snap_file})
- image_utils.qemu_img_info = mock.Mock(return_value=img_info)
+ self.mock_object(image_utils, 'qemu_img_info', return_value=img_info)
drv._set_rw_permissions = mock.Mock()
drv._create_overlay_volume_from_snapshot = mock.Mock()
@@ -1129,7 +1129,7 @@ class QuobyteDriverTestCase(test.TestCase):
image_utils.qemu_img_info.assert_called_once_with(snap_path,
force_share=True,
run_as_root=False)
- (image_utils.convert_image.
+ (mock_convert.
assert_called_once_with(
src_vol_path,
drv._local_volume_from_snap_cache_path(snapshot), 'qcow2',
@@ -1177,13 +1177,13 @@ class QuobyteDriverTestCase(test.TestCase):
img_info = imageutils.QemuImgInfo(qemu_img_output)
# mocking and testing starts here
- image_utils.convert_image = mock.Mock()
+ mock_convert = self.mock_object(image_utils, 'convert_image')
drv._read_info_file = mock.Mock(return_value=
{'active': snap_file,
snapshot['id']: snap_file})
- image_utils.qemu_img_info = mock.Mock(return_value=img_info)
+ self.mock_object(image_utils, 'qemu_img_info', return_value=img_info)
drv._set_rw_permissions = mock.Mock()
- shutil.copyfile = mock.Mock()
+ self.mock_object(shutil, 'copyfile')
drv._copy_volume_from_snapshot(snapshot, dest_volume, size)
@@ -1191,7 +1191,7 @@ class QuobyteDriverTestCase(test.TestCase):
image_utils.qemu_img_info.assert_called_once_with(snap_path,
force_share=True,
run_as_root=False)
- (image_utils.convert_image.
+ (mock_convert.
assert_called_once_with(
src_vol_path,
drv._local_volume_from_snap_cache_path(snapshot), 'raw',
@@ -1253,7 +1253,7 @@ class QuobyteDriverTestCase(test.TestCase):
img_info = imageutils.QemuImgInfo(qemu_img_output)
drv.get_active_image_from_info = mock.Mock(return_value=volume['name'])
- image_utils.qemu_img_info = mock.Mock(return_value=img_info)
+ self.mock_object(image_utils, 'qemu_img_info', return_value=img_info)
conn_info = drv.initialize_connection(volume, None)
diff --git a/cinder/tests/unit/volume/drivers/test_rbd.py b/cinder/tests/unit/volume/drivers/test_rbd.py
index ae1b0c93c..13ee0c16b 100644
--- a/cinder/tests/unit/volume/drivers/test_rbd.py
+++ b/cinder/tests/unit/volume/drivers/test_rbd.py
@@ -44,9 +44,9 @@ from cinder.tests.unit import utils
from cinder.tests.unit.volume import test_driver
from cinder.volume import configuration as conf
import cinder.volume.drivers.rbd as driver
+from cinder.volume import qos_specs
from cinder.volume import volume_utils
-
# This is used to collect raised exceptions so that tests may check what was
# raised.
# NOTE: this must be initialised in test setUp().
@@ -266,6 +266,11 @@ class RBDTestCase(test.TestCase):
'host': 'host@fakebackend#fakepool'}
})
+ self.qos_policy_a = {"total_iops_sec": "100",
+ "total_bytes_sec": "1024"}
+ self.qos_policy_b = {"read_iops_sec": "500",
+ "write_iops_sec": "200"}
+
@ddt.data({'cluster_name': None, 'pool_name': 'rbd'},
{'cluster_name': 'volumes', 'pool_name': None})
@ddt.unpack
@@ -497,11 +502,17 @@ class RBDTestCase(test.TestCase):
image.update_features.assert_has_calls(calls, any_order=False)
@common_mocks
+ @mock.patch.object(driver.RBDDriver, '_qos_specs_from_volume_type')
+ @mock.patch.object(driver.RBDDriver, '_supports_qos')
@mock.patch.object(driver.RBDDriver, '_enable_replication')
- def test_create_volume(self, mock_enable_repl):
+ def test_create_volume(self, mock_enable_repl, mock_qos_vers,
+ mock_get_qos_specs):
client = self.mock_client.return_value
client.__enter__.return_value = client
+ mock_qos_vers.return_value = True
+ mock_get_qos_specs.return_value = None
+
res = self.driver.create_volume(self.volume_a)
self.assertEqual({}, res)
@@ -516,6 +527,7 @@ class RBDTestCase(test.TestCase):
client.__enter__.assert_called_once_with()
client.__exit__.assert_called_once_with(None, None, None)
mock_enable_repl.assert_not_called()
+ mock_qos_vers.assert_not_called()
@common_mocks
@mock.patch.object(driver.RBDDriver, '_enable_replication')
@@ -548,6 +560,39 @@ class RBDTestCase(test.TestCase):
client.__exit__.assert_called_once_with(None, None, None)
@common_mocks
+ @mock.patch.object(driver.RBDDriver, '_supports_qos')
+ @mock.patch.object(driver.RBDDriver, 'update_rbd_image_qos')
+ def test_create_volume_with_qos(self, mock_update_qos, mock_qos_supported):
+
+ ctxt = context.get_admin_context()
+ qos = qos_specs.create(ctxt, "qos-iops-bws", self.qos_policy_a)
+ self.volume_a.volume_type = fake_volume.fake_volume_type_obj(
+ ctxt,
+ id=fake.VOLUME_TYPE_ID,
+            qos_specs_id=qos.id)
+
+ client = self.mock_client.return_value
+ client.__enter__.return_value = client
+
+ mock_qos_supported.return_value = True
+ res = self.driver.create_volume(self.volume_a)
+ self.assertEqual({}, res)
+
+ chunk_size = self.cfg.rbd_store_chunk_size * units.Mi
+ order = int(math.log(chunk_size, 2))
+ args = [client.ioctx, str(self.volume_a.name),
+ self.volume_a.size * units.Gi, order]
+ kwargs = {'old_format': False,
+ 'features': client.features}
+ self.mock_rbd.RBD.return_value.create.assert_called_once_with(
+ *args, **kwargs)
+
+ mock_update_qos.assert_called_once_with(self.volume_a, qos.specs)
+
+ client.__enter__.assert_called_once_with()
+ client.__exit__.assert_called_once_with(None, None, None)
+
+ @common_mocks
def test_manage_existing_get_size(self):
with mock.patch.object(self.driver.rbd.Image(), 'size') as \
mock_rbd_image_size:
@@ -1688,14 +1733,17 @@ class RBDTestCase(test.TestCase):
@ddt.data(True, False)
@common_mocks
+ @mock.patch('cinder.volume.drivers.rbd.RBDDriver._supports_qos')
@mock.patch('cinder.volume.drivers.rbd.RBDDriver._get_usage_info')
@mock.patch('cinder.volume.drivers.rbd.RBDDriver._get_pool_stats')
def test_update_volume_stats(self, replication_enabled, stats_mock,
- usage_mock):
+ usage_mock, mock_qos_supported):
stats_mock.return_value = (mock.sentinel.free_capacity_gb,
mock.sentinel.total_capacity_gb)
usage_mock.return_value = mock.sentinel.provisioned_capacity_gb
+ mock_qos_supported.return_value = True
+
expected_fsid = 'abc'
expected_location_info = ('nondefault:%s:%s:%s:rbd' %
(self.cfg.rbd_ceph_conf, expected_fsid,
@@ -1714,7 +1762,8 @@ class RBDTestCase(test.TestCase):
max_over_subscription_ratio=1.0,
multiattach=True,
location_info=expected_location_info,
- backend_state='up')
+ backend_state='up',
+ qos_support=True)
if replication_enabled:
targets = [{'backend_id': 'secondary-backend'},
@@ -1733,14 +1782,21 @@ class RBDTestCase(test.TestCase):
mock_get_fsid.return_value = expected_fsid
actual = self.driver.get_volume_stats(True)
self.assertDictEqual(expected, actual)
+ mock_qos_supported.assert_called_once_with()
@common_mocks
+ @mock.patch('cinder.volume.drivers.rbd.RBDDriver._supports_qos')
@mock.patch('cinder.volume.drivers.rbd.RBDDriver._get_usage_info')
@mock.patch('cinder.volume.drivers.rbd.RBDDriver._get_pool_stats')
- def test_update_volume_stats_exclusive_pool(self, stats_mock, usage_mock):
+ def test_update_volume_stats_exclusive_pool(self, stats_mock, usage_mock,
+ mock_qos_supported):
stats_mock.return_value = (mock.sentinel.free_capacity_gb,
mock.sentinel.total_capacity_gb)
+        # Set the version to unsupported so that qos_support is reported as
+        # False, matching the qos_support value set below in expected.
+ mock_qos_supported.return_value = False
+
expected_fsid = 'abc'
expected_location_info = ('nondefault:%s:%s:%s:rbd' %
(self.cfg.rbd_ceph_conf, expected_fsid,
@@ -1758,7 +1814,8 @@ class RBDTestCase(test.TestCase):
max_over_subscription_ratio=1.0,
multiattach=True,
location_info=expected_location_info,
- backend_state='up')
+ backend_state='up',
+ qos_support=False)
my_safe_get = MockDriverConfig(rbd_exclusive_cinder_pool=True)
self.mock_object(self.driver.configuration, 'safe_get',
@@ -1770,15 +1827,20 @@ class RBDTestCase(test.TestCase):
self.assertDictEqual(expected, actual)
usage_mock.assert_not_called()
+ mock_qos_supported.assert_called_once_with()
@common_mocks
+ @mock.patch('cinder.volume.drivers.rbd.RBDDriver._supports_qos')
@mock.patch('cinder.volume.drivers.rbd.RBDDriver._get_usage_info')
@mock.patch('cinder.volume.drivers.rbd.RBDDriver._get_pool_stats')
- def test_update_volume_stats_error(self, stats_mock, usage_mock):
+ def test_update_volume_stats_error(self, stats_mock, usage_mock,
+ mock_qos_supported):
my_safe_get = MockDriverConfig(rbd_exclusive_cinder_pool=False)
self.mock_object(self.driver.configuration, 'safe_get',
my_safe_get)
+ mock_qos_supported.return_value = True
+
expected_fsid = 'abc'
expected_location_info = ('nondefault:%s:%s:%s:rbd' %
(self.cfg.rbd_ceph_conf, expected_fsid,
@@ -1795,7 +1857,8 @@ class RBDTestCase(test.TestCase):
max_over_subscription_ratio=1.0,
thin_provisioning_support=True,
location_info=expected_location_info,
- backend_state='down')
+ backend_state='down',
+ qos_support=True)
with mock.patch.object(self.driver, '_get_fsid') as mock_get_fsid:
mock_get_fsid.return_value = expected_fsid
@@ -2209,15 +2272,18 @@ class RBDTestCase(test.TestCase):
self.driver.extend_volume(self.volume_a, fake_size)
mock_resize.assert_called_once_with(self.volume_a, size=size)
+ @mock.patch.object(driver.RBDDriver, '_qos_specs_from_volume_type')
+ @mock.patch.object(driver.RBDDriver, '_supports_qos')
@ddt.data(False, True)
@common_mocks
- def test_retype(self, enabled):
+ def test_retype(self, enabled, mock_qos_vers, mock_get_qos_specs):
"""Test retyping a non replicated volume.
We will test on a system that doesn't have replication enabled and on
        one that has it enabled.
"""
self.driver._is_replication_enabled = enabled
+ mock_qos_vers.return_value = False
if enabled:
expect = {'replication_status': fields.ReplicationStatus.DISABLED}
else:
@@ -2264,11 +2330,14 @@ class RBDTestCase(test.TestCase):
{'old_replicated': True, 'new_replicated': True})
@ddt.unpack
@common_mocks
+ @mock.patch.object(driver.RBDDriver, '_qos_specs_from_volume_type')
+ @mock.patch.object(driver.RBDDriver, '_supports_qos')
@mock.patch.object(driver.RBDDriver, '_disable_replication',
return_value={'replication': 'disabled'})
@mock.patch.object(driver.RBDDriver, '_enable_replication',
return_value={'replication': 'enabled'})
- def test_retype_replicated(self, mock_disable, mock_enable, old_replicated,
+ def test_retype_replicated(self, mock_disable, mock_enable, mock_qos_vers,
+ mock_get_qos_specs, old_replicated,
new_replicated):
"""Test retyping a non replicated volume.
@@ -2283,6 +2352,9 @@ class RBDTestCase(test.TestCase):
self.volume_a.volume_type = replicated_type if old_replicated else None
+ mock_qos_vers.return_value = False
+ mock_get_qos_specs.return_value = False
+
if new_replicated:
new_type = replicated_type
if old_replicated:
@@ -2304,6 +2376,162 @@ class RBDTestCase(test.TestCase):
self.assertEqual((True, update), res)
@common_mocks
+ @mock.patch.object(driver.RBDDriver, 'delete_rbd_image_qos_keys')
+ @mock.patch.object(driver.RBDDriver, 'get_rbd_image_qos')
+ @mock.patch.object(driver.RBDDriver, '_supports_qos')
+ @mock.patch.object(driver.RBDDriver, 'update_rbd_image_qos')
+ def test_retype_qos(self, mock_update_qos, mock_qos_supported,
+ mock_get_vol_qos, mock_del_vol_qos):
+
+ ctxt = context.get_admin_context()
+ qos_a = qos_specs.create(ctxt, "qos-vers-a", self.qos_policy_a)
+ qos_b = qos_specs.create(ctxt, "qos-vers-b", self.qos_policy_b)
+
+        # The vol_config dictionary contains supported values as well as
+        # currently unsupported ones (CNA). The latter are marked accordingly
+        # to indicate their current support status.
+ vol_config = {
+ "rbd_qos_bps_burst": "0",
+ "rbd_qos_bps_burst_seconds": "1", # CNA
+ "rbd_qos_bps_limit": "1024",
+ "rbd_qos_iops_burst": "0",
+ "rbd_qos_iops_burst_seconds": "1", # CNA
+ "rbd_qos_iops_limit": "100",
+ "rbd_qos_read_bps_burst": "0",
+ "rbd_qos_read_bps_burst_seconds": "1", # CNA
+ "rbd_qos_read_bps_limit": "0",
+ "rbd_qos_read_iops_burst": "0",
+ "rbd_qos_read_iops_burst_seconds": "1", # CNA
+ "rbd_qos_read_iops_limit": "0",
+ "rbd_qos_schedule_tick_min": "50", # CNA
+ "rbd_qos_write_bps_burst": "0",
+ "rbd_qos_write_bps_burst_seconds": "1", # CNA
+ "rbd_qos_write_bps_limit": "0",
+ "rbd_qos_write_iops_burst": "0",
+ "rbd_qos_write_iops_burst_seconds": "1", # CNA
+ "rbd_qos_write_iops_limit": "0",
+ }
+
+ mock_get_vol_qos.return_value = vol_config
+
+ diff = {'encryption': {},
+ 'extra_specs': {},
+ 'qos_specs': {'consumer': (u'front-end', u'back-end'),
+ 'created_at': (123, 456),
+ u'total_bytes_sec': (u'1024', None),
+ u'total_iops_sec': (u'200', None)}}
+
+ delete_qos = ['total_iops_sec', 'total_bytes_sec']
+
+ self.volume_a.volume_type = fake_volume.fake_volume_type_obj(
+ ctxt,
+ id=fake.VOLUME_TYPE_ID,
+            qos_specs_id=qos_a.id)
+
+ new_type = fake_volume.fake_volume_type_obj(
+ ctxt,
+ id=fake.VOLUME_TYPE2_ID,
+            qos_specs_id=qos_b.id)
+
+ mock_qos_supported.return_value = True
+
+ res = self.driver.retype(ctxt, self.volume_a, new_type, diff,
+ None)
+ self.assertEqual((True, {}), res)
+
+ assert delete_qos == [key for key in delete_qos
+ if key in driver.QOS_KEY_MAP]
+ mock_update_qos.assert_called_once_with(self.volume_a, qos_b.specs)
+ mock_del_vol_qos.assert_called_once_with(self.volume_a, delete_qos)
+
+ @common_mocks
+ @mock.patch('cinder.volume.drivers.rbd.RBDDriver.RBDProxy')
+ def test__supports_qos(self, rbdproxy_mock):
+ rbdproxy_ver = 20
+ rbdproxy_mock.return_value.version.return_value = (0, rbdproxy_ver)
+
+ self.assertTrue(self.driver._supports_qos())
+
+ @common_mocks
+ def test__qos_specs_from_volume_type(self):
+ ctxt = context.get_admin_context()
+ qos = qos_specs.create(ctxt, "qos-vers-a", self.qos_policy_a)
+ self.volume_a.volume_type = fake_volume.fake_volume_type_obj(
+ ctxt,
+ id=fake.VOLUME_TYPE_ID,
+            qos_specs_id=qos.id)
+
+ self.assertEqual(
+ {'total_iops_sec': '100', 'total_bytes_sec': '1024'},
+ self.driver._qos_specs_from_volume_type(self.volume_a.volume_type))
+
+ @common_mocks
+ def test_get_rbd_image_qos(self):
+ ctxt = context.get_admin_context()
+ qos = qos_specs.create(ctxt, "qos-vers-a", self.qos_policy_a)
+ self.volume_a.volume_type = fake_volume.fake_volume_type_obj(
+ ctxt,
+ id=fake.VOLUME_TYPE_ID,
+            qos_specs_id=qos.id)
+
+ rbd_image_conf = []
+ for qos_key, qos_val in (
+ self.volume_a.volume_type.qos_specs.specs.items()):
+ rbd_image_conf.append(
+ {'name': driver.QOS_KEY_MAP[qos_key]['ceph_key'],
+ 'value': int(qos_val)})
+
+ rbd_image = self.mock_proxy.return_value.__enter__.return_value
+ rbd_image.config_list.return_value = rbd_image_conf
+
+ self.assertEqual(
+ {'rbd_qos_bps_limit': 1024, 'rbd_qos_iops_limit': 100},
+ self.driver.get_rbd_image_qos(self.volume_a))
+
+ @common_mocks
+ def test_update_rbd_image_qos(self):
+ ctxt = context.get_admin_context()
+ qos = qos_specs.create(ctxt, "qos-vers-a", self.qos_policy_a)
+ self.volume_a.volume_type = fake_volume.fake_volume_type_obj(
+ ctxt,
+ id=fake.VOLUME_TYPE_ID,
+            qos_specs_id=qos.id)
+
+ rbd_image = self.mock_proxy.return_value.__enter__.return_value
+
+ updated_specs = {"total_iops_sec": '50'}
+ rbd_image.config_set.return_value = qos_specs.update(ctxt,
+ qos.id,
+ updated_specs)
+
+ self.driver.update_rbd_image_qos(self.volume_a, updated_specs)
+ self.assertEqual(
+ {'total_bytes_sec': '1024', 'total_iops_sec': '50'},
+ self.volume_a.volume_type.qos_specs.specs)
+
+ @common_mocks
+ def test_delete_rbd_image_qos_key(self):
+ ctxt = context.get_admin_context()
+ qos = qos_specs.create(ctxt, 'qos-vers-a', self.qos_policy_a)
+ self.volume_a.volume_type = fake_volume.fake_volume_type_obj(
+ ctxt,
+ id=fake.VOLUME_TYPE_ID,
+            qos_specs_id=qos.id)
+
+ rbd_image = self.mock_proxy.return_value.__enter__.return_value
+
+ keys = ['total_iops_sec']
+ rbd_image.config_remove.return_value = qos_specs.delete_keys(ctxt,
+ qos.id,
+ keys)
+
+ self.driver.delete_rbd_image_qos_keys(self.volume_a, keys)
+
+ self.assertEqual(
+ {'total_bytes_sec': '1024'},
+ self.volume_a.volume_type.qos_specs.specs)
+
+ @common_mocks
def test_update_migrated_volume(self):
client = self.mock_client.return_value
client.__enter__.return_value = client
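
Side note (not part of the change): the new QoS tests above rely on driver.QOS_KEY_MAP to translate cinder QoS spec keys into rbd image config keys. A minimal, hypothetical mapping consistent with the values expected by test_get_rbd_image_qos is sketched below; the real table in cinder/volume/drivers/rbd.py may contain more keys and fields.

    # Hypothetical subset of a QOS_KEY_MAP-style table, inferred from the
    # tests above (total_iops_sec -> rbd_qos_iops_limit, total_bytes_sec ->
    # rbd_qos_bps_limit); not the driver's actual definition.
    QOS_KEY_MAP = {
        'total_iops_sec': {'ceph_key': 'rbd_qos_iops_limit'},
        'total_bytes_sec': {'ceph_key': 'rbd_qos_bps_limit'},
    }

    def to_rbd_image_conf(qos_specs):
        # Translate cinder QoS specs into rbd image config entries.
        return [{'name': QOS_KEY_MAP[k]['ceph_key'], 'value': int(v)}
                for k, v in qos_specs.items() if k in QOS_KEY_MAP]

    print(to_rbd_image_conf({'total_iops_sec': '100',
                             'total_bytes_sec': '1024'}))
    # -> [{'name': 'rbd_qos_iops_limit', 'value': 100},
    #     {'name': 'rbd_qos_bps_limit', 'value': 1024}]
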
diff --git a/cinder/tests/unit/zonemanager/test_brcd_fc_zone_driver.py b/cinder/tests/unit/zonemanager/test_brcd_fc_zone_driver.py
index 31119025a..46692ecaa 100644
--- a/cinder/tests/unit/zonemanager/test_brcd_fc_zone_driver.py
+++ b/cinder/tests/unit/zonemanager/test_brcd_fc_zone_driver.py
@@ -92,10 +92,12 @@ class BrcdFcZoneDriverBaseTest(object):
class TestBrcdFcZoneDriver(BrcdFcZoneDriverBaseTest, test.TestCase):
def setUp(self):
+ global GlobalVars
+ GlobalVars = GlobalVarsClass()
+
super(TestBrcdFcZoneDriver, self).setUp()
# setup config for normal flow
self.setup_driver(self.setup_config(True, 1))
- GlobalVars._zone_state = []
def setup_driver(self, config):
self.driver = importutils.import_object(
@@ -269,10 +271,8 @@ class FakeBrcdFCSanLookupService(object):
return device_map
-class GlobalVars(object):
- global _active_cfg
- _active_cfg = {}
- global _zone_state
- _zone_state = list()
- global _is_normal_test
- _is_normal_test = True
+class GlobalVarsClass(object):
+ def __init__(self):
+ self._active_cfg = {}
+ self._zone_state = list()
+ self._is_normal_test = True
diff --git a/cinder/tests/unit/zonemanager/test_cisco_fc_zone_driver.py b/cinder/tests/unit/zonemanager/test_cisco_fc_zone_driver.py
index 859e90d76..1ad9004f0 100644
--- a/cinder/tests/unit/zonemanager/test_cisco_fc_zone_driver.py
+++ b/cinder/tests/unit/zonemanager/test_cisco_fc_zone_driver.py
@@ -43,7 +43,6 @@ _active_cfg_default = {
_activate = True
_zone_name = 'openstack10008c7cff523b0120240002ac000a50'
_target_ns_map = {'100000051e55a100': ['20240002ac000a50']}
-_zoning_status = {'mode': 'basis', 'session': 'none'}
_initiator_ns_map = {'100000051e55a100': ['10008c7cff523b01']}
_zone_map_to_add = {'openstack10008c7cff523b0120240002ac000a50': (
['10:00:8c:7c:ff:52:3b:01', '20:24:00:02:ac:00:0a:50'])}
@@ -102,10 +101,12 @@ class CiscoFcZoneDriverBaseTest(object):
class TestCiscoFcZoneDriver(CiscoFcZoneDriverBaseTest, test.TestCase):
def setUp(self):
+ global GlobalVars
+ GlobalVars = GlobalVarsClass()
+
super(TestCiscoFcZoneDriver, self).setUp()
# setup config for normal flow
self.setup_driver(self.setup_config(True, 1))
- GlobalVars._zone_state = []
def setup_driver(self, config):
self.driver = importutils.import_object(
@@ -145,6 +146,7 @@ class TestCiscoFcZoneDriver(CiscoFcZoneDriverBaseTest, test.TestCase):
self.setup_driver(self.setup_config(True, 1))
get_zoning_status_mock.return_value = {'mode': 'basis',
'session': 'none'}
+
get_active_zone_set_mock.return_value = _active_cfg_default
self.driver.add_connection('CISCO_FAB_1', _initiator_target_map)
self.assertIn(_zone_name, GlobalVars._zone_state)
@@ -206,7 +208,7 @@ class FakeCiscoFCZoneClientCLI(object):
return _target_ns_map
def get_zoning_status(self):
- return _zoning_status
+ return GlobalVars._zoning_status
def close_connection(self):
pass
@@ -234,12 +236,9 @@ class FakeCiscoFCSanLookupService(object):
return device_map
-class GlobalVars(object):
- global _active_cfg
- _active_cfg = {}
- global _zone_state
- _zone_state = list()
- global _is_normal_test
- _is_normal_test = True
- global _zoning_status
- _zoning_status = {}
+class GlobalVarsClass(object):
+ def __init__(self):
+        self._active_cfg = {}
+ self._zone_state = list()
+ self._is_normal_test = True
+ self._zoning_status = {}
diff --git a/cinder/tests/unit/zonemanager/test_volume_driver.py b/cinder/tests/unit/zonemanager/test_volume_driver.py
index 7383827bc..135cb23fa 100644
--- a/cinder/tests/unit/zonemanager/test_volume_driver.py
+++ b/cinder/tests/unit/zonemanager/test_volume_driver.py
@@ -34,7 +34,7 @@ class TestVolumeDriver(test.TestCase):
def setUp(self):
super(TestVolumeDriver, self).setUp()
self.driver = fake_driver.FakeFibreChannelDriver()
- brcd_fc_zone_driver.BrcdFCZoneDriver = mock.Mock()
+ self.mock_object(brcd_fc_zone_driver, 'BrcdFCZoneDriver')
self.addCleanup(self._cleanup)
def _cleanup(self):
diff --git a/cinder/transfer/api.py b/cinder/transfer/api.py
index 116bed9a6..115c3880e 100644
--- a/cinder/transfer/api.py
+++ b/cinder/transfer/api.py
@@ -31,6 +31,7 @@ import six
from cinder.db import base
from cinder import exception
from cinder.i18n import _
+from cinder.keymgr import transfer as key_transfer
from cinder import objects
from cinder.policies import volume_transfer as policy
from cinder import quota
@@ -76,6 +77,8 @@ class API(base.Base):
"transfer.delete.start")
if volume_ref['status'] != 'awaiting-transfer':
LOG.error("Volume in unexpected state")
+ if volume_ref.encryption_key_id is not None:
+ key_transfer.transfer_delete(context, volume_ref, conf=CONF)
self.db.transfer_destroy(context, transfer_id)
volume_utils.notify_about_volume_usage(context, volume_ref,
"transfer.delete.end")
@@ -126,16 +129,23 @@ class API(base.Base):
auth_key = auth_key.encode('utf-8')
return hmac.new(salt, auth_key, hashlib.sha1).hexdigest()
- def create(self, context, volume_id, display_name, no_snapshots=False):
+ def create(self, context, volume_id, display_name, no_snapshots=False,
+ allow_encrypted=False):
"""Creates an entry in the transfers table."""
LOG.info("Generating transfer record for volume %s", volume_id)
volume_ref = objects.Volume.get_by_id(context, volume_id)
context.authorize(policy.CREATE_POLICY, target_obj=volume_ref)
if volume_ref['status'] != "available":
raise exception.InvalidVolume(reason=_("status must be available"))
- if volume_ref['encryption_key_id'] is not None:
- raise exception.InvalidVolume(
- reason=_("transferring encrypted volume is not supported"))
+
+ if volume_ref.encryption_key_id is not None:
+ if not allow_encrypted:
+ raise exception.InvalidVolume(
+ reason=_("transferring encrypted volume is not supported"))
+ if no_snapshots:
+ raise exception.InvalidVolume(
+ reason=_("transferring an encrypted volume without its "
+ "snapshots is not supported"))
if not no_snapshots:
snapshots = self.db.snapshot_get_all_for_volume(context, volume_id)
@@ -144,10 +154,6 @@ class API(base.Base):
msg = _("snapshot: %s status must be "
"available") % snapshot['id']
raise exception.InvalidSnapshot(reason=msg)
- if snapshot.get('encryption_key_id'):
- msg = _("snapshot: %s encrypted snapshots cannot be "
- "transferred") % snapshot['id']
- raise exception.InvalidSnapshot(reason=msg)
volume_utils.notify_about_volume_usage(context, volume_ref,
"transfer.create.start")
@@ -170,6 +176,15 @@ class API(base.Base):
except Exception:
LOG.error("Failed to create transfer record for %s", volume_id)
raise
+
+ if volume_ref.encryption_key_id is not None:
+ try:
+ key_transfer.transfer_create(context, volume_ref, conf=CONF)
+ except Exception:
+ LOG.error("Failed to transfer keys for %s", volume_id)
+ self.db.transfer_destroy(context, transfer.id)
+ raise
+
volume_utils.notify_about_volume_usage(context, volume_ref,
"transfer.create.end")
return {'id': transfer['id'],
@@ -284,6 +299,8 @@ class API(base.Base):
volume_utils.notify_about_volume_usage(context, vol_ref,
"transfer.accept.start")
+
+ encryption_key_transferred = False
try:
# Transfer ownership of the volume now, must use an elevated
# context.
@@ -292,6 +309,10 @@ class API(base.Base):
context.user_id,
context.project_id,
transfer['no_snapshots'])
+ if vol_ref.encryption_key_id is not None:
+ key_transfer.transfer_accept(context, vol_ref, conf=CONF)
+ encryption_key_transferred = True
+
self.db.transfer_accept(context.elevated(),
transfer_id,
context.user_id,
@@ -306,6 +327,11 @@ class API(base.Base):
QUOTAS.commit(context, snap_donor_res, project_id=donor_id)
LOG.info("Volume %s has been transferred.", volume_id)
except Exception:
+ # If an exception occurs after the encryption key was transferred
+ # then we need to transfer the key *back* to the service project.
+ # This is done by making another key transfer request.
+ if encryption_key_transferred:
+ key_transfer.transfer_create(context, vol_ref, conf=CONF)
with excutils.save_and_reraise_exception():
QUOTAS.rollback(context, reservations)
if snap_res:
diff --git a/cinder/utils.py b/cinder/utils.py
index fbc41e27b..9958c5a7f 100644
--- a/cinder/utils.py
+++ b/cinder/utils.py
@@ -715,14 +715,118 @@ def build_or_str(elements: Union[None, str, Iterable[str]],
return elements
+def calculate_capacity_factors(total_capacity: float,
+ free_capacity: float,
+ provisioned_capacity: float,
+ thin_provisioning_support: bool,
+ max_over_subscription_ratio: float,
+ reserved_percentage: int,
+ thin: bool) -> dict:
+ """Create the various capacity factors of the a particular backend.
+
+ Based off of definition of terms
+ cinder-specs/specs/queens/provisioning-improvements.html
+ Description of factors calculated where units of gb are Gibibytes.
+ reserved_capacity - The amount of space reserved from the total_capacity
+ as reported by the backend.
+ total_reserved_available_capacity - The total capacity minus reserved
+ capacity
+ total_available_capacity - The total capacity available to cinder
+ calculated from total_reserved_available_capacity (for thick) OR
+ for thin total_reserved_available_capacity max_over_subscription_ratio
+ calculated_free_capacity - total_available_capacity - provisioned_capacity
+ virtual_free_capacity - The calculated free capacity available to cinder
+ to allocate new storage.
+ For thin: calculated_free_capacity
+ For thick: the reported free_capacity can be less than the calculated
+ capacity, so we use free_capacity - reserved_capacity.
+
+ free_percent - the percentage of the virtual_free_capacity and
+ total_available_capacity is left over
+ provisioned_ratio - The ratio of provisioned storage to
+ total_available_capacity
+
+ :param total_capacity: The reported total capacity in the backend.
+ :type total_capacity: float
+ :param free_capacity: The free space/capacity as reported by the backend.
+ :type free_capacity: float
+ :param provisioned_capacity: as reported by backend or volume manager from
+ allocated_capacity_gb
+ :type provisioned_capacity: float
+ :param thin_provisioning_support: Is thin provisioning supported?
+ :type thin_provisioning_support: bool
+ :param max_over_subscription_ratio: as reported by the backend
+ :type max_over_subscription_ratio: float
+ :param reserved_percentage: the % amount to reserve as unavailable. 0-100
+ :type reserved_percentage: int, 0-100
+ :param thin: calculate based on thin provisioning if enabled by
+ thin_provisioning_support
+ :type thin: bool
+ :return: A dictionary of all of the capacity factors.
+ :rtype: dict
+
+ """
+
+ total = float(total_capacity)
+ reserved = float(reserved_percentage) / 100
+ reserved_capacity = math.floor(total * reserved)
+ total_reserved_available = total - reserved_capacity
+
+ if thin and thin_provisioning_support:
+ total_available_capacity = (
+ total_reserved_available * max_over_subscription_ratio
+ )
+ calculated_free = total_available_capacity - provisioned_capacity
+ virtual_free = calculated_free
+ provisioned_type = 'thin'
+ else:
+ # Calculate how much free space is left after taking into
+ # account the reserved space.
+ total_available_capacity = total_reserved_available
+ calculated_free = total_available_capacity - provisioned_capacity
+ virtual_free = calculated_free
+ if free_capacity < calculated_free:
+ virtual_free = free_capacity
+
+ provisioned_type = 'thick'
+
+ if total_available_capacity:
+ provisioned_ratio = provisioned_capacity / total_available_capacity
+ free_percent = (virtual_free / total_available_capacity) * 100
+ else:
+ provisioned_ratio = 0
+ free_percent = 0
+
+ def _limit(x):
+ """Limit our floating points to 2 decimal places."""
+ return round(x, 2)
+
+ return {
+ "total_capacity": total,
+ "free_capacity": free_capacity,
+ "reserved_capacity": reserved_capacity,
+ "total_reserved_available_capacity": _limit(total_reserved_available),
+ "max_over_subscription_ratio": (
+ max_over_subscription_ratio if provisioned_type == 'thin' else None
+ ),
+ "total_available_capacity": _limit(total_available_capacity),
+ "provisioned_capacity": provisioned_capacity,
+ "calculated_free_capacity": _limit(calculated_free),
+ "virtual_free_capacity": _limit(virtual_free),
+ "free_percent": _limit(free_percent),
+ "provisioned_ratio": _limit(provisioned_ratio),
+ "provisioned_type": provisioned_type
+ }
+
+
def calculate_virtual_free_capacity(total_capacity: float,
free_capacity: float,
provisioned_capacity: float,
thin_provisioning_support: bool,
max_over_subscription_ratio: float,
- reserved_percentage: float,
+ reserved_percentage: int,
thin: bool) -> float:
- """Calculate the virtual free capacity based on thin provisioning support.
+ """Calculate the virtual free capacity based on multiple factors.
:param total_capacity: total_capacity_gb of a host_state or pool.
:param free_capacity: free_capacity_gb of a host_state or pool.
@@ -738,18 +842,16 @@ def calculate_virtual_free_capacity(total_capacity: float,
:returns: the calculated virtual free capacity.
"""
- total = float(total_capacity)
- reserved = float(reserved_percentage) / 100
-
- if thin and thin_provisioning_support:
- free = (total * max_over_subscription_ratio
- - provisioned_capacity
- - math.floor(total * reserved))
- else:
- # Calculate how much free space is left after taking into
- # account the reserved space.
- free = free_capacity - math.floor(total * reserved)
- return free
+ factors = calculate_capacity_factors(
+ total_capacity,
+ free_capacity,
+ provisioned_capacity,
+ thin_provisioning_support,
+ max_over_subscription_ratio,
+ reserved_percentage,
+ thin
+ )
+ return factors["virtual_free_capacity"]
def calculate_max_over_subscription_ratio(
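
Worked example (not part of the change): the thin path of the calculate_capacity_factors() helper added above, run with arbitrary numbers; the import assumes the helper lives in cinder.utils as in this patch.

    from cinder import utils

    factors = utils.calculate_capacity_factors(
        total_capacity=500.0,            # backend-reported total (GiB)
        free_capacity=100.0,             # backend-reported free (GiB)
        provisioned_capacity=600.0,      # already provisioned (GiB)
        thin_provisioning_support=True,
        max_over_subscription_ratio=2.0,
        reserved_percentage=5,
        thin=True)

    # reserved_capacity        = floor(500 * 0.05)  = 25
    # total_reserved_available = 500 - 25           = 475.0
    # total_available_capacity = 475 * 2.0          = 950.0
    # calculated/virtual free  = 950 - 600          = 350.0
    # free_percent             = 350 / 950 * 100    = 36.84
    # provisioned_ratio        = 600 / 950          = 0.63
    print(factors['virtual_free_capacity'], factors['free_percent'])
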
diff --git a/cinder/volume/driver_utils.py b/cinder/volume/driver_utils.py
index d22a265b8..55d3eb095 100644
--- a/cinder/volume/driver_utils.py
+++ b/cinder/volume/driver_utils.py
@@ -54,13 +54,16 @@ class VolumeDriverUtils(object):
if saved successfully return True.
"""
try:
- return self._db.driver_initiator_data_insert_by_key(
+ self._db.driver_initiator_data_insert_by_key(
self._get_context(ctxt),
initiator,
self._data_namespace,
key,
value
)
+ return True
+ except exception.DriverInitiatorDataExists:
+ return False
except exception.CinderException:
LOG.exception("Failed to insert initiator data for"
" initiator %(initiator)s and backend"
diff --git a/cinder/volume/drivers/dell_emc/powerflex/rest_client.py b/cinder/volume/drivers/dell_emc/powerflex/rest_client.py
index 29f4e911a..16fa5bbb6 100644
--- a/cinder/volume/drivers/dell_emc/powerflex/rest_client.py
+++ b/cinder/volume/drivers/dell_emc/powerflex/rest_client.py
@@ -35,8 +35,11 @@ VOLUME_MIGRATION_IN_PROGRESS_ERROR = 717
VOLUME_MIGRATION_ALREADY_ON_DESTINATION_POOL_ERROR = 718
VOLUME_NOT_FOUND_ERROR = 79
OLD_VOLUME_NOT_FOUND_ERROR = 78
+TOO_MANY_SNAPS_ERROR = 182
ILLEGAL_SYNTAX = 0
+MAX_SNAPS_IN_VTREE = 126
+
class RestClient(object):
def __init__(self, configuration, is_primary=True):
@@ -214,6 +217,11 @@ class RestClient(object):
{"vol_name": volume_provider_id,
"response": response["message"]})
LOG.error(msg)
+ # check if the volume reached snapshot limit
+ if ("details" in response and
+ response["details"][0]["rc"] == TOO_MANY_SNAPS_ERROR):
+ raise exception.SnapshotLimitReached(
+ set_limit=MAX_SNAPS_IN_VTREE)
raise exception.VolumeBackendAPIException(data=msg)
return response["volumeIdList"][0]
diff --git a/cinder/volume/drivers/dell_emc/powermax/common.py b/cinder/volume/drivers/dell_emc/powermax/common.py
index 80e28099c..0b7146e1b 100644
--- a/cinder/volume/drivers/dell_emc/powermax/common.py
+++ b/cinder/volume/drivers/dell_emc/powermax/common.py
@@ -551,6 +551,7 @@ class PowerMaxCommon(object):
if group_id is not None:
if group and (volume_utils.is_group_a_cg_snapshot_type(group)
or group.is_replicated):
+ self._find_volume_group(extra_specs[utils.ARRAY], group)
extra_specs[utils.FORCE_VOL_EDIT] = True
group_name = self._add_new_volume_to_volume_group(
volume, device_id, volume_name,
@@ -6288,8 +6289,12 @@ class PowerMaxCommon(object):
:param group: the group object
:returns: volume group dictionary
"""
+ __, interval_retries_dict = self._get_volume_group_info(group)
group_name = self.utils.update_volume_group_name(group)
- volume_group = self.rest.get_storage_group_rep(array, group_name)
+ sg_name_filter = utils.LIKE_FILTER + group.id
+ volume_group = self.rest.get_or_rename_storage_group_rep(
+ array, group_name, interval_retries_dict,
+ sg_filter=sg_name_filter)
if not volume_group:
LOG.warning("Volume group %(group_id)s cannot be found",
{'group_id': group_name})
diff --git a/cinder/volume/drivers/dell_emc/powermax/rest.py b/cinder/volume/drivers/dell_emc/powermax/rest.py
index 0246b65f1..b2dd4d947 100644
--- a/cinder/volume/drivers/dell_emc/powermax/rest.py
+++ b/cinder/volume/drivers/dell_emc/powermax/rest.py
@@ -3140,6 +3140,44 @@ class PowerMaxRest(object):
'src_device': device_id, 'tgt_device': r2_device_id,
'session_info': session_info}
+ def get_or_rename_storage_group_rep(
+ self, array, storage_group_name, extra_specs, sg_filter=None):
+ """Get storage group rep info if it exist.
+
+ If a generic volume group has been renamed we also need
+ to rename it on the array based on the uuid component.
+ We check for uuid if we cannot find it based on its old name.
+
+ :param array: the array serial number
+ :param storage_group_name: the name of the storage group
+ :param extra_specs: extra specification
+ :param sg_filter: uuid substring <like>
+ :returns: storage group dict or None
+ """
+ rep_details = self.get_storage_group_rep(array, storage_group_name)
+ if not rep_details:
+ # It is possible that the group has been renamed
+ if sg_filter:
+ sg_dict = self.get_storage_group_list(
+ array, params={
+ 'storageGroupId': sg_filter})
+ sg_list = sg_dict.get('storageGroupId') if sg_dict else None
+ if sg_list and len(sg_list) == 1:
+ rep_details = self.get_storage_group_rep(
+ array, sg_list[0])
+ # Update the new storage group name
+ if rep_details:
+ self._rename_storage_group(
+ array, sg_list[0], storage_group_name, extra_specs)
+ rep_details = self.get_storage_group_rep(
+ array, storage_group_name)
+ LOG.warning(
+ "Volume group %(old)s has been renamed to %(new)s "
+ "due to a rename operation in OpenStack.",
+ {'old': sg_list[0], 'new': storage_group_name})
+
+ return rep_details
+
def get_storage_group_rep(self, array, storage_group_name):
"""Given a name, return storage group details wrt replication.
@@ -3497,3 +3535,20 @@ class PowerMaxRest(object):
"""
return (self.ucode_major_level >= utils.UCODE_5978 and
self.ucode_minor_level >= utils.UCODE_5978_HICKORY)
+
+ def _rename_storage_group(
+ self, array, old_name, new_name, extra_specs):
+ """Rename the storage group.
+
+ :param array: the array serial number
+ :param old_name: the original name
+ :param new_name: the new name
+ :param extra_specs: the extra specifications
+ """
+ payload = {"editStorageGroupActionParam": {
+ "renameStorageGroupParam": {
+ "new_storage_Group_name": new_name}}}
+ status_code, job = self.modify_storage_group(
+ array, old_name, payload)
+ self.wait_for_job(
+ 'Rename storage group', status_code, job, extra_specs)
diff --git a/cinder/volume/drivers/dell_emc/powermax/utils.py b/cinder/volume/drivers/dell_emc/powermax/utils.py
index eaedb0e53..4c3ce3a16 100644
--- a/cinder/volume/drivers/dell_emc/powermax/utils.py
+++ b/cinder/volume/drivers/dell_emc/powermax/utils.py
@@ -216,6 +216,9 @@ REVERT_SS_EXC = 'Link must be fully copied for this operation to proceed'
IS_TRUE = ['<is> True', 'True', 'true', True]
IS_FALSE = ['<is> False', 'False', 'false', False]
+# <like> filter
+LIKE_FILTER = '<like>'
+
class PowerMaxUtils(object):
"""Utility class for Rest based PowerMax volume drivers.
diff --git a/cinder/volume/drivers/hitachi/hbsd_common.py b/cinder/volume/drivers/hitachi/hbsd_common.py
index 5a8a02a2a..a855b4aeb 100644
--- a/cinder/volume/drivers/hitachi/hbsd_common.py
+++ b/cinder/volume/drivers/hitachi/hbsd_common.py
@@ -106,8 +106,17 @@ COMMON_VOLUME_OPTS = [
'a copy pair deletion or data restoration.'),
]
+COMMON_PORT_OPTS = [
+ cfg.BoolOpt(
+ 'hitachi_port_scheduler',
+ default=False,
+ help='Enable port scheduling of WWNs to the configured ports so that '
+ 'WWNs are registered to ports in a round-robin fashion.'),
+]
+
CONF = cfg.CONF
CONF.register_opts(COMMON_VOLUME_OPTS, group=configuration.SHARED_CONF_GROUP)
+CONF.register_opts(COMMON_PORT_OPTS, group=configuration.SHARED_CONF_GROUP)
LOG = logging.getLogger(__name__)
MSG = utils.HBSDMsg
@@ -154,6 +163,7 @@ class HBSDCommon():
self.driver_info['param_prefix'] + '_storage_id',
self.driver_info['param_prefix'] + '_pool',
]
+ self.port_index = {}
def create_ldev(self, size):
"""Create an LDEV and return its LDEV number."""
@@ -468,6 +478,23 @@ class HBSDCommon():
self.raise_error(msg)
return values
+ def check_param_fc(self):
+ """Check FC-related parameter values and consistency among them."""
+ if hasattr(
+ self.conf,
+ self.driver_info['param_prefix'] + '_port_scheduler'):
+ self.check_opts(self.conf, COMMON_PORT_OPTS)
+ if (self.conf.hitachi_port_scheduler and
+ not self.conf.hitachi_group_create):
+ msg = utils.output_log(
+ MSG.INVALID_PARAMETER,
+ param=self.driver_info['param_prefix'] + '_port_scheduler')
+ self.raise_error(msg)
+ if (self._lookup_service is None and
+ self.conf.hitachi_port_scheduler):
+ msg = utils.output_log(MSG.ZONE_MANAGER_IS_NOT_AVAILABLE)
+ self.raise_error(msg)
+
def check_param_iscsi(self):
"""Check iSCSI-related parameter values and consistency among them."""
if self.conf.use_chap_auth:
@@ -505,6 +532,8 @@ class HBSDCommon():
if not self.conf.safe_get(opt):
msg = utils.output_log(MSG.INVALID_PARAMETER, param=opt)
self.raise_error(msg)
+ if self.storage_info['protocol'] == 'FC':
+ self.check_param_fc()
if self.storage_info['protocol'] == 'iSCSI':
self.check_param_iscsi()
@@ -544,11 +573,33 @@ class HBSDCommon():
resource=self.driver_info['hba_id_type'])
self.raise_error(msg)
+ def set_device_map(self, targets, hba_ids, volume):
+ return None, hba_ids
+
+ def get_port_scheduler_param(self):
+ if hasattr(
+ self.conf,
+ self.driver_info['param_prefix'] + '_port_scheduler'):
+ return self.conf.hitachi_port_scheduler
+ else:
+ return False
+
+ def create_target_by_port_scheduler(
+ self, devmap, targets, connector, volume):
+ raise NotImplementedError()
+
def create_target_to_storage(self, port, connector, hba_ids):
"""Create a host group or an iSCSI target on the specified port."""
raise NotImplementedError()
- def set_target_mode(self, port, gid, connector):
+ def get_gid_from_targets(self, targets, port):
+ for target_port, target_gid in targets['list']:
+ if target_port == port:
+ return target_gid
+ msg = utils.output_log(MSG.NO_CONNECTED_TARGET)
+ self.raise_error(msg)
+
+ def set_target_mode(self, port, gid):
"""Configure the target to meet the environment."""
raise NotImplementedError()
@@ -560,35 +611,55 @@ class HBSDCommon():
"""Delete the host group or the iSCSI target from the port."""
raise NotImplementedError()
- def _create_target(self, targets, port, connector, hba_ids):
+ def set_target_map_info(self, targets, hba_ids, port):
+ pass
+
+ def create_target(self, targets, port, connector, hba_ids):
"""Create a host group or an iSCSI target on the storage port."""
- target_name, gid = self.create_target_to_storage(
- port, connector, hba_ids)
- utils.output_log(MSG.OBJECT_CREATED, object='a target',
- details='port: %(port)s, gid: %(gid)s, target_name: '
- '%(target)s' %
- {'port': port, 'gid': gid, 'target': target_name})
+ if port not in targets['info'] or not targets['info'][port]:
+ target_name, gid = self.create_target_to_storage(
+ port, connector, hba_ids)
+ utils.output_log(
+ MSG.OBJECT_CREATED,
+ object='a target',
+ details='port: %(port)s, gid: %(gid)s, target_name: '
+ '%(target)s' %
+ {'port': port, 'gid': gid, 'target': target_name})
+ else:
+ gid = self.get_gid_from_targets(targets, port)
try:
- self.set_target_mode(port, gid, connector)
+ if port not in targets['info'] or not targets['info'][port]:
+ self.set_target_mode(port, gid)
self.set_hba_ids(port, gid, hba_ids)
except Exception:
with excutils.save_and_reraise_exception():
self.delete_target_from_storage(port, gid)
targets['info'][port] = True
- targets['list'].append((port, gid))
+ if (port, gid) not in targets['list']:
+ targets['list'].append((port, gid))
+ self.set_target_map_info(targets, hba_ids, port)
- def create_mapping_targets(self, targets, connector):
+ def create_mapping_targets(self, targets, connector, volume=None):
"""Create server-storage connection for all specified storage ports."""
+ active_hba_ids = []
hba_ids = self.get_hba_ids_from_connector(connector)
- for port in targets['info'].keys():
- if targets['info'][port]:
- continue
- try:
- self._create_target(targets, port, connector, hba_ids)
- except exception.VolumeDriverException:
- utils.output_log(
- self.driver_info['msg_id']['target'], port=port)
+ devmap, active_hba_ids = self.set_device_map(targets, hba_ids, volume)
+
+ if self.get_port_scheduler_param():
+ self.create_target_by_port_scheduler(
+ devmap, targets, connector, volume)
+ else:
+ for port in targets['info'].keys():
+ if targets['info'][port]:
+ continue
+
+ try:
+ self.create_target(
+ targets, port, connector, active_hba_ids)
+ except exception.VolumeDriverException:
+ utils.output_log(
+ self.driver_info['msg_id']['target'], port=port)
# When other threads created a host group at same time, need to
# re-find targets.
@@ -596,6 +667,20 @@ class HBSDCommon():
self.find_targets_from_storage(
targets, connector, targets['info'].keys())
+ def get_port_index_to_be_used(self, ports, network_name):
+ backend_name = self.conf.safe_get('volume_backend_name')
+ code = (
+ str(self.conf.hitachi_storage_id) + backend_name + network_name)
+ if code in self.port_index.keys():
+ if self.port_index[code] >= len(ports) - 1:
+ self.port_index[code] = 0
+ else:
+ self.port_index[code] += 1
+ else:
+ self.port_index[code] = 0
+
+ return self.port_index[code]
+
def init_cinder_hosts(self, **kwargs):
"""Initialize server-storage connection."""
targets = kwargs.pop(
@@ -725,7 +810,7 @@ class HBSDCommon():
return {
'driver_volume_type': self.driver_info['volume_type'],
'data': self.get_properties(targets, target_lun, connector),
- }
+ }, targets['target_map']
def get_target_ports(self, connector):
"""Return a list of ports corresponding to the specified connector."""
@@ -818,6 +903,9 @@ class HBSDCommon():
return filtered_tps
+ def clean_mapping_targets(self, targets):
+ raise NotImplementedError()
+
def unmanage_snapshot(self, snapshot):
"""Output error message and raise NotImplementedError."""
utils.output_log(
@@ -895,3 +983,7 @@ class HBSDCommon():
"""Raise a VolumeDriverException by driver busy message."""
message = _(utils.BUSY_MESSAGE)
raise exception.VolumeDriverException(message)
+
+    def is_controller(self, connector):
+        return 'ip' in connector and connector['ip'] == CONF.my_ip
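
Standalone sketch (not part of the change): the round-robin bookkeeping performed by get_port_index_to_be_used() above, extracted into a self-contained class for illustration; the key composition (storage id + backend name + fabric name) mirrors the code.

    class PortRoundRobin(object):
        # Illustrative re-implementation of the per-fabric index bookkeeping.
        def __init__(self):
            self.port_index = {}

        def next_index(self, code, ports):
            # Wrap back to the first port once the last one was handed out.
            if code in self.port_index:
                if self.port_index[code] >= len(ports) - 1:
                    self.port_index[code] = 0
                else:
                    self.port_index[code] += 1
            else:
                self.port_index[code] = 0
            return self.port_index[code]

    rr = PortRoundRobin()
    ports = ['CL1-A', 'CL2-A', 'CL3-A']
    print([rr.next_index('812345backend1fabricA', ports) for _ in range(5)])
    # -> [0, 1, 2, 0, 1]
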
diff --git a/cinder/volume/drivers/hitachi/hbsd_fc.py b/cinder/volume/drivers/hitachi/hbsd_fc.py
index 2a30bd4e3..899eae8c8 100644
--- a/cinder/volume/drivers/hitachi/hbsd_fc.py
+++ b/cinder/volume/drivers/hitachi/hbsd_fc.py
@@ -70,6 +70,7 @@ class HBSDFCDriver(driver.FibreChannelDriver):
2.2.0 - Add maintenance parameters.
2.2.1 - Make the parameters name variable for supporting OEM storages.
2.2.2 - Add Target Port Assignment.
+ 2.2.3 - Add port scheduler.
"""
@@ -86,6 +87,7 @@ class HBSDFCDriver(driver.FibreChannelDriver):
super(HBSDFCDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(common.COMMON_VOLUME_OPTS)
+ self.configuration.append_config_values(common.COMMON_PORT_OPTS)
self.configuration.append_config_values(rest_fc.FC_VOLUME_OPTS)
os.environ['LANG'] = 'C'
self.common = self._init_common(self.configuration, kwargs.get('db'))
@@ -101,6 +103,7 @@ class HBSDFCDriver(driver.FibreChannelDriver):
['driver_ssl_cert_verify', 'driver_ssl_cert_path',
'san_api_port', ]))
return (common.COMMON_VOLUME_OPTS +
+ common.COMMON_PORT_OPTS +
rest.REST_VOLUME_OPTS +
rest_fc.FC_VOLUME_OPTS +
additional_opts)
diff --git a/cinder/volume/drivers/hitachi/hbsd_iscsi.py b/cinder/volume/drivers/hitachi/hbsd_iscsi.py
index cb58b0de1..e121481de 100644
--- a/cinder/volume/drivers/hitachi/hbsd_iscsi.py
+++ b/cinder/volume/drivers/hitachi/hbsd_iscsi.py
@@ -70,6 +70,7 @@ class HBSDISCSIDriver(driver.ISCSIDriver):
2.2.0 - Add maintenance parameters.
2.2.1 - Make the parameters name variable for supporting OEM storages.
2.2.2 - Add Target Port Assignment.
+ 2.2.3 - Add port scheduler.
"""
diff --git a/cinder/volume/drivers/hitachi/hbsd_rest.py b/cinder/volume/drivers/hitachi/hbsd_rest.py
index b7b4865b5..af4e0d9fe 100644
--- a/cinder/volume/drivers/hitachi/hbsd_rest.py
+++ b/cinder/volume/drivers/hitachi/hbsd_rest.py
@@ -568,7 +568,7 @@ class HBSDREST(common.HBSDCommon):
if (self.find_targets_from_storage(
targets, connector, target_ports) and
self.conf.hitachi_group_create):
- self.create_mapping_targets(targets, connector)
+ self.create_mapping_targets(targets, connector, volume)
self.require_target_existed(targets)
@@ -644,7 +644,7 @@ class HBSDREST(common.HBSDCommon):
{'port': port, 'gid': gid})
return result
- def _clean_mapping_targets(self, targets):
+ def clean_mapping_targets(self, targets):
"""Delete the empty host group without LU."""
deleted_targets = []
for target in targets['list']:
@@ -681,7 +681,7 @@ class HBSDREST(common.HBSDCommon):
self.unmap_ldev(unmap_targets, ldev)
if self.conf.hitachi_group_delete:
- deleted_targets = self._clean_mapping_targets(unmap_targets)
+ deleted_targets = self.clean_mapping_targets(unmap_targets)
return deleted_targets
def find_all_mapped_targets_from_storage(self, targets, ldev):
diff --git a/cinder/volume/drivers/hitachi/hbsd_rest_api.py b/cinder/volume/drivers/hitachi/hbsd_rest_api.py
index 56c68e97c..118b9db6d 100644
--- a/cinder/volume/drivers/hitachi/hbsd_rest_api.py
+++ b/cinder/volume/drivers/hitachi/hbsd_rest_api.py
@@ -377,7 +377,7 @@ class RestApiClient():
message, errobj=errobj)
return False, rsp_body, errobj
else:
- LOG.debug("The resource group to which the operation object ",
+ LOG.debug("The resource group to which the operation object "
"belongs is being locked by other software.")
return True, rsp_body, errobj
diff --git a/cinder/volume/drivers/hitachi/hbsd_rest_fc.py b/cinder/volume/drivers/hitachi/hbsd_rest_fc.py
index 330eaa75f..6006afa02 100644
--- a/cinder/volume/drivers/hitachi/hbsd_rest_fc.py
+++ b/cinder/volume/drivers/hitachi/hbsd_rest_fc.py
@@ -16,10 +16,12 @@
from oslo_config import cfg
from oslo_log import log as logging
+from oslo_utils import excutils
from cinder import exception
from cinder.volume import configuration
from cinder.volume.drivers.hitachi import hbsd_rest as rest
+from cinder.volume.drivers.hitachi import hbsd_rest_api as rest_api
from cinder.volume.drivers.hitachi import hbsd_utils as utils
from cinder.zonemanager import utils as fczm_utils
@@ -34,6 +36,8 @@ FC_VOLUME_OPTS = [
_FC_HMO_DISABLE_IO = 91
+_MSG_EXCEED_HOST_GROUP_MAX = "could not find empty Host group ID for adding."
+
LOG = logging.getLogger(__name__)
MSG = utils.HBSDMsg
@@ -69,10 +73,22 @@ class HBSDRESTFC(rest.HBSDREST):
if port not in set(target_ports + compute_target_ports):
continue
secure_fc_port = True
+ can_port_schedule = True
+ if hasattr(
+ self.conf,
+ self.driver_info['param_prefix'] + '_port_scheduler'):
+ port_scheduler_param = self.conf.hitachi_port_scheduler
+ else:
+ port_scheduler_param = False
if (port_data['portType'] not in ['FIBRE', 'FCoE'] or
not port_data['lunSecuritySetting']):
secure_fc_port = False
- if not secure_fc_port:
+ elif (port in set(target_ports + compute_target_ports) and
+ port_scheduler_param and not (
+ port_data.get('fabricMode') and
+ port_data.get('portConnection') == 'PtoP')):
+ can_port_schedule = False
+ if not secure_fc_port or not can_port_schedule:
utils.output_log(
MSG.INVALID_PORT, port=port,
additional_info='portType: %s, lunSecuritySetting: %s, '
@@ -84,10 +100,11 @@ class HBSDRESTFC(rest.HBSDREST):
if not secure_fc_port:
continue
wwn = port_data.get('wwn')
- if target_ports and port in target_ports:
+ if target_ports and port in target_ports and can_port_schedule:
available_ports.append(port)
self.storage_info['wwns'][port] = wwn
- if compute_target_ports and port in compute_target_ports:
+ if (compute_target_ports and port in compute_target_ports and
+ can_port_schedule):
available_compute_ports.append(port)
self.storage_info['wwns'][port] = wwn
@@ -136,20 +153,21 @@ class HBSDRESTFC(rest.HBSDREST):
try:
self.client.add_hba_wwn(port, gid, wwn, no_log=True)
registered_wwns.append(wwn)
- except exception.VolumeDriverException:
+ except exception.VolumeDriverException as ex:
utils.output_log(MSG.ADD_HBA_WWN_FAILED, port=port, gid=gid,
wwn=wwn)
+ if (self.get_port_scheduler_param() and
+ utils.safe_get_err_code(ex.kwargs.get('errobj'))
+ == rest_api.EXCEED_WWN_MAX):
+ raise ex
if not registered_wwns:
msg = utils.output_log(MSG.NO_HBA_WWN_ADDED_TO_HOST_GRP, port=port,
gid=gid)
self.raise_error(msg)
- def set_target_mode(self, port, gid, connector):
+ def set_target_mode(self, port, gid):
"""Configure the host group to meet the environment."""
- if connector.get('os_type', None) == 'aix':
- body = {'hostMode': 'AIX'}
- else:
- body = {'hostMode': 'LINUX/IRIX'}
+ body = {'hostMode': 'LINUX/IRIX'}
if self.conf.hitachi_rest_disable_io_wait:
body['hostModeOptions'] = [_FC_HMO_DISABLE_IO]
if self.conf.hitachi_host_mode_options:
@@ -240,16 +258,34 @@ class HBSDRESTFC(rest.HBSDREST):
pass
else:
not_found_count += 1
+
+ if self.get_port_scheduler_param():
+ """
+ When port scheduler feature is enabled,
+ it is OK to find any mapped port. so:
+ - return 0, if any mapped port is found
+ - return port count, if no mapped port is found.
+ It is no case with both not_found_count and len(target_ports) are
+ zero, bcz it must be failed in param checker if any target ports
+ are not defined.
+ """
+ return (not_found_count if not_found_count == len(target_ports)
+ else 0)
+
return not_found_count
def initialize_connection(self, volume, connector, is_snapshot=False):
"""Initialize connection between the server and the volume."""
- conn_info = super(HBSDRESTFC, self).initialize_connection(
+ conn_info, map_info = super(HBSDRESTFC, self).initialize_connection(
volume, connector, is_snapshot)
if self.conf.hitachi_zoning_request:
- init_targ_map = utils.build_initiator_target_map(
- connector, conn_info['data']['target_wwn'],
- self._lookup_service)
+ if (self.get_port_scheduler_param() and
+ not self.is_controller(connector)):
+ init_targ_map = map_info
+ else:
+ init_targ_map = utils.build_initiator_target_map(
+ connector, conn_info['data']['target_wwn'],
+ self._lookup_service)
if init_targ_map:
conn_info['data']['initiator_target_map'] = init_targ_map
fczm_utils.add_fc_zone(conn_info)
@@ -284,3 +320,115 @@ class HBSDRESTFC(rest.HBSDREST):
for hostgroup in hostgroups:
wwpns.update(self._get_wwpns(port, hostgroup))
fake_connector['wwpns'] = list(wwpns)
+
+ def set_device_map(self, targets, hba_ids, volume):
+ active_hba_ids = []
+ target_wwns = []
+ active_target_wwns = []
+ vol_id = volume['id'] if volume and 'id' in volume.keys() else ""
+
+ if not self.get_port_scheduler_param():
+ return None, hba_ids
+
+ for port in targets['info'].keys():
+ target_wwns.append(self.storage_info['wwns'][port])
+
+ devmap = self._lookup_service.get_device_mapping_from_network(
+ hba_ids, target_wwns)
+
+ for fabric_name in devmap.keys():
+ active_hba_ids.extend(
+ devmap[fabric_name]['initiator_port_wwn_list'])
+ active_target_wwns.extend(
+ devmap[fabric_name]['target_port_wwn_list'])
+
+ active_hba_ids = list(set(active_hba_ids))
+ if not active_hba_ids:
+ msg = utils.output_log(MSG.NO_ACTIVE_WWN, wwn=', '.join(hba_ids),
+ volume=vol_id)
+ self.raise_error(msg)
+
+ active_target_wwns = list(set(active_target_wwns))
+ if not active_target_wwns:
+ port_wwns = ""
+ for port in targets['info'].keys():
+ if port_wwns:
+ port_wwns += ", "
+ port_wwns += ("port, WWN: " + port +
+ ", " + self.storage_info['wwns'][port])
+ msg = utils.output_log(
+ MSG.NO_PORT_WITH_ACTIVE_WWN, port_wwns=port_wwns,
+ volume=vol_id)
+ self.raise_error(msg)
+
+ return devmap, active_hba_ids
+
+ def build_wwpn_groups(self, wwpns, connector):
+ count = 1
+ return ([wwpns[i:i + count] for i in range(0, len(wwpns), count)])
+
+ def _create_target_to_any_port(
+ self, targets, ports, connector, hba_ids, fabric_name):
+ for port in ports:
+ index = self.get_port_index_to_be_used(ports, fabric_name)
+ try:
+ self.create_target(
+ targets, ports[index], connector, hba_ids)
+ return
+ except exception.VolumeDriverException as ex:
+ if ((utils.safe_get_message_id(ex.kwargs.get('errobj'))
+ == rest_api.MSGID_SPECIFIED_OBJECT_DOES_NOT_EXIST)
+ or (_MSG_EXCEED_HOST_GROUP_MAX
+ in utils.safe_get_message(ex.kwargs.get('errobj')))):
+ utils.output_log(
+ MSG.HOST_GROUP_NUMBER_IS_MAXIMUM, port=ports[index])
+ elif (utils.safe_get_err_code(ex.kwargs.get('errobj'))
+ == rest_api.EXCEED_WWN_MAX):
+ utils.output_log(
+ MSG.WWN_NUMBER_IS_MAXIMUM, port=ports[index],
+                        wwn=", ".join(hba_ids))
+ else:
+ raise ex
+
+ msg = utils.output_log(
+ MSG.HOST_GROUP_OR_WWN_IS_NOT_AVAILABLE, ports=', '.join(ports))
+ self.raise_error(msg)
+
+ def create_target_by_port_scheduler(
+ self, devmap, targets, connector, volume):
+ available_ports = []
+ active_ports = []
+
+ if not devmap:
+ msg = utils.output_log(MSG.ZONE_MANAGER_IS_NOT_AVAILABLE)
+ self.raise_error(msg)
+ for fabric_name in devmap.keys():
+ available_ports = []
+ active_ports = []
+ active_initiator_wwns = devmap[
+ fabric_name]['initiator_port_wwn_list']
+ wwpn_groups = self.build_wwpn_groups(
+ active_initiator_wwns, connector)
+ for port, wwn in self.storage_info['wwns'].items():
+ if wwn in devmap[fabric_name]['target_port_wwn_list']:
+ available_ports.append(port)
+ target_ports = self.get_target_ports(connector)
+ filter_ports = self.filter_target_ports(target_ports, volume)
+ for port in target_ports:
+ if port in available_ports and port in filter_ports:
+ active_ports.append(port)
+ elif port not in available_ports and port in filter_ports:
+ utils.output_log(
+ MSG.INVALID_PORT_BY_ZONE_MANAGER, port=port)
+ for wwpns in wwpn_groups:
+ try:
+ self._create_target_to_any_port(
+ targets, active_ports, connector, wwpns, fabric_name)
+ except exception.VolumeDriverException:
+ with excutils.save_and_reraise_exception():
+ self.clean_mapping_targets(targets)
+
+ def set_target_map_info(self, targets, hba_ids, port):
+ for hba_id in hba_ids:
+ target_map = {hba_id: [self.storage_info['wwns'][port]]}
+ targets['target_map'].update(target_map)
diff --git a/cinder/volume/drivers/hitachi/hbsd_rest_iscsi.py b/cinder/volume/drivers/hitachi/hbsd_rest_iscsi.py
index a580bde9e..2b3224fdf 100644
--- a/cinder/volume/drivers/hitachi/hbsd_rest_iscsi.py
+++ b/cinder/volume/drivers/hitachi/hbsd_rest_iscsi.py
@@ -116,13 +116,9 @@ class HBSDRESTISCSI(rest.HBSDREST):
"""Connect the specified HBA with the specified port."""
self.client.add_hba_iscsi(port, gid, hba_ids)
- def set_target_mode(self, port, gid, connector):
+ def set_target_mode(self, port, gid):
"""Configure the iSCSI target to meet the environment."""
- if connector.get('os_type', None) == 'aix':
- host_mode = 'AIX'
- else:
- host_mode = 'LINUX/IRIX'
- body = {'hostMode': host_mode,
+ body = {'hostMode': 'LINUX/IRIX',
'hostModeOptions': [_ISCSI_HMO_REPORT_FULL_PORTAL]}
if self.conf.hitachi_rest_disable_io_wait:
body['hostModeOptions'].append(_ISCSI_HMO_DISABLE_IO)
@@ -204,6 +200,12 @@ class HBSDRESTISCSI(rest.HBSDREST):
not_found_count += 1
return not_found_count
+ def initialize_connection(self, volume, connector, is_snapshot=False):
+ """Initialize connection between the server and the volume."""
+ conn_info, map_info = super(HBSDRESTISCSI, self).initialize_connection(
+ volume, connector, is_snapshot)
+ return conn_info
+
def get_properties_iscsi(self, targets, multipath):
"""Return iSCSI-specific server-LDEV connection info."""
if not multipath:
diff --git a/cinder/volume/drivers/hitachi/hbsd_utils.py b/cinder/volume/drivers/hitachi/hbsd_utils.py
index 0d0f95ebb..6c16b306d 100644
--- a/cinder/volume/drivers/hitachi/hbsd_utils.py
+++ b/cinder/volume/drivers/hitachi/hbsd_utils.py
@@ -25,7 +25,7 @@ from oslo_utils import units
from cinder import exception
-VERSION = '2.2.2'
+VERSION = '2.2.3'
CI_WIKI_NAME = 'Hitachi_VSP_CI'
PARAM_PREFIX = 'hitachi'
VENDOR_NAME = 'Hitachi'
@@ -184,6 +184,21 @@ class HBSDMsg(enum.Enum):
'%(volume_type)s)',
'suffix': WARNING_SUFFIX,
}
+ HOST_GROUP_NUMBER_IS_MAXIMUM = {
+ 'msg_id': 335,
+ 'loglevel': base_logging.WARNING,
+ 'msg': 'Failed to create the host group because the host group '
+ 'maximum of the port is exceeded. (port: %(port)s)',
+ 'suffix': WARNING_SUFFIX,
+ }
+ WWN_NUMBER_IS_MAXIMUM = {
+ 'msg_id': 336,
+ 'loglevel': base_logging.WARNING,
+ 'msg': 'Failed to add the wwns to the host group port because the '
+ 'WWN maximum of the port is exceeded. '
+ '(port: %(port)s, WWN: %(wwn)s)',
+ 'suffix': WARNING_SUFFIX,
+ }
INVALID_PORT = {
'msg_id': 339,
'loglevel': base_logging.WARNING,
@@ -191,6 +206,13 @@ class HBSDMsg(enum.Enum):
'invalid. (%(additional_info)s)',
'suffix': WARNING_SUFFIX,
}
+ INVALID_PORT_BY_ZONE_MANAGER = {
+ 'msg_id': 340,
+ 'loglevel': base_logging.WARNING,
+ 'msg': 'Port %(port)s will not be used because it is not considered '
+ 'to be active by the Fibre Channel Zone Manager.',
+ 'suffix': WARNING_SUFFIX,
+ }
STORAGE_COMMAND_FAILED = {
'msg_id': 600,
'loglevel': base_logging.ERROR,
@@ -427,6 +449,36 @@ class HBSDMsg(enum.Enum):
'%(group_type)s, volume: %(volume)s, snapshot: %(snapshot)s)',
'suffix': ERROR_SUFFIX,
}
+ NO_ACTIVE_WWN = {
+ 'msg_id': 747,
+ 'loglevel': base_logging.ERROR,
+ 'msg': 'Failed to initialize volume connection because no active WWN '
+ 'was found for the connector. (WWN: %(wwn)s, volume: %(volume)s'
+ ')',
+ 'suffix': ERROR_SUFFIX,
+ }
+ NO_PORT_WITH_ACTIVE_WWN = {
+ 'msg_id': 748,
+ 'loglevel': base_logging.ERROR,
+ 'msg': 'Failed to initialize volume connection because no port with '
+ 'an active WWN was found. (%(port_wwns)s, volume: %(volume)s)',
+ 'suffix': ERROR_SUFFIX,
+ }
+ ZONE_MANAGER_IS_NOT_AVAILABLE = {
+ 'msg_id': 749,
+ 'loglevel': base_logging.ERROR,
+ 'msg': 'The Fibre Channel Zone Manager is not available. The Fibre '
+ 'Channel Zone Manager must be up and running when '
+ 'port_scheduler parameter is set to True.',
+ 'suffix': ERROR_SUFFIX,
+ }
+ HOST_GROUP_OR_WWN_IS_NOT_AVAILABLE = {
+ 'msg_id': 750,
+ 'loglevel': base_logging.ERROR,
+        'msg': 'Failed to initialize volume connection because no available '
+               'host group or WWN resource was found. (ports: %(ports)s)',
+ 'suffix': ERROR_SUFFIX,
+ }
def __init__(self, error_info):
"""Initialize Enum attributes."""
@@ -514,6 +566,12 @@ def safe_get_message_id(errobj):
return errobj.get('messageId', '')
+def safe_get_message(errobj):
+ if not errobj:
+ return ''
+ return errobj.get('message', '')
+
+
def is_shared_connection(volume, connector):
"""Check if volume is multiattach to 1 node."""
connection_count = 0
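
safe_get_message() mirrors the existing safe_get_message_id() helper: both tolerate a missing error object. A small self-contained sketch of the expected behaviour, with a hypothetical REST error payload:

    def safe_get_message_id(errobj):
        return errobj.get('messageId', '') if errobj else ''

    def safe_get_message(errobj):
        return errobj.get('message', '') if errobj else ''

    # Hypothetical error object returned by the storage REST API.
    errobj = {'messageId': 'KART30000-E', 'message': 'An error occurred.'}
    assert safe_get_message_id(errobj) == 'KART30000-E'
    assert safe_get_message(errobj) == 'An error occurred.'
    assert safe_get_message(None) == ''   # a missing payload degrades to ''
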
diff --git a/cinder/volume/drivers/hpe/hpe_3par_base.py b/cinder/volume/drivers/hpe/hpe_3par_base.py
index aa0ebf379..497fb7f36 100644
--- a/cinder/volume/drivers/hpe/hpe_3par_base.py
+++ b/cinder/volume/drivers/hpe/hpe_3par_base.py
@@ -237,6 +237,20 @@ class HPE3PARDriverBase(driver.ManageableVD,
return self.common.unmanage_snapshot(snapshot)
@volume_utils.trace
+ def get_manageable_volumes(self, cinder_volumes, marker, limit, offset,
+ sort_keys, sort_dirs):
+ return self.common.get_manageable_volumes(cinder_volumes, marker,
+ limit, offset, sort_keys,
+ sort_dirs)
+
+ @volume_utils.trace
+ def get_manageable_snapshots(self, cinder_snapshots, marker, limit, offset,
+ sort_keys, sort_dirs):
+ return self.common.get_manageable_snapshots(cinder_snapshots, marker,
+ limit, offset, sort_keys,
+ sort_dirs)
+
+ @volume_utils.trace
def retype(self, context, volume, new_type, diff, host):
"""Convert the volume to be of the new type."""
common = self._login()
diff --git a/cinder/volume/drivers/hpe/hpe_3par_common.py b/cinder/volume/drivers/hpe/hpe_3par_common.py
index 584200c86..ba4d4ea5c 100644
--- a/cinder/volume/drivers/hpe/hpe_3par_common.py
+++ b/cinder/volume/drivers/hpe/hpe_3par_common.py
@@ -298,11 +298,13 @@ class HPE3PARCommon(object):
4.0.14 - Added Peer Persistence feature
4.0.15 - Support duplicated FQDN in network. Bug #1834695
4.0.16 - In multi host env, fix multi-detach operation. Bug #1958122
+ 4.0.17 - Added get_manageable_volumes and get_manageable_snapshots.
+ Bug #1819903
"""
- VERSION = "4.0.16"
+ VERSION = "4.0.17"
stats = {}
@@ -1223,6 +1225,105 @@ class HPE3PARCommon(object):
'vol': snap_name,
'new': new_snap_name})
+ def get_manageable_volumes(self, cinder_volumes, marker, limit, offset,
+ sort_keys, sort_dirs):
+ already_managed = {}
+ for vol_obj in cinder_volumes:
+ cinder_id = vol_obj.id
+ volume_name = self._get_3par_vol_name(cinder_id)
+ already_managed[volume_name] = cinder_id
+
+ cinder_cpg = self._client_conf['hpe3par_cpg'][0]
+
+ manageable_vols = []
+
+ body = self.client.getVolumes()
+ all_volumes = body['members']
+ for vol in all_volumes:
+ cpg = vol.get('userCPG')
+ if cpg == cinder_cpg:
+ size_gb = int(vol['sizeMiB'] / 1024)
+ vol_name = vol['name']
+ if vol_name in already_managed:
+ is_safe = False
+ reason_not_safe = _('Volume already managed')
+ cinder_id = already_managed[vol_name]
+ else:
+ is_safe = False
+ hostname = None
+ cinder_id = None
+ # Check if the unmanaged volume is attached to any host
+ try:
+ vlun = self.client.getVLUN(vol_name)
+ hostname = vlun['hostname']
+ except hpe3parclient.exceptions.HTTPNotFound:
+ # not attached to any host
+ is_safe = True
+
+ if is_safe:
+ reason_not_safe = None
+ else:
+                        reason_not_safe = (_('Volume attached to host %s')
+                                            % hostname)
+
+ manageable_vols.append({
+ 'reference': {'name': vol_name},
+ 'size': size_gb,
+ 'safe_to_manage': is_safe,
+ 'reason_not_safe': reason_not_safe,
+ 'cinder_id': cinder_id,
+ })
+
+ return volume_utils.paginate_entries_list(
+ manageable_vols, marker, limit, offset, sort_keys, sort_dirs)
+
+ def get_manageable_snapshots(self, cinder_snapshots, marker, limit, offset,
+ sort_keys, sort_dirs):
+ already_managed = {}
+ for snap_obj in cinder_snapshots:
+ cinder_snap_id = snap_obj.id
+ snap_name = self._get_3par_snap_name(cinder_snap_id)
+ already_managed[snap_name] = cinder_snap_id
+
+ cinder_cpg = self._client_conf['hpe3par_cpg'][0]
+
+ cpg_volumes = []
+
+ body = self.client.getVolumes()
+ all_volumes = body['members']
+ for vol in all_volumes:
+ cpg = vol.get('userCPG')
+ if cpg == cinder_cpg:
+ cpg_volumes.append(vol)
+
+ manageable_snaps = []
+
+ for vol in cpg_volumes:
+ size_gb = int(vol['sizeMiB'] / 1024)
+ snapshots = self.client.getSnapshotsOfVolume(cinder_cpg,
+ vol['name'])
+ for snap_name in snapshots:
+ if snap_name in already_managed:
+ is_safe = False
+ reason_not_safe = _('Snapshot already managed')
+ cinder_snap_id = already_managed[snap_name]
+ else:
+ is_safe = True
+ reason_not_safe = None
+ cinder_snap_id = None
+
+ manageable_snaps.append({
+ 'reference': {'name': snap_name},
+ 'size': size_gb,
+ 'safe_to_manage': is_safe,
+ 'reason_not_safe': reason_not_safe,
+ 'cinder_id': cinder_snap_id,
+ 'source_reference': {'name': vol['name']},
+ })
+
+ return volume_utils.paginate_entries_list(
+ manageable_snaps, marker, limit, offset, sort_keys, sort_dirs)
+
def _get_existing_volume_ref_name(self, existing_ref, is_snapshot=False):
"""Returns the volume name of an existing reference.
diff --git a/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py b/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py
index b1e6fb6d1..e9ba9d0c1 100644
--- a/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py
+++ b/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py
@@ -3280,6 +3280,7 @@ class StorwizeSVCCommonDriver(san.SanDriver,
self._vdiskcopyops = {}
self._vdiskcopyops_loop = None
self.protocol = None
+ self._storwize_portset = self.configuration.storwize_portset
self._master_state = {'storage_nodes': {},
'enabled_protocols': set(),
'compression_enabled': False,
@@ -3381,7 +3382,8 @@ class StorwizeSVCCommonDriver(san.SanDriver,
state['storage_nodes'] = helper.get_node_info()
# Add the iSCSI IP addresses and WWPNs to the storage node info
- helper.add_iscsi_ip_addrs(state['storage_nodes'], state['code_level'])
+ helper.add_iscsi_ip_addrs(state['storage_nodes'], state['code_level'],
+ portset=self._storwize_portset)
helper.add_fc_wwpns(state['storage_nodes'], state['code_level'])
# For each node, check what connection modes it supports. Delete any
diff --git a/cinder/volume/drivers/infinidat.py b/cinder/volume/drivers/infinidat.py
index 0dae912b1..68b988d4e 100644
--- a/cinder/volume/drivers/infinidat.py
+++ b/cinder/volume/drivers/infinidat.py
@@ -1,4 +1,4 @@
-# Copyright 2016 Infinidat Ltd.
+# Copyright 2022 Infinidat Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -16,19 +16,23 @@
from contextlib import contextmanager
import functools
+import math
import platform
import socket
from unittest import mock
+import uuid
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import units
from cinder.common import constants
+from cinder import context as cinder_context
from cinder import coordination
from cinder import exception
from cinder.i18n import _
from cinder import interface
+from cinder import objects
from cinder.objects import fields
from cinder import version
from cinder.volume import configuration
@@ -117,10 +121,14 @@ class InfiniboxVolumeDriver(san.SanISCSIDriver):
1.4 - added support for QoS
1.5 - added support for volume compression
1.6 - added support for volume multi-attach
+ 1.7 - fixed iSCSI to return all portals
+ 1.8 - added revert to snapshot
+ 1.9 - added manage/unmanage/manageable-list volume/snapshot
+ 1.10 - added support for TLS/SSL communication
"""
- VERSION = '1.6'
+ VERSION = '1.10'
# ThirdPartySystems wiki page
CI_WIKI_NAME = "INFINIDAT_CI"
@@ -137,11 +145,14 @@ class InfiniboxVolumeDriver(san.SanISCSIDriver):
'chap_username', 'chap_password', 'san_thin_provision',
'use_multipath_for_image_xfer', 'enforce_multipath_for_image_xfer',
'num_volume_device_scan_tries', 'volume_dd_blocksize',
+ 'driver_use_ssl', 'suppress_requests_ssl_warnings',
'max_over_subscription_ratio')
return infinidat_opts + additional_opts
- def _setup_and_get_system_object(self, management_address, auth):
- system = infinisdk.InfiniBox(management_address, auth=auth)
+ def _setup_and_get_system_object(self, management_address, auth,
+ use_ssl=False):
+ system = infinisdk.InfiniBox(management_address, auth=auth,
+ use_ssl=use_ssl)
system.api.add_auto_retry(
lambda e: isinstance(
e, infinisdk.core.exceptions.APITransportFailure) and
@@ -158,9 +169,10 @@ class InfiniboxVolumeDriver(san.SanISCSIDriver):
raise exception.VolumeDriverException(message=msg)
auth = (self.configuration.san_login,
self.configuration.san_password)
+ use_ssl = self.configuration.driver_use_ssl
self.management_address = self.configuration.san_ip
- self._system = (
- self._setup_and_get_system_object(self.management_address, auth))
+ self._system = self._setup_and_get_system_object(
+ self.management_address, auth, use_ssl=use_ssl)
backend_name = self.configuration.safe_get('volume_backend_name')
self._backend_name = backend_name or self.__class__.__name__
self._volume_stats = None
@@ -221,21 +233,41 @@ class InfiniboxVolumeDriver(san.SanISCSIDriver):
"host.created_by": _INFINIDAT_CINDER_IDENTIFIER}
infinidat_object.set_metadata_from_dict(data)
+ def _get_infinidat_dataset_by_ref(self, existing_ref):
+ if 'source-id' in existing_ref:
+ kwargs = dict(id=existing_ref['source-id'])
+ elif 'source-name' in existing_ref:
+ kwargs = dict(name=existing_ref['source-name'])
+ else:
+ reason = _('dataset reference must contain '
+ 'source-id or source-name key')
+ raise exception.ManageExistingInvalidReference(
+ existing_ref=existing_ref, reason=reason)
+ return self._system.volumes.safe_get(**kwargs)
+
+ def _get_infinidat_volume_by_ref(self, existing_ref):
+ infinidat_volume = self._get_infinidat_dataset_by_ref(existing_ref)
+ if infinidat_volume is None:
+ raise exception.VolumeNotFound(volume_id=existing_ref)
+ return infinidat_volume
+
+ def _get_infinidat_snapshot_by_ref(self, existing_ref):
+ infinidat_snapshot = self._get_infinidat_dataset_by_ref(existing_ref)
+ if infinidat_snapshot is None:
+ raise exception.SnapshotNotFound(snapshot_id=existing_ref)
+ if not infinidat_snapshot.is_snapshot():
+ reason = (_('reference %(existing_ref)s is a volume')
+ % {'existing_ref': existing_ref})
+ raise exception.InvalidSnapshot(reason=reason)
+ return infinidat_snapshot
+
def _get_infinidat_volume_by_name(self, name):
- volume = self._system.volumes.safe_get(name=name)
- if volume is None:
- msg = _('Volume "%s" not found') % name
- LOG.error(msg)
- raise exception.InvalidVolume(reason=msg)
- return volume
+ ref = {'source-name': name}
+ return self._get_infinidat_volume_by_ref(ref)
def _get_infinidat_snapshot_by_name(self, name):
- snapshot = self._system.volumes.safe_get(name=name)
- if snapshot is None:
- msg = _('Snapshot "%s" not found') % name
- LOG.error(msg)
- raise exception.InvalidSnapshot(reason=msg)
- return snapshot
+ ref = {'source-name': name}
+ return self._get_infinidat_snapshot_by_ref(ref)
def _get_infinidat_volume(self, cinder_volume):
volume_name = self._make_volume_name(cinder_volume)
@@ -258,9 +290,7 @@ class InfiniboxVolumeDriver(san.SanISCSIDriver):
group_name = self._make_cg_name(cinder_group)
infinidat_cg = self._system.cons_groups.safe_get(name=group_name)
if infinidat_cg is None:
- msg = _('Consistency group "%s" not found') % group_name
- LOG.error(msg)
- raise exception.InvalidGroup(message=msg)
+ raise exception.GroupNotFound(group_id=group_name)
return infinidat_cg
def _get_or_create_host(self, port):
@@ -336,8 +366,7 @@ class InfiniboxVolumeDriver(san.SanISCSIDriver):
yield str(port.get_wwpn())
def _initialize_connection_fc(self, volume, connector):
- volume_name = self._make_volume_name(volume)
- infinidat_volume = self._get_infinidat_volume_by_name(volume_name)
+ infinidat_volume = self._get_infinidat_volume(volume)
ports = [wwn.WWN(wwpn) for wwpn in connector['wwpns']]
for port in ports:
infinidat_host = self._get_or_create_host(port)
@@ -366,19 +395,19 @@ class InfiniboxVolumeDriver(san.SanISCSIDriver):
raise exception.VolumeDriverException(message=msg)
return netspace
- def _get_iscsi_portal(self, netspace):
- for netpsace_interface in netspace.get_ips():
- if netpsace_interface.enabled:
- port = netspace.get_properties().iscsi_tcp_port
- return "%s:%s" % (netpsace_interface.ip_address, port)
+ def _get_iscsi_portals(self, netspace):
+ port = netspace.get_properties().iscsi_tcp_port
+ portals = ["%s:%s" % (interface.ip_address, port) for interface
+ in netspace.get_ips() if interface.enabled]
+ if portals:
+ return portals
# if we get here it means there are no enabled ports
msg = (_('No available interfaces in iSCSI network space %s') %
netspace.get_name())
raise exception.VolumeDriverException(message=msg)
def _initialize_connection_iscsi(self, volume, connector):
- volume_name = self._make_volume_name(volume)
- infinidat_volume = self._get_infinidat_volume_by_name(volume_name)
+ infinidat_volume = self._get_infinidat_volume(volume)
port = iqn.IQN(connector['initiator'])
infinidat_host = self._get_or_create_host(port)
if self.configuration.use_chap_auth:
@@ -399,9 +428,11 @@ class InfiniboxVolumeDriver(san.SanISCSIDriver):
target_luns = []
for netspace_name in netspace_names:
netspace = self._get_iscsi_network_space(netspace_name)
- target_portals.append(self._get_iscsi_portal(netspace))
- target_iqns.append(netspace.get_properties().iscsi_iqn)
- target_luns.append(lun)
+ netspace_portals = self._get_iscsi_portals(netspace)
+ target_portals.extend(netspace_portals)
+ target_iqns.extend([netspace.get_properties().iscsi_iqn] *
+ len(netspace_portals))
+ target_luns.extend([lun] * len(netspace_portals))
result_data = dict(target_discovered=True,
target_portal=target_portals[0],
@@ -541,14 +572,13 @@ class InfiniboxVolumeDriver(san.SanISCSIDriver):
@infinisdk_to_cinder_exceptions
def delete_volume(self, volume):
"""Delete a volume from the backend."""
- volume_name = self._make_volume_name(volume)
try:
- infinidat_volume = self._get_infinidat_volume_by_name(volume_name)
- except exception.InvalidVolume:
- return # volume not found
+ infinidat_volume = self._get_infinidat_volume(volume)
+ except exception.VolumeNotFound:
+ return
if infinidat_volume.has_children():
# can't delete a volume that has a live snapshot
- raise exception.VolumeIsBusy(volume_name=volume_name)
+ raise exception.VolumeIsBusy(volume_name=volume.name)
infinidat_volume.safe_delete()
@infinisdk_to_cinder_exceptions
@@ -649,11 +679,11 @@ class InfiniboxVolumeDriver(san.SanISCSIDriver):
"""Deletes a snapshot."""
try:
snapshot = self._get_infinidat_snapshot(snapshot)
- except exception.InvalidSnapshot:
- return # snapshot not found
+ except exception.SnapshotNotFound:
+ return
snapshot.safe_delete()
- def _asssert_volume_not_mapped(self, volume):
+ def _assert_volume_not_mapped(self, volume):
# copy is not atomic so we can't clone while the volume is mapped
infinidat_volume = self._get_infinidat_volume(volume)
if len(infinidat_volume.get_logical_units()) == 0:
@@ -679,7 +709,7 @@ class InfiniboxVolumeDriver(san.SanISCSIDriver):
* copy data from source to new volume
* unmap both volumes
"""
- self._asssert_volume_not_mapped(src_vref)
+ self._assert_volume_not_mapped(src_vref)
infinidat_volume = self._create_volume(volume)
try:
src_ctx = self._device_connect_context(src_vref)
@@ -744,8 +774,8 @@ class InfiniboxVolumeDriver(san.SanISCSIDriver):
raise NotImplementedError()
try:
infinidat_cg = self._get_infinidat_cg(group)
- except exception.InvalidGroup:
- pass # group not found
+ except exception.GroupNotFound:
+ pass
else:
infinidat_cg.safe_delete()
for volume in volumes:
@@ -833,3 +863,334 @@ class InfiniboxVolumeDriver(san.SanISCSIDriver):
for snapshot in snapshots:
self.delete_snapshot(snapshot)
return None, None
+
+ def snapshot_revert_use_temp_snapshot(self):
+ """Disable the use of a temporary snapshot on revert."""
+ return False
+
+ @infinisdk_to_cinder_exceptions
+ def revert_to_snapshot(self, context, volume, snapshot):
+ """Revert volume to snapshot.
+
+ Note: the revert process should not change the volume's
+ current size, that means if the driver shrank
+ the volume during the process, it should extend the
+ volume internally.
+ """
+ infinidat_snapshot = self._get_infinidat_snapshot(snapshot)
+ infinidat_volume = self._get_infinidat_volume(snapshot.volume)
+ infinidat_volume.restore(infinidat_snapshot)
+ volume_size = infinidat_volume.get_size()
+ snapshot_size = snapshot.volume.size * capacity.GiB
+ if volume_size < snapshot_size:
+ self.extend_volume(volume, snapshot.volume.size)
+
+ @infinisdk_to_cinder_exceptions
+ def manage_existing(self, volume, existing_ref):
+ """Manage an existing Infinidat volume.
+
+ Checks if the volume is already managed.
+ Renames the Infinidat volume to match the expected name.
+ Updates QoS and metadata.
+
+ :param volume: Cinder volume to manage
+ :param existing_ref: dictionary of the forms:
+ {'source-name': 'Infinidat volume name'} or
+ {'source-id': 'Infinidat volume serial number'}
+ """
+ infinidat_volume = self._get_infinidat_volume_by_ref(existing_ref)
+ infinidat_metadata = infinidat_volume.get_all_metadata()
+ if 'cinder_id' in infinidat_metadata:
+ cinder_id = infinidat_metadata['cinder_id']
+ if volume_utils.check_already_managed_volume(cinder_id):
+ raise exception.ManageExistingAlreadyManaged(
+ volume_ref=cinder_id)
+ infinidat_pool = infinidat_volume.get_pool_name()
+ if infinidat_pool != self.configuration.infinidat_pool_name:
+ message = (_('unexpected pool name %(infinidat_pool)s')
+ % {'infinidat_pool': infinidat_pool})
+ raise exception.InvalidConfigurationValue(message=message)
+ cinder_name = self._make_volume_name(volume)
+ infinidat_volume.update_name(cinder_name)
+ self._set_qos(volume, infinidat_volume)
+ self._set_cinder_object_metadata(infinidat_volume, volume)
+
+ @infinisdk_to_cinder_exceptions
+ def manage_existing_get_size(self, volume, existing_ref):
+ """Return size of an existing Infinidat volume.
+
+ When calculating the size, round up to the next GB.
+
+ :param volume: Cinder volume to manage
+ :param existing_ref: dictionary of the forms:
+ {'source-name': 'Infinidat volume name'} or
+ {'source-id': 'Infinidat volume serial number'}
+ :returns size: Volume size in GiB (integer)
+ """
+ infinidat_volume = self._get_infinidat_volume_by_ref(existing_ref)
+ return int(math.ceil(infinidat_volume.get_size() / capacity.GiB))
+
+ @infinisdk_to_cinder_exceptions
+ def get_manageable_volumes(self, cinder_volumes, marker, limit, offset,
+ sort_keys, sort_dirs):
+ """List volumes on the Infinidat backend available for management.
+
+ Returns a list of dictionaries, each specifying a volume on the
+ Infinidat backend, with the following keys:
+ - reference (dictionary): The reference for a volume, which can be
+ passed to "manage_existing". Each reference contains keys:
+ Infinidat volume name and Infinidat volume serial number.
+ - size (int): The size of the volume according to the Infinidat
+ storage backend, rounded up to the nearest GB.
+ - safe_to_manage (boolean): Whether or not this volume is safe to
+ manage according to the storage backend. For example, is the volume
+ already managed, in use, has snapshots or active mappings.
+ - reason_not_safe (string): If safe_to_manage is False, the reason why.
+ - cinder_id (string): If already managed, provide the Cinder ID.
+ - extra_info (string): Extra information (pool name, volume type,
+ QoS and metadata) to return to the user.
+
+ :param cinder_volumes: A list of volumes in this host that Cinder
+ currently manages, used to determine if
+ a volume is manageable or not.
+ :param marker: The last item of the previous page; we return the
+ next results after this value (after sorting)
+ :param limit: Maximum number of items to return
+ :param offset: Number of items to skip after marker
+ :param sort_keys: List of keys to sort results by (valid keys are
+ 'identifier' and 'size')
+ :param sort_dirs: List of directions to sort by, corresponding to
+ sort_keys (valid directions are 'asc' and 'desc')
+ """
+ manageable_volumes = []
+ cinder_ids = [cinder_volume.id for cinder_volume in cinder_volumes]
+ infinidat_pool = self._get_infinidat_pool()
+ infinidat_volumes = infinidat_pool.get_volumes()
+ for infinidat_volume in infinidat_volumes:
+ if infinidat_volume.is_snapshot():
+ continue
+ safe_to_manage = False
+ reason_not_safe = None
+ volume_id = infinidat_volume.get_id()
+ volume_name = infinidat_volume.get_name()
+ volume_size = infinidat_volume.get_size()
+ volume_type = infinidat_volume.get_type()
+ volume_pool = infinidat_volume.get_pool_name()
+ volume_qos = infinidat_volume.get_qos_policy()
+ volume_meta = infinidat_volume.get_all_metadata()
+ cinder_id = volume_meta.get('cinder_id')
+ volume_luns = infinidat_volume.get_logical_units()
+ if cinder_id and cinder_id in cinder_ids:
+ reason_not_safe = _('volume already managed')
+ elif volume_luns:
+ reason_not_safe = _('volume has mappings')
+ elif infinidat_volume.has_children():
+ reason_not_safe = _('volume has snapshots')
+ else:
+ safe_to_manage = True
+ reference = {
+ 'source-name': volume_name,
+ 'source-id': str(volume_id)
+ }
+ extra_info = {
+ 'pool': volume_pool,
+ 'type': volume_type,
+ 'qos': str(volume_qos),
+ 'meta': str(volume_meta)
+ }
+ manageable_volume = {
+ 'reference': reference,
+ 'size': int(math.ceil(volume_size / capacity.GiB)),
+ 'safe_to_manage': safe_to_manage,
+ 'reason_not_safe': reason_not_safe,
+ 'cinder_id': cinder_id,
+ 'extra_info': extra_info
+ }
+ manageable_volumes.append(manageable_volume)
+ return volume_utils.paginate_entries_list(
+ manageable_volumes, marker, limit,
+ offset, sort_keys, sort_dirs)
+
+ @infinisdk_to_cinder_exceptions
+ def unmanage(self, volume):
+ """Removes the specified volume from Cinder management.
+
+ Does not delete the underlying backend storage object.
+
+ For most drivers, this will not need to do anything. However, some
+ drivers might use this call as an opportunity to clean up any
+ Cinder-specific configuration that they have associated with the
+ backend storage object.
+
+ :param volume: Cinder volume to unmanage
+ """
+ infinidat_volume = self._get_infinidat_volume(volume)
+ infinidat_volume.clear_metadata()
+
+ def _check_already_managed_snapshot(self, snapshot_id):
+ """Check cinder db for already managed snapshot.
+
+        :param snapshot_id: snapshot id parameter
+ :returns: bool -- return True, if db entry with specified
+ snapshot id exists, otherwise return False
+ """
+ try:
+ uuid.UUID(snapshot_id, version=4)
+ except ValueError:
+ return False
+ ctxt = cinder_context.get_admin_context()
+ return objects.Snapshot.exists(ctxt, snapshot_id)
+
+ @infinisdk_to_cinder_exceptions
+ def manage_existing_snapshot(self, snapshot, existing_ref):
+ """Manage an existing Infinidat snapshot.
+
+ Checks if the snapshot is already managed.
+ Renames the Infinidat snapshot to match the expected name.
+ Updates QoS and metadata.
+
+ :param snapshot: Cinder snapshot to manage
+ :param existing_ref: dictionary of the forms:
+ {'source-name': 'Infinidat snapshot name'} or
+ {'source-id': 'Infinidat snapshot serial number'}
+ """
+ infinidat_snapshot = self._get_infinidat_snapshot_by_ref(existing_ref)
+ infinidat_metadata = infinidat_snapshot.get_all_metadata()
+ if 'cinder_id' in infinidat_metadata:
+ cinder_id = infinidat_metadata['cinder_id']
+ if self._check_already_managed_snapshot(cinder_id):
+ raise exception.ManageExistingAlreadyManaged(
+ volume_ref=cinder_id)
+ infinidat_pool = infinidat_snapshot.get_pool_name()
+ if infinidat_pool != self.configuration.infinidat_pool_name:
+ message = (_('unexpected pool name %(infinidat_pool)s')
+ % {'infinidat_pool': infinidat_pool})
+ raise exception.InvalidConfigurationValue(message=message)
+ cinder_name = self._make_snapshot_name(snapshot)
+ infinidat_snapshot.update_name(cinder_name)
+ self._set_qos(snapshot, infinidat_snapshot)
+ self._set_cinder_object_metadata(infinidat_snapshot, snapshot)
+
+ @infinisdk_to_cinder_exceptions
+ def manage_existing_snapshot_get_size(self, snapshot, existing_ref):
+ """Return size of an existing Infinidat snapshot.
+
+ When calculating the size, round up to the next GB.
+
+ :param snapshot: Cinder snapshot to manage
+ :param existing_ref: dictionary of the forms:
+ {'source-name': 'Infinidat snapshot name'} or
+ {'source-id': 'Infinidat snapshot serial number'}
+ :returns size: Snapshot size in GiB (integer)
+ """
+ infinidat_snapshot = self._get_infinidat_snapshot_by_ref(existing_ref)
+ return int(math.ceil(infinidat_snapshot.get_size() / capacity.GiB))
+
+ @infinisdk_to_cinder_exceptions
+ def get_manageable_snapshots(self, cinder_snapshots, marker, limit, offset,
+ sort_keys, sort_dirs):
+ """List snapshots on the Infinidat backend available for management.
+
+ Returns a list of dictionaries, each specifying a snapshot on the
+ Infinidat backend, with the following keys:
+ - reference (dictionary): The reference for a snapshot, which can be
+ passed to "manage_existing_snapshot". Each reference contains keys:
+ Infinidat snapshot name and Infinidat snapshot serial number.
+ - size (int): The size of the snapshot according to the Infinidat
+ storage backend, rounded up to the nearest GB.
+ - safe_to_manage (boolean): Whether or not this snapshot is safe to
+ manage according to the storage backend. For example, is the snapshot
+ already managed, has clones or active mappings.
+ - reason_not_safe (string): If safe_to_manage is False, the reason why.
+ - cinder_id (string): If already managed, provide the Cinder ID.
+ - extra_info (string): Extra information (pool name, snapshot type,
+ QoS and metadata) to return to the user.
+ - source_reference (string): Similar to "reference", but for the
+ snapshot's source volume. The source reference contains two keys:
+ Infinidat volume name and Infinidat volume serial number.
+
+ :param cinder_snapshots: A list of snapshots in this host that Cinder
+ currently manages, used to determine if
+ a snapshot is manageable or not.
+ :param marker: The last item of the previous page; we return the
+ next results after this value (after sorting)
+ :param limit: Maximum number of items to return
+ :param offset: Number of items to skip after marker
+ :param sort_keys: List of keys to sort results by (valid keys are
+ 'identifier' and 'size')
+ :param sort_dirs: List of directions to sort by, corresponding to
+ sort_keys (valid directions are 'asc' and 'desc')
+ """
+ manageable_snapshots = []
+ cinder_ids = [cinder_snapshot.id for cinder_snapshot
+ in cinder_snapshots]
+ infinidat_pool = self._get_infinidat_pool()
+ infinidat_snapshots = infinidat_pool.get_volumes()
+ for infinidat_snapshot in infinidat_snapshots:
+ if not infinidat_snapshot.is_snapshot():
+ continue
+ safe_to_manage = False
+ reason_not_safe = None
+ parent = infinidat_snapshot.get_parent()
+ parent_id = parent.get_id()
+ parent_name = parent.get_name()
+ snapshot_id = infinidat_snapshot.get_id()
+ snapshot_name = infinidat_snapshot.get_name()
+ snapshot_size = infinidat_snapshot.get_size()
+ snapshot_type = infinidat_snapshot.get_type()
+ snapshot_pool = infinidat_snapshot.get_pool_name()
+ snapshot_qos = infinidat_snapshot.get_qos_policy()
+ snapshot_meta = infinidat_snapshot.get_all_metadata()
+ cinder_id = snapshot_meta.get('cinder_id')
+ snapshot_luns = infinidat_snapshot.get_logical_units()
+ if cinder_id and cinder_id in cinder_ids:
+ reason_not_safe = _('snapshot already managed')
+ elif snapshot_luns:
+ reason_not_safe = _('snapshot has mappings')
+ elif infinidat_snapshot.has_children():
+ reason_not_safe = _('snapshot has clones')
+ else:
+ safe_to_manage = True
+ reference = {
+ 'source-name': snapshot_name,
+ 'source-id': str(snapshot_id)
+ }
+ source_reference = {
+ 'source-name': parent_name,
+ 'source-id': str(parent_id)
+ }
+ extra_info = {
+ 'pool': snapshot_pool,
+ 'type': snapshot_type,
+ 'qos': str(snapshot_qos),
+ 'meta': str(snapshot_meta)
+ }
+ manageable_snapshot = {
+ 'reference': reference,
+ 'size': int(math.ceil(snapshot_size / capacity.GiB)),
+ 'safe_to_manage': safe_to_manage,
+ 'reason_not_safe': reason_not_safe,
+ 'cinder_id': cinder_id,
+ 'extra_info': extra_info,
+ 'source_reference': source_reference
+ }
+ manageable_snapshots.append(manageable_snapshot)
+ return volume_utils.paginate_entries_list(
+ manageable_snapshots, marker, limit,
+ offset, sort_keys, sort_dirs)
+
+ @infinisdk_to_cinder_exceptions
+ def unmanage_snapshot(self, snapshot):
+ """Removes the specified snapshot from Cinder management.
+
+ Does not delete the underlying backend storage object.
+
+ For most drivers, this will not need to do anything. However, some
+ drivers might use this call as an opportunity to clean up any
+ Cinder-specific configuration that they have associated with the
+ backend storage object.
+
+ :param snapshot: Cinder volume snapshot to unmanage
+ """
+ infinidat_snapshot = self._get_infinidat_snapshot(snapshot)
+ infinidat_snapshot.clear_metadata()
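
manage_existing_get_size(), manage_existing_snapshot_get_size() and the manageable listings above all round backend sizes up to whole GiB with math.ceil(). A tiny sketch of that conversion, using 2**30 as a stand-in for the capacity.GiB constant the driver imports:

    import math

    GiB = 2 ** 30  # stands in for capacity.GiB used by the driver

    def size_in_gib(size_bytes):
        """Round a backend size in bytes up to whole GiB."""
        return int(math.ceil(size_bytes / GiB))

    assert size_in_gib(1 * GiB) == 1
    assert size_in_gib(1 * GiB + 1) == 2   # any extra byte rounds up
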
diff --git a/cinder/volume/drivers/lightos.py b/cinder/volume/drivers/lightos.py
index 8f4dd0735..3fd7d5bfc 100644
--- a/cinder/volume/drivers/lightos.py
+++ b/cinder/volume/drivers/lightos.py
@@ -19,6 +19,7 @@ import json
import random
import time
from typing import Dict
+from urllib.parse import urlparse
from oslo_config import cfg
@@ -293,6 +294,11 @@ class LightOSConnection(object):
raise exception.VolumeDriverException(
message="Could not get a response from any API server")
+ def _format_endpoint(self, ip, port):
+ ip_requires_bracketing = ':' in ip or '%' in ip
+ template = "[%s]:%s" if ip_requires_bracketing else "%s:%s"
+ return template % (ip, port)
+
def __send_cmd(self, cmd, host, port, timeout, **kwargs):
"""Send command to LightOS REST API server.
@@ -306,7 +312,7 @@ class LightOSConnection(object):
{'cmd': cmd, 'method': method, 'url': url, 'body': body,
'ssl_verify': ssl_verify})
- api_url = "https://%s:%s%s" % (host, port, url)
+ api_url = "https://%s%s" % (self._format_endpoint(host, port), url)
try:
with requests.Session() as session:
@@ -537,8 +543,9 @@ class LightOSVolumeDriver(driver.VolumeDriver):
must be supplied'
# while creating lightos volume we can stop on any terminal status
# possible states: Unknown, Creating, Available, Deleting, Deleted,
- # Failed, Updating
- states = ('Available', 'Deleting', 'Deleted', 'Failed', 'UNKNOWN')
+ # Failed, Updating, Migrating, Rollback
+ states = ('Available', 'Deleting', 'Deleted', 'Failed', 'UNKNOWN',
+ 'Migrating', 'Rollback')
stop = time.time() + timeout
while time.time() <= stop:
@@ -664,7 +671,8 @@ class LightOSVolumeDriver(driver.VolumeDriver):
project_name,
timeout=self.logical_op_timeout,
vol_uuid=lightos_uuid)
- if vol_state == 'Available':
+ allowed_states = ['Available', 'Migrating']
+ if vol_state in allowed_states:
LOG.debug(
"LIGHTOS created volume name %s lightos_uuid \
%s project %s",
@@ -1011,11 +1019,11 @@ class LightOSVolumeDriver(driver.VolumeDriver):
lightos_targets = {}
for target in self.cluster.targets.values():
properties = dict()
- data_address, _ = target['nvmeEndpoint'].split(':')
- properties['target_portal'] = data_address
+ ep = urlparse('//' + target['nvmeEndpoint'])
+ properties['target_portal'] = ep.hostname
properties['target_port'] = 8009 # spec specified discovery port
properties['transport_type'] = 'tcp'
- lightos_targets[data_address] = properties
+ lightos_targets[ep.hostname] = properties
server_properties = {}
server_properties['lightos_nodes'] = lightos_targets
@@ -1226,7 +1234,8 @@ class LightOSVolumeDriver(driver.VolumeDriver):
# for the volume to stabilize
vol_state = self._wait_for_volume_available(
project_name, timeout=end - time.time(), vol_name=lightos_volname)
- if vol_state != 'Available':
+ allowed_states = ['Available', 'Migrating']
+ if vol_state not in allowed_states:
LOG.warning(
'Timed out waiting for volume %s project %s to stabilize, \
last state %s',
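
Parsing the nvmeEndpoint with urlparse() instead of str.split(':') keeps IPv6 data addresses intact, and _format_endpoint() brackets such addresses when building the API URL. A quick stand-alone check of both behaviours (addresses are made-up examples):

    from urllib.parse import urlparse

    def format_endpoint(ip, port):
        # Mirror of _format_endpoint(): bracket IPv6 (and zone-id) addresses.
        needs_brackets = ':' in ip or '%' in ip
        template = '[%s]:%s' if needs_brackets else '%s:%s'
        return template % (ip, port)

    assert format_endpoint('10.0.0.5', 443) == '10.0.0.5:443'
    assert format_endpoint('fd00::5', 443) == '[fd00::5]:443'

    # urlparse strips the brackets and keeps the full IPv6 host.
    ep = urlparse('//' + '[fd00::5]:4420')
    assert ep.hostname == 'fd00::5' and ep.port == 4420
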
diff --git a/cinder/volume/drivers/netapp/dataontap/block_base.py b/cinder/volume/drivers/netapp/dataontap/block_base.py
index ece615f8e..f0a8649ed 100644
--- a/cinder/volume/drivers/netapp/dataontap/block_base.py
+++ b/cinder/volume/drivers/netapp/dataontap/block_base.py
@@ -410,12 +410,11 @@ class NetAppBlockStorageLibrary(object):
def _extract_lun_info(self, lun):
"""Extracts the LUNs from API and populates the LUN table."""
- meta_dict = self._create_lun_meta(lun)
- path = lun.get_child_content('path')
+ path = lun['Path']
(_rest, _splitter, name) = path.rpartition('/')
- handle = self._create_lun_handle(meta_dict)
- size = lun.get_child_content('size')
- return NetAppLun(handle, name, size, meta_dict)
+ handle = self._create_lun_handle(lun)
+ size = lun['Size']
+ return NetAppLun(handle, name, size, lun)
def _extract_and_populate_luns(self, api_luns):
"""Extracts the LUNs from API and populates the LUN table."""
@@ -547,9 +546,6 @@ class NetAppBlockStorageLibrary(object):
LOG.error("Error getting LUN attribute. Exception: %s", e)
return None
- def _create_lun_meta(self, lun):
- raise NotImplementedError()
-
def _get_fc_target_wwpns(self, include_partner=True):
raise NotImplementedError()
@@ -725,8 +721,8 @@ class NetAppBlockStorageLibrary(object):
msg = _('Failure getting LUN info for %s.')
raise exception.VolumeBackendAPIException(data=msg % seg[-1])
lun_info = lun_infos[-1]
- bs = int(lun_info.get_child_content('block-size'))
- ls = int(lun_info.get_child_content('size'))
+ bs = int(lun_info['BlockSize'])
+ ls = int(lun_info['Size'])
block_count = ls / bs
return block_count
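
With the LUN attributes now carried as a plain dict, _get_lun_block_count() above reduces to Size divided by BlockSize. A worked example with made-up values:

    # Hypothetical LUN attributes as returned by the reworked client layer.
    lun_info = {'Size': '1073741824', 'BlockSize': '512'}  # 1 GiB, 512 B blocks
    bs = int(lun_info['BlockSize'])
    ls = int(lun_info['Size'])
    block_count = ls / bs
    assert block_count == 2097152.0
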
diff --git a/cinder/volume/drivers/netapp/dataontap/block_cmode.py b/cinder/volume/drivers/netapp/dataontap/block_cmode.py
index 59d5b04fe..f12c28826 100644
--- a/cinder/volume/drivers/netapp/dataontap/block_cmode.py
+++ b/cinder/volume/drivers/netapp/dataontap/block_cmode.py
@@ -236,27 +236,14 @@ class NetAppBlockStorageCmodeLibrary(block_base.NetAppBlockStorageLibrary,
if len(lun) == 0:
msg = _("No cloned LUN named %s found on the filer")
raise exception.VolumeBackendAPIException(data=msg % new_name)
- clone_meta = self._create_lun_meta(lun[0])
+
+ clone_lun = lun[0]
self._add_lun_to_table(
- block_base.NetAppLun('%s:%s' % (clone_meta['Vserver'],
- clone_meta['Path']),
+ block_base.NetAppLun('%s:%s' % (clone_lun['Vserver'],
+ clone_lun['Path']),
new_name,
- lun[0].get_child_content('size'),
- clone_meta))
-
- def _create_lun_meta(self, lun):
- """Creates LUN metadata dictionary."""
- self.zapi_client.check_is_naelement(lun)
- meta_dict = {}
- meta_dict['Vserver'] = lun.get_child_content('vserver')
- meta_dict['Volume'] = lun.get_child_content('volume')
- meta_dict['Qtree'] = lun.get_child_content('qtree')
- meta_dict['Path'] = lun.get_child_content('path')
- meta_dict['OsType'] = lun.get_child_content('multiprotocol-type')
- meta_dict['SpaceReserved'] = \
- lun.get_child_content('is-space-reservation-enabled')
- meta_dict['UUID'] = lun.get_child_content('uuid')
- return meta_dict
+ clone_lun['Size'],
+ clone_lun))
def _get_fc_target_wwpns(self, include_partner=True):
return self.zapi_client.get_fc_target_wwpns()
@@ -629,25 +616,23 @@ class NetAppBlockStorageCmodeLibrary(block_base.NetAppBlockStorageLibrary,
def _move_lun(self, volume, src_ontap_volume, dest_ontap_volume,
dest_lun_name=None):
"""Moves LUN from an ONTAP volume to another."""
- job_uuid = self.zapi_client.start_lun_move(
+ operation_info = self.zapi_client.start_lun_move(
volume.name, dest_ontap_volume, src_ontap_volume=src_ontap_volume,
dest_lun_name=dest_lun_name)
- LOG.debug('Start moving LUN %s from %s to %s. '
- 'Job UUID is %s.', volume.name, src_ontap_volume,
- dest_ontap_volume, job_uuid)
+ LOG.debug('Start moving LUN %s from %s to %s. ',
+ volume.name, src_ontap_volume,
+ dest_ontap_volume)
def _wait_lun_move_complete():
- move_status = self.zapi_client.get_lun_move_status(job_uuid)
- LOG.debug('Waiting for LUN move job %s to complete. '
- 'Current status is: %s.', job_uuid,
- move_status['job-status'])
+ move_status = self.zapi_client.get_lun_move_status(operation_info)
+ LOG.debug('Waiting for LUN move to complete. '
+ 'Current status is: %s.', move_status['job-status'])
if not move_status:
- status_error_msg = (_("Error moving LUN %s. The "
- "corresponding Job UUID % doesn't "
- "exist."))
+                    status_error_msg = (_("Error moving LUN %s. The movement"
+                                          " status could not be retrieved."))
raise na_utils.NetAppDriverException(
- status_error_msg % (volume.id, job_uuid))
+ status_error_msg % (volume.id))
elif move_status['job-status'] == 'destroyed':
status_error_msg = (_('Error moving LUN %s. %s.'))
raise na_utils.NetAppDriverException(
@@ -689,29 +674,27 @@ class NetAppBlockStorageCmodeLibrary(block_base.NetAppBlockStorageLibrary,
dest_ontap_volume, dest_vserver, dest_lun_name=None,
dest_backend_name=None, cancel_on_error=False):
"""Copies LUN from an ONTAP volume to another."""
- job_uuid = self.zapi_client.start_lun_copy(
+ operation_info = self.zapi_client.start_lun_copy(
volume.name, dest_ontap_volume, dest_vserver,
src_ontap_volume=src_ontap_volume, src_vserver=src_vserver,
dest_lun_name=dest_lun_name)
LOG.debug('Start copying LUN %(vol)s from '
'%(src_vserver)s:%(src_ontap_vol)s to '
- '%(dest_vserver)s:%(dest_ontap_vol)s. Job UUID is %(job)s.',
+ '%(dest_vserver)s:%(dest_ontap_vol)s.',
{'vol': volume.name, 'src_vserver': src_vserver,
'src_ontap_vol': src_ontap_volume,
'dest_vserver': dest_vserver,
- 'dest_ontap_vol': dest_ontap_volume,
- 'job': job_uuid})
+ 'dest_ontap_vol': dest_ontap_volume})
def _wait_lun_copy_complete():
- copy_status = self.zapi_client.get_lun_copy_status(job_uuid)
- LOG.debug('Waiting for LUN copy job %s to complete. Current '
- 'status is: %s.', job_uuid, copy_status['job-status'])
+ copy_status = self.zapi_client.get_lun_copy_status(operation_info)
+ LOG.debug('Waiting for LUN copy job to complete. Current '
+ 'status is: %s.', copy_status['job-status'])
if not copy_status:
- status_error_msg = (_("Error copying LUN %s. The "
- "corresponding Job UUID % doesn't "
- "exist."))
+                    status_error_msg = (_("Error copying LUN %s. The copy"
+                                          " status could not be retrieved."))
raise na_utils.NetAppDriverException(
- status_error_msg % (volume.id, job_uuid))
+ status_error_msg % (volume.id))
elif copy_status['job-status'] == 'destroyed':
status_error_msg = (_('Error copying LUN %s. %s.'))
raise na_utils.NetAppDriverException(
@@ -730,7 +713,8 @@ class NetAppBlockStorageCmodeLibrary(block_base.NetAppBlockStorageLibrary,
except Exception as e:
with excutils.save_and_reraise_exception() as ctxt:
if cancel_on_error:
- self._cancel_lun_copy(job_uuid, volume, dest_ontap_volume,
+ self._cancel_lun_copy(operation_info, volume,
+ dest_ontap_volume,
dest_backend_name=dest_backend_name)
if isinstance(e, loopingcall.LoopingCallTimeOut):
ctxt.reraise = False
@@ -879,8 +863,6 @@ class NetAppBlockStorageCmodeLibrary(block_base.NetAppBlockStorageLibrary,
LOG.info("Cloning LUN %s from snapshot %s in volume %s.", lun_name,
snapshot_name, flexvol_name)
- metadata = snapshot_lun.metadata
-
block_count = self._get_lun_block_count(snapshot_path)
if block_count == 0:
msg = _("%s cannot be reverted using clone operation"
@@ -889,12 +871,9 @@ class NetAppBlockStorageCmodeLibrary(block_base.NetAppBlockStorageLibrary,
new_snap_name = "new-%s" % snapshot_name
- self.zapi_client.create_lun(
- flexvol_name, new_snap_name,
- six.text_type(snapshot_lun.size), metadata)
try:
self._clone_lun(snapshot_name, new_snap_name,
- block_count=block_count)
+ space_reserved='false', is_snapshot=True)
return new_snap_name
except Exception:
with excutils.save_and_reraise_exception():
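
Several hunks in this change wrap cleanup in oslo.utils' save_and_reraise_exception() so the original error still propagates after cancelling the LUN copy/move. A hedged sketch of that pattern, using TimeoutError as a stand-in for loopingcall.LoopingCallTimeOut:

    from oslo_utils import excutils

    def copy_with_cleanup(copy_fn, cancel_fn, cancel_on_error=True):
        """Sketch of the cleanup pattern used around the LUN copy/move calls."""
        try:
            copy_fn()
        except Exception as e:
            with excutils.save_and_reraise_exception() as ctxt:
                if cancel_on_error:
                    cancel_fn()           # best-effort cleanup; the original
                                          # exception is re-raised afterwards
                if isinstance(e, TimeoutError):
                    ctxt.reraise = False  # swallow only this specific case
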
diff --git a/cinder/volume/drivers/netapp/dataontap/client/api.py b/cinder/volume/drivers/netapp/dataontap/client/api.py
index 091ac03e2..bfe60449c 100644
--- a/cinder/volume/drivers/netapp/dataontap/client/api.py
+++ b/cinder/volume/drivers/netapp/dataontap/client/api.py
@@ -25,7 +25,12 @@ from eventlet import greenthread
from eventlet import semaphore
from lxml import etree
from oslo_log import log as logging
+from oslo_serialization import jsonutils
from oslo_utils import netutils
+import requests
+from requests.adapters import HTTPAdapter
+from requests import auth
+from requests.packages.urllib3.util.retry import Retry
import six
from six.moves import urllib
@@ -37,6 +42,7 @@ from cinder.volume import volume_utils
LOG = logging.getLogger(__name__)
+# ZAPI API error codes.
EAPIERROR = '13001'
EAPIPRIVILEGE = '13003'
EAPINOTFOUND = '13005'
@@ -549,6 +555,12 @@ class NaApiError(Exception):
return 'NetApp API failed. Reason - %s:%s' % (self.code, self.message)
+class NaRetryableError(NaApiError):
+ def __str__(self, *args, **kwargs):
+ return 'NetApp API failed. Try again. Reason - %s:%s' % (
+ self.code, self.message)
+
+
class SSHUtil(object):
"""Encapsulates connection logic and command execution for SSH client."""
@@ -628,3 +640,234 @@ class SSHUtil(object):
if wait_time > timeout:
LOG.debug("Timeout exceeded while waiting for exit status.")
break
+
+
+# REST API error codes.
+REST_UNAUTHORIZED = '6'
+REST_API_NOT_FOUND = '3'
+REST_UPDATE_SNAPMIRROR_FAILED = '13303844'
+REST_ERELATION_EXISTS = '6619637'
+REST_SNAPMIRROR_IN_PROGRESS = '13303810'
+REST_NO_SUCH_LUN_MAP = '5374922'
+REST_NO_SUCH_FILE = '6684674'
+
+
+class RestNaServer(object):
+
+ TRANSPORT_TYPE_HTTP = 'http'
+ TRANSPORT_TYPE_HTTPS = 'https'
+ HTTP_PORT = '80'
+ HTTPS_PORT = '443'
+
+ TRANSPORT_PORT = {
+ TRANSPORT_TYPE_HTTP: HTTP_PORT,
+ TRANSPORT_TYPE_HTTPS: HTTPS_PORT
+ }
+
+ def __init__(self, host, transport_type=TRANSPORT_TYPE_HTTP,
+ ssl_cert_path=None, username=None, password=None, port=None,
+ api_trace_pattern=None):
+ self._host = host
+ self.set_transport_type(transport_type)
+ self.set_port(port=port)
+ self._username = username
+ self._password = password
+
+ if api_trace_pattern is not None:
+ na_utils.setup_api_trace_pattern(api_trace_pattern)
+
+ if ssl_cert_path is not None:
+ self._ssl_verify = ssl_cert_path
+ else:
+            # Note(felipe_rodrigues): it will verify with the Mozilla CA
+            # roots provided by the certifi package.
+ self._ssl_verify = True
+
+ self._api_version = None
+ self._api_major_version = None
+ self._api_minor_version = None
+ self._ontap_version = None
+ self._timeout = None
+
+ LOG.debug('Using REST with NetApp controller: %s', self._host)
+
+ def set_transport_type(self, transport_type):
+ """Set the transport type protocol for API.
+
+ Supports http and https transport types.
+ """
+ if transport_type is None or transport_type.lower() not in (
+ RestNaServer.TRANSPORT_TYPE_HTTP,
+ RestNaServer.TRANSPORT_TYPE_HTTPS):
+ raise ValueError('Unsupported transport type')
+ self._protocol = transport_type.lower()
+
+ def get_transport_type(self):
+ """Get the transport type protocol."""
+ return self._protocol
+
+ def set_api_version(self, major, minor):
+ """Set the API version."""
+ try:
+ self._api_major_version = int(major)
+ self._api_minor_version = int(minor)
+ self._api_version = str(major) + "." + str(minor)
+ except ValueError:
+ raise ValueError('Major and minor versions must be integers')
+
+ def get_api_version(self):
+ """Gets the API version tuple."""
+ if not self._api_version:
+ return None
+ return (self._api_major_version, self._api_minor_version)
+
+ def set_ontap_version(self, ontap_version):
+ """Set the ONTAP version."""
+ self._ontap_version = ontap_version
+
+ def get_ontap_version(self):
+ """Gets the ONTAP version."""
+ return self._ontap_version
+
+ def set_port(self, port=None):
+        """Set the ONTAP port; if not provided, use the transport default."""
+ if port is None and self._protocol in RestNaServer.TRANSPORT_PORT:
+ self._port = RestNaServer.TRANSPORT_PORT[self._protocol]
+ else:
+ try:
+ int(port)
+ except ValueError:
+ raise ValueError('Port must be integer')
+ self._port = str(port)
+
+ def get_port(self):
+ """Get the server communication port."""
+ return self._port
+
+ def set_timeout(self, seconds):
+ """Sets the timeout in seconds."""
+ try:
+ self._timeout = int(seconds)
+ except ValueError:
+ raise ValueError('timeout in seconds must be integer')
+
+ def get_timeout(self):
+ """Gets the timeout in seconds if set."""
+ return self._timeout
+
+ def set_vserver(self, vserver):
+ """Set the vserver to use if tunneling gets enabled."""
+ self._vserver = vserver
+
+ def get_vserver(self):
+ """Get the vserver to use in tunneling."""
+ return self._vserver
+
+ def __str__(self):
+ """Gets a representation of the client."""
+ return "server: %s" % (self._host)
+
+ def _get_request_method(self, method, session):
+ """Returns the request method to be used in the REST call."""
+
+ request_methods = {
+ 'post': session.post,
+ 'get': session.get,
+ 'put': session.put,
+ 'delete': session.delete,
+ 'patch': session.patch,
+ }
+ return request_methods[method]
+
+ def _add_query_params_to_url(self, url, query):
+ """Populates the URL with specified filters."""
+ filters = '&'.join([f"{k}={v}" for k, v in query.items()])
+ url += "?" + filters
+ return url
+
+ def _get_base_url(self):
+ """Get the base URL for REST requests."""
+ host = self._host
+ if ':' in host:
+ host = '[%s]' % host
+ return '%s://%s:%s/api/' % (self._protocol, host, self._port)
+
+ def _build_session(self, headers):
+ """Builds a session in the client."""
+ self._session = requests.Session()
+
+        # NOTE(felipe_rodrigues): make requests resilient to temporary
+        # network failures (such as name resolution failures) by retrying
+        # up to 5 times.
+ max_retries = Retry(total=5, connect=5, read=2, backoff_factor=1)
+ adapter = HTTPAdapter(max_retries=max_retries)
+ self._session.mount('%s://' % self._protocol, adapter)
+
+ self._session.auth = self._create_basic_auth_handler()
+ self._session.verify = self._ssl_verify
+ self._session.headers = headers
+
+ def _build_headers(self, enable_tunneling):
+ """Build and return headers for a REST request."""
+ headers = {
+ "Accept": "application/json",
+ "Content-Type": "application/json"
+ }
+ if enable_tunneling:
+ headers["X-Dot-SVM-Name"] = self.get_vserver()
+
+ return headers
+
+ def _create_basic_auth_handler(self):
+ """Creates and returns a basic HTTP auth handler."""
+ return auth.HTTPBasicAuth(self._username, self._password)
+
+ @volume_utils.trace_api(
+ filter_function=na_utils.trace_filter_func_rest_api)
+ def send_http_request(self, method, url, body, headers):
+ """Invoke the API on the server.
+
+        The passed and returned parameters will be logged if the trace
+        feature is on. They are important for debugging purposes.
+ """
+ data = jsonutils.dumps(body) if body else {}
+
+ self._build_session(headers)
+ request_method = self._get_request_method(method, self._session)
+
+ try:
+ if self._timeout is not None:
+ response = request_method(
+ url, data=data, timeout=self._timeout)
+ else:
+ response = request_method(url, data=data)
+ except requests.HTTPError as e:
+ raise NaApiError(e.errno, e.strerror)
+ except Exception as e:
+ raise NaApiError(message=e)
+
+ code = response.status_code
+ body = jsonutils.loads(response.content) if response.content else {}
+ return code, body
+
+ def invoke_successfully(self, action_url, method, body=None, query=None,
+ enable_tunneling=False):
+ """Invokes REST API and checks execution status as success."""
+ headers = self._build_headers(enable_tunneling)
+ if query:
+ action_url = self._add_query_params_to_url(action_url, query)
+ url = self._get_base_url() + action_url
+ code, response = self.send_http_request(method, url, body, headers)
+
+ if not response.get('error'):
+ return code, response
+
+ result_error = response.get('error')
+ code = result_error.get('code', 'ESTATUSFAILED')
+ # TODO: add the correct code number for REST not licensed clone error.
+ if code == ESIS_CLONE_NOT_LICENSED:
+ msg = 'Clone operation failed: FlexClone not licensed.'
+ else:
+ msg = (result_error.get('message')
+ or 'Execution status is failed due to unknown reason')
+ raise NaApiError(code, msg)
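
RestNaServer._build_session() above creates a requests session per call with urllib3 retries and HTTP basic auth. A stand-alone sketch of the same construction (the controller address and credentials are placeholders):

    import requests
    from requests.adapters import HTTPAdapter
    from requests.auth import HTTPBasicAuth
    from urllib3.util.retry import Retry

    def build_session(username, password, verify=True):
        """Session configured like RestNaServer._build_session() above."""
        session = requests.Session()
        retries = Retry(total=5, connect=5, read=2, backoff_factor=1)
        session.mount('https://', HTTPAdapter(max_retries=retries))
        session.auth = HTTPBasicAuth(username, password)
        session.verify = verify
        session.headers = {'Accept': 'application/json',
                           'Content-Type': 'application/json'}
        return session

    # Example (hypothetical controller address):
    # session = build_session('admin', 'secret')
    # resp = session.get('https://192.0.2.10:443/api/storage/volumes',
    #                    timeout=60)
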
diff --git a/cinder/volume/drivers/netapp/dataontap/client/client_cmode.py b/cinder/volume/drivers/netapp/dataontap/client/client_cmode.py
index d29039fe4..32b01b178 100644
--- a/cinder/volume/drivers/netapp/dataontap/client/client_cmode.py
+++ b/cinder/volume/drivers/netapp/dataontap/client/client_cmode.py
@@ -37,6 +37,28 @@ DEFAULT_MAX_PAGE_LENGTH = 50
ONTAP_SELECT_MODEL = 'FDvM300'
ONTAP_C190 = 'C190'
+# NOTE(cknight): The keys in this map are tuples that contain arguments needed
+# for efficient use of the system-user-capability-get-iter cDOT API. The
+# values are SSC extra specs associated with the APIs listed in the keys.
+SSC_API_MAP = {
+ ('storage.aggregate', 'show', 'aggr-options-list-info'): [
+ 'netapp_raid_type',
+ ],
+ ('storage.disk', 'show', 'storage-disk-get-iter'): [
+ 'netapp_disk_type',
+ ],
+ ('snapmirror', 'show', 'snapmirror-get-iter'): [
+ 'netapp_mirrored',
+ ],
+ ('volume.efficiency', 'show', 'sis-get-iter'): [
+ 'netapp_dedup',
+ 'netapp_compression',
+ ],
+ ('volume', '*show', 'volume-get-iter'): [
+ 'netapp_flexvol_encryption',
+ ],
+}
+
@six.add_metaclass(volume_utils.TraceWrapperMetaclass)
class Client(client_base.Client):
@@ -182,6 +204,32 @@ class Client(client_base.Client):
result.get_child_by_name('next-tag').set_content('')
return result
+ def check_api_permissions(self):
+ """Check which APIs that support SSC functionality are available."""
+
+ inaccessible_apis = []
+ invalid_extra_specs = []
+
+ for api_tuple, extra_specs in SSC_API_MAP.items():
+ object_name, operation_name, api = api_tuple
+ if not self.check_cluster_api(object_name,
+ operation_name,
+ api):
+ inaccessible_apis.append(api)
+ invalid_extra_specs.extend(extra_specs)
+
+ if inaccessible_apis:
+ if 'volume-get-iter' in inaccessible_apis:
+ msg = _('User not permitted to query Data ONTAP volumes.')
+ raise exception.VolumeBackendAPIException(data=msg)
+ else:
+ LOG.warning('The configured user account does not have '
+ 'sufficient privileges to use all needed '
+ 'APIs. The following extra specs will fail '
+ 'or be ignored: %s.', invalid_extra_specs)
+
+ return invalid_extra_specs
+
def _get_cluster_nodes_info(self):
"""Return a list of models of the nodes in the cluster"""
api_args = {
@@ -481,7 +529,25 @@ class Client(client_base.Client):
tag = result.get_child_content('next-tag')
if tag is None:
break
- return luns
+
+ lun_list = [self._create_lun_meta(lun) for lun in luns]
+ return lun_list
+
+ def _create_lun_meta(self, lun):
+ """Creates LUN metadata dictionary."""
+ self.check_is_naelement(lun)
+ meta_dict = {}
+ meta_dict['Vserver'] = lun.get_child_content('vserver')
+ meta_dict['Volume'] = lun.get_child_content('volume')
+ meta_dict['Size'] = lun.get_child_content('size')
+ meta_dict['Qtree'] = lun.get_child_content('qtree')
+ meta_dict['Path'] = lun.get_child_content('path')
+ meta_dict['OsType'] = lun.get_child_content('multiprotocol-type')
+ meta_dict['SpaceReserved'] = \
+ lun.get_child_content('is-space-reservation-enabled')
+ meta_dict['UUID'] = lun.get_child_content('uuid')
+ meta_dict['BlockSize'] = lun.get_child_content('block-size')
+ return meta_dict
def get_lun_map(self, path):
"""Gets the LUN map by LUN path."""
@@ -853,7 +919,10 @@ class Client(client_base.Client):
attr_list = luns.get_child_by_name('attributes-list')
if not attr_list:
return []
- return attr_list.get_children()
+
+ lun_list = [self._create_lun_meta(lun)
+ for lun in attr_list.get_children()]
+ return lun_list
def file_assign_qos(self, flex_vol, qos_policy_group_name,
qos_policy_group_is_adaptive, file_path):
@@ -1061,7 +1130,8 @@ class Client(client_base.Client):
num_records = result.get_child_content('num-records')
if num_records and int(num_records) >= 1:
attr_list = result.get_child_by_name('attributes-list')
- return attr_list.get_children()
+ return [{'vserver': attr.get_child_content('vserver')}
+ for attr in attr_list.get_children()]
raise exception.NotFound(
_('No interface found on cluster for ip %s') % ip)
@@ -1671,7 +1741,8 @@ class Client(client_base.Client):
def create_volume_async(self, name, aggregate_list, size_gb,
space_guarantee_type=None, snapshot_policy=None,
- language=None, snapshot_reserve=None,
+ language=None, dedupe_enabled=False,
+ compression_enabled=False, snapshot_reserve=None,
volume_type='rw'):
"""Creates a FlexGroup volume asynchronously."""
diff --git a/cinder/volume/drivers/netapp/dataontap/client/client_cmode_rest.py b/cinder/volume/drivers/netapp/dataontap/client/client_cmode_rest.py
new file mode 100644
index 000000000..4a3154d6c
--- /dev/null
+++ b/cinder/volume/drivers/netapp/dataontap/client/client_cmode_rest.py
@@ -0,0 +1,2521 @@
+# Copyright (c) 2022 NetApp, Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+from datetime import datetime
+from datetime import timedelta
+import math
+from time import time
+
+from oslo_log import log as logging
+from oslo_utils import excutils
+from oslo_utils import units
+import six
+
+from cinder import exception
+from cinder.i18n import _
+from cinder import utils
+from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
+from cinder.volume.drivers.netapp.dataontap.client import client_cmode
+from cinder.volume.drivers.netapp import utils as na_utils
+from cinder.volume import volume_utils
+
+LOG = logging.getLogger(__name__)
+DEFAULT_MAX_PAGE_LENGTH = 10000
+ONTAP_SELECT_MODEL = 'FDvM300'
+ONTAP_C190 = 'C190'
+HTTP_ACCEPTED = 202
+DELETED_PREFIX = 'deleted_cinder_'
+DEFAULT_TIMEOUT = 15
+REST_SYNC_TIMEOUT = 15
+
+# Keys in this map are REST API endpoints that the user must have permission
+# to access in order to enable the extra specs reported to Cinder's scheduler.
+# NOTE(sfernand): ONTAP does not retrieve volume efficiency information
+# properly when using the pre-created "vsadmin" role (SVM scoped), causing
+# dedup and compression extra specs to be reported as disabled despite its
+# current configuration.
+SSC_API_MAP = {
+ '/storage/aggregates': [
+ 'netapp_raid_type',
+ ],
+ '/storage/disks': [
+ 'netapp_disk_type',
+ ],
+ '/snapmirror/relationships': [
+ 'netapp_mirrored',
+ ],
+ '/storage/volumes': [
+        'netapp_flexvol_encryption',
+ 'netapp_dedup',
+ 'netapp_compression',
+ ],
+}
+
+
+@six.add_metaclass(volume_utils.TraceWrapperMetaclass)
+class RestClient(object):
+
+ def __init__(self, **kwargs):
+
+ host = kwargs['hostname']
+ username = kwargs['username']
+ password = kwargs['password']
+ api_trace_pattern = kwargs['api_trace_pattern']
+ self.connection = netapp_api.RestNaServer(
+ host=host,
+ transport_type=kwargs['transport_type'],
+ ssl_cert_path=kwargs.pop('ssl_cert_path'),
+ port=kwargs['port'],
+ username=username,
+ password=password,
+ api_trace_pattern=api_trace_pattern)
+
+ self.async_rest_timeout = kwargs.get('async_rest_timeout', 60)
+
+ self.vserver = kwargs.get('vserver')
+ self.connection.set_vserver(self.vserver)
+
+ ontap_version = self.get_ontap_version(cached=False)
+ if ontap_version < (9, 11, 1):
+            msg = _('REST Client can be used only with ONTAP 9.11.1 or later.')
+ raise na_utils.NetAppDriverException(msg)
+ self.connection.set_ontap_version(ontap_version)
+
+ self.ssh_client = self._init_ssh_client(host, username, password)
+
+ # NOTE(nahimsouza): ZAPI Client is needed to implement the fallback
+ # when a REST method is not supported.
+ self.zapi_client = client_cmode.Client(**kwargs)
+
+ self._init_features()
+
+ def _init_ssh_client(self, host, username, password):
+ return netapp_api.SSHUtil(
+ host=host,
+ username=username,
+ password=password)
+
+ def _init_features(self):
+ self.features = na_utils.Features()
+
+ generation, major, minor = self.get_ontap_version()
+ ontap_version = (generation, major)
+
+ ontap_9_0 = ontap_version >= (9, 0)
+ ontap_9_4 = ontap_version >= (9, 4)
+ ontap_9_5 = ontap_version >= (9, 5)
+ ontap_9_6 = ontap_version >= (9, 6)
+ ontap_9_8 = ontap_version >= (9, 8)
+ ontap_9_9 = ontap_version >= (9, 9)
+
+ nodes_info = self._get_cluster_nodes_info()
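+        # QoS minimum support depends on the node model and platform, so a
+        # per-node feature flag is registered for both the NFS and block
+        # protocols; is_qos_min_supported() reads these flags later.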
+ for node in nodes_info:
+ qos_min_block = False
+ qos_min_nfs = False
+ if node['model'] == ONTAP_SELECT_MODEL:
+ qos_min_block = node['is_all_flash_select'] and ontap_9_6
+ qos_min_nfs = qos_min_block
+ elif ONTAP_C190 in node['model']:
+ qos_min_block = node['is_all_flash'] and ontap_9_6
+ qos_min_nfs = qos_min_block
+ else:
+ qos_min_block = node['is_all_flash'] and ontap_9_0
+ qos_min_nfs = node['is_all_flash'] and ontap_9_0
+
+ qos_name = na_utils.qos_min_feature_name(True, node['name'])
+ self.features.add_feature(qos_name, supported=qos_min_nfs)
+ qos_name = na_utils.qos_min_feature_name(False, node['name'])
+ self.features.add_feature(qos_name, supported=qos_min_block)
+
+ self.features.add_feature('SNAPMIRROR_V2', supported=ontap_9_0)
+ self.features.add_feature('USER_CAPABILITY_LIST',
+ supported=ontap_9_0)
+ self.features.add_feature('SYSTEM_METRICS', supported=ontap_9_0)
+ self.features.add_feature('CLONE_SPLIT_STATUS', supported=ontap_9_0)
+ self.features.add_feature('FAST_CLONE_DELETE', supported=ontap_9_0)
+ self.features.add_feature('SYSTEM_CONSTITUENT_METRICS',
+ supported=ontap_9_0)
+ self.features.add_feature('ADVANCED_DISK_PARTITIONING',
+ supported=ontap_9_0)
+ self.features.add_feature('BACKUP_CLONE_PARAM', supported=ontap_9_0)
+ self.features.add_feature('CLUSTER_PEER_POLICY', supported=ontap_9_0)
+ self.features.add_feature('FLEXVOL_ENCRYPTION', supported=ontap_9_0)
+ self.features.add_feature('FLEXGROUP', supported=ontap_9_8)
+ self.features.add_feature('FLEXGROUP_CLONE_FILE',
+ supported=ontap_9_9)
+
+ self.features.add_feature('ADAPTIVE_QOS', supported=ontap_9_4)
+ self.features.add_feature('ADAPTIVE_QOS_BLOCK_SIZE',
+ supported=ontap_9_5)
+ self.features.add_feature('ADAPTIVE_QOS_EXPECTED_IOPS_ALLOCATION',
+ supported=ontap_9_5)
+
+ LOG.info('ONTAP Version: %(generation)s.%(major)s.%(minor)s',
+ {'generation': ontap_version[0], 'major': ontap_version[1],
+ 'minor': minor})
+
+ def __getattr__(self, name):
+ """If method is not implemented for REST, try to call the ZAPI."""
+ LOG.debug("The %s call is not supported for REST, falling back to "
+ "ZAPI.", name)
+ # Don't use self.zapi_client to avoid reentrant call to __getattr__()
+ zapi_client = object.__getattribute__(self, 'zapi_client')
+ return getattr(zapi_client, name)
+
+ def _wait_job_result(self, job_url):
+ """Waits for a job to finish."""
+
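+        # Poll the job endpoint every 'interval' seconds; the number of
+        # retries is derived from the configured async REST timeout.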
+ interval = 2
+ retries = (self.async_rest_timeout / interval)
+
+ @utils.retry(netapp_api.NaRetryableError, interval=interval,
+ retries=retries, backoff_rate=1)
+ def _waiter():
+ response = self.send_request(job_url, 'get',
+ enable_tunneling=False)
+
+ job_state = response.get('state')
+ if job_state == 'success':
+ return response
+ elif job_state == 'failure':
+ message = response['error']['message']
+ code = response['error']['code']
+ raise netapp_api.NaApiError(message=message, code=code)
+
+ msg_args = {'job': job_url, 'state': job_state}
+ LOG.debug("Job %(job)s has not finished: %(state)s", msg_args)
+ raise netapp_api.NaRetryableError(message='Job is running.')
+
+ try:
+ return _waiter()
+ except netapp_api.NaRetryableError:
+ msg = _("Job %s did not reach the expected state. Retries "
+ "exhausted. Aborting.") % job_url
+ raise na_utils.NetAppDriverException(msg)
+
+ def send_request(self, action_url, method, body=None, query=None,
+ enable_tunneling=True,
+ max_page_length=DEFAULT_MAX_PAGE_LENGTH,
+ wait_on_accepted=True):
+
+ """Sends REST request to ONTAP.
+
+ :param action_url: action URL for the request
+ :param method: HTTP method for the request ('get', 'post', 'put',
+ 'delete' or 'patch')
+ :param body: dict of arguments to be passed as request body
+ :param query: dict of arguments to be passed as query string
+ :param enable_tunneling: enable tunneling to the ONTAP host
+ :param max_page_length: size of the page during pagination
+ :param wait_on_accepted: if True, wait until the job finishes when
+ HTTP code 202 (Accepted) is returned
+
+ :returns: parsed REST response
+ """
+
+ response = None
+
+ if method == 'get':
+ response = self.get_records(
+ action_url, query, enable_tunneling, max_page_length)
+ else:
+ code, response = self.connection.invoke_successfully(
+ action_url, method, body=body, query=query,
+ enable_tunneling=enable_tunneling)
+
+ if code == HTTP_ACCEPTED and wait_on_accepted:
+ # get job URL and discard '/api'
+ job_url = response['job']['_links']['self']['href'][4:]
+ response = self._wait_job_result(job_url)
+
+ return response
+
+ def get_records(self, action_url, query=None, enable_tunneling=True,
+ max_page_length=DEFAULT_MAX_PAGE_LENGTH):
+ """Retrieves ONTAP resources using pagination REST request.
+
+ :param action_url: action URL for the request
+ :param query: dict of arguments to be passed as query string
+ :param enable_tunneling: enable tunneling to the ONTAP host
+ :param max_page_length: size of the page during pagination
+
+ :returns: dict containing records and num_records
+ """
+
+ # Initialize query variable if it is None
+ query = query if query else {}
+ query['max_records'] = max_page_length
+
+ _, response = self.connection.invoke_successfully(
+ action_url, 'get', query=query,
+ enable_tunneling=enable_tunneling)
+
+ # NOTE(nahimsouza): if all records are returned in the first call,
+ # 'next_url' will be None.
+ next_url = response.get('_links', {}).get('next', {}).get('href')
+ next_url = next_url[4:] if next_url else None # discard '/api'
+
+ # Get remaining pages, saving data into first page
+ while next_url:
+ # NOTE(nahimsouza): clean the 'query', because the parameters are
+ # already included in 'next_url'.
+ _, next_response = self.connection.invoke_successfully(
+ next_url, 'get', query=None,
+ enable_tunneling=enable_tunneling)
+
+ response['num_records'] += next_response.get('num_records', 0)
+ response['records'].extend(next_response.get('records'))
+
+ next_url = (
+ next_response.get('_links', {}).get('next', {}).get('href'))
+ next_url = next_url[4:] if next_url else None # discard '/api'
+
+ return response
+
+ def get_ontap_version(self, cached=True):
+ """Gets the ONTAP version as tuple."""
+
+ if cached:
+ return self.connection.get_ontap_version()
+
+ query = {
+ 'fields': 'version'
+ }
+
+ response = self.send_request('/cluster/', 'get', query=query)
+
+ version = (response['version']['generation'],
+ response['version']['major'],
+ response['version']['minor'])
+
+ return version
+
+ def check_api_permissions(self):
+ """Check which APIs that support SSC functionality are available."""
+
+ inaccessible_apis = []
+ invalid_extra_specs = []
+
+ for api, extra_specs in SSC_API_MAP.items():
+ if not self.check_cluster_api(api):
+ inaccessible_apis.append(api)
+ invalid_extra_specs.extend(extra_specs)
+
+ if inaccessible_apis:
+ if '/storage/volumes' in inaccessible_apis:
+ msg = _('User not permitted to query Data ONTAP volumes.')
+ raise exception.VolumeBackendAPIException(data=msg)
+ else:
+ LOG.warning('The configured user account does not have '
+ 'sufficient privileges to use all needed '
+ 'APIs. The following extra specs will fail '
+ 'or be ignored: %s.', invalid_extra_specs)
+
+ return invalid_extra_specs
+
+ def check_cluster_api(self, api):
+ """Checks the availability of a cluster API.
+
+ Returns True if the specified cluster API exists and may be called by
+ the current user.
+ """
+ try:
+ # No need to return any records here since we just want to know if
+ # the user is allowed to make the request. A "Permission Denied"
+            # error code is expected if the user does not have the necessary
+            # permissions.
+ self.send_request('%s?return_records=false' % api, 'get',
+ enable_tunneling=False)
+ except netapp_api.NaApiError as ex:
+            # NOTE(nahimsouza): This function only returns False if the user
+            # is not authorized. Any other error must be handled by the
+            # caller that uses the same endpoint.
+ if ex.code == netapp_api.REST_UNAUTHORIZED:
+ return False
+
+ return True
+
+ def _get_cluster_nodes_info(self):
+ """Return a list of models of the nodes in the cluster."""
+ query_args = {'fields': 'model,'
+ 'name,'
+ 'is_all_flash_optimized,'
+ 'is_all_flash_select_optimized'}
+
+ nodes = []
+ try:
+ result = self.send_request('/cluster/nodes', 'get',
+ query=query_args,
+ enable_tunneling=False)
+
+ for record in result['records']:
+ node = {
+ 'model': record['model'],
+ 'name': record['name'],
+ 'is_all_flash':
+ record['is_all_flash_optimized'],
+ 'is_all_flash_select':
+ record['is_all_flash_select_optimized']
+ }
+ nodes.append(node)
+ except netapp_api.NaApiError as e:
+ if e.code == netapp_api.REST_UNAUTHORIZED:
+ LOG.debug('Cluster nodes can only be collected with '
+ 'cluster scoped credentials.')
+ else:
+ LOG.exception('Failed to get the cluster nodes.')
+
+ return nodes
+
+ def list_flexvols(self):
+ """Returns the names of the flexvols on the controller."""
+
+ query = {
+ 'type': 'rw',
+ 'style': 'flex*', # Match both 'flexvol' and 'flexgroup'
+ 'is_svm_root': 'false',
+ 'error_state.is_inconsistent': 'false',
+ 'state': 'online',
+ 'fields': 'name'
+ }
+
+ response = self.send_request(
+ '/storage/volumes/', 'get', query=query)
+
+ records = response.get('records', [])
+ volumes = [volume['name'] for volume in records]
+
+ return volumes
+
+ def _get_unique_volume(self, records):
+ """Get the unique FlexVol or FlexGroup volume from a volume list."""
+ if len(records) != 1:
+ msg = _('Could not find unique volume. Volumes found: %(vol)s.')
+ msg_args = {'vol': records}
+ raise exception.VolumeBackendAPIException(data=msg % msg_args)
+
+ return records[0]
+
+ def _get_volume_by_args(self, vol_name=None, vol_path=None,
+ vserver=None, fields=None):
+ """Get info from a single volume according to the args."""
+
+ query = {
+ 'type': 'rw',
+ 'style': 'flex*', # Match both 'flexvol' and 'flexgroup'
+ 'is_svm_root': 'false',
+ 'error_state.is_inconsistent': 'false',
+ 'state': 'online',
+ 'fields': 'name,style'
+ }
+
+ if vol_name:
+ query['name'] = vol_name
+ if vol_path:
+ query['nas.path'] = vol_path
+ if vserver:
+ query['svm.name'] = vserver
+ if fields:
+ query['fields'] = fields
+
+ volumes_response = self.send_request(
+ '/storage/volumes/', 'get', query=query)
+
+ records = volumes_response.get('records', [])
+ volume = self._get_unique_volume(records)
+ return volume
+
+ def get_flexvol(self, flexvol_path=None, flexvol_name=None):
+ """Get flexvol attributes needed for the storage service catalog."""
+
+ fields = ('aggregates.name,name,svm.name,nas.path,'
+ 'type,guarantee.honored,guarantee.type,'
+ 'space.snapshot.reserve_percent,space.size,'
+ 'qos.policy.name,snapshot_policy,language,style')
+ unique_volume = self._get_volume_by_args(
+ vol_name=flexvol_name, vol_path=flexvol_path, fields=fields)
+
+ aggregate = None
+ if unique_volume['style'] == 'flexvol':
+ # flexvol has only 1 aggregate
+ aggregate = unique_volume['aggregates'][0]['name']
+ else:
+ aggregate = [aggr["name"]
+ for aggr in unique_volume.get('aggregates', [])]
+
+ qos_policy_group = (
+ unique_volume.get('qos', {}).get('policy', {}).get('name'))
+
+ volume = {
+ 'name': unique_volume['name'],
+ 'vserver': unique_volume['svm']['name'],
+ 'junction-path': unique_volume.get('nas', {}).get('path'),
+ 'aggregate': aggregate,
+ 'type': unique_volume['type'],
+ 'space-guarantee-enabled': unique_volume['guarantee']['honored'],
+ 'space-guarantee': unique_volume['guarantee']['type'],
+ 'percentage-snapshot-reserve':
+ str(unique_volume['space']['snapshot']['reserve_percent']),
+ 'size': str(unique_volume['space']['size']),
+ 'qos-policy-group': qos_policy_group,
+ 'snapshot-policy': unique_volume['snapshot_policy']['name'],
+ 'language': unique_volume['language'],
+ 'style-extended': unique_volume['style'],
+ }
+
+ return volume
+
+ def is_flexvol_mirrored(self, flexvol_name, vserver_name):
+ """Check if flexvol is a SnapMirror source."""
+
+ query = {
+ 'source.path': vserver_name + ':' + flexvol_name,
+ 'state': 'snapmirrored',
+ 'return_records': 'false',
+ }
+
+ try:
+ response = self.send_request('/snapmirror/relationships/',
+ 'get', query=query)
+ return response['num_records'] > 0
+ except netapp_api.NaApiError:
+ LOG.exception('Failed to get SnapMirror info for volume %s.',
+ flexvol_name)
+
+ return False
+
+ def is_flexvol_encrypted(self, flexvol_name, vserver_name):
+ """Check if a flexvol is encrypted."""
+
+ if not self.features.FLEXVOL_ENCRYPTION:
+ return False
+
+ query = {
+ 'encryption.enabled': 'true',
+ 'name': flexvol_name,
+ 'svm.name': vserver_name,
+ 'return_records': 'false',
+ }
+
+ try:
+ response = self.send_request(
+ '/storage/volumes/', 'get', query=query)
+ return response['num_records'] > 0
+ except netapp_api.NaApiError:
+ LOG.exception('Failed to get Encryption info for volume %s.',
+ flexvol_name)
+
+ return False
+
+ def get_aggregate_disk_types(self, aggregate_name):
+ """Get the disk type(s) of an aggregate."""
+ disk_types = self._get_aggregate_disk_types(aggregate_name)
+ return list(disk_types) if disk_types else None
+
+ def _get_aggregate_disk_types(self, aggregate_name):
+ """Get the disk type(s) of an aggregate"""
+
+ disk_types = set()
+
+ query = {
+ 'aggregates.name': aggregate_name,
+ 'fields': 'effective_type'
+ }
+
+ try:
+ response = self.send_request(
+ '/storage/disks', 'get', query=query, enable_tunneling=False)
+ except netapp_api.NaApiError:
+ LOG.exception('Failed to get disk info for aggregate %s.',
+ aggregate_name)
+ return disk_types
+
+ for storage_disk_info in response['records']:
+ disk_types.add(storage_disk_info['effective_type'])
+
+ return disk_types
+
+ def _get_aggregates(self, aggregate_names=None, fields=None):
+
+ query = {}
+ if aggregate_names:
+ query['name'] = ','.join(aggregate_names)
+
+ if fields:
+ query['fields'] = fields
+
+ response = self.send_request(
+ '/storage/aggregates', 'get', query=query, enable_tunneling=False)
+
+ return response['records']
+
+ def get_aggregate(self, aggregate_name):
+ """Get aggregate attributes needed for the storage service catalog."""
+
+ if not aggregate_name:
+ return {}
+
+ fields = ('name,block_storage.primary.raid_type,'
+ 'block_storage.storage_type,home_node.name')
+
+ try:
+ aggrs = self._get_aggregates(aggregate_names=[aggregate_name],
+ fields=fields)
+ except netapp_api.NaApiError:
+ LOG.exception('Failed to get info for aggregate %s.',
+ aggregate_name)
+ return {}
+
+ if len(aggrs) < 1:
+ return {}
+
+ aggr_attributes = aggrs[0]
+
+ aggregate = {
+ 'name': aggr_attributes['name'],
+ 'raid-type':
+ aggr_attributes['block_storage']['primary']['raid_type'],
+ 'is-hybrid':
+ aggr_attributes['block_storage']['storage_type'] == 'hybrid',
+ 'node-name': aggr_attributes['home_node']['name'],
+ }
+
+ return aggregate
+
+ def is_qos_min_supported(self, is_nfs, node_name):
+ """Check if the node supports QoS minimum."""
+ if node_name is None:
+            # When the node name is not available (SVM-scoped account or an
+            # error), QoS minimum support is reported as unsupported.
+ return False
+
+ qos_min_name = na_utils.qos_min_feature_name(is_nfs, node_name)
+ return getattr(self.features, qos_min_name, False).__bool__()
+
+ def get_flexvol_dedupe_info(self, flexvol_name):
+ """Get dedupe attributes needed for the storage service catalog."""
+
+ query = {
+ 'efficiency.volume_path': '/vol/%s' % flexvol_name,
+ 'fields': 'efficiency.state,efficiency.compression'
+ }
+
+        # Set default values for the case where there is no response.
+ no_dedupe_response = {
+ 'compression': False,
+ 'dedupe': False,
+ 'logical-data-size': 0,
+ 'logical-data-limit': 1,
+ }
+
+ try:
+ response = self.send_request('/storage/volumes',
+ 'get', query=query)
+ except netapp_api.NaApiError:
+ LOG.exception('Failed to get dedupe info for volume %s.',
+ flexvol_name)
+ return no_dedupe_response
+
+ if response["num_records"] != 1:
+ return no_dedupe_response
+
+ state = response["records"][0]["efficiency"]["state"]
+ compression = response["records"][0]["efficiency"]["compression"]
+
+        # TODO(nahimsouza): as soon as the REST API supports the fields
+        # 'logical-data-size' and 'logical-data-limit', we should include
+        # them in the query and set them correctly.
+        # NOTE(nahimsouza): these fields are only used by the client function
+        # `get_flexvol_dedupe_used_percent`. Since that function is not
+        # implemented on REST yet, the hard-coded values below do not
+        # affect the driver in any way.
+ logical_data_size = 0
+ logical_data_limit = 1
+
+ dedupe_info = {
+ 'compression': False if compression == "none" else True,
+ 'dedupe': False if state == "disabled" else True,
+ 'logical-data-size': logical_data_size,
+ 'logical-data-limit': logical_data_limit,
+ }
+
+ return dedupe_info
+
+ def get_lun_list(self):
+ """Gets the list of LUNs on filer.
+
+ Gets the LUNs from cluster with vserver.
+ """
+
+ query = {
+ 'svm.name': self.vserver,
+ 'fields': 'svm.name,location.volume.name,space.size,'
+ 'location.qtree.name,name,os_type,'
+ 'space.guarantee.requested,uuid'
+ }
+
+ response = self.send_request(
+ '/storage/luns/', 'get', query=query)
+
+        if response['num_records'] == 0:
+ return []
+
+ lun_list = []
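+        # Convert each REST LUN record into the flat dict format the driver
+        # expects (keeping the key names used with ZAPI).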
+ for lun in response['records']:
+ lun_info = {}
+ lun_info['Vserver'] = lun['svm']['name']
+ lun_info['Volume'] = lun['location']['volume']['name']
+ lun_info['Size'] = lun['space']['size']
+ lun_info['Qtree'] = \
+ lun['location'].get('qtree', {}).get('name', '')
+ lun_info['Path'] = lun['name']
+ lun_info['OsType'] = lun['os_type']
+ lun_info['SpaceReserved'] = lun['space']['guarantee']['requested']
+ lun_info['UUID'] = lun['uuid']
+
+ lun_list.append(lun_info)
+
+ return lun_list
+
+ def get_lun_by_args(self, **lun_info_args):
+ """Retrieves LUN with specified args."""
+
+ query = {
+ 'fields': 'svm.name,location.volume.name,space.size,'
+ 'location.qtree.name,name,os_type,'
+ 'space.guarantee.requested,uuid'
+ }
+
+ if lun_info_args:
+ if 'vserver' in lun_info_args:
+ query['svm.name'] = lun_info_args['vserver']
+ if 'path' in lun_info_args:
+ query['name'] = lun_info_args['path']
+ if 'uuid' in lun_info_args:
+ query['uuid'] = lun_info_args['uuid']
+
+ response = self.send_request(
+ '/storage/luns/', 'get', query=query)
+
+        if response['num_records'] == 0:
+ return []
+
+ lun_list = []
+ for lun in response['records']:
+ lun_info = {}
+ lun_info['Vserver'] = lun['svm']['name']
+ lun_info['Volume'] = lun['location']['volume']['name']
+ lun_info['Size'] = lun['space']['size']
+ lun_info['Qtree'] = \
+ lun['location'].get('qtree', {}).get('name', '')
+ lun_info['Path'] = lun['name']
+ lun_info['OsType'] = lun['os_type']
+ lun_info['SpaceReserved'] = lun['space']['guarantee']['requested']
+ lun_info['UUID'] = lun['uuid']
+
+            # NOTE(nahimsouza): Currently, the ONTAP REST API does not return
+            # 'block-size' in the response, so it is set to 512 by default,
+            # the traditional block size advertised by hard disks.
+ lun_info['BlockSize'] = 512
+
+ lun_list.append(lun_info)
+
+ return lun_list
+
+ def get_lun_sizes_by_volume(self, volume_name):
+ """"Gets the list of LUNs and their sizes from a given volume name"""
+
+ query = {
+ 'location.volume.name': volume_name,
+ 'fields': 'space.size,name'
+ }
+
+ response = self.send_request('/storage/luns/', 'get', query=query)
+
+        if response['num_records'] == 0:
+ return []
+
+ luns = []
+ for lun_info in response['records']:
+ luns.append({
+ 'path': lun_info.get('name', ''),
+ 'size': float(lun_info.get('space', {}).get('size', 0))
+ })
+ return luns
+
+ def get_file_sizes_by_dir(self, dir_path):
+ """Gets the list of files and their sizes from a given directory."""
+
+ # 'dir_path' will always be a FlexVol name
+ volume = self._get_volume_by_args(vol_name=dir_path)
+
+ query = {
+ 'type': 'file',
+ 'fields': 'size,name'
+ }
+
+ vol_uuid = volume['uuid']
+ try:
+ response = self.send_request(
+ f'/storage/volumes/{vol_uuid}/files',
+ 'get', query=query)
+ except netapp_api.NaApiError as e:
+ if e.code == netapp_api.REST_NO_SUCH_FILE:
+ return []
+ else:
+ raise e
+
+ files = []
+ for file_info in response['records']:
+ files.append({
+ 'name': file_info.get('name', ''),
+ 'file-size': float(file_info.get('size', 0))
+ })
+ return files
+
+ def get_volume_state(self, junction_path=None, name=None):
+ """Returns volume state for a given name or junction path."""
+
+ query_args = {}
+
+ if name:
+ query_args['name'] = name
+ if junction_path:
+ query_args['nas.path'] = junction_path
+
+ query_args['fields'] = 'state'
+
+ response = self.send_request('/storage/volumes/',
+ 'get', query=query_args)
+ try:
+ records = response.get('records', [])
+ unique_volume = self._get_unique_volume(records)
+ except exception.VolumeBackendAPIException:
+ return None
+
+ return unique_volume['state']
+
+ def delete_snapshot(self, volume_name, snapshot_name):
+ """Deletes a volume snapshot."""
+ volume = self._get_volume_by_args(vol_name=volume_name)
+ self.send_request(
+ f'/storage/volumes/{volume["uuid"]}/snapshots'
+ f'?name={snapshot_name}', 'delete')
+
+ def get_operational_lif_addresses(self):
+ """Gets the IP addresses of operational LIFs on the vserver."""
+
+ query = {
+ 'state': 'up',
+ 'fields': 'ip.address',
+ }
+
+ response = self.send_request(
+ '/network/ip/interfaces/', 'get', query=query)
+
+ return [lif_info['ip']['address']
+ for lif_info in response['records']]
+
+ def _list_vservers(self):
+ """Get the names of vservers present"""
+ query = {
+ 'fields': 'name',
+ }
+ response = self.send_request('/svm/svms', 'get', query=query,
+ enable_tunneling=False)
+
+ return [svm['name'] for svm in response.get('records', [])]
+
+ def _get_ems_log_destination_vserver(self):
+ """Returns the best vserver destination for EMS messages."""
+
+        # NOTE(nahimsouza): Unlike ZAPI, only 'data' SVMs can be managed
+        # through the SVM REST APIs, which is why the vserver type is not
+        # specified here.
+ vservers = self._list_vservers()
+
+ if vservers:
+ return vservers[0]
+
+ raise exception.NotFound("No Vserver found to receive EMS messages.")
+
+ def send_ems_log_message(self, message_dict):
+ """Sends a message to the Data ONTAP EMS log."""
+
+ body = {
+ 'computer_name': message_dict['computer-name'],
+ 'event_source': message_dict['event-source'],
+ 'app_version': message_dict['app-version'],
+ 'category': message_dict['category'],
+ 'severity': 'notice',
+ 'autosupport_required': message_dict['auto-support'] == 'true',
+ 'event_id': message_dict['event-id'],
+ 'event_description': message_dict['event-description'],
+ }
+
+ bkp_connection = copy.copy(self.connection)
+ bkp_timeout = self.connection.get_timeout()
+ bkp_vserver = self.vserver
+
+ self.connection.set_timeout(25)
+ try:
+ # TODO(nahimsouza): Vserver is being set to replicate the ZAPI
+ # behavior, but need to check if this could be removed in REST API
+ self.connection.set_vserver(
+ self._get_ems_log_destination_vserver())
+ self.send_request('/support/ems/application-logs',
+ 'post', body=body)
+ LOG.debug('EMS executed successfully.')
+ except netapp_api.NaApiError as e:
+ LOG.warning('Failed to invoke EMS. %s', e)
+ finally:
+            # Restore the original connection settings.
+ timeout = (
+ bkp_timeout if bkp_timeout is not None else DEFAULT_TIMEOUT)
+ self.connection.set_timeout(timeout)
+ self.connection = copy.copy(bkp_connection)
+ self.connection.set_vserver(bkp_vserver)
+
+ def get_performance_counter_info(self, object_name, counter_name):
+ """Gets info about one or more Data ONTAP performance counters."""
+
+        # NOTE(nahimsouza): This conversion is needed because different names
+ # are used in ZAPI and we want to avoid changes in the driver for now.
+ rest_counter_names = {
+ 'domain_busy': 'domain_busy_percent',
+ 'processor_elapsed_time': 'elapsed_time',
+ 'avg_processor_busy': 'average_processor_busy_percent',
+ }
+
+ rest_counter_name = counter_name
+ if counter_name in rest_counter_names:
+ rest_counter_name = rest_counter_names[counter_name]
+
+ # Get counter table info
+ query = {
+ 'counter_schemas.name': rest_counter_name,
+ 'fields': 'counter_schemas.*'
+ }
+
+ try:
+ table = self.send_request(
+ f'/cluster/counter/tables/{object_name}',
+ 'get', query=query, enable_tunneling=False)
+
+ name = counter_name # use the original name (ZAPI compatible)
+ base_counter = table['counter_schemas'][0]['denominator']['name']
+
+ query = {
+ 'counters.name': rest_counter_name,
+ 'fields': 'counters.*'
+ }
+
+ response = self.send_request(
+ f'/cluster/counter/tables/{object_name}/rows',
+ 'get', query=query, enable_tunneling=False)
+
+ table_rows = response.get('records', [])
+ labels = []
+ if len(table_rows) != 0:
+ labels = table_rows[0]['counters'][0].get('labels', [])
+
+ # NOTE(nahimsouza): Values have a different format on REST API
+ # and we want to keep compatibility with ZAPI for a while
+ if object_name == 'wafl' and counter_name == 'cp_phase_times':
+ # discard the prefix 'cp_'
+ labels = [label[3:] for label in labels]
+
+ return {
+ 'name': name,
+ 'labels': labels,
+ 'base-counter': base_counter,
+ }
+ except netapp_api.NaApiError:
+ raise exception.NotFound(_('Counter %s not found') % counter_name)
+
+ def get_performance_instance_uuids(self, object_name, node_name):
+ """Get UUIDs of performance instances for a cluster node."""
+
+ query = {
+ 'id': node_name + ':*',
+ }
+
+ response = self.send_request(
+ f'/cluster/counter/tables/{object_name}/rows',
+ 'get', query=query, enable_tunneling=False)
+
+ records = response.get('records', [])
+
+ uuids = []
+ for record in records:
+ uuids.append(record['id'])
+
+ return uuids
+
+ def get_performance_counters(self, object_name, instance_uuids,
+ counter_names):
+ """Gets more cDOT performance counters."""
+
+        # NOTE(nahimsouza): This conversion is needed because different names
+ # are used in ZAPI and we want to avoid changes in the driver for now.
+ rest_counter_names = {
+ 'domain_busy': 'domain_busy_percent',
+ 'processor_elapsed_time': 'elapsed_time',
+ 'avg_processor_busy': 'average_processor_busy_percent',
+ }
+
+ zapi_counter_names = {
+ 'domain_busy_percent': 'domain_busy',
+ 'elapsed_time': 'processor_elapsed_time',
+ 'average_processor_busy_percent': 'avg_processor_busy',
+ }
+
+ for i in range(len(counter_names)):
+ if counter_names[i] in rest_counter_names:
+ counter_names[i] = rest_counter_names[counter_names[i]]
+
+ query = {
+ 'id': '|'.join(instance_uuids),
+ 'counters.name': '|'.join(counter_names),
+ 'fields': 'id,counter_table.name,counters.*',
+ }
+
+ response = self.send_request(
+ f'/cluster/counter/tables/{object_name}/rows',
+ 'get', query=query, enable_tunneling=False)
+
+ counter_data = []
+ for record in response.get('records', []):
+ for counter in record['counters']:
+
+ counter_name = counter['name']
+
+ # Reverts the name conversion
+ if counter_name in zapi_counter_names:
+ counter_name = zapi_counter_names[counter_name]
+
+ counter_value = ''
+ if counter.get('value'):
+ counter_value = counter.get('value')
+ elif counter.get('values'):
+ # NOTE(nahimsouza): Conversion made to keep compatibility
+ # with old ZAPI format
+ values = counter.get('values')
+ counter_value = ','.join([str(v) for v in values])
+
+ counter_data.append({
+ 'instance-name': record['counter_table']['name'],
+ 'instance-uuid': record['id'],
+ 'node-name': record['id'].split(':')[0],
+ 'timestamp': int(time()),
+ counter_name: counter_value,
+ })
+
+ return counter_data
+
+ def get_aggregate_capacities(self, aggregate_names):
+ """Gets capacity info for multiple aggregates."""
+
+ if not isinstance(aggregate_names, list):
+ return {}
+
+ aggregates = {}
+ for aggregate_name in aggregate_names:
+ aggregates[aggregate_name] = self._get_aggregate_capacity(
+ aggregate_name)
+
+ return aggregates
+
+ def _get_aggregate_capacity(self, aggregate_name):
+ """Gets capacity info for an aggregate."""
+
+ fields = ('space.block_storage.available,space.block_storage.size,'
+ 'space.block_storage.used')
+
+ try:
+ aggrs = self._get_aggregates(aggregate_names=[aggregate_name],
+ fields=fields)
+
+ result = {}
+ if len(aggrs) > 0:
+ aggr = aggrs[0]
+
+ available = float(aggr['space']['block_storage']['available'])
+ total = float(aggr['space']['block_storage']['size'])
+ used = float(aggr['space']['block_storage']['used'])
+ percent_used = int((used * 100) // total)
+
+ result = {
+ 'percent-used': percent_used,
+ 'size-available': available,
+ 'size-total': total,
+ }
+
+ return result
+ except netapp_api.NaApiError as e:
+ if (e.code == netapp_api.REST_API_NOT_FOUND or
+ e.code == netapp_api.REST_UNAUTHORIZED):
+ LOG.debug('Aggregate capacity can only be collected with '
+ 'cluster scoped credentials.')
+ else:
+ LOG.exception('Failed to get info for aggregate %s.',
+ aggregate_name)
+ return {}
+
+ def get_node_for_aggregate(self, aggregate_name):
+ """Get home node for the specified aggregate.
+
+ This API could return None, most notably if it was sent
+ to a Vserver LIF, so the caller must be able to handle that case.
+ """
+
+ if not aggregate_name:
+ return None
+
+ fields = 'home_node.name'
+ try:
+ aggrs = self._get_aggregates(aggregate_names=[aggregate_name],
+ fields=fields)
+ node = None
+ if len(aggrs) > 0:
+ aggr = aggrs[0]
+ node = aggr['home_node']['name']
+
+ return node
+ except netapp_api.NaApiError as e:
+ if e.code == netapp_api.REST_API_NOT_FOUND:
+ return None
+ else:
+ raise e
+
+ def provision_qos_policy_group(self, qos_policy_group_info,
+ qos_min_support):
+ """Create QoS policy group on the backend if appropriate."""
+ if qos_policy_group_info is None:
+ return
+
+ # Legacy QoS uses externally provisioned QoS policy group,
+ # so we don't need to create one on the backend.
+ legacy = qos_policy_group_info.get('legacy')
+ if legacy:
+ return
+
+ spec = qos_policy_group_info.get('spec')
+
+ if not spec:
+ return
+
+ is_adaptive = na_utils.is_qos_policy_group_spec_adaptive(
+ qos_policy_group_info)
+ self._validate_qos_policy_group(is_adaptive, spec=spec,
+ qos_min_support=qos_min_support)
+
+ qos_policy_group = self._get_qos_first_policy_group_by_name(
+ spec['policy_name'])
+
+ if not qos_policy_group:
+ self._create_qos_policy_group(spec, is_adaptive)
+ else:
+ self._modify_qos_policy_group(spec, is_adaptive,
+ qos_policy_group)
+
+ def _get_qos_first_policy_group_by_name(self, qos_policy_group_name):
+ records = self._get_qos_policy_group_by_name(qos_policy_group_name)
+ if len(records) == 0:
+ return None
+
+ return records[0]
+
+ def _get_qos_policy_group_by_name(self, qos_policy_group_name):
+ query = {'name': qos_policy_group_name}
+
+ response = self.send_request('/storage/qos/policies/',
+ 'get', query=query)
+
+ records = response.get('records')
+ if not records:
+ return []
+
+ return records
+
+ def _qos_spec_to_api_args(self, spec, is_adaptive, vserver=None):
+ """Convert a QoS spec to REST args."""
+ rest_args = {}
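+        # Adaptive QoS specs map to the 'adaptive' sub-object of the policy
+        # body, while fixed throughput limits map to the 'fixed' sub-object.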
+ if is_adaptive:
+ rest_args['adaptive'] = {}
+ if spec.get('absolute_min_iops'):
+ rest_args['adaptive']['absolute_min_iops'] = (
+ self._sanitize_qos_spec_value(
+ spec.get('absolute_min_iops')))
+ if spec.get('expected_iops'):
+ rest_args['adaptive']['expected_iops'] = (
+ self._sanitize_qos_spec_value(spec.get('expected_iops')))
+ if spec.get('expected_iops_allocation'):
+ rest_args['adaptive']['expected_iops_allocation'] = (
+ spec.get('expected_iops_allocation'))
+ if spec.get('peak_iops'):
+ rest_args['adaptive']['peak_iops'] = (
+ self._sanitize_qos_spec_value(spec.get('peak_iops')))
+ if spec.get('peak_iops_allocation'):
+ rest_args['adaptive']['peak_iops_allocation'] = (
+ spec.get('peak_iops_allocation'))
+ if spec.get('block_size'):
+ rest_args['adaptive']['block_size'] = (
+ spec.get('block_size'))
+ else:
+ rest_args['fixed'] = {}
+ qos_max = spec.get('max_throughput')
+ if qos_max and 'iops' in qos_max:
+ rest_args['fixed']['max_throughput_iops'] = (
+ self._sanitize_qos_spec_value(qos_max))
+ elif qos_max:
+ # Convert from B/s to MB/s
+ value = math.ceil(
+ self._sanitize_qos_spec_value(qos_max) / (10**6))
+ rest_args['fixed']['max_throughput_mbps'] = value
+
+ qos_min = spec.get('min_throughput')
+ if qos_min and 'iops' in qos_min:
+ rest_args['fixed']['min_throughput_iops'] = (
+ self._sanitize_qos_spec_value(qos_min))
+
+ if spec.get('policy_name'):
+ rest_args['name'] = spec.get('policy_name')
+ if spec.get('return_record'):
+ rest_args['return_records'] = spec.get('return_record')
+
+ if vserver:
+ rest_args['svm'] = {}
+ rest_args['svm']['name'] = vserver
+
+ return rest_args
+
+ def _sanitize_qos_spec_value(self, value):
+ value = value.lower()
+ value = value.replace('iops', '').replace('b/s', '')
+ value = int(value)
+ return value
+
+ def _create_qos_policy_group(self, spec, is_adaptive):
+ """Creates a QoS policy group."""
+ body = self._qos_spec_to_api_args(
+ spec, is_adaptive, vserver=self.vserver)
+
+ self.send_request('/storage/qos/policies/', 'post', body=body,
+ enable_tunneling=False)
+
+ def _modify_qos_policy_group(self, spec, is_adaptive, qos_policy_group):
+ """Modifies a QoS policy group."""
+ body = self._qos_spec_to_api_args(spec, is_adaptive)
+ if qos_policy_group['name'] == body['name']:
+ body.pop('name')
+
+ self.send_request(
+ f'/storage/qos/policies/{qos_policy_group["uuid"]}', 'patch',
+ body=body, enable_tunneling=False)
+
+ def get_vol_by_junc_vserver(self, vserver, junction):
+ """Gets the volume by junction path and vserver."""
+ volume = self._get_volume_by_args(vol_path=junction, vserver=vserver)
+ return volume['name']
+
+ def file_assign_qos(self, flex_vol, qos_policy_group_name,
+ qos_policy_group_is_adaptive, file_path):
+ """Assigns the named QoS policy-group to a file."""
+ volume = self._get_volume_by_args(flex_vol)
+ body = {
+ 'qos_policy.name': qos_policy_group_name
+ }
+
+ self.send_request(
+ f'/storage/volumes/{volume["uuid"]}/files/{file_path}',
+ 'patch', body=body, enable_tunneling=False)
+
+ def mark_qos_policy_group_for_deletion(self, qos_policy_group_info,
+ is_adaptive=False):
+ """Soft delete a QoS policy group backing a cinder volume."""
+ if qos_policy_group_info is None:
+ return
+
+ spec = qos_policy_group_info.get('spec')
+
+ # For cDOT we want to delete the QoS policy group that we created for
+ # this cinder volume. Because the QoS policy may still be "in use"
+        # after the call to delete the volume itself returns successfully,
+ # we instead rename the QoS policy group using a specific pattern and
+ # later attempt on a best effort basis to delete any QoS policy groups
+ # matching that pattern.
+ if spec:
+ current_name = spec['policy_name']
+ new_name = DELETED_PREFIX + current_name
+ try:
+ self._rename_qos_policy_group(current_name, new_name)
+ except netapp_api.NaApiError as ex:
+ LOG.warning('Rename failure in cleanup of cDOT QoS policy '
+ 'group %(current_name)s: %(ex)s',
+ {'current_name': current_name, 'ex': ex})
+
+        # Attempt to delete any QoS policies named "deleted_cinder_*".
+ self.remove_unused_qos_policy_groups()
+
+ def delete_file(self, path_to_file):
+ """Delete file at path."""
+ LOG.debug('Deleting file: %s', path_to_file)
+
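+        # path_to_file has the form '/vol/<volume name>/<path inside the
+        # volume>', so index 2 is the FlexVol name and the remainder is the
+        # path relative to it.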
+ volume_name = path_to_file.split('/')[2]
+ relative_path = '/'.join(path_to_file.split('/')[3:])
+ volume = self._get_volume_by_args(volume_name)
+
+ # Path requires "%2E" to represent "." and "%2F" to represent "/".
+ relative_path = relative_path.replace('.', '%2E').replace('/', '%2F')
+
+ self.send_request(f'/storage/volumes/{volume["uuid"]}'
+ + f'/files/{relative_path}', 'delete')
+
+ def _rename_qos_policy_group(self, qos_policy_group_name, new_name):
+ """Renames a QoS policy group."""
+ body = {'name': new_name}
+ query = {'name': qos_policy_group_name}
+ self.send_request('/storage/qos/policies/', 'patch', body=body,
+ query=query, enable_tunneling=False)
+
+ def remove_unused_qos_policy_groups(self):
+ """Deletes all QoS policy groups that are marked for deletion."""
+ query = {'name': f'{DELETED_PREFIX}*'}
+ self.send_request('/storage/qos/policies', 'delete', query=query)
+
+ def create_lun(self, volume_name, lun_name, size, metadata,
+ qos_policy_group_name=None,
+ qos_policy_group_is_adaptive=False):
+ """Issues API request for creating LUN on volume."""
+ self._validate_qos_policy_group(qos_policy_group_is_adaptive)
+
+ path = f'/vol/{volume_name}/{lun_name}'
+ space_reservation = metadata['SpaceReserved']
+ initial_size = size
+
+ body = {
+ 'name': path,
+ 'space.size': str(initial_size),
+ 'os_type': metadata['OsType'],
+ 'space.guarantee.requested': space_reservation
+ }
+
+ if qos_policy_group_name:
+ body['qos_policy.name'] = qos_policy_group_name
+
+ try:
+ self.send_request('/storage/luns', 'post', body=body)
+ except netapp_api.NaApiError as ex:
+ with excutils.save_and_reraise_exception():
+ LOG.error('Error provisioning volume %(lun_name)s on '
+ '%(volume_name)s. Details: %(ex)s',
+ {
+ 'lun_name': lun_name,
+ 'volume_name': volume_name,
+ 'ex': ex,
+ })
+
+ def do_direct_resize(self, path, new_size_bytes, force=True):
+ """Resize the LUN."""
+ seg = path.split("/")
+ LOG.info('Resizing LUN %s directly to new size.', seg[-1])
+
+ body = {'name': path, 'space.size': new_size_bytes}
+
+ self._lun_update_by_path(path, body)
+
+ def _get_lun_by_path(self, path, fields=None):
+ query = {'name': path}
+
+ if fields:
+ query['fields'] = fields
+
+ response = self.send_request('/storage/luns', 'get', query=query)
+ records = response.get('records', [])
+
+ return records
+
+ def _get_first_lun_by_path(self, path, fields=None):
+ records = self._get_lun_by_path(path, fields=fields)
+ if len(records) == 0:
+ return None
+
+ return records[0]
+
+ def _lun_update_by_path(self, path, body):
+ """Update the LUN."""
+ lun = self._get_first_lun_by_path(path)
+
+ if not lun:
+ raise netapp_api.NaApiError(code=netapp_api.EOBJECTNOTFOUND)
+
+ self.send_request(f'/storage/luns/{lun["uuid"]}', 'patch', body=body)
+
+ def _validate_qos_policy_group(self, is_adaptive, spec=None,
+ qos_min_support=False):
+ if is_adaptive and not self.features.ADAPTIVE_QOS:
+ msg = _("Adaptive QoS feature requires ONTAP 9.4 or later.")
+ raise na_utils.NetAppDriverException(msg)
+
+ if not spec:
+ return
+
+ if 'min_throughput' in spec and not qos_min_support:
+ msg = 'min_throughput is not supported by this back end.'
+ raise na_utils.NetAppDriverException(msg)
+
+ def get_if_info_by_ip(self, ip):
+ """Gets the network interface info by ip."""
+ query_args = {}
+ query_args['ip.address'] = volume_utils.resolve_hostname(ip)
+ query_args['fields'] = 'svm'
+
+ result = self.send_request('/network/ip/interfaces/', 'get',
+ query=query_args, enable_tunneling=False)
+ num_records = result['num_records']
+ records = result.get('records', [])
+
+ if num_records == 0:
+ raise exception.NotFound(
+ _('No interface found on cluster for ip %s') % ip)
+
+ return [{'vserver': item['svm']['name']} for item in records]
+
+ def get_igroup_by_initiators(self, initiator_list):
+ """Get igroups exactly matching a set of initiators."""
+
+ igroup_list = []
+ if not initiator_list:
+ return igroup_list
+
+ query = {
+ 'svm.name': self.vserver,
+ 'initiators.name': ' '.join(initiator_list),
+ 'fields': 'name,protocol,os_type'
+ }
+
+ response = self.send_request('/protocols/san/igroups',
+ 'get', query=query)
+ records = response.get('records', [])
+ for igroup_item in records:
+ igroup = {'initiator-group-os-type': igroup_item['os_type'],
+ 'initiator-group-type': igroup_item['protocol'],
+ 'initiator-group-name': igroup_item['name']}
+ igroup_list.append(igroup)
+
+ return igroup_list
+
+ def add_igroup_initiator(self, igroup, initiator):
+ """Adds initiators to the specified igroup."""
+ query_initiator_uuid = {
+ 'name': igroup,
+ 'fields': 'uuid'
+ }
+
+ response_initiator_uuid = self.send_request(
+ '/protocols/san/igroups/', 'get', query=query_initiator_uuid)
+
+ response = response_initiator_uuid.get('records', [])
+ if len(response) < 1:
+ msg = _('Could not find igroup initiator.')
+ raise exception.VolumeBackendAPIException(data=msg)
+
+ igroup_uuid = response[0]['uuid']
+
+ body = {
+ 'name': initiator
+ }
+
+ self.send_request('/protocols/san/igroups/' +
+ igroup_uuid + '/initiators',
+ 'post', body=body)
+
+ def create_igroup(self, igroup, igroup_type='iscsi', os_type='default'):
+ """Creates igroup with specified args."""
+ body = {
+ 'name': igroup,
+ 'protocol': igroup_type,
+ 'os_type': os_type,
+ }
+ self.send_request('/protocols/san/igroups', 'post', body=body)
+
+ def map_lun(self, path, igroup_name, lun_id=None):
+ """Maps LUN to the initiator and returns LUN id assigned."""
+
+ body_post = {
+ 'lun.name': path,
+ 'igroup.name': igroup_name,
+ }
+
+ if lun_id is not None:
+ body_post['logical_unit_number'] = lun_id
+
+ try:
+ result = self.send_request('/protocols/san/lun-maps', 'post',
+ body=body_post,
+ query={'return_records': 'true'})
+ records = result.get('records')
+ lun_id_assigned = records[0].get('logical_unit_number')
+ return lun_id_assigned
+ except netapp_api.NaApiError as e:
+ code = e.code
+ message = e.message
+ LOG.warning('Error mapping LUN. Code :%(code)s, Message: '
+ '%(message)s', {'code': code, 'message': message})
+ raise
+
+ def get_lun_map(self, path):
+ """Gets the LUN map by LUN path."""
+ map_list = []
+
+ query = {
+ 'lun.name': path,
+ 'fields': 'igroup.name,logical_unit_number,svm.name',
+ }
+
+ response = self.send_request('/protocols/san/lun-maps',
+ 'get',
+ query=query)
+ num_records = response.get('num_records')
+ records = response.get('records', None)
+ if records is None or num_records is None:
+ return map_list
+
+ for element in records:
+ map_lun = {}
+ map_lun['initiator-group'] = element['igroup']['name']
+ map_lun['lun-id'] = element['logical_unit_number']
+ map_lun['vserver'] = element['svm']['name']
+ map_list.append(map_lun)
+
+ return map_list
+
+ def get_fc_target_wwpns(self):
+ """Gets the FC target details."""
+ wwpns = []
+ query = {
+ 'fields': 'wwpn'
+ }
+ response = self.send_request('/network/fc/interfaces',
+ 'get', query=query)
+
+ records = response.get('records')
+ for record in records:
+ wwpn = record.get('wwpn').lower()
+ wwpns.append(wwpn)
+
+ return wwpns
+
+ def unmap_lun(self, path, igroup_name):
+ """Unmaps a LUN from given initiator."""
+
+        # Get LUN and igroup UUIDs.
+ query_uuid = {
+ 'igroup.name': igroup_name,
+ 'lun.name': path,
+ 'fields': 'lun.uuid,igroup.uuid'
+ }
+
+ response_uuid = self.send_request(
+ '/protocols/san/lun-maps', 'get', query=query_uuid)
+
+ if response_uuid['num_records'] > 0:
+ lun_uuid = response_uuid['records'][0]['lun']['uuid']
+ igroup_uuid = response_uuid['records'][0]['igroup']['uuid']
+
+ try:
+ self.send_request(
+ f'/protocols/san/lun-maps/{lun_uuid}/{igroup_uuid}',
+ 'delete')
+ except netapp_api.NaApiError as e:
+ LOG.warning("Error unmapping LUN. Code: %(code)s, Message: "
+ "%(message)s", {'code': e.code,
+ 'message': e.message})
+ # if the LUN is already unmapped
+ if e.code == netapp_api.REST_NO_SUCH_LUN_MAP:
+ pass
+ else:
+ raise e
+ else:
+ # Input is invalid or LUN may already be unmapped
+ LOG.warning("Error unmapping LUN. Invalid input.")
+
+ def has_luns_mapped_to_initiators(self, initiator_list):
+ """Checks whether any LUNs are mapped to the given initiator(s)."""
+ query = {
+ 'initiators.name': ' '.join(initiator_list),
+ 'fields': 'lun_maps'
+ }
+
+ response = self.send_request('/protocols/san/igroups',
+ 'get', query=query)
+
+ records = response.get('records', [])
+ if len(records) > 0:
+ for record in records:
+ lun_maps = record.get('lun_maps', [])
+ if len(lun_maps) > 0:
+ return True
+
+ return False
+
+ def get_iscsi_service_details(self):
+ """Returns iscsi iqn."""
+ query = {
+ 'fields': 'target.name'
+ }
+ response = self.send_request(
+ '/protocols/san/iscsi/services', 'get', query=query)
+ records = response.get('records')
+ if records:
+ return records[0]['target']['name']
+
+ LOG.debug('No iSCSI service found for vserver %s', self.vserver)
+ return None
+
+ def check_iscsi_initiator_exists(self, iqn):
+ """Returns True if initiator exists."""
+ endpoint_url = '/protocols/san/iscsi/credentials'
+ initiator_exists = True
+ try:
+ query = {
+ 'initiator': iqn,
+ }
+ response = self.send_request(endpoint_url, 'get', query=query)
+ records = response.get('records')
+ if not records:
+ initiator_exists = False
+
+ except netapp_api.NaApiError:
+ initiator_exists = False
+
+ return initiator_exists
+
+ def set_iscsi_chap_authentication(self, iqn, username, password):
+ """Provides NetApp host's CHAP credentials to the backend."""
+ initiator_exists = self.check_iscsi_initiator_exists(iqn)
+
+ command_template = ('iscsi security %(mode)s -vserver %(vserver)s '
+ '-initiator-name %(iqn)s -auth-type CHAP '
+ '-user-name %(username)s')
+
+ if initiator_exists:
+ LOG.debug('Updating CHAP authentication for %(iqn)s.',
+ {'iqn': iqn})
+ command = command_template % {
+ 'mode': 'modify',
+ 'vserver': self.vserver,
+ 'iqn': iqn,
+ 'username': username,
+ }
+ else:
+ LOG.debug('Adding initiator %(iqn)s with CHAP authentication.',
+ {'iqn': iqn})
+ command = command_template % {
+ 'mode': 'create',
+ 'vserver': self.vserver,
+ 'iqn': iqn,
+ 'username': username,
+ }
+
+ try:
+ with self.ssh_client.ssh_connect_semaphore:
+ ssh_pool = self.ssh_client.ssh_pool
+ with ssh_pool.item() as ssh:
+ self.ssh_client.execute_command_with_prompt(ssh,
+ command,
+ 'Password:',
+ password)
+ except Exception as e:
+ msg = _('Failed to set CHAP authentication for target IQN %(iqn)s.'
+ ' Details: %(ex)s') % {
+ 'iqn': iqn,
+ 'ex': e,
+ }
+ LOG.error(msg)
+ raise exception.VolumeBackendAPIException(data=msg)
+
+ def get_iscsi_target_details(self):
+ """Gets the iSCSI target portal details."""
+ query = {
+ 'services': 'data_iscsi',
+ 'fields': 'ip.address,enabled'
+ }
+
+ response = self.send_request('/network/ip/interfaces',
+ 'get', query=query)
+
+ target_list = []
+ records = response.get('records', [])
+ for record in records:
+ details = dict()
+ details['address'] = record['ip']['address']
+ details['tpgroup-tag'] = None
+ details['interface-enabled'] = record['enabled']
+ # NOTE(nahimsouza): from ONTAP documentation:
+ # ONTAP does not support changing the port number for iSCSI.
+ # Port number 3260 is registered as part of the iSCSI specification
+ # and cannot be used by any other application or service.
+ details['port'] = 3260
+ target_list.append(details)
+
+ return target_list
+
+ def move_lun(self, path, new_path):
+ """Moves the LUN at path to new path."""
+ seg = path.split("/")
+ new_seg = new_path.split("/")
+ LOG.debug("Moving LUN %(name)s to %(new_name)s.",
+ {'name': seg[-1], 'new_name': new_seg[-1]})
+ query = {
+ 'svm.name': self.vserver,
+ 'name': path
+ }
+ body = {
+ 'name': new_path,
+ }
+ self.send_request('/storage/luns/', 'patch', query=query, body=body)
+
+ def clone_file(self, flex_vol, src_path, dest_path, vserver,
+ dest_exists=False, source_snapshot=None, is_snapshot=False):
+ """Clones file on vserver."""
+ LOG.debug('Cloning file - volume %(flex_vol)s, src %(src_path)s, '
+ 'dest %(dest_path)s, vserver %(vserver)s,'
+ 'source_snapshot %(source_snapshot)s',
+ {
+ 'flex_vol': flex_vol,
+ 'src_path': src_path,
+ 'dest_path': dest_path,
+ 'vserver': vserver,
+ 'source_snapshot': source_snapshot,
+ })
+
+ volume = self._get_volume_by_args(flex_vol)
+ body = {
+ 'volume': {
+ 'uuid': volume['uuid'],
+ 'name': volume['name']
+ },
+ 'source_path': src_path,
+ 'destination_path': dest_path,
+ }
+ if is_snapshot and self.features.BACKUP_CLONE_PARAM:
+ body['is_backup'] = True
+
+ if dest_exists:
+ body['overwrite_destination'] = True
+
+ self.send_request('/storage/file/clone', 'post', body=body)
+
+ def clone_lun(self, volume, name, new_name, space_reserved='true',
+ qos_policy_group_name=None, src_block=0, dest_block=0,
+ block_count=0, source_snapshot=None, is_snapshot=False,
+ qos_policy_group_is_adaptive=False):
+ """Clones lun on vserver."""
+ LOG.debug('Cloning lun - volume: %(volume)s, name: %(name)s, '
+ 'new_name: %(new_name)s, space_reserved: %(space_reserved)s,'
+ ' qos_policy_group_name: %(qos_policy_group_name)s',
+ {
+ 'volume': volume,
+ 'name': name,
+ 'new_name': new_name,
+ 'space_reserved': space_reserved,
+ 'qos_policy_group_name': qos_policy_group_name,
+ })
+
+        # NOTE(nahimsouza): some parameters are not available on the REST
+        # API, but they are kept in the signature just for compatibility
+        # with ZAPI: src_block, dest_block, block_count, is_snapshot.
+
+ self._validate_qos_policy_group(qos_policy_group_is_adaptive)
+
+ source_path = f'/vol/{volume}'
+ if source_snapshot:
+ source_path += f'/.snapshot/{source_snapshot}'
+ source_path += f'/{name}'
+ body = {
+ 'svm': {
+ 'name': self.vserver
+ },
+ 'name': f'/vol/{volume}/{new_name}',
+ 'clone': {
+ 'source': {
+ 'name': source_path,
+ }
+ },
+ 'space': {
+ 'guarantee': {
+ 'requested': space_reserved == 'true',
+ }
+ }
+ }
+
+ if qos_policy_group_name:
+ body['qos_policy'] = {'name': qos_policy_group_name}
+
+ self.send_request('/storage/luns', 'post', body=body)
+
+ def destroy_lun(self, path, force=True):
+ """Destroys the LUN at the path."""
+ query = {}
+ query['name'] = path
+ query['svm'] = self.vserver
+
+ if force:
+ query['allow_delete_while_mapped'] = 'true'
+
+ self.send_request('/storage/luns/', 'delete', query=query)
+
+ def get_flexvol_capacity(self, flexvol_path=None, flexvol_name=None):
+ """Gets total capacity and free capacity, in bytes, of the flexvol."""
+ fields = 'name,space.available,space.afs_total'
+ try:
+ volume = self._get_volume_by_args(
+ vol_name=flexvol_name, vol_path=flexvol_path, fields=fields)
+ capacity = {
+ 'size-total': float(volume['space']['afs_total']),
+ 'size-available': float(volume['space']['available']),
+ }
+ return capacity
+ except exception.VolumeBackendAPIException:
+ msg = _('Volume %s not found.')
+ msg_args = flexvol_path or flexvol_name
+ raise na_utils.NetAppDriverException(msg % msg_args)
+
+ def get_provisioning_options_from_flexvol(self, flexvol_name):
+ """Get a dict of provisioning options matching existing flexvol."""
+
+ flexvol_info = self.get_flexvol(flexvol_name=flexvol_name)
+ dedupe_info = self.get_flexvol_dedupe_info(flexvol_name)
+
+ provisioning_opts = {
+ 'aggregate': flexvol_info['aggregate'],
+ # space-guarantee can be 'none', 'file', 'volume'
+ 'space_guarantee_type': flexvol_info.get('space-guarantee'),
+ 'snapshot_policy': flexvol_info['snapshot-policy'],
+ 'language': flexvol_info['language'],
+ 'dedupe_enabled': dedupe_info['dedupe'],
+ 'compression_enabled': dedupe_info['compression'],
+ 'snapshot_reserve': flexvol_info['percentage-snapshot-reserve'],
+ 'volume_type': flexvol_info['type'],
+ 'size': int(math.ceil(float(flexvol_info['size']) / units.Gi)),
+ 'is_flexgroup': flexvol_info['style-extended'] == 'flexgroup',
+ }
+
+ return provisioning_opts
+
+ def flexvol_exists(self, volume_name):
+ """Checks if a flexvol exists on the storage array."""
+ LOG.debug('Checking if volume %s exists', volume_name)
+
+ query = {
+ 'name': volume_name,
+ 'return_records': 'false'
+ }
+
+ response = self.send_request('/storage/volumes/', 'get', query=query)
+
+ return response['num_records'] > 0
+
+ def create_volume_async(self, name, aggregate_list, size_gb,
+ space_guarantee_type=None, snapshot_policy=None,
+ language=None, dedupe_enabled=False,
+ compression_enabled=False, snapshot_reserve=None,
+ volume_type='rw'):
+ """Creates a volume asynchronously."""
+
+ body = {
+ 'name': name,
+ 'size': size_gb * units.Gi,
+ 'type': volume_type,
+ }
+
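+        # A list of aggregates indicates a FlexGroup volume; a single
+        # aggregate name creates a regular FlexVol.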
+ if isinstance(aggregate_list, list):
+ body['style'] = 'flexgroup'
+ body['aggregates'] = [{'name': aggr} for aggr in aggregate_list]
+ else:
+ body['style'] = 'flexvol'
+ body['aggregates'] = [{'name': aggregate_list}]
+
+ if volume_type == 'dp':
+ snapshot_policy = None
+ else:
+ body['nas'] = {'path': '/%s' % name}
+
+ if snapshot_policy is not None:
+ body['snapshot_policy'] = {'name': snapshot_policy}
+
+ if space_guarantee_type:
+ body['guarantee'] = {'type': space_guarantee_type}
+
+ if language is not None:
+ body['language'] = language
+
+ if snapshot_reserve is not None:
+ body['space'] = {
+ 'snapshot': {
+ 'reserve_percent': str(snapshot_reserve)
+ }
+ }
+
+ # cDOT compression requires that deduplication be enabled.
+ if dedupe_enabled or compression_enabled:
+ body['efficiency'] = {'dedupe': 'background'}
+
+ if compression_enabled:
+ body['efficiency']['compression'] = 'background'
+
+ response = self.send_request('/storage/volumes/', 'post', body=body,
+ wait_on_accepted=False)
+
+ job_info = {
+ 'status': None,
+ 'jobid': response["job"]["uuid"],
+ 'error-code': None,
+ 'error-message': None,
+ }
+
+ return job_info
+
+ def create_flexvol(self, flexvol_name, aggregate_name, size_gb,
+ space_guarantee_type=None, snapshot_policy=None,
+ language=None, dedupe_enabled=False,
+ compression_enabled=False, snapshot_reserve=None,
+ volume_type='rw'):
+ """Creates a flexvol asynchronously and return the job info."""
+
+ return self.create_volume_async(
+ flexvol_name, aggregate_name, size_gb,
+ space_guarantee_type=space_guarantee_type,
+ snapshot_policy=snapshot_policy, language=language,
+ dedupe_enabled=dedupe_enabled,
+ compression_enabled=compression_enabled,
+ snapshot_reserve=snapshot_reserve, volume_type=volume_type)
+
+ def enable_volume_dedupe_async(self, volume_name):
+ """Enable deduplication on FlexVol/FlexGroup volume asynchronously."""
+
+ query = {
+ 'name': volume_name,
+ 'fields': 'uuid,style',
+ }
+ body = {
+ 'efficiency': {'dedupe': 'background'}
+ }
+ self.send_request('/storage/volumes/', 'patch', body=body, query=query,
+ wait_on_accepted=False)
+
+ def enable_volume_compression_async(self, volume_name):
+ """Enable compression on FlexVol/FlexGroup volume asynchronously."""
+ query = {
+ 'name': volume_name
+ }
+ body = {
+ 'efficiency': {'compression': 'background'}
+ }
+ self.send_request('/storage/volumes/', 'patch', body=body, query=query,
+ wait_on_accepted=False)
+
+ def _parse_lagtime(self, time_str):
+ """Parse lagtime string (ISO 8601) into a number of seconds."""
+
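+        # Example: the ISO 8601 duration 'PT3H45M10S' is parsed into
+        # 13510 seconds (3 * 3600 + 45 * 60 + 10).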
+ fmt_str = 'PT'
+ if 'H' in time_str:
+ fmt_str += '%HH'
+ if 'M' in time_str:
+ fmt_str += '%MM'
+ if 'S' in time_str:
+ fmt_str += '%SS'
+
+ t = None
+ try:
+ t = datetime.strptime(time_str, fmt_str)
+ except Exception:
+ LOG.debug("Failed to parse lagtime: %s", time_str)
+ raise
+
+ # convert to timedelta to get the total seconds
+ td = timedelta(hours=t.hour, minutes=t.minute, seconds=t.second)
+ return td.total_seconds()
+
+ def _get_snapmirrors(self, source_vserver=None, source_volume=None,
+ destination_vserver=None, destination_volume=None):
+
+ fields = ['state', 'source.svm.name', 'source.path',
+ 'destination.svm.name', 'destination.path',
+ 'transfer.end_time', 'lag_time', 'healthy', 'uuid']
+
+ query = {}
+ query['fields'] = '{}'.format(','.join(f for f in fields))
+
+ query_src_vol = source_volume if source_volume else '*'
+ query_src_vserver = source_vserver if source_vserver else '*'
+ query['source.path'] = query_src_vserver + ':' + query_src_vol
+
+ query_dst_vol = destination_volume if destination_volume else '*'
+ query_dst_vserver = destination_vserver if destination_vserver else '*'
+ query['destination.path'] = query_dst_vserver + ':' + query_dst_vol
+
+ response = self.send_request(
+ '/snapmirror/relationships', 'get', query=query)
+
+ snapmirrors = []
+ for record in response.get('records', []):
+ snapmirrors.append({
+ 'relationship-status': record.get('state'),
+ 'mirror-state': record['state'],
+ 'source-vserver': record['source']['svm']['name'],
+ 'source-volume': (record['source']['path'].split(':')[1] if
+ record.get('source') else None),
+ 'destination-vserver': record['destination']['svm']['name'],
+ 'destination-volume': (
+ record['destination']['path'].split(':')[1]
+ if record.get('destination') else None),
+ 'last-transfer-end-timestamp':
+ (record['transfer']['end_time'] if
+ record.get('transfer', {}).get('end_time') else None),
+ 'lag-time': (self._parse_lagtime(record['lag_time']) if
+ record.get('lag_time') else None),
+ 'is-healthy': record['healthy'],
+ 'uuid': record['uuid']
+ })
+
+ return snapmirrors
+
+ def get_snapmirrors(self, source_vserver, source_volume,
+ destination_vserver, destination_volume,
+ desired_attributes=None):
+ """Gets one or more SnapMirror relationships.
+
+ Either the source or destination info may be omitted.
+        The desired_attributes parameter exists only for consistency with
+        the ZAPI client signature and has no effect on the output.
+ """
+
+ snapmirrors = self._get_snapmirrors(
+ source_vserver=source_vserver,
+ source_volume=source_volume,
+ destination_vserver=destination_vserver,
+ destination_volume=destination_volume)
+
+ return snapmirrors
+
+ def create_snapmirror(self, source_vserver, source_volume,
+ destination_vserver, destination_volume,
+ schedule=None, policy=None,
+ relationship_type='data_protection'):
+ """Creates a SnapMirror relationship.
+
+        The schedule and relationship_type arguments are kept to avoid
+        breaking the API used by data_motion, but they are not used by the
+        REST API.
+
+        The schedule is part of the policy associated with the relationship,
+        and relationship_type is ignored because XDP is the only type
+        supported through the REST API.
+ """
+
+ body = {
+ 'source': {
+ 'path': source_vserver + ':' + source_volume
+ },
+ 'destination': {
+ 'path': destination_vserver + ':' + destination_volume
+ }
+ }
+
+ if policy:
+ body['policy'] = {'name': policy}
+
+ try:
+ self.send_request('/snapmirror/relationships/', 'post', body=body)
+ except netapp_api.NaApiError as e:
+ if e.code != netapp_api.REST_ERELATION_EXISTS:
+ raise e
+
+ def _set_snapmirror_state(self, state, source_vserver, source_volume,
+ destination_vserver, destination_volume,
+ wait_result=True):
+ """Change the snapmirror state between two volumes."""
+
+ snapmirror = self.get_snapmirrors(source_vserver, source_volume,
+ destination_vserver,
+ destination_volume)
+
+ if not snapmirror:
+ msg = _('Failed to get information about relationship between '
+ 'source %(src_vserver)s:%(src_volume)s and '
+ 'destination %(dst_vserver)s:%(dst_volume)s.') % {
+ 'src_vserver': source_vserver,
+ 'src_volume': source_volume,
+ 'dst_vserver': destination_vserver,
+ 'dst_volume': destination_volume}
+ raise na_utils.NetAppDriverException(msg)
+
+ uuid = snapmirror[0]['uuid']
+ body = {'state': state}
+ result = self.send_request('/snapmirror/relationships/' + uuid,
+ 'patch', body=body,
+ wait_on_accepted=wait_result)
+ job = result['job']
+ job_info = {
+ 'operation-id': None,
+ 'status': None,
+ 'jobid': job.get('uuid'),
+ 'error-code': None,
+ 'error-message': None,
+ 'relationship-uuid': uuid,
+ }
+
+ return job_info
+
+ def initialize_snapmirror(self, source_vserver, source_volume,
+ destination_vserver, destination_volume,
+ source_snapshot=None, transfer_priority=None):
+ """Initializes a SnapMirror relationship."""
+
+ # TODO: Trigger a geometry exception to be caught by data_motion.
+        # With ZAPI this error is raised when the volumes have a different
+        # number of components, but with REST the job sometimes has to be
+        # checked before that error occurs.
+
+ return self._set_snapmirror_state(
+ 'snapmirrored', source_vserver, source_volume,
+ destination_vserver, destination_volume, wait_result=False)
+
+ def abort_snapmirror(self, source_vserver, source_volume,
+ destination_vserver, destination_volume,
+ clear_checkpoint=False):
+ """Stops ongoing transfers for a SnapMirror relationship."""
+
+ snapmirror = self.get_snapmirrors(source_vserver, source_volume,
+ destination_vserver,
+ destination_volume)
+ if not snapmirror:
+ msg = _('Failed to get information about relationship between '
+ 'source %(src_vserver)s:%(src_volume)s and '
+ 'destination %(dst_vserver)s:%(dst_volume)s.') % {
+ 'src_vserver': source_vserver,
+ 'src_volume': source_volume,
+ 'dst_vserver': destination_vserver,
+ 'dst_volume': destination_volume}
+ raise na_utils.NetAppDriverException(msg)
+
+ snapmirror_uuid = snapmirror[0]['uuid']
+
+ query = {'state': 'transferring'}
+ transfers = self.send_request('/snapmirror/relationships/' +
+ snapmirror_uuid + '/transfers/', 'get',
+ query=query)
+
+ if not transfers.get('records'):
+ raise netapp_api.NaApiError(
+ code=netapp_api.ENOTRANSFER_IN_PROGRESS)
+
+ body = {'state': 'hard_aborted' if clear_checkpoint else 'aborted'}
+
+ for transfer in transfers['records']:
+ transfer_uuid = transfer['uuid']
+ self.send_request('/snapmirror/relationships/' +
+ snapmirror_uuid + '/transfers/' +
+ transfer_uuid, 'patch', body=body)
+
+ def delete_snapmirror(self, source_vserver, source_volume,
+ destination_vserver, destination_volume):
+
+        """Deletes a SnapMirror relationship on the destination."""
+
+ query_uuid = {}
+ query_uuid['source.path'] = source_vserver + ':' + source_volume
+ query_uuid['destination.path'] = (destination_vserver + ':' +
+ destination_volume)
+ query_uuid['fields'] = 'uuid'
+
+ response = self.send_request('/snapmirror/relationships/', 'get',
+ query=query_uuid)
+
+ records = response.get('records')
+ if not records:
+ raise netapp_api.NaApiError(code=netapp_api.EOBJECTNOTFOUND)
+
+        # 'destination_only' deletes the snapmirror on the destination but
+        # does not release it on the source.
+ query_delete = {"destination_only": "true"}
+
+ snapmirror_uuid = records[0].get('uuid')
+ self.send_request('/snapmirror/relationships/' +
+ snapmirror_uuid, 'delete',
+ query=query_delete)
+
+ def resume_snapmirror(self, source_vserver, source_volume,
+ destination_vserver, destination_volume):
+
+ """Resume a SnapMirror relationship."""
+
+ query_uuid = {}
+ query_uuid['source.path'] = source_vserver + ':' + source_volume
+ query_uuid['destination.path'] = (destination_vserver + ':' +
+ destination_volume)
+ query_uuid['fields'] = 'uuid,policy.type'
+
+ response_snapmirrors = self.send_request('/snapmirror/relationships/',
+ 'get', query=query_uuid)
+
+ records = response_snapmirrors.get('records')
+ if not records:
+ raise netapp_api.NaApiError(code=netapp_api.EOBJECTNOTFOUND)
+
+ snapmirror_uuid = records[0]['uuid']
+ snapmirror_policy = records[0]['policy']['type']
+
+ body_resync = {}
+ if snapmirror_policy == 'async':
+ body_resync['state'] = 'snapmirrored'
+ elif snapmirror_policy == 'sync':
+ body_resync['state'] = 'in_sync'
+
+ self.send_request('/snapmirror/relationships/' +
+ snapmirror_uuid, 'patch',
+ body=body_resync)
+
+ def release_snapmirror(self, source_vserver, source_volume,
+ destination_vserver, destination_volume,
+ relationship_info_only=False):
+ """Removes a SnapMirror relationship on the source endpoint."""
+
+ query_uuid = {}
+ query_uuid['list_destinations_only'] = 'true'
+ query_uuid['source.path'] = source_vserver + ':' + source_volume
+ query_uuid['destination.path'] = (destination_vserver + ':' +
+ destination_volume)
+ query_uuid['fields'] = 'uuid'
+
+ response_snapmirrors = self.send_request('/snapmirror/relationships/',
+ 'get', query=query_uuid)
+
+ records = response_snapmirrors.get('records')
+ if not records:
+ raise netapp_api.NaApiError(code=netapp_api.EOBJECTNOTFOUND)
+
+ query_release = {}
+ if relationship_info_only:
+ # release without removing related snapshots
+ query_release['source_info_only'] = 'true'
+ else:
+            # release and remove all related snapshots
+ query_release['source_only'] = 'true'
+
+ snapmirror_uuid = records[0].get('uuid')
+ self.send_request('/snapmirror/relationships/' +
+ snapmirror_uuid, 'delete',
+ query=query_release)
+
+ def resync_snapmirror(self, source_vserver, source_volume,
+ destination_vserver, destination_volume):
+ """Resync a SnapMirror relationship."""
+
+ # We reuse the resume operation for resync since both are handled in
+ # the same way in the REST API, by setting the snapmirror relationship
+ # to the snapmirrored state.
+ self.resume_snapmirror(source_vserver,
+ source_volume,
+ destination_vserver,
+ destination_volume)
+
+ def quiesce_snapmirror(self, source_vserver, source_volume,
+ destination_vserver, destination_volume):
+ """Disables future transfers to a SnapMirror destination."""
+
+ return self._set_snapmirror_state(
+ 'paused', source_vserver, source_volume,
+ destination_vserver, destination_volume)
+
+ def break_snapmirror(self, source_vserver, source_volume,
+ destination_vserver, destination_volume):
+ """Breaks a data protection SnapMirror relationship."""
+
+ self._set_snapmirror_state(
+ 'broken-off', source_vserver, source_volume,
+ destination_vserver, destination_volume)
+
+ def update_snapmirror(self, source_vserver, source_volume,
+ destination_vserver, destination_volume):
+ """Schedules a SnapMirror update."""
+
+ snapmirror = self.get_snapmirrors(source_vserver, source_volume,
+ destination_vserver,
+ destination_volume)
+ if not snapmirror:
+ msg = _('Failed to get information about relationship between '
+ 'source %(src_vserver)s:%(src_volume)s and '
+ 'destination %(dst_vserver)s:%(dst_volume)s.') % {
+ 'src_vserver': source_vserver,
+ 'src_volume': source_volume,
+ 'dst_vserver': destination_vserver,
+ 'dst_volume': destination_volume}
+
+ raise na_utils.NetAppDriverException(msg)
+
+ snapmirror_uuid = snapmirror[0]['uuid']
+
+        # NOTE(nahimsouza): A POST with an empty body starts the SnapMirror
+        # update operation.
+ try:
+ self.send_request('/snapmirror/relationships/' +
+ snapmirror_uuid + '/transfers/', 'post',
+ wait_on_accepted=False)
+ except netapp_api.NaApiError as e:
+ if (e.code != netapp_api.REST_UPDATE_SNAPMIRROR_FAILED):
+                LOG.warning('Unexpected failure during snapmirror update. '
+                            'Code: %(code)s, Message: %(message)s',
+ {'code': e.code, 'message': e.message})
+ raise
+
+ def mount_flexvol(self, flexvol_name, junction_path=None):
+ """Mounts a volume on a junction path."""
+ query = {'name': flexvol_name}
+ body = {'nas.path': (
+ junction_path if junction_path else '/%s' % flexvol_name)}
+ self.send_request('/storage/volumes', 'patch', query=query, body=body)
+
+ def get_cluster_name(self):
+ """Gets cluster name."""
+ query = {'fields': 'name'}
+
+ response = self.send_request('/cluster', 'get', query=query,
+ enable_tunneling=False)
+
+ return response['name']
+
+ def get_vserver_peers(self, vserver_name=None, peer_vserver_name=None):
+ """Gets one or more Vserver peer relationships."""
+ query = {
+ 'fields': 'svm.name,state,peer.svm.name,peer.cluster.name,'
+ 'applications'
+ }
+
+ if peer_vserver_name:
+ query['name'] = peer_vserver_name
+ if vserver_name:
+ query['svm.name'] = vserver_name
+
+ response = self.send_request('/svm/peers', 'get', query=query,
+ enable_tunneling=False)
+ records = response.get('records', [])
+
+ vserver_peers = []
+ for vserver_info in records:
+ vserver_peer = {
+ 'vserver': vserver_info['svm']['name'],
+ 'peer-vserver': vserver_info['peer']['svm']['name'],
+ 'peer-state': vserver_info['state'],
+ 'peer-cluster': vserver_info['peer']['cluster']['name'],
+ 'applications': vserver_info['applications'],
+ }
+ vserver_peers.append(vserver_peer)
+
+ return vserver_peers
+
+ def create_vserver_peer(self, vserver_name, peer_vserver_name,
+ vserver_peer_application=None):
+ """Creates a Vserver peer relationship."""
+ # default peering application to `snapmirror` if none is specified.
+ if not vserver_peer_application:
+ vserver_peer_application = ['snapmirror']
+
+ body = {
+ 'svm.name': vserver_name,
+ 'name': peer_vserver_name,
+ 'applications': vserver_peer_application
+ }
+
+ self.send_request('/svm/peers', 'post', body=body,
+ enable_tunneling=False)
+
+ def start_lun_move(self, lun_name, dest_ontap_volume,
+ src_ontap_volume=None, dest_lun_name=None):
+ """Starts a lun move operation between ONTAP volumes."""
+ if dest_lun_name is None:
+ dest_lun_name = lun_name
+ if src_ontap_volume is None:
+ src_ontap_volume = dest_ontap_volume
+
+ src_path = f'/vol/{src_ontap_volume}/{lun_name}'
+ dest_path = f'/vol/{dest_ontap_volume}/{dest_lun_name}'
+ body = {'name': dest_path}
+ self._lun_update_by_path(src_path, body)
+
+ return dest_path
+
+ def get_lun_move_status(self, dest_path):
+ """Get lun move job status from a given dest_path."""
+ lun = self._get_first_lun_by_path(
+ dest_path, fields='movement.progress')
+
+ if not lun:
+ return None
+
+ move_progress = lun['movement']['progress']
+ move_status = {
+ 'job-status': move_progress['state'],
+ 'last-failure-reason': (move_progress
+ .get('failure', {})
+ .get('message', None))
+ }
+
+ return move_status
+
+ def start_lun_copy(self, lun_name, dest_ontap_volume, dest_vserver,
+ src_ontap_volume=None, src_vserver=None,
+ dest_lun_name=None):
+ """Starts a lun copy operation between ONTAP volumes."""
+ if src_ontap_volume is None:
+ src_ontap_volume = dest_ontap_volume
+ if src_vserver is None:
+ src_vserver = dest_vserver
+ if dest_lun_name is None:
+ dest_lun_name = lun_name
+
+ src_path = f'/vol/{src_ontap_volume}/{lun_name}'
+ dest_path = f'/vol/{dest_ontap_volume}/{dest_lun_name}'
+
+ body = {
+ 'name': dest_path,
+ 'copy.source.name': src_path,
+ 'svm.name': dest_vserver
+ }
+
+ self.send_request('/storage/luns', 'post', body=body,
+ enable_tunneling=False)
+
+ return dest_path
+
+ def get_lun_copy_status(self, dest_path):
+ """Get lun copy job status from a given dest_path."""
+ lun = self._get_first_lun_by_path(
+ dest_path, fields='copy.source.progress')
+
+ if not lun:
+ return None
+
+ copy_progress = lun['copy']['source']['progress']
+ copy_status = {
+ 'job-status': copy_progress['state'],
+ 'last-failure-reason': (copy_progress
+ .get('failure', {})
+ .get('message', None))
+ }
+
+ return copy_status
+
+ def cancel_lun_copy(self, dest_path):
+ """Cancel an in-progress lun copy by deleting the lun."""
+ query = {
+ 'name': dest_path,
+ 'svm.name': self.vserver
+ }
+
+ try:
+ self.send_request('/storage/luns/', 'delete', query=query)
+ except netapp_api.NaApiError as e:
+ msg = (_('Could not cancel lun copy by deleting lun at %s. %s'))
+ raise na_utils.NetAppDriverException(msg % (dest_path, e))
+
+ def start_file_copy(self, file_name, dest_ontap_volume,
+ src_ontap_volume=None,
+ dest_file_name=None):
+ """Starts a file copy operation between ONTAP volumes."""
+ if src_ontap_volume is None:
+ src_ontap_volume = dest_ontap_volume
+ if dest_file_name is None:
+ dest_file_name = file_name
+
+ source_vol = self._get_volume_by_args(src_ontap_volume)
+
+ dest_vol = source_vol
+ if dest_ontap_volume != src_ontap_volume:
+ dest_vol = self._get_volume_by_args(dest_ontap_volume)
+
+ body = {
+ 'files_to_copy': [
+ {
+ 'source': {
+ 'path': f'{src_ontap_volume}/{file_name}',
+ 'volume': {
+ 'uuid': source_vol['uuid']
+ }
+ },
+ 'destination': {
+ 'path': f'{dest_ontap_volume}/{dest_file_name}',
+ 'volume': {
+ 'uuid': dest_vol['uuid']
+ }
+ }
+ }
+ ]
+ }
+
+ result = self.send_request('/storage/file/copy', 'post', body=body,
+ enable_tunneling=False)
+ return result['job']['uuid']
+
+ def get_file_copy_status(self, job_uuid):
+ """Get file copy job status from a given job's UUID."""
+ # TODO(rfluisa): Select only the fields that are needed here.
+ query = {}
+ query['fields'] = '*'
+
+ result = self.send_request(
+ f'/cluster/jobs/{job_uuid}', 'get', query=query,
+ enable_tunneling=False)
+
+ if not result or not result.get('state', None):
+ return None
+
+ state = result.get('state')
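+        # Map the REST job state onto the ZAPI-style status values expected
+        # by the callers of this method ('complete' / 'destroyed').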
+ if state == 'success':
+ state = 'complete'
+ elif state == 'failure':
+ state = 'destroyed'
+
+ copy_status = {
+ 'job-status': state,
+ 'last-failure-reason': result.get('error', {}).get('message', None)
+ }
+
+ return copy_status
+
+ def rename_file(self, orig_file_name, new_file_name):
+ """Rename a volume file."""
+ LOG.debug("Renaming the file %(original)s to %(new)s.",
+ {'original': orig_file_name, 'new': new_file_name})
+
+ unique_volume = self._get_volume_by_args(
+ vol_name=orig_file_name.split('/')[2])
+
+ # Get the relative path
+ orig_file_name = '/'.join(orig_file_name.split('/')[3:])
+ new_file_name = '/'.join(new_file_name.split('/')[3:])
+
+ # Path requires "%2E" to represent "." and "%2F" to represent "/".
+ orig_file_name = orig_file_name.replace('.', '%2E').replace('/', '%2F')
+ new_file_name = new_file_name.replace('.', '%2E').replace('/', '%2F')
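+        # For example, a relative path like 'dir/file.img' (illustrative name)
+        # becomes 'dir%2Ffile%2Eimg' before being embedded in the URL.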
+
+ body = {'path': new_file_name}
+
+ self.send_request(
+ f'/storage/volumes/{unique_volume["uuid"]}/files/{orig_file_name}',
+ 'patch', body=body)
diff --git a/cinder/volume/drivers/netapp/dataontap/nfs_base.py b/cinder/volume/drivers/netapp/dataontap/nfs_base.py
index 3b8a32266..e0116efe1 100644
--- a/cinder/volume/drivers/netapp/dataontap/nfs_base.py
+++ b/cinder/volume/drivers/netapp/dataontap/nfs_base.py
@@ -31,6 +31,7 @@ import time
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
+from oslo_log import versionutils
from oslo_utils import netutils
from oslo_utils import units
import six
@@ -648,7 +649,7 @@ class NetAppNfsDriver(driver.ManageableVD,
raise NotImplementedError()
def _copy_from_img_service(self, context, volume, image_service,
- image_id):
+ image_id, use_copyoffload_tool=False):
raise NotImplementedError()
def clone_image(self, context, volume,
@@ -675,6 +676,11 @@ class NetAppNfsDriver(driver.ManageableVD,
major, minor = self.zapi_client.get_ontapi_version()
col_path = self.configuration.netapp_copyoffload_tool_path
+ if col_path:
+ msg = ('The "netapp_copyoffload_tool_path" configuration option '
+               'is deprecated and will be removed soon. Please do not '
+               'set it.')
+ versionutils.report_deprecated_feature(LOG, msg)
try:
cache_result = self._find_image_in_cache(image_id)
if cache_result:
@@ -683,10 +689,15 @@ class NetAppNfsDriver(driver.ManageableVD,
cloned = self._direct_nfs_clone(volume, image_location,
image_id)
- # Try to use the copy offload tool
- if not cloned and col_path and major == 1 and minor >= 20:
- cloned = self._copy_from_img_service(context, volume,
- image_service, image_id)
+ # Try to use the deprecated copy offload tool or file copy.
+ if not cloned:
+                # Use the copy offload tool only if its path is configured
+                # and the ONTAPI version is greater than or equal to
+                # 1.20.
+ use_tool = bool(col_path) and (major == 1 and minor >= 20)
+ cloned = self._copy_from_img_service(
+ context, volume, image_service, image_id,
+ use_copyoffload_tool=use_tool)
if cloned:
self._do_qos_for_volume(volume, extra_specs)
diff --git a/cinder/volume/drivers/netapp/dataontap/nfs_cmode.py b/cinder/volume/drivers/netapp/dataontap/nfs_cmode.py
index 0cccbef64..f070fe10e 100644
--- a/cinder/volume/drivers/netapp/dataontap/nfs_cmode.py
+++ b/cinder/volume/drivers/netapp/dataontap/nfs_cmode.py
@@ -314,7 +314,7 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver,
"""Gets the vserver and export volume for share."""
(host_ip, export_path) = self._get_export_ip_path(volume_id, share)
ifs = self.zapi_client.get_if_info_by_ip(host_ip)
- vserver = ifs[0].get_child_content('vserver')
+ vserver = ifs[0].get('vserver')
exp_volume = self.zapi_client.get_vol_by_junc_vserver(vserver,
export_path)
return vserver, exp_volume
@@ -512,7 +512,7 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver,
"""Get vserver for the mentioned ip."""
try:
ifs = self.zapi_client.get_if_info_by_ip(ip)
- vserver = ifs[0].get_child_content('vserver')
+ vserver = ifs[0].get('vserver')
return vserver
except Exception:
return None
@@ -620,7 +620,7 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver,
if not vserver:
raise exception.NotFound(_("Unable to locate an SVM that is "
"managing the IP address '%s'") % ip)
- return ip
+ return ip, vserver
def _copy_from_cache(self, volume, image_id, cache_result):
"""Try copying image file_name from cached file_name."""
@@ -642,6 +642,11 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver,
LOG.debug("Trying copy from cache using copy offload.")
self._copy_from_remote_cache(volume, image_id, cache_copy)
copied = True
+ elif cache_copy:
+ LOG.debug("Trying copy from cache using file copy.")
+ self._copy_from_remote_cache(volume, image_id, cache_copy,
+ use_copyoffload_tool=False)
+ copied = True
except Exception:
LOG.exception('Error in workflow copy from cache.')
return copied
@@ -670,41 +675,55 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver,
cache_copy = res
return cache_copy, found_local_copy
- def _copy_from_remote_cache(self, volume, image_id, cache_copy):
+ def _copy_from_remote_cache(self, volume, image_id, cache_copy,
+ use_copyoffload_tool=True):
"""Copies the remote cached image to the provided volume.
- Executes the copy offload binary which copies the cached image to
- the destination path of the provided volume. Also registers the new
- copy of the image as a cached image.
+ Executes either the copy offload binary or the file copy operation,
+ copying the cached image to the destination path of the provided
+ volume. Also registers the new copy of the image as a cached image.
"""
(nfs_share, file_name) = cache_copy
- col_path = self.configuration.netapp_copyoffload_tool_path
- src_ip, src_path = self._get_source_ip_and_path(nfs_share, file_name)
- dest_ip, dest_path = self._get_destination_ip_and_path(volume)
+ (src_ip, src_vserver, src_share_path, src_path) = (
+ self._get_source_ip_and_path(nfs_share, file_name))
+ (dest_ip, dest_vserver, dest_path) = (
+ self._get_destination_ip_and_path(volume))
- # Always run copy offload as regular user, it's sufficient
- # and rootwrap doesn't allow copy offload to run as root anyways.
- self._execute(col_path, src_ip, dest_ip, src_path, dest_path,
- run_as_root=False, check_exit_code=0)
+ # NOTE(felipe_rodrigues): the copy offload tool code will be removed in
+ # the Antelope release.
+ col_path = self.configuration.netapp_copyoffload_tool_path
+ if use_copyoffload_tool and col_path:
+ # Always run copy offload as regular user, it's sufficient
+ # and rootwrap doesn't allow copy offload to run as root anyways.
+ self._execute(col_path, src_ip, dest_ip, src_path, dest_path,
+ run_as_root=False, check_exit_code=0)
+ LOG.debug("Copied image from cache to volume %s using "
+ "copy offload.", volume['id'])
+ else:
+ dest_share_path = dest_path.rsplit("/", 1)[0]
+ self._copy_file(file_name, file_name, src_share_path, src_vserver,
+ dest_share_path, dest_vserver,
+ dest_backend_name=self.backend_name,
+ dest_file_name=volume.name)
+ LOG.debug("Copied image from cache to volume %s using "
+ "file copy operation.", volume['id'])
self._register_image_in_cache(volume, image_id)
- LOG.debug("Copied image from cache to volume %s using copy offload.",
- volume['id'])
def _get_source_ip_and_path(self, nfs_share, file_name):
host, share_path = na_utils.get_export_host_junction_path(nfs_share)
- src_ip = self._get_ip_verify_on_cluster(host)
+ (src_ip, src_vserver) = self._get_ip_verify_on_cluster(host)
src_path = os.path.join(share_path, file_name)
- return src_ip, src_path
+ return src_ip, src_vserver, share_path, src_path
def _get_destination_ip_and_path(self, volume):
share = volume_utils.extract_host(volume['host'], level='pool')
share_ip, share_path = na_utils.get_export_host_junction_path(share)
- dest_ip = self._get_ip_verify_on_cluster(share_ip)
+ (dest_ip, vserver) = self._get_ip_verify_on_cluster(share_ip)
dest_path = os.path.join(share_path, volume['name'])
- return dest_ip, dest_path
+ return dest_ip, vserver, dest_path
def _clone_file_dst_exists(self, share, src_name, dst_name,
dest_exists=False):
@@ -714,38 +733,39 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver,
dest_exists=dest_exists)
def _copy_from_img_service(self, context, volume, image_service,
- image_id):
- """Copies from the image service using copy offload."""
+ image_id, use_copyoffload_tool=True):
+ """Copies from the image service using copy offload or file copy."""
- LOG.debug("Trying copy from image service using copy offload.")
image_loc = image_service.get_location(context, image_id)
locations = self._construct_image_nfs_url(image_loc)
src_ip = None
+ src_vserver = None
+ src_volume = None
selected_loc = None
cloned = False
# this will match the first location that has a valid IP on cluster
for location in locations:
- conn, dr = self._check_get_nfs_path_segs(location)
+ conn, src_volume = self._check_get_nfs_path_segs(location)
if conn:
try:
- src_ip = self._get_ip_verify_on_cluster(conn.split(':')[0])
+ (src_ip, src_vserver) = (
+ self._get_ip_verify_on_cluster(conn.split(':')[0]))
selected_loc = location
break
except exception.NotFound:
pass
- if src_ip is None:
+ if src_ip is None or src_vserver is None:
raise exception.NotFound(_("Source host details not found."))
(__, ___, img_file) = selected_loc.rpartition('/')
- src_path = os.path.join(dr, img_file)
- dst_ip, vol_path = self._get_destination_ip_and_path(volume)
- share_path = vol_path.rsplit("/", 1)[0]
- dst_share = dst_ip + ':' + share_path
+ (dst_ip, dest_vserver, vol_path) = (
+ self._get_destination_ip_and_path(volume))
+ dest_share_path = vol_path.rsplit("/", 1)[0]
+ dst_share = dst_ip + ':' + dest_share_path
# tmp file is required to deal with img formats
tmp_img_file = six.text_type(uuid.uuid4())
- col_path = self.configuration.netapp_copyoffload_tool_path
img_info = image_service.show(context, image_id)
self._check_share_can_hold_size(dst_share, img_info['size'])
run_as_root = self._execute_as_root
@@ -754,14 +774,27 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver,
dst_img_local = os.path.join(dst_dir, tmp_img_file)
try:
- dst_img_serv_path = os.path.join(
- share_path, tmp_img_file)
- # Always run copy offload as regular user, it's sufficient
- # and rootwrap doesn't allow copy offload to run as root
- # anyways.
- self._execute(col_path, src_ip, dst_ip, src_path,
- dst_img_serv_path, run_as_root=False,
- check_exit_code=0)
+ # NOTE(felipe_rodrigues): the copy offload tool code will be
+            # removed in the Antelope release.
+ col_path = self.configuration.netapp_copyoffload_tool_path
+ if col_path and use_copyoffload_tool:
+ LOG.debug("Trying copy from image service using copy offload.")
+ dst_img_serv_path = os.path.join(dest_share_path, tmp_img_file)
+ src_path = os.path.join(src_volume, img_file)
+ # Always run copy offload as regular user, it's sufficient
+ # and rootwrap doesn't allow copy offload to run as root
+ # anyways.
+ self._execute(col_path, src_ip, dst_ip, src_path,
+ dst_img_serv_path, run_as_root=False,
+ check_exit_code=0)
+ else:
+ LOG.debug("Trying copy from image service using file copy.")
+ src_volume = ''.join(src_volume.split("/", 1))
+ dest_share_path = ''.join(dest_share_path.split("/", 1))
+ self._copy_file(img_file, img_file, src_volume, src_vserver,
+ dest_share_path, dest_vserver,
+ dest_backend_name=self.backend_name,
+ dest_file_name=tmp_img_file)
self._discover_file_till_timeout(dst_img_local, timeout=120)
LOG.debug('Copied image %(img)s to tmp file %(tmp)s.',
@@ -1072,7 +1105,7 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver,
"""Check whether storage can perform clone file for FlexGroup"""
return self.zapi_client.features.FLEXGROUP_CLONE_FILE
- def _cancel_file_copy(self, job_uuid, volume, dest_pool,
+ def _cancel_file_copy(self, job_uuid, file_name, dest_pool,
dest_backend_name=None):
"""Cancel an on-going file copy operation."""
try:
@@ -1082,7 +1115,7 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver,
self.zapi_client.destroy_file_copy(job_uuid)
except na_utils.NetAppDriverException:
dest_client = dot_utils.get_client_for_backend(dest_backend_name)
- file_path = '%s/%s' % (dest_pool, volume.name)
+ file_path = '%s/%s' % (dest_pool, file_name)
try:
dest_client.delete_file(file_path)
except Exception:
@@ -1091,17 +1124,17 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver,
'pool %s and delete it manually to avoid unused '
'resources.', file_path, dest_pool)
- def _copy_file(self, volume, src_ontap_volume, src_vserver,
+ def _copy_file(self, file_name, volume_id, src_ontap_volume, src_vserver,
dest_ontap_volume, dest_vserver, dest_file_name=None,
dest_backend_name=None, cancel_on_error=False):
"""Copies file from an ONTAP volume to another."""
job_uuid = self.zapi_client.start_file_copy(
- volume.name, dest_ontap_volume, src_ontap_volume=src_ontap_volume,
+ file_name, dest_ontap_volume, src_ontap_volume=src_ontap_volume,
dest_file_name=dest_file_name)
- LOG.debug('Start copying file %(vol)s from '
+ LOG.debug('Start copying file %(file)s from '
'%(src_vserver)s:%(src_ontap_vol)s to '
'%(dest_vserver)s:%(dest_ontap_vol)s. Job UUID is %(job)s.',
- {'vol': volume.name, 'src_vserver': src_vserver,
+ {'file': file_name, 'src_vserver': src_vserver,
'src_ontap_vol': src_ontap_volume,
'dest_vserver': dest_vserver,
'dest_ontap_vol': dest_ontap_volume,
@@ -1116,11 +1149,11 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver,
"corresponding Job UUID % doesn't "
"exist."))
raise na_utils.NetAppDriverException(
- status_error_msg % (volume.id, job_uuid))
+ status_error_msg % (file_name, job_uuid))
elif copy_status['job-status'] == 'destroyed':
status_error_msg = (_('Error copying file %s. %s.'))
raise na_utils.NetAppDriverException(
- status_error_msg % (volume.id,
+ status_error_msg % (file_name,
copy_status['last-failure-reason']))
elif copy_status['job-status'] == 'complete':
raise loopingcall.LoopingCallDone()
@@ -1137,7 +1170,7 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver,
if cancel_on_error:
try:
self._cancel_file_copy(
- job_uuid, volume, dest_ontap_volume,
+ job_uuid, file_name, dest_ontap_volume,
dest_backend_name=dest_backend_name)
except na_utils.NetAppDriverException as ex:
LOG.error("Failed to cancel file copy operation. %s",
@@ -1146,7 +1179,7 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver,
ctxt.reraise = False
msg = (_('Timeout waiting volume %s to complete '
'migration.'))
- raise na_utils.NetAppDriverTimeout(msg % volume.id)
+ raise na_utils.NetAppDriverTimeout(msg % volume_id)
def _finish_volume_migration(self, src_volume, dest_pool):
"""Finish volume migration to another ONTAP volume."""
@@ -1171,8 +1204,8 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver,
[vserver_peer_application])
src_ontap_volume_name = src_pool.split(':/')[1]
dest_ontap_volume_name = dest_pool.split(':/')[1]
- self._copy_file(volume, src_ontap_volume_name, src_vserver,
- dest_ontap_volume_name, dest_vserver,
+ self._copy_file(volume.name, volume.id, src_ontap_volume_name,
+ src_vserver, dest_ontap_volume_name, dest_vserver,
dest_backend_name=dest_backend_name,
cancel_on_error=True)
updates = self._finish_volume_migration(volume, dest_pool)
@@ -1193,7 +1226,7 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver,
'vserver': vserver})
src_ontap_volume_name = src_pool.split(':/')[1]
dest_ontap_volume_name = dest_pool.split(':/')[1]
- self._copy_file(volume, src_ontap_volume_name, vserver,
+ self._copy_file(volume.name, volume.id, src_ontap_volume_name, vserver,
dest_ontap_volume_name, vserver,
dest_backend_name=dest_backend_name,
cancel_on_error=True)
diff --git a/cinder/volume/drivers/netapp/dataontap/utils/capabilities.py b/cinder/volume/drivers/netapp/dataontap/utils/capabilities.py
index 2437f802e..aedd9e1ff 100644
--- a/cinder/volume/drivers/netapp/dataontap/utils/capabilities.py
+++ b/cinder/volume/drivers/netapp/dataontap/utils/capabilities.py
@@ -22,34 +22,9 @@ import re
from oslo_log import log as logging
import six
-from cinder import exception
-from cinder.i18n import _
-
LOG = logging.getLogger(__name__)
-# NOTE(cknight): The keys in this map are tuples that contain arguments needed
-# for efficient use of the system-user-capability-get-iter cDOT API. The
-# values are SSC extra specs associated with the APIs listed in the keys.
-SSC_API_MAP = {
- ('storage.aggregate', 'show', 'aggr-options-list-info'): [
- 'netapp_raid_type',
- ],
- ('storage.disk', 'show', 'storage-disk-get-iter'): [
- 'netapp_disk_type',
- ],
- ('snapmirror', 'show', 'snapmirror-get-iter'): [
- 'netapp_mirrored',
- ],
- ('volume.efficiency', 'show', 'sis-get-iter'): [
- 'netapp_dedup',
- 'netapp_compression',
- ],
- ('volume', '*show', 'volume-get-iter'): [
- 'netapp_flexvol_encryption',
- ],
-}
-
class CapabilitiesLibrary(object):
@@ -64,30 +39,7 @@ class CapabilitiesLibrary(object):
self.invalid_extra_specs = []
def check_api_permissions(self):
- """Check which APIs that support SSC functionality are available."""
-
- inaccessible_apis = []
- invalid_extra_specs = []
-
- for api_tuple, extra_specs in SSC_API_MAP.items():
- object_name, operation_name, api = api_tuple
- if not self.zapi_client.check_cluster_api(object_name,
- operation_name,
- api):
- inaccessible_apis.append(api)
- invalid_extra_specs.extend(extra_specs)
-
- if inaccessible_apis:
- if 'volume-get-iter' in inaccessible_apis:
- msg = _('User not permitted to query Data ONTAP volumes.')
- raise exception.VolumeBackendAPIException(data=msg)
- else:
- LOG.warning('The configured user account does not have '
- 'sufficient privileges to use all needed '
- 'APIs. The following extra specs will fail '
- 'or be ignored: %s.', invalid_extra_specs)
-
- self.invalid_extra_specs = invalid_extra_specs
+ self.invalid_extra_specs = self.zapi_client.check_api_permissions()
def cluster_user_supported(self):
return not self.invalid_extra_specs
diff --git a/cinder/volume/drivers/netapp/dataontap/utils/data_motion.py b/cinder/volume/drivers/netapp/dataontap/utils/data_motion.py
index c696767b4..8ffcaffaa 100644
--- a/cinder/volume/drivers/netapp/dataontap/utils/data_motion.py
+++ b/cinder/volume/drivers/netapp/dataontap/utils/data_motion.py
@@ -353,7 +353,8 @@ class DataMotionMixin(object):
src_vserver, src_flexvol_name, dest_vserver,
dest_flexvol_name,
desired_attributes=['relationship-status', 'mirror-state'])[0]
- if snapmirror.get('relationship-status') != 'quiesced':
+ if (snapmirror.get('relationship-status') not in ['quiesced',
+ 'paused']):
msg = _("SnapMirror relationship is not quiesced.")
raise na_utils.NetAppDriverException(msg)
@@ -524,8 +525,8 @@ class DataMotionMixin(object):
dest_flexvol_name)
except loopingcall.LoopingCallTimeOut:
- msg = _("Timeout waiting destination FlexGroup to to come "
- "online.")
+ msg = _("Timeout waiting destination FlexGroup "
+ "to come online.")
raise na_utils.NetAppDriverException(msg)
else:
@@ -534,6 +535,24 @@ class DataMotionMixin(object):
size,
**provisioning_options)
+ timeout = self._get_replication_volume_online_timeout()
+
+ def _wait_volume_is_online():
+ volume_state = dest_client.get_volume_state(
+ name=dest_flexvol_name)
+ if volume_state and volume_state == 'online':
+ raise loopingcall.LoopingCallDone()
+
+ try:
+ wait_call = loopingcall.FixedIntervalWithTimeoutLoopingCall(
+ _wait_volume_is_online)
+ wait_call.start(interval=5, timeout=timeout).wait()
+
+ except loopingcall.LoopingCallTimeOut:
+            msg = _("Timeout waiting destination FlexVol to come "
+                    "online.")
+ raise na_utils.NetAppDriverException(msg)
+
def ensure_snapmirrors(self, config, src_backend_name, src_flexvol_names):
"""Ensure all the SnapMirrors needed for whole-backend replication."""
backend_names = self.get_replication_backend_names(config)
diff --git a/cinder/volume/drivers/netapp/dataontap/utils/utils.py b/cinder/volume/drivers/netapp/dataontap/utils/utils.py
index dae44d84d..314863cab 100644
--- a/cinder/volume/drivers/netapp/dataontap/utils/utils.py
+++ b/cinder/volume/drivers/netapp/dataontap/utils/utils.py
@@ -26,6 +26,7 @@ from cinder.i18n import _
from cinder.volume import configuration
from cinder.volume import driver
from cinder.volume.drivers.netapp.dataontap.client import client_cmode
+from cinder.volume.drivers.netapp.dataontap.client import client_cmode_rest
from cinder.volume.drivers.netapp import options as na_opts
from cinder.volume import volume_utils
@@ -65,15 +66,28 @@ def get_client_for_backend(backend_name, vserver_name=None):
"""Get a cDOT API client for a specific backend."""
config = get_backend_configuration(backend_name)
- client = client_cmode.Client(
- transport_type=config.netapp_transport_type,
- username=config.netapp_login,
- password=config.netapp_password,
- hostname=config.netapp_server_hostname,
- port=config.netapp_server_port,
- vserver=vserver_name or config.netapp_vserver,
- trace=volume_utils.TRACE_API,
- api_trace_pattern=config.netapp_api_trace_pattern)
+ if config.netapp_use_legacy_client:
+ client = client_cmode.Client(
+ transport_type=config.netapp_transport_type,
+ username=config.netapp_login,
+ password=config.netapp_password,
+ hostname=config.netapp_server_hostname,
+ port=config.netapp_server_port,
+ vserver=vserver_name or config.netapp_vserver,
+ trace=volume_utils.TRACE_API,
+ api_trace_pattern=config.netapp_api_trace_pattern)
+ else:
+ client = client_cmode_rest.RestClient(
+ transport_type=config.netapp_transport_type,
+ ssl_cert_path=config.netapp_ssl_cert_path,
+ username=config.netapp_login,
+ password=config.netapp_password,
+ hostname=config.netapp_server_hostname,
+ port=config.netapp_server_port,
+ vserver=vserver_name or config.netapp_vserver,
+ trace=volume_utils.TRACE_API,
+ api_trace_pattern=config.netapp_api_trace_pattern,
+ async_rest_timeout=config.netapp_async_rest_timeout)
return client
diff --git a/cinder/volume/drivers/netapp/options.py b/cinder/volume/drivers/netapp/options.py
index c32bd2799..a92d38c77 100644
--- a/cinder/volume/drivers/netapp/options.py
+++ b/cinder/volume/drivers/netapp/options.py
@@ -50,14 +50,34 @@ netapp_connection_opts = [
cfg.IntOpt('netapp_server_port',
help=('The TCP port to use for communication with the storage '
'system or proxy server. If not specified, Data ONTAP '
- 'drivers will use 80 for HTTP and 443 for HTTPS.')), ]
+ 'drivers will use 80 for HTTP and 443 for HTTPS.')),
+ cfg.BoolOpt('netapp_use_legacy_client',
+ default=True,
+ help=('Select which ONTAP client to use for retrieving and '
+ 'modifying data on the storage. The legacy client '
+ 'relies on ZAPI calls. If set to False, the new REST '
+ 'client is used, which runs REST calls if supported, '
+ 'otherwise falls back to the equivalent ZAPI call.')),
+ cfg.IntOpt('netapp_async_rest_timeout',
+ min=60,
+ default=60, # One minute
+ help='The maximum time in seconds to wait for completing a '
+ 'REST asynchronous operation.'), ]
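+
+# For example (illustrative cinder.conf backend section name), the REST client
+# can be selected and tuned with:
+#
+#   [ontap-backend]
+#   netapp_use_legacy_client = False
+#   netapp_async_rest_timeout = 120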
netapp_transport_opts = [
cfg.StrOpt('netapp_transport_type',
default='http',
choices=['http', 'https'],
help=('The transport protocol used when communicating with '
- 'the storage system or proxy server.')), ]
+ 'the storage system or proxy server.')),
+ cfg.StrOpt('netapp_ssl_cert_path',
+ help=("The path to a CA_BUNDLE file or directory with "
+ "certificates of trusted CA. If set to a directory, it "
+ "must have been processed using the c_rehash utility "
+                      "supplied with OpenSSL. If not specified, it will use "
+                      "Mozilla's carefully curated collection of Root "
+                      "Certificates for validating the trustworthiness of SSL "
+                      "certificates. Only applies to the new REST client.")), ]
netapp_basicauth_opts = [
cfg.StrOpt('netapp_login',
@@ -129,7 +149,10 @@ netapp_nfs_extra_opts = [
help=('This option specifies the path of the NetApp copy '
'offload tool binary. Ensure that the binary has execute '
'permissions set which allow the effective user of the '
- 'cinder-volume process to execute the file.')), ]
+ 'cinder-volume process to execute the file.'),
+ deprecated_for_removal=True,
+               deprecated_reason='The copy offload tool is no longer '
+                                 'available for download.'), ]
netapp_san_opts = [
cfg.StrOpt('netapp_lun_ostype',
help=('This option defines the type of operating system that'
diff --git a/cinder/volume/drivers/netapp/utils.py b/cinder/volume/drivers/netapp/utils.py
index 15dfde894..7186ce757 100644
--- a/cinder/volume/drivers/netapp/utils.py
+++ b/cinder/volume/drivers/netapp/utils.py
@@ -184,6 +184,13 @@ def trace_filter_func_api(all_args):
return re.match(API_TRACE_PATTERN, api_name) is not None
+def trace_filter_func_rest_api(all_args):
+ url = all_args.get('url')
+ if url is None:
+ return True
+ return re.match(API_TRACE_PATTERN, url) is not None
+
+
def round_down(value, precision='0.00'):
return float(decimal.Decimal(str(value)).quantize(
decimal.Decimal(precision), rounding=decimal.ROUND_DOWN))
diff --git a/cinder/volume/drivers/pure.py b/cinder/volume/drivers/pure.py
index ebeb59eaa..4829755a0 100644
--- a/cinder/volume/drivers/pure.py
+++ b/cinder/volume/drivers/pure.py
@@ -282,13 +282,6 @@ class PureBaseVolumeDriver(san.SanDriver):
ssl_cert_path = replication_device.get("ssl_cert_path", None)
repl_type = replication_device.get("type",
REPLICATION_TYPE_ASYNC)
- if (
- repl_type == REPLICATION_TYPE_SYNC
- and "NVMe" in self._storage_protocol
- ):
- msg = _('NVMe driver does not support synchronous '
- 'replication')
- raise PureDriverException(reason=msg)
uniform = strutils.bool_from_string(
replication_device.get("uniform", False))
diff --git a/cinder/volume/drivers/rbd.py b/cinder/volume/drivers/rbd.py
index 828708bdf..07c26c8ce 100644
--- a/cinder/volume/drivers/rbd.py
+++ b/cinder/volume/drivers/rbd.py
@@ -57,6 +57,7 @@ from cinder.objects.volume_type import VolumeType
from cinder import utils
from cinder.volume import configuration
from cinder.volume import driver
+from cinder.volume import qos_specs
from cinder.volume import volume_utils
LOG = logging.getLogger(__name__)
@@ -140,6 +141,58 @@ CONF.register_opts(RBD_OPTS, group=configuration.SHARED_CONF_GROUP)
EXTRA_SPECS_REPL_ENABLED = "replication_enabled"
EXTRA_SPECS_MULTIATTACH = "multiattach"
+QOS_KEY_MAP = {
+ 'total_iops_sec': {
+ 'ceph_key': 'rbd_qos_iops_limit',
+ 'default': 0
+ },
+ 'read_iops_sec': {
+ 'ceph_key': 'rbd_qos_read_iops_limit',
+ 'default': 0
+ },
+ 'write_iops_sec': {
+ 'ceph_key': 'rbd_qos_write_iops_limit',
+ 'default': 0
+ },
+ 'total_bytes_sec': {
+ 'ceph_key': 'rbd_qos_bps_limit',
+ 'default': 0
+ },
+ 'read_bytes_sec': {
+ 'ceph_key': 'rbd_qos_read_bps_limit',
+ 'default': 0
+ },
+ 'write_bytes_sec': {
+ 'ceph_key': 'rbd_qos_write_bps_limit',
+ 'default': 0
+ },
+ 'total_iops_sec_max': {
+        'ceph_key': 'rbd_qos_iops_burst',
+ 'default': 0
+ },
+ 'read_iops_sec_max': {
+ 'ceph_key': 'rbd_qos_read_iops_burst',
+ 'default': 0
+ },
+ 'write_iops_sec_max': {
+ 'ceph_key': 'rbd_qos_write_iops_burst',
+ 'default': 0
+ },
+ 'total_bytes_sec_max': {
+ 'ceph_key': 'rbd_qos_bps_burst',
+ 'default': 0
+ },
+ 'read_bytes_sec_max': {
+ 'ceph_key': 'rbd_qos_read_bps_burst',
+ 'default': 0
+ },
+ 'write_bytes_sec_max': {
+ 'ceph_key': 'rbd_qos_write_bps_burst',
+ 'default': 0
+ }}
+
+CEPH_QOS_SUPPORTED_VERSION = 15
+
# RBD
class RBDDriverException(exception.VolumeDriverException):
@@ -230,9 +283,19 @@ class RADOSClient(object):
class RBDDriver(driver.CloneableImageVD, driver.MigrateVD,
driver.ManageableVD, driver.ManageableSnapshotsVD,
driver.BaseVD):
- """Implements RADOS block device (RBD) volume commands."""
+ """Implements RADOS block device (RBD) volume commands.
+
+
+ Version history:
+
+ .. code-block:: none
+
+ 1.3.0 - Added QoS Support
+
+
+ """
- VERSION = '1.2.0'
+ VERSION = '1.3.0'
# ThirdPartySystems wiki page
CI_WIKI_NAME = "Cinder_Jenkins"
@@ -554,6 +617,9 @@ class RBDDriver(driver.CloneableImageVD, driver.MigrateVD,
ioctx.close()
client.shutdown()
+ def _supports_qos(self):
+ return self.RBDProxy().version()[1] >= CEPH_QOS_SUPPORTED_VERSION
+
@staticmethod
def _get_backup_snaps(rbd_image) -> list:
"""Get list of any backup snapshots that exist on this volume.
@@ -688,7 +754,8 @@ class RBDDriver(driver.CloneableImageVD, driver.MigrateVD,
'max_over_subscription_ratio': (
self.configuration.safe_get('max_over_subscription_ratio')),
'location_info': location_info,
- 'backend_state': 'down'
+ 'backend_state': 'down',
+ 'qos_support': self._supports_qos(),
}
backend_name = self.configuration.safe_get('volume_backend_name')
@@ -927,6 +994,19 @@ class RBDDriver(driver.CloneableImageVD, driver.MigrateVD,
LOG.debug('Unable to retrieve extra specs info')
return False
+ def _qos_specs_from_volume_type(self, volume_type):
+ if not volume_type:
+ return None
+
+ qos_specs_id = volume_type.get('qos_specs_id')
+ if qos_specs_id is not None:
+ ctxt = context.get_admin_context()
+ vol_qos_specs = qos_specs.get_qos_specs(ctxt, qos_specs_id)
+            LOG.debug('qos_specs: %s', vol_qos_specs)
+ if vol_qos_specs['consumer'] in ('back-end', 'both'):
+ return vol_qos_specs['specs']
+ return None
+
def _setup_volume(
self,
volume: Volume,
@@ -941,6 +1021,16 @@ class RBDDriver(driver.CloneableImageVD, driver.MigrateVD,
had_multiattach = False
volume_type = volume.volume_type
+ specs = self._qos_specs_from_volume_type(volume_type)
+
+ if specs:
+ if self._supports_qos():
+ self.update_rbd_image_qos(volume, specs)
+ else:
+                LOG.warning("Backend QoS policies for Ceph are not "
+                            "supported prior to librbd version %s",
+                            CEPH_QOS_SUPPORTED_VERSION)
+
want_replication = self._is_replicated_type(volume_type)
want_multiattach = self._is_multiattach_type(volume_type)
@@ -1446,7 +1536,47 @@ class RBDDriver(driver.CloneableImageVD, driver.MigrateVD,
new_type: VolumeType,
diff: Union[dict[str, dict[str, str]], dict[str, dict], None],
host: Optional[dict[str, str]]) -> tuple[bool, dict]:
- """Retype from one volume type to another on the same backend."""
+ """Retype from one volume type to another on the same backend.
+
+ Returns a tuple of (diff, equal), where 'equal' is a boolean indicating
+        The 'diff' argument describes the difference between the two volume
+        types; each changed entry maps to a tuple of (old value, new value),
+        for example:
+
+        .. code-block:: default
+
+            {
+                'encryption': {},
+                'extra_specs': {},
+                'qos_specs': {'consumer': (u'front-end', u'back-end'),
+                              u'total_bytes_sec': (None, u'2048000'),
+                              u'total_iops_sec': (u'200', None),
+                              ...}
+ """
+ # NOTE(rogeryu): If `diff` contains `qos_specs`, `qos_spec` must have
+ # the `consumer` parameter, whether or not there is a difference.]
+        # the `consumer` parameter, whether or not there is a difference.
+ # spec, new keys are added in _setup_volume.
+ if diff and diff.get('qos_specs') and self._supports_qos():
+ specs = diff.get('qos_specs', {})
+ if (specs.get('consumer')
+ and specs['consumer'][1] == 'front-end'
+ and specs['consumer'][0] != 'front-end'):
+ del_qos_keys = [key for key in specs.keys()
+ if key in QOS_KEY_MAP.keys()]
+ else:
+ del_qos_keys = []
+ existing_config = self.get_rbd_image_qos(volume)
+ for k, v in QOS_KEY_MAP.items():
+ qos_val = specs.get(k, None)
+ vol_val = int(existing_config.get(v['ceph_key']))
+ if not qos_val:
+ if vol_val != v['default']:
+ del_qos_keys.append(k)
+ continue
+ if qos_val[1] is None and vol_val != v['default']:
+ del_qos_keys.append(k)
+ self.delete_rbd_image_qos_keys(volume, del_qos_keys)
return True, self._setup_volume(volume, new_type)
@staticmethod
@@ -2292,3 +2422,62 @@ class RBDDriver(driver.CloneableImageVD, driver.MigrateVD,
volume = objects.Volume.get_by_id(context, backup.volume_id)
return (volume, False)
+
+ @utils.retry(exception.VolumeBackendAPIException)
+ def get_rbd_image_qos(self, volume):
+ try:
+ with RBDVolumeProxy(self, volume.name) as rbd_image:
+ current = {k['name']: k['value']
+ for k in rbd_image.config_list()}
+ return current
+ except Exception as e:
+ msg = (_("Failed to get qos specs for rbd image "
+ "%(rbd_image_name)s, due to "
+ "%(error)s.")
+ % {'rbd_image_name': volume.name,
+ 'error': e})
+ raise exception.VolumeBackendAPIException(
+ data=msg)
+
+ @utils.retry(exception.VolumeBackendAPIException)
+ def update_rbd_image_qos(self, volume, qos_specs):
+ try:
+ with RBDVolumeProxy(self, volume.name) as rbd_image:
+ for qos_key, qos_val in qos_specs.items():
+ if qos_key in QOS_KEY_MAP:
+ rbd_image.config_set(QOS_KEY_MAP[qos_key]['ceph_key'],
+ str(qos_val))
+ LOG.debug('qos_specs: %(qos_key)s successfully set to'
+ ' %(qos_value)s', {'qos_key': qos_key,
+ 'qos_value': qos_val})
+ else:
+                        LOG.warning('qos_specs: the requested qos key '
+                                    '%(qos_key)s does not exist',
+ {'qos_key': qos_key,
+ 'qos_value': qos_val})
+ except Exception as e:
+ msg = (_('Failed to set qos spec %(qos_key)s '
+ 'for rbd image %(rbd_image_name)s, '
+ 'due to %(error)s.')
+ % {'qos_key': qos_key,
+ 'rbd_image_name': volume.name,
+ 'error': e})
+ raise exception.VolumeBackendAPIException(data=msg)
+
+ @utils.retry(exception.VolumeBackendAPIException)
+ def delete_rbd_image_qos_keys(self, volume, qos_keys):
+ try:
+ with RBDVolumeProxy(self, volume.name) as rbd_image:
+ for key in qos_keys:
+ rbd_image.config_remove(QOS_KEY_MAP[key]['ceph_key'])
+ LOG.debug('qos_specs: %(qos_key)s was '
+ 'successfully unset',
+ {'qos_key': key})
+ except Exception as e:
+ msg = (_("Failed to delete qos keys %(qos_key)s "
+ "for rbd image %(rbd_image_name)s, "
+ "due to %(error)s.")
+ % {'qos_key': key,
+ 'rbd_image_name': volume.name,
+ 'error': e})
+ raise exception.VolumeBackendAPIException(data=msg)
diff --git a/cinder/volume/targets/iet.py b/cinder/volume/targets/iet.py
index 03e1e995e..592ac56df 100644
--- a/cinder/volume/targets/iet.py
+++ b/cinder/volume/targets/iet.py
@@ -60,7 +60,7 @@ class IetAdm(iscsi.ISCSITarget):
LOG.exception("Failed to open iet session list for %s", iqn)
raise
- session_list = re.split('^tid:(?m)', sessions)[1:]
+ session_list = re.split('(?m)^tid:', sessions)[1:]
for ses in session_list:
m = re.match(r'(\d+) name:(\S+)\s+', ses)
if m and iqn in m.group(2):
@@ -213,7 +213,7 @@ class IetAdm(iscsi.ISCSITarget):
{'vol_id': vol_id, 'e': e})
return None
- session_list = re.split('^tid:(?m)', sessions)[1:]
+ session_list = re.split('(?m)^tid:', sessions)[1:]
for ses in session_list:
m = re.match(r'(\d+) name:(\S+)\s+sid:(\d+).+\s+cid:(\d+)', ses)
if m and tid in m.group(1) and name in m.group(2):
diff --git a/doc/source/cli/cli-manage-volumes.rst b/doc/source/cli/cli-manage-volumes.rst
index 0b38c09f5..a317f47ca 100644
--- a/doc/source/cli/cli-manage-volumes.rst
+++ b/doc/source/cli/cli-manage-volumes.rst
@@ -506,6 +506,9 @@ Starting with microversion 3.55 and later, Cinder supports the ability to
transfer volume without snapshots. If users don't want to transfer snapshots,
they need to specify the new optional argument `--no-snapshots`.
+Starting with microversion 3.70 and later, Cinder supports the ability to
+transfer encrypted volumes. Snapshots must be transferred with the volume.
+
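+For example (using the transfer workflow shown earlier in this section), an
+encrypted volume can be transferred by requesting microversion 3.70 or later:
+
+.. code-block:: console
+
+   $ openstack --os-volume-api-version 3.70 volume transfer request create <volume>
+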
.. note::
The procedure for volume transfer is intended for projects (both the
diff --git a/doc/source/configuration/block-storage/drivers/ceph-rbd-volume-driver.rst b/doc/source/configuration/block-storage/drivers/ceph-rbd-volume-driver.rst
index c7a44d91b..e949bce3b 100644
--- a/doc/source/configuration/block-storage/drivers/ceph-rbd-volume-driver.rst
+++ b/doc/source/configuration/block-storage/drivers/ceph-rbd-volume-driver.rst
@@ -162,3 +162,67 @@ refer to the `Ceph documentation
Note that with the RBD driver in cinder you need to configure the pool
replication option in image mode. For instance, if your pool is named
``volumes``, the command would be: ``rbd mirror pool enable volumes image``.
+
+RBD QoS
+~~~~~~~
+
+Currently, the Cinder RBD driver supports the following QoS options, which are
+compatible with the Ceph Octopus release and later:
+
+.. list-table::
+   :header-rows: 1
+
+   * - Cinder Value
+     - Ceph Mapping
+   * - ``total_iops_sec``
+     - ``rbd_qos_iops_limit``
+   * - ``read_iops_sec``
+     - ``rbd_qos_read_iops_limit``
+   * - ``write_iops_sec``
+     - ``rbd_qos_write_iops_limit``
+   * - ``total_bytes_sec``
+     - ``rbd_qos_bps_limit``
+   * - ``read_bytes_sec``
+     - ``rbd_qos_read_bps_limit``
+   * - ``write_bytes_sec``
+     - ``rbd_qos_write_bps_limit``
+   * - ``total_iops_sec_max``
+     - ``rbd_qos_iops_burst``
+   * - ``read_iops_sec_max``
+     - ``rbd_qos_read_iops_burst``
+   * - ``write_iops_sec_max``
+     - ``rbd_qos_write_iops_burst``
+   * - ``total_bytes_sec_max``
+     - ``rbd_qos_bps_burst``
+   * - ``read_bytes_sec_max``
+     - ``rbd_qos_read_bps_burst``
+   * - ``write_bytes_sec_max``
+     - ``rbd_qos_write_bps_burst``
+
+
+For more information on QoS settings, refer to the `Ceph QoS documentation
+<https://docs.ceph.com/en/latest/rbd/rbd-config-ref/#qos-settings/>`_.
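+
+As a minimal example (all names here are illustrative), backend QoS limits can
+be applied by creating a QoS spec with the ``back-end`` (or ``both``) consumer
+and associating it with a volume type:
+
+.. code-block:: console
+
+   $ openstack volume qos create --consumer back-end \
+       --property total_iops_sec=500 --property total_bytes_sec=52428800 rbd-qos
+   $ openstack volume qos associate rbd-qos rbd-volume-type
+
+Volumes created with, or retyped to, the associated volume type then have the
+corresponding ``rbd_qos_*`` image configuration options applied by the driver.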
diff --git a/doc/source/configuration/block-storage/drivers/dell-emc-unity-driver.rst b/doc/source/configuration/block-storage/drivers/dell-emc-unity-driver.rst
index 99df1b6e1..e80a617b3 100644
--- a/doc/source/configuration/block-storage/drivers/dell-emc-unity-driver.rst
+++ b/doc/source/configuration/block-storage/drivers/dell-emc-unity-driver.rst
@@ -48,8 +48,8 @@ Supported operations
Driver configuration
~~~~~~~~~~~~~~~~~~~~
-.. note:: The following instructions should all be performed on Block Storage
- nodes.
+.. note:: The following instructions should all be performed on
+   the cinder-volume container.
#. Install `storops` from pypi:
diff --git a/doc/source/configuration/block-storage/drivers/hitachi-vsp-driver.rst b/doc/source/configuration/block-storage/drivers/hitachi-vsp-driver.rst
index 765486971..129a669d4 100644
--- a/doc/source/configuration/block-storage/drivers/hitachi-vsp-driver.rst
+++ b/doc/source/configuration/block-storage/drivers/hitachi-vsp-driver.rst
@@ -89,17 +89,31 @@ Set up Hitachi storage
You need to specify settings as described below for storage systems. For
details about each setting, see the user's guide of the storage systems.
-#. User accounts
+Common resources:
- Create a storage device account belonging to the Administrator User Group.
+- ``All resources``
+ The name of any storage resource, such as a DP pool or a host group,
+ cannot contain any whitespace characters or else it will be unusable
+ by the driver.
-#. DP Pool
+- ``User accounts``
+ Create a storage device account belonging to the Administrator User Group.
- Create a DP pool that is used by the driver.
+- ``DP Pool``
+ Create a DP pool that is used by the driver.
-#. Ports
+- ``Resource group``
+ If using a new resource group for exclusive use by an OpenStack system,
+  If using a new resource group for exclusive use by an OpenStack system,
+  create a new resource group, and assign the necessary resources, such as
+  LDEVs, ports, and host groups (iSCSI targets), to the created resource
+  group.
+- ``Ports``
+ Enable Port Security for the ports used by the driver.
+
+If you use iSCSI:
+
+- ``Ports``
+ Assign an IP address and a TCP port number to the port.
Set up Hitachi storage volume driver
------------------------------------
@@ -140,7 +154,7 @@ This table shows configuration options for Hitachi block storage driver.
cinder.volume.drivers.hitachi.hbsd_common
cinder.volume.drivers.hitachi.hbsd_rest
- cinder.volume.drivers.hitachi.hbsd_fc
+ cinder.volume.drivers.hitachi.hbsd_rest_fc
Required options
----------------
@@ -159,4 +173,3 @@ Required options
- ``hitachi_pool``
Pool number or pool name of the DP pool.
-
diff --git a/doc/source/configuration/block-storage/drivers/infinidat-volume-driver.rst b/doc/source/configuration/block-storage/drivers/infinidat-volume-driver.rst
index 560d16781..65e58a946 100644
--- a/doc/source/configuration/block-storage/drivers/infinidat-volume-driver.rst
+++ b/doc/source/configuration/block-storage/drivers/infinidat-volume-driver.rst
@@ -22,6 +22,9 @@ Supported operations
* Create, modify, delete, and list snapshots of consistency groups.
* Create consistency group from consistency group or consistency group
snapshot.
+* Revert a volume to a snapshot.
+* Manage and unmanage volumes and snapshots.
+* List manageable volumes and snapshots.
External package installation
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -66,6 +69,20 @@ Configure the driver back-end section with the parameters below.
san_ip = InfiniBox management IP
+* Verify that the InfiniBox array can be managed via an HTTPS connection,
+  and set the ``driver_use_ssl`` parameter to ``true`` to enable use of the
+  HTTPS protocol. HTTP can also be used if ``driver_use_ssl`` is set to
+  (or defaults to) ``false``. To suppress SSL certificate warnings from the
+  requests library, set the ``suppress_requests_ssl_warnings`` parameter to
+  ``true``.
+
+ .. code-block:: ini
+
+ driver_use_ssl = true/false
+ suppress_requests_ssl_warnings = true/false
+
+  These parameters default to ``false``.
+
* Configure user credentials.
The driver requires an InfiniBox user with administrative privileges.
@@ -175,6 +192,8 @@ Configuration example
[infinidat-pool-a]
volume_driver = cinder.volume.drivers.infinidat.InfiniboxVolumeDriver
volume_backend_name = infinidat-pool-a
+ driver_use_ssl = true
+ suppress_requests_ssl_warnings = true
san_ip = 10.1.2.3
san_login = openstackuser
san_password = openstackpass
diff --git a/doc/source/configuration/block-storage/drivers/pure-storage-driver.rst b/doc/source/configuration/block-storage/drivers/pure-storage-driver.rst
index 930ced4c0..56cf10fe2 100644
--- a/doc/source/configuration/block-storage/drivers/pure-storage-driver.rst
+++ b/doc/source/configuration/block-storage/drivers/pure-storage-driver.rst
@@ -26,8 +26,6 @@ means you do not have the high-availability and non-disruptive upgrade
benefits provided by FlashArray. Multipathing must be used to take advantage
of these benefits.
-The NVMe driver does not support synchronous replication using ActiveCluster.
-
Supported operations
~~~~~~~~~~~~~~~~~~~~
@@ -51,8 +49,7 @@ Supported operations
* Create a thin provisioned volume.
-* Replicate volumes to remote Pure Storage array(s) - synchronous replication
- is not supported with the NVMe driver.
+* Replicate volumes to remote Pure Storage array(s)
QoS support for the Pure Storage drivers include the ability to set the
following capabilities in the OpenStack Block Storage API
@@ -267,10 +264,6 @@ of the remote array.
The ``REPLICATION_TYPE`` value for the ``type`` key can be either ``sync`` or
``async``
-.. note::
-
- Synchronous replication is not supported by the NVMe driver.
-
If the ``type`` is ``sync`` volumes will be created in a stretched Pod. This
requires two arrays pre-configured with Active Cluster enabled. You can
optionally specify ``uniform`` as ``true`` or ``false``, this will instruct
diff --git a/doc/source/reference/support-matrix.ini b/doc/source/reference/support-matrix.ini
index dae44be94..384a633da 100644
--- a/doc/source/reference/support-matrix.ini
+++ b/doc/source/reference/support-matrix.ini
@@ -875,7 +875,7 @@ driver.huawei_v5=missing
driver.huawei_18000=missing
driver.huawei_dorado=missing
driver.huawei_fusionstorage=missing
-driver.infinidat=missing
+driver.infinidat=complete
driver.ibm_ds8k=complete
driver.ibm_flashsystem=missing
driver.ibm_gpfs=missing
diff --git a/mypy-files.txt b/mypy-files.txt
index 9bf7a2314..b7338913d 100644
--- a/mypy-files.txt
+++ b/mypy-files.txt
@@ -11,6 +11,7 @@ cinder/i18n.py
cinder/image/cache.py
cinder/image/glance.py
cinder/image/image_utils.py
+cinder/keymgr/transfer.py
cinder/exception.py
cinder/manager.py
cinder/objects/backup.py
diff --git a/releasenotes/notes/bp-infinidat-add-snapshot-revert-1bab97e85ff10780.yaml b/releasenotes/notes/bp-infinidat-add-snapshot-revert-1bab97e85ff10780.yaml
new file mode 100644
index 000000000..11931e336
--- /dev/null
+++ b/releasenotes/notes/bp-infinidat-add-snapshot-revert-1bab97e85ff10780.yaml
@@ -0,0 +1,4 @@
+---
+features:
+ - |
+ Infinidat driver: Added support for revert to snapshot operation.
diff --git a/releasenotes/notes/bug-1936848-6ecc78e0e970419a.yaml b/releasenotes/notes/bug-1936848-6ecc78e0e970419a.yaml
new file mode 100644
index 000000000..5cf26d331
--- /dev/null
+++ b/releasenotes/notes/bug-1936848-6ecc78e0e970419a.yaml
@@ -0,0 +1,8 @@
+---
+fixes:
+ - |
+ PowerMax driver `bug #1936848
+ <https://bugs.launchpad.net/cinder/+bug/1936848>`_: Fixed
+ Generic Volume Group error where the name has been changed
+ in OpenStack and is not reflected on the corresponding storage
+ group on the PowerMax.
diff --git a/releasenotes/notes/bug-1978729-cinder-backup-4cd87c4d71b7713e.yaml b/releasenotes/notes/bug-1978729-cinder-backup-4cd87c4d71b7713e.yaml
new file mode 100644
index 000000000..42359b0d2
--- /dev/null
+++ b/releasenotes/notes/bug-1978729-cinder-backup-4cd87c4d71b7713e.yaml
@@ -0,0 +1,8 @@
+---
+fixes:
+ - |
+ `Bug #1978729 <https://bugs.launchpad.net/cinder/+bug/1978729>`_: Fixed
+ ``context.message_action`` being ``None`` on errors raised by backup drivers.
+ The message_* properties of the context were not passed during RPC, which
+ caused a second exception when a backup driver raised an exception, masking
+ the actual backup driver exception.
diff --git a/releasenotes/notes/bug-1981354-infinidat-iscsi-fix-multipath-3f8a0be5f541c66e.yaml b/releasenotes/notes/bug-1981354-infinidat-iscsi-fix-multipath-3f8a0be5f541c66e.yaml
new file mode 100644
index 000000000..a2d464553
--- /dev/null
+++ b/releasenotes/notes/bug-1981354-infinidat-iscsi-fix-multipath-3f8a0be5f541c66e.yaml
@@ -0,0 +1,7 @@
+---
+fixes:
+ - |
+ Infinidat Driver `bug #1981354
+ <https://bugs.launchpad.net/cinder/+bug/1981354>`_:
+ Fixed Infinidat driver to return all configured and
+ enabled iSCSI portals for a given network space.
diff --git a/releasenotes/notes/bug-1981982-infinidat-fix-ssl-options-6ddd852c24b16760.yaml b/releasenotes/notes/bug-1981982-infinidat-fix-ssl-options-6ddd852c24b16760.yaml
new file mode 100644
index 000000000..91da2d965
--- /dev/null
+++ b/releasenotes/notes/bug-1981982-infinidat-fix-ssl-options-6ddd852c24b16760.yaml
@@ -0,0 +1,9 @@
+---
+fixes:
+ - |
+ Infinidat Driver `bug #1981982
+ <https://bugs.launchpad.net/cinder/+bug/1981982>`_:
+ Fixed the Infinidat driver to use TLS/SSL communication between the Cinder
+ volume service and the storage backend. Administrators can set `True` or
+ `False` for the `driver_use_ssl` and `suppress_requests_ssl_warnings` options
+ in the driver section of cinder.conf to enable or disable these features.
diff --git a/releasenotes/notes/bug-1982078-Driver_initialization_error_w.r.t_default_portset-3992a060cca2adcb.yaml b/releasenotes/notes/bug-1982078-Driver_initialization_error_w.r.t_default_portset-3992a060cca2adcb.yaml
new file mode 100644
index 000000000..35e0cbd51
--- /dev/null
+++ b/releasenotes/notes/bug-1982078-Driver_initialization_error_w.r.t_default_portset-3992a060cca2adcb.yaml
@@ -0,0 +1,7 @@
+---
+fixes:
+ - |
+ IBM Spectrum Virtualize Family driver: `Bug #1982078
+ <https://bugs.launchpad.net/cinder/+bug/1982078>`_:
+ Fixed the default portset value during driver
+ initialization.
diff --git a/releasenotes/notes/fix-powerflex-volume-cache-da3fa1769ef78ae8.yaml b/releasenotes/notes/fix-powerflex-volume-cache-da3fa1769ef78ae8.yaml
new file mode 100644
index 000000000..29d3c9985
--- /dev/null
+++ b/releasenotes/notes/fix-powerflex-volume-cache-da3fa1769ef78ae8.yaml
@@ -0,0 +1,10 @@
+---
+fixes:
+ - |
+ PowerFlex driver `bug #1942095
+ <https://bugs.launchpad.net/cinder/+bug/1942095>`_: Fixed the Cinder
+ volume caching mechanism for the driver. The driver now correctly raises
+ ``exception.SnapshotLimitReached`` when the maximum number of snapshots has
+ been created for a given volume, and the volume cache is invalidated to
+ allow a new round of fast volume clones.
+
diff --git a/releasenotes/notes/hitachi-vsp-fix-resource-lock-msg-5a119426e6c65998.yaml b/releasenotes/notes/hitachi-vsp-fix-resource-lock-msg-5a119426e6c65998.yaml
new file mode 100644
index 000000000..3f383fbbc
--- /dev/null
+++ b/releasenotes/notes/hitachi-vsp-fix-resource-lock-msg-5a119426e6c65998.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - |
+ Hitachi driver `bug #1989176
+ <https://bugs.launchpad.net/cinder/+bug/1989176>`_:
+ Fixed the Hitachi driver to correctly output a message regarding resource locks.
diff --git a/releasenotes/notes/hitachi-vsp-port-scheduler-207e01b3cd13350b.yaml b/releasenotes/notes/hitachi-vsp-port-scheduler-207e01b3cd13350b.yaml
new file mode 100644
index 000000000..7069d395c
--- /dev/null
+++ b/releasenotes/notes/hitachi-vsp-port-scheduler-207e01b3cd13350b.yaml
@@ -0,0 +1,14 @@
+---
+features:
+ - |
+ Hitachi driver: Added a ``Port Scheduler`` feature. The feature is enabled
+ by setting the parameter ``hitachi_port_scheduler`` to ``True``.
+ When this feature is enabled and an attach request is received, the active
+ WWNs obtained by the Fibre Channel Zone Manager are distributed and
+ registered to the host groups of each port of the storage system.
+ To use this feature, specify ``True`` for both the
+ ``hitachi_group_request`` and ``hitachi_rest_name_only_discovery``
+ parameters. If you specify ``False`` or use the default value for
+ ``hitachi_rest_name_only_discovery``, attaching a volume will take a long
+ time because the host group is looked up on every specified port.
+ This feature is supported on Fibre Channel only.
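+
+ A minimal backend-section sketch of the options named above (the section
+ name ``[hitachi-vsp-1]`` is only an example):
+
+ .. code-block:: ini
+
+    [hitachi-vsp-1]
+    # enables the Port Scheduler feature (Fibre Channel only)
+    hitachi_port_scheduler = True
+    # both of these must also be True for the feature to work
+    hitachi_group_request = True
+    hitachi_rest_name_only_discovery = True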
diff --git a/releasenotes/notes/hpe-3par-add-get-manageable-2926f21116c98599.yaml b/releasenotes/notes/hpe-3par-add-get-manageable-2926f21116c98599.yaml
new file mode 100644
index 000000000..c5421b6cb
--- /dev/null
+++ b/releasenotes/notes/hpe-3par-add-get-manageable-2926f21116c98599.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - |
+ HPE 3PAR driver `Bug #1819903 <https://bugs.launchpad.net/cinder/+bug/1819903>`_:
+ Fixed: unmanaged volumes & snapshots missing from cinder manageable-list.
diff --git a/releasenotes/notes/infinidat-manage-unmanage-ccc42b79d741369f.yaml b/releasenotes/notes/infinidat-manage-unmanage-ccc42b79d741369f.yaml
new file mode 100644
index 000000000..06bf3e4e6
--- /dev/null
+++ b/releasenotes/notes/infinidat-manage-unmanage-ccc42b79d741369f.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ Infinidat driver: Added support to manage and unmanage volumes and
+ snapshots. Also added the functionality to list the manageable
+ volumes and snapshots.
diff --git a/releasenotes/notes/lock_path-940af881b2112bbe.yaml b/releasenotes/notes/lock_path-940af881b2112bbe.yaml
new file mode 100644
index 000000000..39d60a0e5
--- /dev/null
+++ b/releasenotes/notes/lock_path-940af881b2112bbe.yaml
@@ -0,0 +1,12 @@
+---
+features:
+ - |
+ The os-brick file lock location can be specified independently of the Cinder
+ service lock location using ``lock_path`` in the ``[os_brick]`` configuration
+ section. This is useful for HCI deployments and when running Cinder and
+ Glance with the Cinder backend on the same host.
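+
+ A minimal sketch of the relevant configuration (the lock directory path is
+ only an example):
+
+ .. code-block:: ini
+
+    [os_brick]
+    # directory for os-brick file locks, shared by the services that need
+    # to coordinate on this host (e.g. cinder-volume and glance-api)
+    lock_path = /var/lib/openstack/os-brick-locks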
+upgrade:
+ - |
+ On HCI deployments, and when running Cinder and Glance with the Cinder
+ backend on the same host, a shared os-brick lock location can be configured
+ using ``lock_path`` in the ``[os_brick]`` configuration section.
diff --git a/releasenotes/notes/netapp-nfs-copy-offload-image-812c7152d9fe4aae.yaml b/releasenotes/notes/netapp-nfs-copy-offload-image-812c7152d9fe4aae.yaml
new file mode 100644
index 000000000..26044fbda
--- /dev/null
+++ b/releasenotes/notes/netapp-nfs-copy-offload-image-812c7152d9fe4aae.yaml
@@ -0,0 +1,9 @@
+---
+features:
+ - |
+ NetApp NFS driver: added an alternative approach to efficiently clone an
+ image when the Glance source store and the Cinder destination pool are not
+ in the same FlexVol but are in the same cluster. Previously, the driver
+ required the copy offload tool, which is no longer available, to do this
+ efficiently. Operators can now keep efficient image cloning by relying on
+ the storage file copy operation.
diff --git a/releasenotes/notes/netapp-nfs-deprecate-copy-offload-option-f9d6fe8e3dfafb04.yaml b/releasenotes/notes/netapp-nfs-deprecate-copy-offload-option-f9d6fe8e3dfafb04.yaml
new file mode 100644
index 000000000..a30e5f38d
--- /dev/null
+++ b/releasenotes/notes/netapp-nfs-deprecate-copy-offload-option-f9d6fe8e3dfafb04.yaml
@@ -0,0 +1,5 @@
+---
+deprecations:
+ - |
+ Deprecated the NetApp NFS option `netapp_copyoffload_tool_path`. The tool is
+ no longer available for download.
diff --git a/releasenotes/notes/netapp-ontap-rest-api-client-d889cfa895f01249.yaml b/releasenotes/notes/netapp-ontap-rest-api-client-d889cfa895f01249.yaml
new file mode 100644
index 000000000..c218b598a
--- /dev/null
+++ b/releasenotes/notes/netapp-ontap-rest-api-client-d889cfa895f01249.yaml
@@ -0,0 +1,19 @@
+---
+features:
+ - |
+ NetApp drivers: the NFS, iSCSI and FCP drivers now have the option to request
+ ONTAP operations through the REST API. The new option
+ `netapp_use_legacy_client` switches between the old ZAPI client and the new
+ REST client. It defaults to `True`, meaning that the drivers keep working as
+ before, using ZAPI operations. If desired, this option can be set to `False`,
+ in which case the drivers interact with the storage using the new REST
+ client. However, the new client still relies on ZAPI calls for the
+ consistency group snapshot operation.
+
+ The drivers can only be configured with the REST client when using ONTAP
+ storage 9.11.1 or newer.
+
+ NOTE: Enabling the ONTAP REST client changes the behavior of QoS specs.
+ Previously, QoS values could be expressed in BPS (bytes per second), but the
+ REST client only supports integer values expressed in MBPS (megabytes per
+ second). This means that even though the user specifies the value in BPS, it
+ will be converted to MBPS, rounding up.
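+
+ A minimal backend-section sketch for opting in to the REST client (the
+ section name is only an example; other required NetApp options are omitted):
+
+ .. code-block:: ini
+
+    [ontap-backend-1]
+    # defaults to True (keep using ZAPI); set to False to use the REST client
+    netapp_use_legacy_client = False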
diff --git a/releasenotes/notes/pure-storage-nvme-driver-f4217c00379c4827.yaml b/releasenotes/notes/pure-storage-nvme-driver-f4217c00379c4827.yaml
index c72121465..3fd81a526 100644
--- a/releasenotes/notes/pure-storage-nvme-driver-f4217c00379c4827.yaml
+++ b/releasenotes/notes/pure-storage-nvme-driver-f4217c00379c4827.yaml
@@ -4,4 +4,4 @@ features:
Pure Storage adds a new driver to support NVMe-RoCE for the
FlashArray.
All features of the iSCSI and FC drivers are fully supported by this new
- driver with the exception of synchronous replication.
+ driver.
diff --git a/releasenotes/notes/rbd-backend-qos-implementation-0e141b742e277d26.yaml b/releasenotes/notes/rbd-backend-qos-implementation-0e141b742e277d26.yaml
new file mode 100644
index 000000000..007ea8b1c
--- /dev/null
+++ b/releasenotes/notes/rbd-backend-qos-implementation-0e141b742e277d26.yaml
@@ -0,0 +1,4 @@
+---
+features:
+ - |
+ RBD driver: Added QoS support.
diff --git a/releasenotes/notes/slug-b6a0fc3db0a2dd45.yaml b/releasenotes/notes/slug-b6a0fc3db0a2dd45.yaml
new file mode 100644
index 000000000..a6392c35b
--- /dev/null
+++ b/releasenotes/notes/slug-b6a0fc3db0a2dd45.yaml
@@ -0,0 +1,8 @@
+---
+other:
+ - |
+ Unified how Cinder calculates the virtual free storage space for a pool.
+ Previously, Cinder had two different mechanisms for calculating the virtual
+ free storage. Now both the Capacity Filter and the Capacity Weigher use the
+ same mechanism, which is based upon the terms defined in
+ https://specs.openstack.org/openstack/cinder-specs/specs/queens/provisioning-improvements.html
diff --git a/releasenotes/notes/transfer-encrypted-volume-2f040a6993435e79.yaml b/releasenotes/notes/transfer-encrypted-volume-2f040a6993435e79.yaml
new file mode 100644
index 000000000..8f7213f3d
--- /dev/null
+++ b/releasenotes/notes/transfer-encrypted-volume-2f040a6993435e79.yaml
@@ -0,0 +1,8 @@
+---
+features:
+ - |
+ Starting with API microversion 3.70, encrypted volumes can be transferred
+ to a user in a different project. Prior to microversion 3.70, the transfer
+ is blocked due to the inability to transfer ownership of the volume's
+ encryption key. With microversion 3.70, ownership of the encryption key is
+ transferred when the volume is transferred.
diff --git a/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po b/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po
index fc1396afe..2822cc565 100644
--- a/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po
+++ b/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po
@@ -8,11 +8,11 @@ msgid ""
msgstr ""
"Project-Id-Version: Cinder Release Notes\n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2022-08-03 16:12+0000\n"
+"POT-Creation-Date: 2022-09-10 21:16+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"PO-Revision-Date: 2022-08-04 12:27+0000\n"
+"PO-Revision-Date: 2022-09-10 01:52+0000\n"
"Last-Translator: Andi Chandler <andi@gowling.com>\n"
"Language-Team: English (United Kingdom)\n"
"Language: en_GB\n"
@@ -253,8 +253,8 @@ msgstr "15.5.0"
msgid "15.6.0"
msgstr "15.6.0"
-msgid "15.6.0-13"
-msgstr "15.6.0-13"
+msgid "15.6.0-15"
+msgstr "15.6.0-15"
msgid "16.0.0"
msgstr "16.0.0"
@@ -280,8 +280,8 @@ msgstr "16.4.1"
msgid "16.4.2"
msgstr "16.4.2"
-msgid "16.4.2-6"
-msgstr "16.4.2-6"
+msgid "16.4.2-8"
+msgstr "16.4.2-8"
msgid "17.0.0"
msgstr "17.0.0"
@@ -301,6 +301,9 @@ msgstr "17.3.0"
msgid "17.4.0"
msgstr "17.4.0"
+msgid "17.4.0-5"
+msgstr "17.4.0-5"
+
msgid "18.0.0"
msgstr "18.0.0"
@@ -310,8 +313,8 @@ msgstr "18.1.0"
msgid "18.2.0"
msgstr "18.2.0"
-msgid "18.2.0-6"
-msgstr "18.2.0-6"
+msgid "18.2.0-7"
+msgstr "18.2.0-7"
msgid "19.0.0"
msgstr "19.0.0"
@@ -322,8 +325,8 @@ msgstr "19.1.0"
msgid "19.1.1"
msgstr "19.1.1"
-msgid "19.1.1-4"
-msgstr "19.1.1-4"
+msgid "19.1.1-8"
+msgstr "19.1.1-8"
msgid ""
"2. The FlexGroup pool has a different view of aggregate capabilites, "
@@ -345,12 +348,12 @@ msgstr "20.0.0"
msgid "20.0.0.0rc1"
msgstr "20.0.0.0rc1"
-msgid "20.0.0.0rc1-206"
-msgstr "20.0.0.0rc1-206"
-
msgid "20.0.1"
msgstr "20.0.1"
+msgid "20.0.1-6"
+msgstr "20.0.1-6"
+
msgid ""
"3. The ``utilization`` capability is not calculated to FlexGroup pools, it "
"is always set to default of 50."
@@ -3262,6 +3265,17 @@ msgstr ""
"in volume stats."
msgid ""
+"Dell EMC PowerFlex driver: Report trimming/discard support to Nova and "
+"Cinder on thin volumes that don't have snapshots. Not doing trim on volumes "
+"with snapshots is the vendor's recommendation, but can be overriden with the "
+"``report_discard_supported`` configuration option."
+msgstr ""
+"Dell EMC PowerFlex driver: Report trimming/discard support to Nova and "
+"Cinder on thin volumes that don't have snapshots. Not doing trim on volumes "
+"with snapshots is the vendor's recommendation, but can be overridden with "
+"the ``report_discard_supported`` configuration option."
+
+msgid ""
"Dell EMC PowerMax driver now faciliates the user to override the short host "
"name and port group name seen in PowerMax masking view and storage view "
"terminology. This means the user can give more meaningful names, especially "
@@ -3285,6 +3299,18 @@ msgstr ""
"tagging to allow the user to specify a user defined tag to facilitate easy "
"access and classification."
+msgid ""
+"Dell EMC PowerMax driver: Report trimming/discard support to Nova and Cinder."
+msgstr ""
+"Dell EMC PowerMax driver: Report trimming/discard support to Nova and Cinder."
+
+msgid ""
+"Dell EMC PowerStore driver: Report trimming/discard support to Nova and "
+"Cinder."
+msgstr ""
+"Dell EMC PowerStore driver: Report trimming/discard support to Nova and "
+"Cinder."
+
msgid "Dell EMC PowerVault ME Series storage arrays are now supported."
msgstr "Dell EMC PowerVault ME Series storage arrays are now supported."
@@ -3625,6 +3651,13 @@ msgstr ""
"note."
msgid ""
+"Deprecate NetApp NFS option `netapp_copyoffload_tool_path`. The tool is no "
+"longer available for downloading."
+msgstr ""
+"Deprecate NetApp NFS option `netapp_copyoffload_tool_path`. The tool is no "
+"longer available for download."
+
+msgid ""
"Deprecate option `check_max_pool_luns_threshold`. The VNX driver will always "
"check the threshold."
msgstr ""
@@ -4804,6 +4837,29 @@ msgid "Hitachi driver: Add Cinder generic volume groups."
msgstr "Hitachi driver: Add Cinder generic volume groups."
msgid ""
+"Hitachi driver: Add a feature ``Port Scheduler``. This feature is enabled "
+"when specifying ``True`` for the parameter ``hitachi_port_scheduler``. When "
+"this feature is enabled and an attach request is received, the active WWNs "
+"that are obtained by Fibre Channel Zone Manager will be distributed and "
+"registered to the host groups of each port of the storage system. To use "
+"this feature, specify ``True`` for both parameters ``hitachi_group_request`` "
+"and ``hitachi_rest_name_only_discovery``. If you specify ``False`` or use "
+"default value for the ``hitachi_rest_name_only_discovery``, it will take a "
+"long time to attach volume, by seeking the host group for all specified "
+"ports. This feature is supported on Fibre Channel only."
+msgstr ""
+"Hitachi driver: Add a feature ``Port Scheduler``. This feature is enabled "
+"when specifying ``True`` for the parameter ``hitachi_port_scheduler``. When "
+"this feature is enabled and an attach request is received, the active WWNs "
+"that are obtained by Fibre Channel Zone Manager will be distributed and "
+"registered to the host groups of each port of the storage system. To use "
+"this feature, specify ``True`` for both parameters ``hitachi_group_request`` "
+"and ``hitachi_rest_name_only_discovery``. If you specify ``False`` or use "
+"the default value for the ``hitachi_rest_name_only_discovery``, it will take "
+"a long time to attach volume, by seeking the host group for all specified "
+"ports. This feature is supported on Fibre Channel only."
+
+msgid ""
"Hitachi driver: Add target port assignment. Defining particular ports in "
"extra spec ``hbsd:target_ports`` determines which of the ports specified by "
"the ``hitachi_target_ports`` or the ``hitachi_compute_target_ports`` "
@@ -5174,6 +5230,15 @@ msgstr ""
"volumes during retype operation."
msgid ""
+"IBM Spectrum Virtualize Family driver: `Bug #1976499 <https://bugs.launchpad."
+"net/cinder/+bug/1976499>`_: Setting correct SVC Code level for "
+"lsfcportsetmember call."
+msgstr ""
+"IBM Spectrum Virtualize Family driver: `Bug #1976499 <https://bugs.launchpad."
+"net/cinder/+bug/1976499>`_: Setting correct SVC Code level for "
+"lsfcportsetmember call."
+
+msgid ""
"IBM Spectrum Virtualize Family: Added support for revert to snapshot for "
"global-mirror volume."
msgstr ""
@@ -5653,6 +5718,15 @@ msgstr ""
"reference guide continues to show URLs with a project_id because the legacy "
"behaviour continues to be supported."
+msgid ""
+"Infinidat Driver `bug #1981354 <https://bugs.launchpad.net/cinder/"
+"+bug/1981354>`_: Fixed Infinidat driver to return all configured and enabled "
+"iSCSI portals for a given network space."
+msgstr ""
+"Infinidat Driver `bug #1981354 <https://bugs.launchpad.net/cinder/"
+"+bug/1981354>`_: Fixed Infinidat driver to return all configured and enabled "
+"iSCSI portals for a given network space."
+
msgid "Infortrend"
msgstr "Infortrend"
@@ -5694,6 +5768,32 @@ msgstr ""
"list_replication_targets."
msgid ""
+"Introduces microversion (MV) 3.63, which includes volume type ID in the "
+"volume details JSON response. This MV affects the volume detail list (``GET /"
+"v3/{project_id}/volumes/detail``), and volume-show (``GET /v3/{project_id}/"
+"volumes/{volume_id}``) calls."
+msgstr ""
+"Introduces microversion (MV) 3.63, which includes volume type ID in the "
+"volume details JSON response. This MV affects the volume detail list (``GET /"
+"v3/{project_id}/volumes/detail``), and volume-show (``GET /v3/{project_id}/"
+"volumes/{volume_id}``) calls."
+
+msgid ""
+"It is currently possible to manage a volume to an encrypted volume type, but "
+"that is not recommended because there is no way to supply an encryption key "
+"for the volume to cinder. Un-managing a volume of an encrypted volume type "
+"is already prevented, and it is expected that management to an encrypted "
+"type will similarly be blocked in a future release. This issue is being "
+"tracked as `Bug #1944577 <https://bugs.launchpad.net/cinder/+bug/1944577>`_."
+msgstr ""
+"It is currently possible to manage a volume to an encrypted volume type, but "
+"that is not recommended because there is no way to supply an encryption key "
+"for the volume to Cinder. Un-managing a volume of an encrypted volume type "
+"is already prevented, and it is expected that management of an encrypted "
+"type will similarly be blocked in a future release. This issue is being "
+"tracked as `Bug #1944577 <https://bugs.launchpad.net/cinder/+bug/1944577>`_."
+
+msgid ""
"It is faster to create a new volume from a snapshot. You may wish to "
"recommend this option to your users whose use cases do not strictly require "
"revert-to-snapshot."
@@ -5703,6 +5803,17 @@ msgstr ""
"revert-to-snapshot."
msgid ""
+"It is no longer possible to specify an sqlalchemy-migrate-based version. "
+"When the ``cinder-manage db sync`` command is run, all remaining sqlalchemy-"
+"migrate-based migrations will be automatically applied. Attempting to "
+"specify an sqlalchemy-migrate-based version will result in an error."
+msgstr ""
+"It is no longer possible to specify an sqlalchemy-migrate-based version. "
+"When the ``cinder-manage db sync`` command is run, all remaining sqlalchemy-"
+"migrate-based migrations will be automatically applied. Attempting to "
+"specify an sqlalchemy-migrate-based version will result in an error."
+
+msgid ""
"It is now possible to delete a volume and its snapshots by passing an "
"additional argument to volume delete, \"cascade=True\"."
msgstr ""
@@ -5732,6 +5843,30 @@ msgstr ""
"JSON ``null`` value."
msgid ""
+"JovianDSS driver: `Bug #1941746 <https://bugs.launchpad.net/cinder/"
+"+bug/1941746>`_: Fixed Fix ensure_export function failure in case of partial "
+"target recovery."
+msgstr ""
+"JovianDSS driver: `Bug #1941746 <https://bugs.launchpad.net/cinder/"
+"+bug/1941746>`_: Fixed Fix ensure_export function failure in case of partial "
+"target recovery."
+
+msgid ""
+"Just before release, `Bug #1965847 <https://bugs.launchpad.net/cinder/"
+"+bug/1965847>`_ was reported. When importing a backup record for a backup_id "
+"that currently exists, the import fails as expected. However, this "
+"operation has the unfortunate side effect that the existing backup record is "
+"deleted. Initial analysis of the bug indicates a small, isolated solution "
+"that should be backportable to stable branches."
+msgstr ""
+"Just before release, `Bug #1965847 <https://bugs.launchpad.net/cinder/"
+"+bug/1965847>`_ was reported. When importing a backup record for a backup_id "
+"that currently exists, the import fails as expected. However, this "
+"operation has the unfortunate side effect that the existing backup record is "
+"deleted. Initial analysis of the bug indicates a small, isolated solution "
+"that should be back portable to stable branches."
+
+msgid ""
"Kaminario FC and iSCSI drivers: Fixed `bug 1829398 <https://bugs.launchpad."
"net/cinder/+bug/1829398>`_ where force detach would fail."
msgstr ""
@@ -5757,6 +5892,15 @@ msgstr ""
"#1720147)."
msgid ""
+"Kaminario driver `bug #1951981 <https://bugs.launchpad.net/cinder/"
+"+bug/1951981>`_: Fixed create volume from volume or snapshot not using "
+"multipath configuration."
+msgstr ""
+"Kaminario driver `bug #1951981 <https://bugs.launchpad.net/cinder/"
+"+bug/1951981>`_: Fixed create volume from volume or snapshot not using "
+"multipath configuration."
+
+msgid ""
"Key migration is initiated on service startup, and entries in the cinder-"
"volume log will indicate the migration status. Log entries will indicate "
"when a volume's encryption key ID has been migrated to Barbican, and a "
@@ -5784,15 +5928,51 @@ msgstr ""
"of QEMU 2.10 and the qemu-img convert command."
msgid ""
+"LVM driver `bug #1901783 <https://bugs.launchpad.net/cinder/+bug/1901783>`_: "
+"Fix unexpected delete volume failure due to unexpected exit code 139 on "
+"``lvs`` command call."
+msgstr ""
+"LVM driver `bug #1901783 <https://bugs.launchpad.net/cinder/+bug/1901783>`_: "
+"Fix unexpected delete volume failure due to unexpected exit code 139 on "
+"``lvs`` command call."
+
+msgid ""
+"LVM driver: Added support for the NVMe TCP transport protocol. Configuration "
+"option is ``target_protocol = nvmet_tcp`` when using ``nvmet`` as the "
+"``target_helper``."
+msgstr ""
+"LVM driver: Added support for the NVMe TCP transport protocol. Configuration "
+"option is ``target_protocol = nvmet_tcp`` when using ``nvmet`` as the "
+"``target_helper``."
+
+msgid ""
"LVM iSCSI driver fix for IPv6 addresses for the different targets, IET, LIO, "
"TGT, CXT, and SCST."
msgstr ""
"LVM iSCSI driver fix for IPv6 addresses for the different targets, IET, LIO, "
"TGT, CXT, and SCST."
+msgid ""
+"Lenovo driver: Return additional configuration options from "
+"``get_driver_options`` call"
+msgstr ""
+"Lenovo driver: Return additional configuration options from "
+"``get_driver_options`` call"
+
msgid "Liberty Series Release Notes"
msgstr "Liberty Series Release Notes"
+msgid ""
+"Lightbits LightOS driver: new Cinder driver for Lightbits(TM) LightOS(R). "
+"Lightbits Labs (http://www.lightbitslabs.com) LightOS is software-defined, "
+"cloud native, high-performance, clustered scale-out and redundant NVMe/TCP "
+"storage that performs like local NVMe flash."
+msgstr ""
+"Lightbits LightOS driver: new Cinder driver for Lightbits(TM) LightOS(R). "
+"Lightbits Labs (http://www.lightbitslabs.com) LightOS is software-defined, "
+"cloud-native, high-performance, clustered scale-out and redundant NVMe/TCP "
+"storage that performs like local NVMe flash."
+
msgid "List CG Snapshots checks both the CG and the groups tables."
msgstr "List CG Snapshots checks both the CG and the groups tables."
@@ -5810,6 +5990,17 @@ msgid "Log VMAX specific metadata of a volume if debug is enabled."
msgstr "Log VMAX specific metadata of a volume if debug is enabled."
msgid ""
+"Log a warning from the volume service when a volume driver's "
+"get_volume_stats() call takes a long time to return. This can help "
+"deployers troubleshoot a cinder-volume service misbehaving due to a driver/"
+"backend performance issue."
+msgstr ""
+"Log a warning from the volume service when a volume driver's "
+"get_volume_stats() call takes a long time to return. This can help "
+"deployers troubleshoot a cinder-volume service misbehaving due to a driver/"
+"backend performance issue."
+
+msgid ""
"Logging path can now be configured for vzstorage driver in shares config "
"file (specified by vzstorage_shares_config option). To set custom logging "
"path add `'-l', '<path_to_log_file>'` to mount options array. Otherwise "
@@ -5838,6 +6029,19 @@ msgid "Many backend storage drivers have added features and fixed bugs."
msgstr "Many backend storage drivers have added features and fixed bugs."
msgid ""
+"Many policies had their default values changed and their previous values "
+"deprecated. These are indicated in the sample policy configuration file, "
+"which you can view in the `policy.yaml <https://docs.openstack.org/cinder/"
+"xena/configuration/block-storage/samples/policy.yaml.html>`_ section of the "
+"`Cinder Service Configuration Guide`."
+msgstr ""
+"Many policies had their default values changed and their previous values "
+"deprecated. These are indicated in the sample policy configuration file, "
+"which you can view in the `policy.yaml <https://docs.openstack.org/cinder/"
+"xena/configuration/block-storage/samples/policy.yaml.html>`_ section of the "
+"`Cinder Service Configuration Guide`."
+
+msgid ""
"Marked the ITRI DISCO driver option ``disco_wsdl_path`` as deprecated. The "
"new preferred protocol for array communication is REST and SOAP support will "
"be removed."
@@ -5866,6 +6070,52 @@ msgstr ""
"types>`_ section of the `Block Storage API v3 Reference <https://docs."
"openstack.org/api-ref/block-storage/v3/>`_ for more information."
+msgid ""
+"Microversion 3.65 includes the display of information in the volume or "
+"snapshot detail response to indicate whether that resource consumes quota, "
+"and adds the ability to filter a requested list of resources according to "
+"whether they consume quota or not."
+msgstr ""
+"Microversion 3.65 includes the display of information in the volume or "
+"snapshot detail response to indicate whether that resource consumes quota, "
+"and adds the ability to filter a requested list of resources according to "
+"whether they consume quota or not."
+
+msgid ""
+"Microversion 3.66 removes the necessity to add a 'force' flag when "
+"requesting a snapshot of an in-use volume, given that this is not a problem "
+"for modern storage systems."
+msgstr ""
+"Microversion 3.66 removes the necessity to add a 'force' flag when "
+"requesting a snapshot of an in-use volume, given that this is not a problem "
+"for modern storage systems."
+
+msgid ""
+"Microversion 3.67 is introduced as a marker to indicate that any instance of "
+"the Block Storage API 3.67 or greater treats a project_id in the URL as "
+"optional. This change is backward compatible: the API can handle legacy "
+"URLs containing a project_id as well as URLs without a project_id. This is "
+"the case regardless of what microversion specified in a request. See the "
+"\"New Features\" section for details."
+msgstr ""
+"Microversion 3.67 is introduced as a marker to indicate that any instance of "
+"the Block Storage API 3.67 or greater treats a project_id in the URL as "
+"optional. This change is backward compatible: the API can handle legacy "
+"URLs containing a project_id as well as URLs without a project_id. This is "
+"the case regardless of what microversion is specified in a request. See the "
+"\"New Features\" section for details."
+
+msgid ""
+"Microversion 3.68 introduces a new volume action, ``os-reimage``, that "
+"allows a user to replace the current content of a specified volume with the "
+"data of a specified image supplied by the Image service (glance). See the "
+"\"New Features\" section for details."
+msgstr ""
+"Microversion 3.68 introduces a new volume action, ``os-reimage``, that "
+"allows a user to replace the current content of a specified volume with the "
+"data of a specified image supplied by the Image service (glance). See the "
+"\"New Features\" section for details."
+
msgid "Mitaka Series Release Notes"
msgstr "Mitaka Series Release Notes"
@@ -5943,6 +6193,22 @@ msgstr ""
"NEC Driver: Deprecated ``nec_iscsi_portals_per_cont`` config option. The "
"option was used to limit number of portals and is no longer needed."
+msgid ""
+"NFS driver `bug #1860913 <https://bugs.launchpad.net/cinder/+bug/1860913>`_: "
+"Fixed instance uses base image file when it is rebooted after online "
+"snapshot creation."
+msgstr ""
+"NFS driver `bug #1860913 <https://bugs.launchpad.net/cinder/+bug/1860913>`_: "
+"Fixed instance uses base image file when it is rebooted after online "
+"snapshot creation."
+
+msgid ""
+"NFS driver `bug #1946059 <https://bugs.launchpad.net/cinder/+bug/1946059>`_: "
+"Fixed revert to snapshot operation."
+msgstr ""
+"NFS driver `bug #1946059 <https://bugs.launchpad.net/cinder/+bug/1946059>`_: "
+"Fixed revert to snapshot operation."
+
msgid "Naming convention change for Datera Volume Drivers"
msgstr "Naming convention change for Datera Volume Drivers"
@@ -5998,6 +6264,96 @@ msgid "NetApp ONTAP NFS multiattach capability enabled."
msgstr "NetApp ONTAP NFS multiattach capability enabled."
msgid ""
+"NetApp ONTAP `bug #1906291 <https://bugs.launchpad.net/cinder/"
+"+bug/1906291>`_: Fix volume losing its QoS policy on the backend after "
+"moving it (migrate or retype with migrate) to a NetApp NFS backend."
+msgstr ""
+"NetApp ONTAP `bug #1906291 <https://bugs.launchpad.net/cinder/"
+"+bug/1906291>`_: Fix volume losing its QoS policy on the backend after "
+"moving it (migrate or retype with migrate) to a NetApp NFS backend."
+
+msgid ""
+"NetApp ONTAP driver `bug #1955057 <https://bugs.launchpad.net/cinder/"
+"+bug/1955057>`_: Fixed the function get_ontap_version on Cinder NetApp "
+"driver, now it returns a tuple of integers instead of a string."
+msgstr ""
+"NetApp ONTAP driver `bug #1955057 <https://bugs.launchpad.net/cinder/"
+"+bug/1955057>`_: Fixed the function get_ontap_version on Cinder NetApp "
+"driver, now it returns a tuple of integers instead of a string."
+
+msgid ""
+"NetApp ONTAP driver: Added a new driver specific capability called "
+"`netapp_qos_min_support`. It is used to filter the pools that has support to "
+"the Qos minimum (floor) specs during the scheduler phase."
+msgstr ""
+"NetApp ONTAP driver: Added a new driver-specific capability called "
+"`netapp_qos_min_support`. It is used to filter the pools that have supported "
+"the QoS minimum (floor) specs during the scheduler phase."
+
+msgid ""
+"NetApp ONTAP driver: Added support for Adaptive QoS specs. The driver now "
+"accepts ``expectedIOPSperGiB``, ``peakIOPSperGiB``, "
+"``expectedIOPSAllocation``, ``peakIOPSAllocation``, ``absoluteMinIOPS`` and "
+"``blockSize``. The field ``peakIOPSperGiB`` and the field "
+"``expectedIOPSperGiB`` are required together. The ``expectedIOPSperGiB`` and "
+"``absoluteMinIOPS`` specs are only guaranteed by ONTAP AFF systems. All "
+"specs can only be used with ONTAP version equal or greater than 9.4, "
+"excepting the ``expectedIOPSAllocation`` and ``blockSize`` specs which "
+"require at least 9.5."
+msgstr ""
+"NetApp ONTAP driver: Added support for Adaptive QoS specs. The driver now "
+"accepts ``expectedIOPSperGiB``, ``peakIOPSperGiB``, "
+"``expectedIOPSAllocation``, ``peakIOPSAllocation``, ``absoluteMinIOPS`` and "
+"``blockSize``. The field ``peakIOPSperGiB`` and the field "
+"``expectedIOPSperGiB`` are required together. The ``expectedIOPSperGiB`` and "
+"``absoluteMinIOPS`` specs are only guaranteed by ONTAP AFF systems. All "
+"specs can only be used with ONTAP version equal or greater than 9.4, "
+"excepting the ``expectedIOPSAllocation`` and ``blockSize`` specs which "
+"require at least 9.5."
+
+msgid ""
+"NetApp ONTAP driver: Added support for QoS Min (floor) throughput specs. The "
+"driver now accepts ``minIOPS`` and ``minIOPSperGiB`` specs, which can be set "
+"either individually or along with Max (ceiling) throughput specs. The "
+"feature requires storage ONTAP All Flash FAS (AFF) with version equal or "
+"greater than 9.3 for NFS and 9.2 for iSCSI and FCP. It also works with "
+"Select Premium with SSD and C190 storages with at least ONTAP 9.6."
+msgstr ""
+"NetApp ONTAP driver: Added support for QoS Min (floor) throughput specs. The "
+"driver now accepts ``minIOPS`` and ``minIOPSperGiB`` specs, which can be set "
+"either individually or along with Max (ceiling) throughput specs. The "
+"feature requires storage ONTAP All Flash FAS (AFF) with versions equal to or "
+"greater than 9.3 for NFS and 9.2 for iSCSI and FCP. It also works with "
+"Select Premium with SSD and C190 storage with at least ONTAP 9.6."
+
+msgid ""
+"NetApp ONTAP driver: Added support to Revert to Snapshot for the iSCSI, FC "
+"and NFS drivers with FlexVol pool. This feature does not support FlexGroups "
+"and is limited to revert only to the most recent snapshot of a given Cinder "
+"volume."
+msgstr ""
+"NetApp ONTAP driver: Added support to Revert to Snapshot for the iSCSI, FC "
+"and NFS drivers with FlexVol pool. This feature does not support FlexGroups "
+"and is limited to reverting only to the most recent snapshot of a given "
+"Cinder volume."
+
+msgid ""
+"NetApp ONTAP driver: added option ´netapp_driver_reports_provisioned_capacity"
+"´, which enables the driver to calculate and report provisioned capacity to "
+"Cinder Scheduler based on volumes sizes in the storage system."
+msgstr ""
+"NetApp ONTAP driver: added option ´netapp_driver_reports_provisioned_capacity"
+"´, which enables the driver to calculate and report provisioned capacity to "
+"Cinder Scheduler based on volumes sizes in the storage system."
+
+msgid ""
+"NetApp ONTAP driver: added support for FlexGroup pool using the NFS mode. "
+"There are several considerations for using the driver with it:"
+msgstr ""
+"NetApp ONTAP driver: added support for FlexGroup pool using the NFS mode. "
+"There are several considerations for using the driver with it:"
+
+msgid ""
"NetApp ONTAP iSCSI (bug 1712651): Fix ONTAP NetApp iSCSI driver not raising "
"a proper exception when trying to extend an attached volume beyond its max "
"geometry."
@@ -6021,6 +6377,20 @@ msgstr ""
"with the following extra-specs::"
msgid ""
+"NetApp ONTAP: Added support for storage assisted migration within a same "
+"ONTAP cluster (iSCSI/FC/NFS)."
+msgstr ""
+"NetApp ONTAP: Added support for storage-assisted migration within the same "
+"ONTAP cluster (iSCSI/FC/NFS)."
+
+msgid ""
+"NetApp ONTAP: Fix check QoS minimum support for SVM scoped account. See: "
+"`Bug #1924798 <https://bugs.launchpad.net/cinder/+bug/1924798>`_."
+msgstr ""
+"NetApp ONTAP: Fix check QoS minimum support for SVM scoped account. See: "
+"`Bug #1924798 <https://bugs.launchpad.net/cinder/+bug/1924798>`_."
+
+msgid ""
"NetApp ONTAP: Fixes `bug 1839384 <https://bugs.launchpad.net/cinder/"
"+bug/1839384>`__ Detaching any instance from multiattached volume terminates "
"connection. Now the connection is terminated only if there're no other "
@@ -6066,6 +6436,40 @@ msgstr ""
"raising an exception."
msgid ""
+"NetApp SolidFire driver `Bug #1932964 <https://bugs.launchpad.net/cinder/"
+"+bug/1932964>`_: Fixed a name exception that occurs on any volume migration."
+msgstr ""
+"NetApp SolidFire driver `Bug #1932964 <https://bugs.launchpad.net/cinder/"
+"+bug/1932964>`_: Fixed a name exception that occurs on any volume migration."
+
+msgid ""
+"NetApp SolidFire driver `Bug #1934435 <https://bugs.launchpad.net/cinder/"
+"+bug/1934435>`_: Fixed errors that might occur when an operation is made to "
+"a volume at the same time as the Element OS upgrades."
+msgstr ""
+"NetApp SolidFire driver `Bug #1934435 <https://bugs.launchpad.net/cinder/"
+"+bug/1934435>`_: Fixed errors that might occur when an operation is made to "
+"a volume at the same time as the Element OS upgrades."
+
+msgid ""
+"NetApp SolidFire driver `Bug #1942090 <https://bugs.launchpad.net/cinder/"
+"+bug/1942090>`_: Fixed a status exception that occurs on volume retype with "
+"migration."
+msgstr ""
+"NetApp SolidFire driver `Bug #1942090 <https://bugs.launchpad.net/cinder/"
+"+bug/1942090>`_: Fixed a status exception that occurs on volume retype with "
+"migration."
+
+msgid ""
+"NetApp SolidFire driver `bug #1934459 <https://bugs.launchpad.net/cinder/"
+"+bug/1934459>`_: Fixed backend initialization failing with RecursionError "
+"error when OSProfiler is enabled."
+msgstr ""
+"NetApp SolidFire driver `bug #1934459 <https://bugs.launchpad.net/cinder/"
+"+bug/1934459>`_: Fixed backend initialisation failing with RecursionError "
+"error when OSProfiler is enabled."
+
+msgid ""
"NetApp SolidFire driver now supports optimized revert to snapshot operations."
msgstr ""
"NetApp SolidFire driver now supports optimized revert to snapshot operations."
@@ -6125,6 +6529,23 @@ msgstr ""
"extra-spec in volume types."
msgid ""
+"NetApp drivers: NFS, iSCSI and FCP drivers have now the option to request "
+"ONTAP operations through REST API. The new option `netapp_use_legacy_client` "
+"switch between the old ZAPI client approach and new REST client. It is "
+"default to `True`, meaning that the drivers will keep working as before "
+"using ZAPI operations. If desired, this option can be set to `False` "
+"connecting with new REST client that performs REST API operations if it is "
+"available, otherwise falls back to ZAPI."
+msgstr ""
+"NetApp drivers: NFS, iSCSI and FCP drivers have now the option to request "
+"ONTAP operations through REST API. The new option `netapp_use_legacy_client` "
+"switches between the old ZAPI client approach and the new REST client. It is "
+"defaulted to `True`, meaning that the drivers will keep working as before "
+"using ZAPI operations. If desired, this option can be set to `False` "
+"connecting with the new REST client that performs REST API operations if it "
+"is available, otherwise falls back to ZAPI."
+
+msgid ""
"NetApp iSCSI drivers no longer use the discovery mechanism for multipathing "
"and they always return all target/portals when attaching a volume. Thanks "
"to this, volumes will be successfully attached even if the target/portal "
@@ -6161,6 +6582,13 @@ msgid ""
msgstr ""
"New Cinder volume driver for Inspur InStorage. The new driver supports iSCSI."
+msgid ""
+"New Cinder volume driver for KIOXIA Kumoscale storage systems. The driver "
+"storage system supports NVMeOF."
+msgstr ""
+"New Cinder volume driver for KIOXIA Kumoscale storage systems. The driver "
+"storage system supports NVMeOF."
+
msgid "New Cinder volume driver for LINBIT LINSTOR resources."
msgstr "New Cinder volume driver for LINBIT LINSTOR resources."
@@ -6175,6 +6603,9 @@ msgstr "New FC Cinder volume driver for Inspur Instorage."
msgid "New FC Cinder volume driver for Kaminario K2 all-flash arrays."
msgstr "New FC Cinder volume driver for Kaminario K2 all-flash arrays."
+msgid "New FC cinder volume driver for TOYOU NetStor Storage."
+msgstr "New FC Cinder volume driver for TOYOU NetStor Storage."
+
msgid "New Features"
msgstr "New Features"
@@ -6219,6 +6650,15 @@ msgid ""
msgstr ""
"New config option to enable discard (trim/unmap) support for any backend."
+msgid ""
+"New configuration options have been added to enable mTLS between cinder and "
+"glance: use ``glance_certfile`` and ``glance_keyfile`` in the ``[DEFAULT]`` "
+"section of the cinder configuration file."
+msgstr ""
+"New configuration options have been added to enable mTLS between cinder and "
+"glance: use ``glance_certfile`` and ``glance_keyfile`` in the ``[DEFAULT]`` "
+"section of the cinder configuration file."
+
msgid "New iSCSI Cinder volume driver for Kaminario K2 all-flash arrays."
msgstr "New iSCSI Cinder volume driver for Kaminario K2 all-flash arrays."
@@ -6251,6 +6691,26 @@ msgstr "Newton Series Release Notes"
msgid "NexentaStor5 iSCSI and NFS drivers multiattach capability enabled."
msgstr "NexentaStor5 iSCSI and NFS drivers multiattach capability enabled."
+msgid ""
+"Nimble driver `bug #1918099 <https://bugs.launchpad.net/cinder/"
+"+bug/1918099>`_: Fix revert to snapshot not working as expected."
+msgstr ""
+"Nimble driver `bug #1918099 <https://bugs.launchpad.net/cinder/"
+"+bug/1918099>`_: Fix revert to snapshot not working as expected."
+
+msgid ""
+"Nimble driver `bug #1918229 <https://bugs.launchpad.net/cinder/"
+"+bug/1918229>`_: Corrected an issue where the Nimble storage driver was "
+"inaccurately determining that there was no free space left in the storage "
+"array. The driver now relies on the storage array to report the amount of "
+"free space."
+msgstr ""
+"Nimble driver `bug #1918229 <https://bugs.launchpad.net/cinder/"
+"+bug/1918229>`_: Corrected an issue where the Nimble storage driver was "
+"inaccurately determining that there was no free space left in the storage "
+"array. The driver now relies on the storage array to report the amount of "
+"free space."
+
msgid "Nimble driver now supports discard."
msgstr "Nimble driver now supports discard."
@@ -6262,6 +6722,26 @@ msgstr ""
"extra-spec multiattach is added."
msgid ""
+"Nimble: Documented that existing driver supports the new Alletra 6k backend. "
+"Alletra 6k is newer version of existing Nimble backend."
+msgstr ""
+"Nimble: Documented that the existing driver supports the new Alletra 6k "
+"backend. Alletra 6k is a newer version of the existing Nimble backend."
+
+msgid ""
+"Not to put too fine a point on it, silent truncation is worse than failure, "
+"and the Cinder team will be addressing these issues in the next release. "
+"Additionally (as if that isn't bad enough!), we suspect that the above "
+"anomalies will also occur when using volume encryption with NFS-based "
+"storage backends, though this has not yet been reported or confirmed."
+msgstr ""
+"Not to put too fine a point on it, silent truncation is worse than failure, "
+"and the Cinder team will be addressing these issues in the next release. "
+"Additionally (as if that isn't bad enough!), we suspect that the above "
+"anomalies will also occur when using volume encryption with NFS-based "
+"storage backends, though this has not yet been reported or confirmed."
+
+msgid ""
"Note that a cluster scoped account must be used in the driver configuration "
"in order to use QoS in clustered ONTAP."
msgstr ""
@@ -6628,6 +7108,55 @@ msgstr ""
"Victoria is 9.2, so all the latest 92 REST endpoints will be used."
msgid ""
+"PowerMax driver `bug #1938572 <https://bugs.launchpad.net/cinder/"
+"+bug/1938572>`_ : Legacy PowerMax OS fix to convert an int to a string if "
+"the generation of snapVX is returned as an int from REST so that a 0 does "
+"not equate to False in python."
+msgstr ""
+"PowerMax driver `bug #1938572 <https://bugs.launchpad.net/cinder/"
+"+bug/1938572>`_ : Legacy PowerMax OS fix to convert an int to a string if "
+"the generation of snapVX is returned as an int from REST so that a 0 does "
+"not equate to False in python."
+
+msgid ""
+"PowerMax driver `bug #1939139 <https://bugs.launchpad.net/cinder/"
+"+bug/1939139>`_: Fix on create snapshot operation that exists when using "
+"PowerMax OS 5978.711 and later."
+msgstr ""
+"PowerMax driver `bug #1939139 <https://bugs.launchpad.net/cinder/"
+"+bug/1939139>`_: Fix on create snapshot operation that exists when using "
+"PowerMax OS 5978.711 and later."
+
+msgid ""
+"PowerMax driver `bug #1979668 <https://bugs.launchpad.net/cinder/"
+"+bug/1979668>`_: Fixed visibility of manageable volumes in multiple storage "
+"groups."
+msgstr ""
+"PowerMax driver `bug #1979668 <https://bugs.launchpad.net/cinder/"
+"+bug/1979668>`_: Fixed visibility of manageable volumes in multiple storage "
+"groups."
+
+msgid ""
+"PowerMax driver: Checking that the contents of the initiator group match the "
+"contents of the connector regardless of the initiator_check option being "
+"enabled. This will ensure an exception is raised if there is a mismatch, in "
+"all scenarios."
+msgstr ""
+"PowerMax driver: Checking that the contents of the initiator group match the "
+"contents of the connector regardless of the initiator_check option being "
+"enabled. This will ensure an exception is raised if there is a mismatch, in "
+"all scenarios."
+
+msgid ""
+"PowerMax driver: Enhancement to check the status of the ports in the port "
+"group so that any potential issue, like the ports being down, is highlighted "
+"early and clearly."
+msgstr ""
+"PowerMax driver: Enhancement to check the status of the ports in the port "
+"group so that any potential issue, like the ports being down, is highlighted "
+"early and clearly."
+
+msgid ""
"PowerMax for Cinder driver now supports Port Group and Port load balancing "
"when attaching Nova Compute instances to volumes on the backend PowerMax."
msgstr ""
@@ -6731,6 +7260,51 @@ msgid "ProphetStor drivers: FC and iSCSI"
msgstr "ProphetStor drivers: FC and iSCSI"
msgid ""
+"Pure Storage FlashArray driver `bug #1929219 <https://bugs.launchpad.net/"
+"cinder/+bug/1929219>`_: Fixes issue with incorrect internal mechanism for "
+"checking REST API of backend array. This has no external effect for users."
+msgstr ""
+"Pure Storage FlashArray driver `bug #1929219 <https://bugs.launchpad.net/"
+"cinder/+bug/1929219>`_: Fixes issue with an incorrect internal mechanism for "
+"checking REST API of backend array. This has no external effect for users."
+
+msgid ""
+"Pure Storage FlashArray driver `bug #1936663 <https://bugs.launchpad.net/"
+"cinder/+bug/1936663>`_: Fixes issue where cloning a consistency group "
+"containing volumes with very long names causes a crash - Required for "
+"PowerVC support"
+msgstr ""
+"Pure Storage FlashArray driver `bug #1936663 <https://bugs.launchpad.net/"
+"cinder/+bug/1936663>`_: Fixes the issue where cloning a consistency group "
+"containing volumes with very long names causes a crash - Required for "
+"PowerVC support"
+
+msgid ""
+"Pure Storage FlashArray driver `bug #1938579 <https://bugs.launchpad.net/"
+"cinder/+bug/1938579>`_: Fixes issue when cloning multiple volumes in PowerVC "
+"deployments."
+msgstr ""
+"Pure Storage FlashArray driver `bug #1938579 <https://bugs.launchpad.net/"
+"cinder/+bug/1938579>`_: Fixes issue when cloning multiple volumes in PowerVC "
+"deployments."
+
+msgid ""
+"Pure Storage FlashArray driver `bug 1910143 <https://bugs.launchpad.net/"
+"cinder/+bug/1910143>`_: Parameter ``pure_iscsi_cidr`` is now IPv4/v6 "
+"agnostic."
+msgstr ""
+"Pure Storage FlashArray driver `bug 1910143 <https://bugs.launchpad.net/"
+"cinder/+bug/1910143>`_: Parameter ``pure_iscsi_cidr`` is now IPv4/v6 "
+"agnostic."
+
+msgid ""
+"Pure Storage FlashArray driver fix to ensure cinder_tempest_plugin "
+"consistency group tests pass."
+msgstr ""
+"Pure Storage FlashArray driver fix to ensure cinder_tempest_plugin "
+"consistency group tests pass."
+
+msgid ""
"Pure Storage FlashArray driver has added configuration option "
"``pure_host_personality`` for setting the host personality upon host "
"creation (existing hosts are not affected)."
@@ -6770,6 +7344,13 @@ msgstr ""
"backends in clustered environments."
msgid ""
+"Pure Storage FlashArray minimum ``purestorage`` SDK version increased to "
+"1.17.0"
+msgstr ""
+"Pure Storage FlashArray minimum ``purestorage`` SDK version increased to "
+"1.17.0"
+
+msgid ""
"Pure Storage Volume Drivers can now utilize driver_ssl_cert_verify and "
"driver_ssl_cert_path config options to allow for secure https requests to "
"the FlashArray."
@@ -6779,6 +7360,65 @@ msgstr ""
"the FlashArray."
msgid ""
+"Pure Storage `bug #1930748 <https://bugs.launchpad.net/cinder/"
+"+bug/1930748>`_: Fixed issues with multiattched volumes being diconnected "
+"from a backend when still listed as an attachment to an instance."
+msgstr ""
+"Pure Storage `bug #1930748 <https://bugs.launchpad.net/cinder/"
+"+bug/1930748>`_: Fixed issues with multiattched volumes being disconnected "
+"from a backend when still listed as an attachment to an instance."
+
+msgid ""
+"Pure Storage adds a new driver to support NVMe-RoCE for the FlashArray. All "
+"features of the iSCSI and FC drivers are fully supported by this new driver."
+msgstr ""
+"Pure Storage adds a new driver to support NVMe-RoCE for the FlashArray. All "
+"features of the iSCSI and FC drivers are fully supported by this new driver."
+
+msgid ""
+"Pure Storage driver `Bug #1945824 <https://bugs.launchpad.net/cinder/"
+"+bug/1945824>`_: Fixed missing DB values when creating new consistency group "
+"from CG snapshot."
+msgstr ""
+"Pure Storage driver `Bug #1945824 <https://bugs.launchpad.net/cinder/"
+"+bug/1945824>`_: Fixed missing DB values when creating new consistency group "
+"from CG snapshot."
+
+msgid ""
+"Pure Storage driver: Add missing support for ``host_personality`` setting "
+"for FC-based hosts"
+msgstr ""
+"Pure Storage driver: Add missing support for ``host_personality`` setting "
+"for FC-based hosts"
+
+msgid ""
+"Pure Storage: FlashArray minimum Purity//FA version is increased to 5.3.0. "
+"All FlashArray backends must be at at least this minimum version or the "
+"driver will not initialize."
+msgstr ""
+"Pure Storage: FlashArray minimum Purity//FA version is increased to 5.3.0. "
+"All FlashArray backends must be at at least this minimum version or the "
+"driver will not initialise."
+
+msgid ""
+"Pure Storage: Minimum supported FlashArray Purity//FA is changed to 5.3.0. "
+"All FlashArray backends must be at at least this minimum version or the "
+"driver will not initialize."
+msgstr ""
+"Pure Storage: Minimum supported FlashArray Purity//FA is changed to 5.3.0. "
+"All FlashArray backends must be at at least this minimum version or the "
+"driver will not initialise."
+
+msgid ""
+"Pure Storage: Remove all API version checks in driver as the new minimum "
+"FlashArray Purity//FA version supports all previously version-gated features "
+"and functionality support."
+msgstr ""
+"Pure Storage: Remove all API version checks in the driver as the new minimum "
+"FlashArray Purity//FA version supports all previously version-gated features "
+"and functionality support."
+
+msgid ""
"Pure volume drivers will need 'purestorage' python module v1.6.0 or newer. "
"Support for 1.4.x has been removed."
msgstr ""
@@ -6801,6 +7441,13 @@ msgstr ""
"py2.7 is OpenStack Train. The minimum version of Python now supported by "
"Cinder is Python 3.6."
+msgid ""
+"Python 3.6 & 3.7 support has been dropped. The minimum version of Python now "
+"supported is Python 3.8."
+msgstr ""
+"Python 3.6 & 3.7 support has been dropped. The minimum version of Python now "
+"supported is Python 3.8."
+
msgid "QNAP"
msgstr "QNAP"
@@ -6836,6 +7483,13 @@ msgstr ""
"Now the flatten operation is executed in a different thread."
msgid ""
+"RBD driver `Bug #1922408 <https://bugs.launchpad.net/cinder/+bug/1922408>`_: "
+"Fixed create encrypted volume from encrypted snapshot."
+msgstr ""
+"RBD driver `Bug #1922408 <https://bugs.launchpad.net/cinder/+bug/1922408>`_: "
+"Fixed create encrypted volume from encrypted snapshot."
+
+msgid ""
"RBD driver `bug #1901241 <https://bugs.launchpad.net/cinder/+bug/1901241>`_: "
"Fixed an issue where decreasing the ``rbd_max_clone_depth`` configuration "
"option would prevent volumes that had already exceeded that depth from being "
@@ -6847,6 +7501,63 @@ msgstr ""
"cloned."
msgid ""
+"RBD driver `bug #1907964 <https://bugs.launchpad.net/cinder/+bug/1907964>`_: "
+"Add support for fast-diff on backup images stored in Ceph. Provided fast-"
+"diff is supported by the backend it will automatically be enabled and used. "
+"With fast-diff enabled, the generation of diffs between images and snapshots "
+"as well as determining the actual data usage of a snapshot is speed up "
+"significantly."
+msgstr ""
+"RBD driver `bug #1907964 <https://bugs.launchpad.net/cinder/+bug/1907964>`_: "
+"Add support for fast-diff on backup images stored in Ceph. Provided fast-"
+"diff is supported by the backend it will automatically be enabled and used. "
+"With fast-diff enabled, the generation of diffs between images and snapshots "
+"as well as determining the actual data usage of a snapshot is speed up "
+"significantly."
+
+msgid ""
+"RBD driver `bug #1916843 <https://bugs.launchpad.net/cinder/+bug/1916843>`_: "
+"Fixed rpc timeout when backing up RBD snapshot. We no longer flatten "
+"temporary volumes and snapshots."
+msgstr ""
+"RBD driver `bug #1916843 <https://bugs.launchpad.net/cinder/+bug/1916843>`_: "
+"Fixed RPC timeout when backing up RBD snapshot. We no longer flatten "
+"temporary volumes and snapshots."
+
+msgid ""
+"RBD driver `bug #1941815 <https://bugs.launchpad.net/cinder/+bug/1941815>`_: "
+"Fixed deleting volumes with snapshots/volumes in the ceph trash space."
+msgstr ""
+"RBD driver `bug #1941815 <https://bugs.launchpad.net/cinder/+bug/1941815>`_: "
+"Fixed deleting volumes with snapshots/volumes in the ceph trash space."
+
+msgid ""
+"RBD driver `bug #1942210 <https://bugs.launchpad.net/cinder/+bug/1942210>`_: "
+"When creating a volume from a snapshot, the operation could fail due to an "
+"uncaught exception being raised during a check to see if the backend Ceph "
+"installation supported the clone v2 API. The driver now handles this "
+"situation gracefully."
+msgstr ""
+"RBD driver `bug #1942210 <https://bugs.launchpad.net/cinder/+bug/1942210>`_: "
+"When creating a volume from a snapshot, the operation could fail due to an "
+"uncaught exception being raised during a check to see if the backend Ceph "
+"installation supported the clone v2 API. The driver now handles this "
+"situation gracefully."
+
+msgid ""
+"RBD driver `bug #1947518 <https://bugs.launchpad.net/cinder/+bug/1947518>`_: "
+"Corrected a regression caused by the fix for `Bug #1931004 <https://bugs."
+"launchpad.net/cinder/+bug/1931004>`_ that was attempting to access the "
+"glance images RBD pool with write privileges when creating a volume from an "
+"image."
+msgstr ""
+"RBD driver `bug #1947518 <https://bugs.launchpad.net/cinder/+bug/1947518>`_: "
+"Corrected a regression caused by the fix for `Bug #1931004 <https://bugs."
+"launchpad.net/cinder/+bug/1931004>`_ that was attempting to access the "
+"glance images RBD pool with write privileges when creating a volume from an "
+"image."
+
+msgid ""
"RBD driver can have bottlenecks if too many slow operations are happening at "
"the same time (for example many huge volume deletions), we can now use the "
"`backend_native_threads_pool_size` option in the RBD driver section to "
@@ -6976,6 +7687,17 @@ msgstr "Re-added QNAP Cinder volume driver."
msgid "Reduxio"
msgstr "Reduxio"
+msgid ""
+"Reimage a volume: ``POST /v3/volumes/{volume_id}/action`` with the ``os-"
+"reimage`` action. This call will result in a 202 (Accepted) response, but "
+"if the image's ``disk_format`` would require conversion to be written to the "
+"volume, the volume will go to ``error`` status."
+msgstr ""
+"Reimage a volume: ``POST /v3/volumes/{volume_id}/action`` with the ``os-"
+"reimage`` action. This call will result in a 202 (Accepted) response, but "
+"if the image's ``disk_format`` would require conversion to be written to the "
+"volume, the volume will go to ``error`` status."
+
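As an illustration only, the note above names just the action and the 202/``error`` behaviour; a minimal sketch of driving it with Python's ``requests`` library follows. The endpoint, token, IDs, the ``image_id`` body field and the ``3.68`` microversion header are placeholder assumptions, not values taken from the note::

    import requests

    # Placeholders throughout; the "volume 3.68" microversion and the
    # "image_id" body field are assumptions. Only the os-reimage action name
    # and the 202 (Accepted) semantics come from the note above.
    headers = {
        "X-Auth-Token": "<keystone-token>",
        "OpenStack-API-Version": "volume 3.68",
    }
    body = {"os-reimage": {"image_id": "<image-uuid>"}}
    resp = requests.post(
        "http://controller:8776/v3/<project-id>/volumes/<volume-uuid>/action",
        headers=headers, json=body, timeout=30)
    print(resp.status_code)  # expect 202; poll the volume status afterwards
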
msgid "Remove mirror policy parameter from huawei driver."
msgstr "Remove mirror policy parameter from Huawei driver."
@@ -7183,6 +7905,9 @@ msgstr ""
"volume extend operations to ensure the new size is rounded up to the nearest "
"size when needed."
+msgid "Seagate driver: Added support for ``get_driver_options`` api call"
+msgstr "Seagate driver: Added support for ``get_driver_options`` API call"
+
msgid "Security Issues"
msgstr "Security Issues"
@@ -7250,6 +7975,19 @@ msgid "Show CG checks both tables."
msgstr "Show CG checks both tables."
msgid ""
+"Similarly, when creating an encrypted volume from a snapshot of an encrypted "
+"volume, if the amount of data in the original volume at the time the "
+"snapshot was created is very close to the gibibyte boundary given by the "
+"volume's size, it is possible for the data in the new volume to be silently "
+"truncated."
+msgstr ""
+"Similarly, when creating an encrypted volume from a snapshot of an encrypted "
+"volume, if the amount of data in the original volume at the time the "
+"snapshot was created is very close to the gibibyte boundary given by the "
+"volume's size, it is possible for the data in the new volume to be silently "
+"truncated."
+
+msgid ""
"SmartCompression feature is disabled for the NexentaStor5 NFS driver. Thick "
"provisioned volumes created as files containing zeros are not being "
"compressed with standard compression if SmartCompression feature is enabled. "
@@ -7260,10 +7998,28 @@ msgstr ""
"compressed with standard compression if SmartCompression feature is enabled. "
"This functionality will be fixed in a later release."
+msgid ""
+"So far, the behavior isn't anomalous; it's basically what you'd expect once "
+"you are aware that the encryption metadata must be stored in the volume and "
+"that it consumes some space."
+msgstr ""
+"So far, the behaviour isn't anomalous; it's basically what you'd expect once "
+"you are aware that the encryption metadata must be stored in the volume and "
+"that it consumes some space."
+
msgid "SolidFire driver now supports IPv6 for management IP."
msgstr "SolidFire driver now supports IPv6 for management IP."
msgid ""
+"SolidFire driver: Driver no longer stores attach timestamp and instance as "
+"metadata on the storage array. Any metadata remaining in the array must be "
+"considered outdated and incorrect."
+msgstr ""
+"SolidFire driver: Driver no longer stores attach timestamp and instance as "
+"metadata on the storage array. Any metadata remaining in the array must be "
+"considered outdated and incorrect."
+
+msgid ""
"SolidFire supports Synchronous, Asynchronous and SnapshotsOnly replication "
"modes. This adds the config option `solidfire:replication_mode` to specify "
"the mode to be used by Cinder. Its value can be `Sync`, `Async` or "
@@ -7282,6 +8038,35 @@ msgstr ""
"with increased IOPS on the extended volume."
msgid ""
+"Some current policies that were over-general (that is, they governed both "
+"read and write operations on a resource) are being replaced by a set of new "
+"policies that provide greater granularity. The following policies are "
+"DEPRECATED and will be removed in the Yoga release:"
+msgstr ""
+"Some current policies that were over-general (that is, they governed both "
+"read and write operations on a resource) are being replaced by a set of new "
+"policies that provide greater granularity. The following policies are "
+"DEPRECATED and will be removed in the Yoga release:"
+
+msgid ""
+"Some current rules defined in the policy file are being DEPRECATED and will "
+"be removed in the Yoga release. You only need to worry about this if you "
+"have used any of these rules yourself in when writing custom policies, as "
+"you cannot rely on the following rules being pre-defined in the Yoga release."
+msgstr ""
+"Some current rules defined in the policy file are being DEPRECATED and will "
+"be removed in the Yoga release. You only need to worry about this if you "
+"have used any of these rules yourself when writing custom policies, as you "
+"cannot rely on the following rules being pre-defined in the Yoga release."
+
+msgid ""
+"Some new backend storage drivers have been added, and many current drivers "
+"have added features and fixed bugs."
+msgstr ""
+"Some new backend storage drivers have been added, and many current drivers "
+"have added features and fixed bugs."
+
+msgid ""
"Some of DISCO driver options were incorrectly read from ``[DEFAULT]`` "
"section in the cinder.conf. Now those are correctly read from "
"``[<backend_id>]`` section. This includes following options:"
@@ -7325,6 +8110,37 @@ msgstr ""
"create a volume directly from a backup. For instance, you can use the "
"command: ``cinder create <size> --backup-id <backup_id>`` in cinderclient."
+msgid ""
+"Starting with API microversion 3.64, an ``encryption_key_id`` attribute is "
+"included in the response body of volume and backup details when the "
+"associated volume is encrypted."
+msgstr ""
+"Starting with API microversion 3.64, an ``encryption_key_id`` attribute is "
+"included in the response body of volume and backup details when the "
+"associated volume is encrypted."
+
+msgid ""
+"Starting with API microversion 3.65, a ``consumes_quota`` field is included "
+"in the response body of volumes and snapshots to indicate whether the volume "
+"is using quota or not."
+msgstr ""
+"Starting with API microversion 3.65, a ``consumes_quota`` field is included "
+"in the response body of volumes and snapshots to indicate whether the volume "
+"is using quota or not."
+
+msgid ""
+"Starting with API microversion 3.70, encrypted volumes can be transferred to "
+"a user in a different project. Prior to microversion 3.70, the transfer is "
+"blocked due to the inability to transfer ownership of the volume's "
+"encryption key. With microverson 3.70, ownership of the encryption key is "
+"transferred when the volume is transferred."
+msgstr ""
+"Starting with API microversion 3.70, encrypted volumes can be transferred to "
+"a user in a different project. Prior to microversion 3.70, the transfer is "
+"blocked due to the inability to transfer ownership of the volume's "
+"encryption key. With microverson 3.70, ownership of the encryption key is "
+"transferred when the volume is transferred."
+
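As a hedged illustration of the 3.70 behaviour described above, the sketch below creates a transfer for an encrypted volume with ``requests``. The ``/volume-transfers`` path and the request body layout are assumptions based on the standard volume-transfer API; only the encryption-key handover semantics come from the note::

    import requests

    # Placeholders throughout; verify the endpoint and fields against the
    # Block Storage API reference before relying on this sketch.
    headers = {
        "X-Auth-Token": "<keystone-token>",
        "OpenStack-API-Version": "volume 3.70",
    }
    body = {"transfer": {"volume_id": "<encrypted-volume-uuid>",
                         "name": "handover"}}
    resp = requests.post(
        "http://controller:8776/v3/<project-id>/volume-transfers",
        headers=headers, json=body, timeout=30)
    resp.raise_for_status()
    # The auth_key is handed to the receiving project to accept the transfer.
    print(resp.json()["transfer"]["auth_key"])
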
msgid "Stein Series Release Notes"
msgstr "Stein Series Release Notes"
@@ -7376,6 +8192,9 @@ msgstr "Support for Consistency Groups in the NetApp E-Series Volume Driver."
msgid "Support for Dot Hill AssuredSAN arrays has been removed."
msgstr "Support for Dot Hill AssuredSAN arrays has been removed."
+msgid "Support for MySQL 5.5 has been dropped."
+msgstr "Support for MySQL 5.5 has been dropped."
+
msgid ""
"Support for NetApp E-Series has been removed. The NetApp Unified driver can "
"now only be used with NetApp Clustered Data ONTAP."
@@ -7489,6 +8308,15 @@ msgstr ""
"<snapshot-id>``."
msgid ""
+"Support for the ``cinder.database.migration_backend`` entrypoint, which "
+"provided for configurable database migration backends, has been removed. "
+"This was never exercised and was a source of unnecessary complexity."
+msgstr ""
+"Support for the ``cinder.database.migration_backend`` entrypoint, which is "
+"provided for configurable database migration backends, has been removed. "
+"This was never exercised and was a source of unnecessary complexity."
+
+msgid ""
"Support for use of 'fc_southbound_protocol' configuration setting in the "
"Brocade FC SAN lookup service."
msgstr ""
@@ -7526,6 +8354,23 @@ msgid "Supported ``project_id`` admin filters to limits API."
msgstr "Supported ``project_id`` admin filters to limits API."
msgid ""
+"Swift backup driver: Added new configuration option "
+"``backup_swift_create_storage_policy`` for the Swift backup driver. If "
+"specified it will be used as the storage policy when creating the Swift "
+"Container, default value is None meaning it will not be used and Swift will "
+"use the system default. Please note that this only applies if a container "
+"doesn't exist as we cannot update the storage policy on an already existing "
+"container."
+msgstr ""
+"Swift backup driver: Added new configuration option "
+"``backup_swift_create_storage_policy`` for the Swift backup driver. If "
+"specified it will be used as the storage policy when creating the Swift "
+"Container, default value is None meaning it will not be used and Swift will "
+"use the system default. Please note that this only applies if a container "
+"doesn't exist as we cannot update the storage policy on an already existing "
+"container."
+
+msgid ""
"TSM backup driver is removed. Please, migrate your backups before upgrade."
msgstr ""
"TSM backup driver is removed. Please, migrate your backups before upgrade."
@@ -7576,6 +8421,32 @@ msgstr ""
"Clustered Data ONTAP and E-series, are unaffected."
msgid ""
+"The Block Storage API v2, which was deprecated in the Pike release, has been "
+"removed. If upgrading from a previous OpenStack release, it is recommended "
+"that you edit your ``/etc/cinder/api-paste.ini`` file to remove all "
+"references to v2. Additionally, the deprecated configuration option "
+"``enable_v2_api`` has been removed. If present in a configuration file, it "
+"will be silently ignored."
+msgstr ""
+"The Block Storage API v2, which was deprecated in the Pike release, has been "
+"removed. If upgrading from a previous OpenStack release, it is recommended "
+"that you edit your ``/etc/cinder/api-paste.ini`` file to remove all "
+"references to v2. Additionally, the deprecated configuration option "
+"``enable_v2_api`` has been removed. If present in a configuration file, it "
+"will be silently ignored."
+
+msgid ""
+"The Block Storage API v2, which was deprecated way back in the Pike release, "
+"has been removed. We gently remind you that Pike was a long time ago, and "
+"that version 3.0 of the Block Storage API was designed to be completely "
+"compatible with version 2."
+msgstr ""
+"The Block Storage API v2, which was deprecated way back in the Pike release, "
+"has been removed. We gently remind you that Pike was a long time ago and "
+"that version 3.0 of the Block Storage API was designed to be completely "
+"compatible with version 2."
+
+msgid ""
"The Blockbridge driver has been marked as unsupported and is now deprecated. "
"``enable_unsupported_drivers`` will need to be set to ``True`` in cinder."
"conf to continue to use it."
@@ -9312,6 +10183,17 @@ msgstr ""
"does not change, they will be removed in the Queens development cycle."
msgid ""
+"The Zadara VPSA Driver has been updated to support json format and "
+"reorganized with new code layout. The module path ``cinder.volume.drivers."
+"zadara.ZadaraVPSAISCSIDriver`` should now be updated to ``cinder.volume."
+"drivers.zadara.zadara.ZadaraVPSAISCSIDriver`` in ``cinder.conf``."
+msgstr ""
+"The Zadara VPSA Driver has been updated to support JSON format and "
+"reorganised with a new code layout. The module path ``cinder.volume.drivers."
+"zadara.ZadaraVPSAISCSIDriver`` should now be updated to ``cinder.volume."
+"drivers.zadara.zadara.ZadaraVPSAISCSIDriver`` in ``cinder.conf``."
+
+msgid ""
"The `Ceph RADOS Block Device (RBD) <https://docs.openstack.org/cinder/latest/"
"configuration/block-storage/drivers/ceph-rbd-volume-driver.html>`__ driver "
"documentation has been updated to reflect this policy and explains it in "
@@ -9336,6 +10218,17 @@ msgstr ""
"be updated to use ``cinder.volume.drivers.windows.iscsi.WindowsISCSIDriver``."
msgid ""
+"The ``[DEFAULT] db_driver`` config option has been removed. This was "
+"intended to allow configuration of the database driver, however, there is "
+"only one database driver present in-tree and out-of-tree database drivers "
+"are not supported."
+msgstr ""
+"The ``[DEFAULT] db_driver`` config option has been removed. This was "
+"intended to allow configuration of the database driver, however, there is "
+"only one database driver present in-tree and out-of-tree database drivers "
+"are not supported."
+
+msgid ""
"The ``__DEFAULT__`` volume type may safely be renamed (or renamed and "
"deleted) after you have run the online migrations as long as the "
"``default_volume_type`` configuration option is set to a valid existing "
@@ -9383,6 +10276,15 @@ msgstr ""
"work until it is replaced with this new mechanism."
msgid ""
+"The ``cinder.quota.NestedDbQuotaDriver`` quota driver was marked as "
+"deprecated in Train release and is eligible for removal since Ussuri "
+"release. This release removes the NestedQuotaDriver support."
+msgstr ""
+"The ``cinder.quota.NestedDbQuotaDriver`` quota driver was marked as "
+"deprecated in the Train release and is eligible for removal since the Ussuri "
+"release. This release removes the NestedQuotaDriver support."
+
+msgid ""
"The ``default_volume_type`` configuration option is now required to have a "
"value. The default value is ``__DEFAULT__``, so you should see no change in "
"behavior whether or not you have set a value for ``default_volume_type``. "
@@ -9448,6 +10350,17 @@ msgstr ""
"2013 July (Havana). Removed this filter and please use \"binary\" instead."
msgid ""
+"The ``storage_protocol`` treats all variants of the protocol name as the "
+"same regarding matches, so for example using FC, fc, or fibre_channel will "
+"be treated equally in the scheduler, be it when filtering using the volume "
+"type's extra specs or when using filter and goodness functions."
+msgstr ""
+"The ``storage_protocol`` treats all variants of the protocol name as the "
+"same regarding matches, so for example using FC, fc, or fibre_channel will "
+"be treated equally in the scheduler, be it when filtering using the volume "
+"type's extra specs or when using filter and goodness functions."
+
+msgid ""
"The ``volume_extension:volume_type_encryption`` policy, which was deprecated "
"in Stein, has been un-deprecated for the convenience of operators who would "
"like to set the policies for the create, get, update, and delete operations "
@@ -9513,6 +10426,52 @@ msgstr ""
"algorithm.)"
msgid ""
+"The cinder options associated with throttling are "
+"``volume_copy_blkio_cgroup_name`` and ``volume_copy_bps_limit``. They are "
+"described in the `sample cinder configuration file <https://docs.openstack."
+"org/cinder/wallaby/configuration/block-storage/samples/cinder.conf.html>`_ "
+"for the Wallaby release."
+msgstr ""
+"The Cinder options associated with throttling are "
+"``volume_copy_blkio_cgroup_name`` and ``volume_copy_bps_limit``. They are "
+"described in the `sample cinder configuration file <https://docs.openstack."
+"org/cinder/wallaby/configuration/block-storage/samples/cinder.conf.html>`_ "
+"for the Wallaby release."
+
+msgid ""
+"The cinder team is working on a throttling solution using cgroup v2, but it "
+"was not ready at the time of this release. The solution is expected to be "
+"backported to a future release in the Xena series. This issue is being "
+"tracked as `Bug #1942203 <https://bugs.launchpad.net/cinder/+bug/1942203>`_."
+msgstr ""
+"The Cinder team is working on a throttling solution using cgroup v2, but it "
+"was not ready at the time of this release. The solution is expected to be "
+"backported to a future release in the Xena series. This issue is being "
+"tracked as `Bug #1942203 <https://bugs.launchpad.net/cinder/+bug/1942203>`_."
+
+msgid ""
+"The cinder team is working on a throttling solution using cgroup v2, but it "
+"was not ready at the time of this release. The solution is expected to be "
+"backported to a future release in the Yoga series. This issue continues to "
+"be tracked as `Bug #1942203 <https://bugs.launchpad.net/cinder/"
+"+bug/1942203>`_."
+msgstr ""
+"The Cinder team is working on a throttling solution using cgroup v2, but it "
+"was not ready at the time of this release. The solution is expected to be "
+"backported to a future release in the Yoga series. This issue continues to "
+"be tracked as `Bug #1942203 <https://bugs.launchpad.net/cinder/"
+"+bug/1942203>`_."
+
+msgid ""
+"The cinder-manage command now includes a new ``quota`` category with two "
+"possible actions ``check`` and ``sync`` to help administrators manage out of "
+"sync quotas on long running deployments."
+msgstr ""
+"The cinder-manage command now includes a new ``quota`` category with two "
+"possible actions ``check`` and ``sync`` to help administrators manage out-of-"
+"sync quotas on long-running deployments."
+
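For illustration only, a small Python wrapper around the two cinder-manage actions named above; no extra flags are shown because none are documented in this note::

    import subprocess

    # "check" reports quota usage that is out of sync, "sync" repairs it;
    # both are the cinder-manage quota actions named in the note above.
    subprocess.run(["cinder-manage", "quota", "check"], check=True)
    subprocess.run(["cinder-manage", "quota", "sync"], check=True)
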
+msgid ""
"The cinder-manage online_data_migrations command now prints a tabular "
"summary of completed and remaining records. The goal here is to get all your "
"numbers to zero. The previous execution return code behavior is retained for "
@@ -9524,6 +10483,48 @@ msgstr ""
"for scripting."
msgid ""
+"The cinder-volume service currently depends on `Linux Kernel Control Groups "
+"(cgroups) version 1 <https://www.kernel.org/doc/html/latest/admin-guide/"
+"cgroup-v1/cgroups.html>`_ to control i/o throttling during some volume-copy "
+"and image-convert operations. At the time of this release, some Linux "
+"distributions may have changed to using `cgroups v2 <https://www.kernel.org/"
+"doc/html/latest/admin-guide/cgroup-v2.html>`_ by default. Thus, you may "
+"need to take explicit steps to ensure that **cgroups v1** is enabled on any "
+"OpenStack nodes running the cinder-volume service. This may entail setting "
+"specific Linux kernel parameters for these nodes. Consult your Linux "
+"distribution's documentation for details."
+msgstr ""
+"The cinder-volume service currently depends on `Linux Kernel Control Groups "
+"(cgroups) version 1 <https://www.kernel.org/doc/html/latest/admin-guide/"
+"cgroup-v1/cgroups.html>`_ to control i/o throttling during some volume-copy "
+"and image-convert operations. At the time of this release, some Linux "
+"distributions may have changed to using `cgroups v2 <https://www.kernel.org/"
+"doc/html/latest/admin-guide/cgroup-v2.html>`_ by default. Thus, you may "
+"need to take explicit steps to ensure that **cgroups v1** is enabled on any "
+"OpenStack nodes running the cinder-volume service. This may entail setting "
+"specific Linux kernel parameters for these nodes. Consult your Linux "
+"distribution's documentation for details."
+
+msgid ""
+"The cinder-volume service currently depends on `Linux Kernel Control Groups "
+"(cgroups) version 1 <https://www.kernel.org/doc/html/latest/admin-guide/"
+"cgroup-v1/cgroups.html>`_ to control i/o throttling during some volume-copy "
+"and image-convert operations. Some Linux distributions, however, have "
+"changed to using `cgroup v2 <https://www.kernel.org/doc/html/latest/admin-"
+"guide/cgroup-v2.html>`_ by default and may have discontinued cgroups v1 "
+"support completely. Consult your Linux distribution's documentation for "
+"details."
+msgstr ""
+"The cinder-volume service currently depends on `Linux Kernel Control Groups "
+"(cgroups) version 1 <https://www.kernel.org/doc/html/latest/admin-guide/"
+"cgroup-v1/cgroups.html>`_ to control i/o throttling during some volume-copy "
+"and image-convert operations. Some Linux distributions, however, have "
+"changed to using `cgroup v2 <https://www.kernel.org/doc/html/latest/admin-"
+"guide/cgroup-v2.html>`_ by default and may have discontinued cgroups v1 "
+"support completely. Consult your Linux distribution's documentation for "
+"details."
+
+msgid ""
"The cinder-volume service depends on `Linux Kernel Control Groups (cgroups) "
"version 1 <https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v1/"
"cgroups.html>`_ to control i/o throttling during some volume-copy and image-"
@@ -9758,6 +10759,19 @@ msgid "The deprecated HP CLIQ proxy driver has now been removed."
msgstr "The deprecated HP CLIQ proxy driver has now been removed."
msgid ""
+"The details of this project are described in `Policy Personas and "
+"Permissions <https://docs.openstack.org/cinder/xena/configuration/block-"
+"storage/policy-personas.html>`_ in the `Cinder Service Configuration "
+"Guide`. We encourage you to read through that document. The following is "
+"only a summary."
+msgstr ""
+"The details of this project are described in `Policy Personas and "
+"Permissions <https://docs.openstack.org/cinder/xena/configuration/block-"
+"storage/policy-personas.html>`_ in the `Cinder Service Configuration "
+"Guide`. We encourage you to read through that document. The following is "
+"only a summary."
+
+msgid ""
"The driver for Datera's Storage Systems has been marked as unsupported and "
"is now deprecated. ``enable_unsupported_driver`` will need to be set to "
"``True`` in the driver's section in cinder.conf to continue to use it."
@@ -9892,6 +10906,17 @@ msgstr ""
"The following volume drivers were deprecated in the Pike release and have "
"now been removed:"
+msgid ""
+"The format version of a qcow2 can be determined by looking for the "
+"``compat`` field in the output of the ``qemu-img info`` command. A version 2 "
+"format image will report ``compat=0.10``, whereas a qcow2 in version 3 "
+"format will report ``compat=1.1``."
+msgstr ""
+"The format version of a qcow2 can be determined by looking for the "
+"``compat`` field in the output of the ``qemu-img info`` command. A version 2 "
+"format image will report ``compat=0.10``, whereas a qcow2 in version 3 "
+"format will report ``compat=1.1``."
+
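A short sketch of reading that ``compat`` field programmatically, assuming ``qemu-img`` is installed and supports JSON output (current releases do)::

    import json
    import subprocess

    # Parses the machine-readable output of "qemu-img info"; the nested
    # "format-specific"/"data"/"compat" keys hold the qcow2 compat level.
    info = json.loads(subprocess.check_output(
        ["qemu-img", "info", "--output=json", "disk.qcow2"]))
    compat = info.get("format-specific", {}).get("data", {}).get("compat")
    print(compat)  # "0.10" => qcow2 version 2, "1.1" => qcow2 version 3
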
msgid "The fss_pool option is deprecated. Use fss_pools instead."
msgstr "The fss_pool option is deprecated. Use fss_pools instead."
@@ -10040,6 +11065,19 @@ msgstr ""
"using this feature."
msgid ""
+"The optional driver feature \"Snapshot Attachment\" has been removed from "
+"the `Cinder Driver Support Matrix <https://docs.openstack.org/cinder/latest/"
+"reference/support-matrix.html>`_. It is an enhancment used for backups, it "
+"is not exposed via the Block Storage API, and its presence in the Support "
+"Matrix was misleading."
+msgstr ""
+"The optional driver feature \"Snapshot Attachment\" has been removed from "
+"the `Cinder Driver Support Matrix <https://docs.openstack.org/cinder/latest/"
+"reference/support-matrix.html>`_. It is an enhancement used for backups, it "
+"is not exposed via the Block Storage API, and its presence in the Support "
+"Matrix was misleading."
+
+msgid ""
"The os_privileged_xxx and nova_xxx in the [default] section are deprecated "
"in favor of the settings in the [nova] section."
msgstr ""
@@ -10072,6 +11110,13 @@ msgid "The policy is named ``group:reset_group_snapshot_status``."
msgstr "The policy is named ``group:reset_group_snapshot_status``."
msgid ""
+"The primary change in the Xena release is that cinder's default policy "
+"configuration will recognize the ``reader`` role on a project. Additionally,"
+msgstr ""
+"The primary change in the Xena release is that Cinder's default policy "
+"configuration will recognise the ``reader`` role on a project. Additionally,"
+
+msgid ""
"The qemu-img tool now has resource limits applied which prevent it from "
"using more than 1GB of address space or more than 2 seconds of CPU time. "
"This provides protection against denial of service attacks from maliciously "
@@ -10753,6 +11798,25 @@ msgstr ""
"GPFS driver, see the GPFS config reference. - http://docs.openstack.org/"
"liberty/config-reference/content/GPFS-driver.html"
+msgid ""
+"Using the v2 clone format for cloned volumes allows volumes with dependent "
+"images to be moved to the trash - where they remain until purged - and allow "
+"the RBD driver to postpone the deletion until the volume has no dependent "
+"images. Configuring the trash purge is recommended to avoid wasting space "
+"with these trashed volumes. Since the Ceph Octopus release, the trash can be "
+"configured to automatically purge on a defined schedule. See the ``rbd trash "
+"purge schedule`` commands in the `rbd manpage <https://docs.ceph.com/en/"
+"octopus/man/8/rbd/>`_."
+msgstr ""
+"Using the v2 clone format for cloned volumes allows volumes with dependent "
+"images to be moved to the Rubbish Bin - where they remain until purged - and "
+"allow the RBD driver to postpone the deletion until the volume has no "
+"dependent images. Configuring the Rubbish Bin purge is recommended to avoid "
+"wasting space with these volumes. Since the Ceph Octopus release, the "
+"Rubbish Bin can be configured to automatically purge on a defined schedule. "
+"See the ``rbd trash purge schedule`` commands in the `rbd manpage <https://"
+"docs.ceph.com/en/octopus/man/8/rbd/>`_."
+
msgid "Ussuri Series Release Notes"
msgstr "Ussuri Series Release Notes"
@@ -11300,6 +12364,19 @@ msgstr ""
"#1895035 <https://bugs.launchpad.net/cinder/+bug/1895035>`_."
msgid ""
+"When the Ceph backup driver is used for the backup service, restoring a "
+"backup to a volume created on a non-RBD backend fails. The cinder team is "
+"working on a solution which is expected to be backported to a future release "
+"in the Xena series. The issue is being tracked as `Bug #1895035 <https://"
+"bugs.launchpad.net/cinder/+bug/1895035>`_."
+msgstr ""
+"When the Ceph backup driver is used for the backup service, restoring a "
+"backup to a volume created on a non-RBD backend fails. The Cinder team is "
+"working on a solution which is expected to be backported to a future release "
+"in the Xena series. The issue is being tracked as `Bug #1895035 <https://"
+"bugs.launchpad.net/cinder/+bug/1895035>`_."
+
+msgid ""
"When uploading qcow2 images to Glance, image data will be compressed. This "
"will generally result in less data transferred to Glance at the expense of "
"higher CPU usage. This behavior is controlled by the "
@@ -11432,6 +12509,9 @@ msgstr "X-IO"
msgid "Xena Series Release Notes"
msgstr "Xena Series Release Notes"
+msgid "Yadro Tatlin Unified: Added initial version of the iSCSI driver."
+msgstr "Yadro Tatlin Unified: Added initial version of the iSCSI driver."
+
msgid "Yoga Series Release Notes"
msgstr "Yoga Series Release Notes"
@@ -12671,6 +13751,19 @@ msgstr ""
"snapshot, preventing accidental manual deletion of those resources."
msgid ""
+"`Bug #1978729 <https://bugs.launchpad.net/cinder/+bug/1978729>`_: Fixed "
+"context.message_action is None on errors by backup drivers. The message_* "
+"properties of the context were not passed during rpc, which caused a double "
+"exception when a backup driver raised an exception, masking the actual "
+"backup driver exception."
+msgstr ""
+"`Bug #1978729 <https://bugs.launchpad.net/cinder/+bug/1978729>`_: Fixed "
+"context.message_action is None on errors by backup drivers. The message_* "
+"properties of the context were not passed during RPC, which caused a double "
+"exception when a backup driver raised an exception, masking the actual "
+"backup driver exception."
+
+msgid ""
"`Bug #1979666 <https://bugs.launchpad.net/cinder/+bug/1979666>`_: PowerMax "
"driver : Fixed rare case where the SRP in the local and remote arrays are "
"different when managing volumes into OpenStack. For backward compatibility "
@@ -13116,6 +14209,17 @@ msgstr ""
msgid "only the libvirt compute driver supports this currently."
msgstr "only the libvirt compute driver supports this currently."
+msgid ""
+"os-brick file lock location can be specified independently of the Cinder "
+"service lock location using ``lock_path`` in the ``[os_brick]`` "
+"configuration section. Useful for HCI deployments and when running Cinder "
+"and Glance with Cinder backend on the same host."
+msgstr ""
+"os-brick file lock location can be specified independently of the Cinder "
+"service lock location using ``lock_path`` in the ``[os_brick]`` "
+"configuration section. Useful for HCI deployments and when running Cinder "
+"and Glance with the Cinder backend on the same host."
+
msgid "retype_volume"
msgstr "retype_volume"
diff --git a/requirements.txt b/requirements.txt
index bb2ff913d..10b066d5c 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -56,7 +56,7 @@ tenacity>=6.3.1 # Apache-2.0
WebOb>=1.8.6 # MIT
oslo.i18n>=5.1.0 # Apache-2.0
oslo.vmware>=3.10.0 # Apache-2.0
-os-brick>=5.2.0 # Apache-2.0
+os-brick>=6.0.0 # Apache-2.0
os-win>=5.5.0 # Apache-2.0
tooz>=2.7.1 # Apache-2.0
google-api-python-client>=1.11.0 # Apache-2.0
diff --git a/test-requirements.txt b/test-requirements.txt
index fc599da7c..030196a60 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -18,5 +18,5 @@ SQLAlchemy-Utils>=0.37.8 # BSD License
testtools>=2.4.0 # MIT
doc8>=0.8.1 # Apache-2.0
-mypy>=0.942 # MIT
+mypy>=0.960 # MIT
moto>=2.2.5 # Apache-2.0
diff --git a/tools/config/cinder-config-generator.conf b/tools/config/cinder-config-generator.conf
index 0ebd6b1c5..44267420a 100644
--- a/tools/config/cinder-config-generator.conf
+++ b/tools/config/cinder-config-generator.conf
@@ -19,3 +19,4 @@ namespace = oslo.service.service
namespace = oslo.service.sslutils
namespace = oslo.service.wsgi
namespace = oslo.versionedobjects
+namespace = os_brick
diff --git a/tox.ini b/tox.ini
index 966717218..4000908ba 100644
--- a/tox.ini
+++ b/tox.ini
@@ -20,6 +20,17 @@ setenv =
PYTHONDONTWRITEBYTECODE=1
# TODO(stephenfin): Remove once we bump our upper-constraint to SQLAlchemy 2.0
SQLALCHEMY_WARN_20=1
+# NOTE: Do not move the constraints from the install_command into deps, as that
+# may result in tox using unconstrained/untested dependencies.
+# We use "usedevelop = True" for tox jobs (except bindep), so tox does 2
+# install calls, one for the deps and another for the cinder source code
+# as editable (pip -e).
+# Without the constraints in the install_command only the first
+# installation will honor the upper constraints, and the second install
+# for cinder itself will not know about the constraints which can result
+# in installing versions we don't want.
+# With constraints in the install_command tox will always honor our
+# constraints.
install_command =
python -m pip install -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} {opts} {packages}
deps =
@@ -31,7 +42,7 @@ deps =
# the concurrency=<n> option.
# call ie: 'tox -epy27 -- --concurrency=4'
commands =
- stestr run {posargs}
+ stestr run --random {posargs}
stestr slowest
allowlist_externals =