-rw-r--r--  .pylintrc | 2
-rw-r--r--  .zuul.yaml | 2
-rw-r--r--  api-ref/source/v2/parameters.yaml | 9
-rw-r--r--  api-ref/source/v2/volumes-v2-volumes.inc | 1
-rw-r--r--  api-ref/source/v3/parameters.yaml | 9
-rw-r--r--  api-ref/source/v3/volumes-v3-volumes.inc | 1
-rw-r--r--  cinder/api/schemas/volumes.py | 6
-rw-r--r--  cinder/api/v2/volumes.py | 9
-rw-r--r--  cinder/api/v3/volumes.py | 16
-rw-r--r--  cinder/backup/api.py | 4
-rw-r--r--  cinder/backup/chunkeddriver.py | 38
-rw-r--r--  cinder/backup/driver.py | 2
-rw-r--r--  cinder/backup/drivers/ceph.py | 39
-rw-r--r--  cinder/backup/manager.py | 24
-rw-r--r--  cinder/backup/rpcapi.py | 14
-rw-r--r--  cinder/db/migrations/versions/daa98075b90d_add_resource_indexes.py | 60
-rw-r--r--  cinder/db/sqlalchemy/models.py | 26
-rw-r--r--  cinder/opts.py | 18
-rw-r--r--  cinder/privsep/targets/nvmet.py | 18
-rw-r--r--  cinder/scheduler/filter_scheduler.py | 13
-rw-r--r--  cinder/tests/unit/api/contrib/test_backups.py | 2
-rw-r--r--  cinder/tests/unit/api/v2/test_snapshots.py | 2
-rw-r--r--  cinder/tests/unit/api/v2/test_volumes.py | 36
-rw-r--r--  cinder/tests/unit/api/v3/test_volumes.py | 23
-rw-r--r--  cinder/tests/unit/attachments/test_attachments_manager.py | 18
-rw-r--r--  cinder/tests/unit/backup/drivers/test_backup_ceph.py | 31
-rw-r--r--  cinder/tests/unit/backup/drivers/test_backup_google.py | 7
-rw-r--r--  cinder/tests/unit/backup/drivers/test_backup_nfs.py | 13
-rw-r--r--  cinder/tests/unit/backup/drivers/test_backup_posix.py | 150
-rw-r--r--  cinder/tests/unit/backup/drivers/test_backup_s3.py | 11
-rw-r--r--  cinder/tests/unit/backup/drivers/test_backup_swift.py | 9
-rw-r--r--  cinder/tests/unit/backup/fake_service.py | 2
-rw-r--r--  cinder/tests/unit/backup/test_backup.py | 41
-rw-r--r--  cinder/tests/unit/backup/test_backup_messages.py | 10
-rw-r--r--  cinder/tests/unit/backup/test_chunkeddriver.py | 2
-rw-r--r--  cinder/tests/unit/backup/test_rpcapi.py | 16
-rw-r--r--  cinder/tests/unit/db/test_migrations.py | 14
-rw-r--r--  cinder/tests/unit/privsep/targets/fake_nvmet_lib.py | 3
-rw-r--r--  cinder/tests/unit/privsep/targets/test_nvmet.py | 45
-rw-r--r--  cinder/tests/unit/targets/test_base_iscsi_driver.py | 14
-rw-r--r--  cinder/tests/unit/targets/test_nvmeof_driver.py | 140
-rw-r--r--  cinder/tests/unit/targets/test_nvmet_driver.py | 645
-rw-r--r--  cinder/tests/unit/targets/test_spdknvmf.py | 1
-rw-r--r--  cinder/tests/unit/test_utils.py | 8
-rw-r--r--  cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_masking.py | 9
-rw-r--r--  cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_rest.py | 30
-rw-r--r--  cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_mirror_fc.py | 1561
-rw-r--r--  cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_fc.py | 359
-rw-r--r--  cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_iscsi.py | 174
-rw-r--r--  cinder/tests/unit/volume/drivers/hpe/test_hpe3par.py | 5
-rw-r--r--  cinder/tests/unit/volume/drivers/hpe/xp/test_hpe_xp_rest_fc.py | 13
-rw-r--r--  cinder/tests/unit/volume/drivers/hpe/xp/test_hpe_xp_rest_iscsi.py | 7
-rw-r--r--  cinder/tests/unit/volume/drivers/ibm/test_storwize_svc.py | 75
-rw-r--r--  cinder/tests/unit/volume/drivers/nec/v/test_internal_nec_rest_fc.py | 11
-rw-r--r--  cinder/tests/unit/volume/drivers/nec/v/test_internal_nec_rest_iscsi.py | 5
-rw-r--r--  cinder/tests/unit/volume/drivers/nec/v/test_nec_rest_fc.py | 8
-rw-r--r--  cinder/tests/unit/volume/drivers/nec/v/test_nec_rest_iscsi.py | 8
-rw-r--r--  cinder/tests/unit/volume/drivers/netapp/dataontap/test_nfs_base.py | 42
-rw-r--r--  cinder/tests/unit/volume/drivers/netapp/dataontap/test_nfs_cmode.py | 2
-rw-r--r--  cinder/tests/unit/volume/drivers/test_lvm_driver.py | 87
-rw-r--r--  cinder/tests/unit/volume/drivers/test_rbd.py | 45
-rw-r--r--  cinder/tests/unit/volume/drivers/test_spdk.py | 5
-rw-r--r--  cinder/tests/unit/volume/test_volume.py | 23
-rw-r--r--  cinder/tests/unit/volume/test_volume_usage_audit.py | 21
-rw-r--r--  cinder/tests/unit/windows/test_iscsi.py | 2
-rw-r--r--  cinder/volume/api.py | 2
-rw-r--r--  cinder/volume/driver.py | 25
-rw-r--r--  cinder/volume/drivers/dell_emc/powerflex/driver.py | 6
-rw-r--r--  cinder/volume/drivers/dell_emc/powermax/rest.py | 25
-rw-r--r--  cinder/volume/drivers/hitachi/hbsd_common.py | 406
-rw-r--r--  cinder/volume/drivers/hitachi/hbsd_fc.py | 40
-rw-r--r--  cinder/volume/drivers/hitachi/hbsd_iscsi.py | 40
-rw-r--r--  cinder/volume/drivers/hitachi/hbsd_replication.py | 989
-rw-r--r--  cinder/volume/drivers/hitachi/hbsd_rest.py | 453
-rw-r--r--  cinder/volume/drivers/hitachi/hbsd_rest_api.py | 313
-rw-r--r--  cinder/volume/drivers/hitachi/hbsd_rest_fc.py | 83
-rw-r--r--  cinder/volume/drivers/hitachi/hbsd_rest_iscsi.py | 58
-rw-r--r--  cinder/volume/drivers/hitachi/hbsd_utils.py | 255
-rw-r--r--  cinder/volume/drivers/hpe/hpe_3par_common.py | 2
-rw-r--r--  cinder/volume/drivers/hpe/xp/hpe_xp_rest.py | 9
-rw-r--r--  cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py | 55
-rw-r--r--  cinder/volume/drivers/lvm.py | 41
-rw-r--r--  cinder/volume/drivers/nec/v/nec_v_rest.py | 7
-rw-r--r--  cinder/volume/drivers/netapp/dataontap/nfs_base.py | 71
-rw-r--r--  cinder/volume/drivers/netapp/dataontap/nfs_cmode.py | 1
-rw-r--r--  cinder/volume/drivers/netapp/options.py | 5
-rw-r--r--  cinder/volume/drivers/pure.py | 6
-rw-r--r--  cinder/volume/drivers/rbd.py | 7
-rw-r--r--  cinder/volume/drivers/remotefs.py | 4
-rw-r--r--  cinder/volume/drivers/synology/synology_common.py | 4
-rw-r--r--  cinder/volume/drivers/synology/synology_iscsi.py | 2
-rw-r--r--  cinder/volume/drivers/windows/iscsi.py | 2
-rw-r--r--  cinder/volume/drivers/yadro/tatlin_client.py | 2
-rw-r--r--  cinder/volume/flows/api/create_volume.py | 12
-rw-r--r--  cinder/volume/flows/manager/create_volume.py | 6
-rw-r--r--  cinder/volume/targets/driver.py | 25
-rw-r--r--  cinder/volume/targets/iscsi.py | 11
-rw-r--r--  cinder/volume/targets/nvmeof.py | 131
-rw-r--r--  cinder/volume/targets/nvmet.py | 345
-rw-r--r--  cinder/volume/targets/spdknvmf.py | 7
-rw-r--r--  cinder/volume/volume_utils.py | 5
-rw-r--r--  doc/source/admin/troubleshoot.rst | 1
-rw-r--r--  doc/source/admin/ts-db-cpu-spikes.rst | 37
-rw-r--r--  doc/source/cli/cinder-manage.rst | 23
-rw-r--r--  doc/source/configuration/block-storage/drivers/dell-emc-unity-driver.rst | 4
-rw-r--r--  doc/source/configuration/block-storage/drivers/hitachi-vsp-driver.rst | 8
-rw-r--r--  doc/source/configuration/block-storage/drivers/hpe-xp-driver.rst | 8
-rw-r--r--  doc/source/configuration/block-storage/drivers/infinidat-volume-driver.rst | 22
-rw-r--r--  doc/source/configuration/block-storage/drivers/linstor-driver.rst | 10
-rw-r--r--  doc/source/configuration/block-storage/drivers/nec-storage-v-series-driver.rst | 8
-rw-r--r--  doc/source/configuration/block-storage/service-token.rst | 57
-rw-r--r--  doc/source/contributor/releasecycle.rst | 4
-rw-r--r--  doc/source/drivers-all-about.rst | 27
-rw-r--r--  doc/source/install/cinder-storage-install-ubuntu.rst | 14
-rw-r--r--  driver-requirements.txt | 3
-rw-r--r--  releasenotes/notes/backup-sparse-f396b35bfe17332e.yaml | 7
-rw-r--r--  releasenotes/notes/bug-1981420-dell-powermax-fix-for-force-flag-9320910dfbf998d2.yaml | 8
-rw-r--r--  releasenotes/notes/bug-2008017-netapp-fix-native-threads-04d8f58f4c29b03d.yaml | 6
-rw-r--r--  releasenotes/notes/bug-2008931-hpe-keyerror-on-migration-71d31e6c0a8ab0d9.yaml | 7
-rw-r--r--  releasenotes/notes/db-resource-indexes-8010c9a881277503.yaml | 24
-rw-r--r--  releasenotes/notes/dell-powerflex-bugfix-1998136-self-signed-certificates-62e3cb444ab7ff2b.yaml | 10
-rw-r--r--  releasenotes/notes/handle-external-events-in-extend-6ae53b822baf0004.yaml | 2
-rw-r--r--  releasenotes/notes/hitachi-vsp-add-gad-volume-514edf8ebeb2e983.yaml | 11
-rw-r--r--  releasenotes/notes/hitachi-vsp-add-hostgroup-name-format-option-4c8e4a5ddd69b9bd.yaml | 59
-rw-r--r--  releasenotes/notes/hitachi-vsp-add-multi-pool-4c4589b93399e641.yaml | 13
-rw-r--r--  releasenotes/notes/hitachi-vsp-fix-keyerr-when-backend-down-a5a35b15dc8f1132.yaml | 6
-rw-r--r--  releasenotes/notes/hitachi-vsp-support-dedup-comp-4e27d95b34681f66.yaml | 7
-rw-r--r--  releasenotes/notes/ibm-svf-delete-volume-flag-support-for-deletevolumegroup-4224db1ca798a3bf.yaml | 6
-rw-r--r--  releasenotes/notes/lvm-nvmet-new-conn_props-25320e34d6ca6ac7.yaml | 6
-rw-r--r--  releasenotes/notes/nvmeof-premature-terminate-conn-63e3cc1fd1832874.yaml | 7
-rw-r--r--  releasenotes/notes/nvmet-multipath-d35f55286f263e72.yaml | 16
-rw-r--r--  releasenotes/notes/nvmet-shared-targets-20ed7279ef29f002.yaml | 6
-rw-r--r--  releasenotes/notes/rbd-total_capacity-60f10b45e3a8c8ea.yaml | 8
-rw-r--r--  releasenotes/notes/remove-multiattach-request-param-4444e02533f919da.yaml | 20
-rw-r--r--  releasenotes/source/2023.1.rst | 6
-rw-r--r--  releasenotes/source/index.rst | 1
-rw-r--r--  setup.cfg | 7
-rw-r--r--  tox.ini | 2
138 files changed, 6808 insertions, 1164 deletions
diff --git a/.pylintrc b/.pylintrc
index 502fea6d4..db9ade98b 100644
--- a/.pylintrc
+++ b/.pylintrc
@@ -224,7 +224,7 @@ additional-builtins=_
[TYPECHECK]
# List of module names for which member attributes should not be checked
ignored-modules=six.moves,_MovedItems,alembic.context,alembic.op,
- alembic.config,pyxcli,storpool,oslo_privsep.capabilities
+ alembic.config,pyxcli,storpool,oslo_privsep.capabilities,nvmet
signature-mutators=unittest.mock.patch,unittest.mock.patch.object,sqlalchemy.util._preloaded.dependencies
# This is for cinder.objects.*, and requests.packages.*, but due to
diff --git a/.zuul.yaml b/.zuul.yaml
index 20dcd98fd..52bdf40a7 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -167,6 +167,8 @@
configure_swap_size: 4096
zuul_additional_subunit_dirs:
- "{{ ansible_user_dir }}/{{ zuul.projects['opendev.org/openstack/cinderlib'].src_dir }}"
+ devstack_localrc:
+ CEPH_MIN_CLIENT_VERSION: "mimic"
devstack_local_conf:
test-config:
$TEMPEST_CONFIG:
diff --git a/api-ref/source/v2/parameters.yaml b/api-ref/source/v2/parameters.yaml
index 1b7fde4db..be357736f 100644
--- a/api-ref/source/v2/parameters.yaml
+++ b/api-ref/source/v2/parameters.yaml
@@ -1081,15 +1081,6 @@ mountpoint:
in: body
required: true
type: string
-multiattach_req:
- description: |
- To enable this volume to attach to more than one
- server, set this value to ``true``. Default is ``false``.
- Note that support for multiattach volumes depends on the volume
- type being used.
- in: body
- required: false
- type: boolean
multiattach_resp:
description: |
If true, this volume can attach to more than one
diff --git a/api-ref/source/v2/volumes-v2-volumes.inc b/api-ref/source/v2/volumes-v2-volumes.inc
index acd507943..07be3422e 100644
--- a/api-ref/source/v2/volumes-v2-volumes.inc
+++ b/api-ref/source/v2/volumes-v2-volumes.inc
@@ -183,7 +183,6 @@ Request
- size: size
- description: description_9
- imageRef: imageRef
- - multiattach: multiattach_req
- availability_zone: availability_zone
- source_volid: source_volid
- name: volume_name_optional
diff --git a/api-ref/source/v3/parameters.yaml b/api-ref/source/v3/parameters.yaml
index b3bffdf39..45271d7a9 100644
--- a/api-ref/source/v3/parameters.yaml
+++ b/api-ref/source/v3/parameters.yaml
@@ -2126,15 +2126,6 @@ multiattach:
in: body
required: false
type: string
-multiattach_req:
- description: |
- To enable this volume to attach to more than one
- server, set this value to ``true``. Default is ``false``.
- Note that support for multiattach volumes depends on the volume
- type being used. See :ref:`valid boolean values <valid-boolean-values>`
- in: body
- required: false
- type: boolean
multiattach_resp:
description: |
If true, this volume can attach to more than one
diff --git a/api-ref/source/v3/volumes-v3-volumes.inc b/api-ref/source/v3/volumes-v3-volumes.inc
index 41a99d145..99d116840 100644
--- a/api-ref/source/v3/volumes-v3-volumes.inc
+++ b/api-ref/source/v3/volumes-v3-volumes.inc
@@ -218,7 +218,6 @@ Request
- availability_zone: availability_zone
- source_volid: source_volid
- description: description_vol
- - multiattach: multiattach_req
- snapshot_id: snapshot_id
- backup_id: backup_id
- name: volume_name_optional
diff --git a/cinder/api/schemas/volumes.py b/cinder/api/schemas/volumes.py
index f3f689770..08678cd56 100644
--- a/cinder/api/schemas/volumes.py
+++ b/cinder/api/schemas/volumes.py
@@ -49,6 +49,12 @@ create = {
'consistencygroup_id': parameter_types.optional_uuid,
'size': parameter_types.volume_size_allows_null,
'availability_zone': parameter_types.availability_zone,
+ # The functionality to create a multiattach volume by the
+ # multiattach parameter is removed.
+ # We accept the parameter but raise a BadRequest pointing to the
+ # "new way" of creating multiattach volumes, i.e. with a
+ # multiattach volume type, so users of the "old way" can move
+ # to the new functionality easily.
'multiattach': parameter_types.optional_boolean,
'image_id': {'type': ['string', 'null'], 'minLength': 0,
'maxLength': 255},
diff --git a/cinder/api/v2/volumes.py b/cinder/api/v2/volumes.py
index ddff0471e..67d2d29b5 100644
--- a/cinder/api/v2/volumes.py
+++ b/cinder/api/v2/volumes.py
@@ -19,7 +19,6 @@ from http import HTTPStatus
from oslo_config import cfg
from oslo_log import log as logging
-from oslo_log import versionutils
from oslo_utils import uuidutils
import webob
from webob import exc
@@ -261,14 +260,6 @@ class VolumeController(wsgi.Controller):
kwargs['availability_zone'] = volume.get('availability_zone', None)
kwargs['scheduler_hints'] = volume.get('scheduler_hints', None)
- kwargs['multiattach'] = utils.get_bool_param('multiattach', volume)
-
- if kwargs.get('multiattach', False):
- msg = ("The option 'multiattach' "
- "is deprecated and will be removed in a future "
- "release. The default behavior going forward will "
- "be to specify multiattach enabled volume types.")
- versionutils.report_deprecated_feature(LOG, msg)
try:
new_volume = self.volume_api.create(
diff --git a/cinder/api/v3/volumes.py b/cinder/api/v3/volumes.py
index ed3fd5746..48982aa4e 100644
--- a/cinder/api/v3/volumes.py
+++ b/cinder/api/v3/volumes.py
@@ -15,7 +15,6 @@
from http import HTTPStatus
from oslo_log import log as logging
-from oslo_log import versionutils
from oslo_utils import timeutils
import webob
from webob import exc
@@ -387,15 +386,14 @@ class VolumeController(volumes_v2.VolumeController):
kwargs['availability_zone'] = volume.get('availability_zone', None)
kwargs['scheduler_hints'] = volume.get('scheduler_hints', None)
- multiattach = volume.get('multiattach', False)
- kwargs['multiattach'] = multiattach
-
+ multiattach = utils.get_bool_param('multiattach', volume)
if multiattach:
- msg = ("The option 'multiattach' "
- "is deprecated and will be removed in a future "
- "release. The default behavior going forward will "
- "be to specify multiattach enabled volume types.")
- versionutils.report_deprecated_feature(LOG, msg)
+ msg = _("multiattach parameter has been removed. The default "
+ "behavior is to use multiattach enabled volume types. "
+ "Contact your administrator to create a multiattach "
+ "enabled volume type and use it to create multiattach "
+ "volumes.")
+ raise exc.HTTPBadRequest(explanation=msg)
try:
new_volume = self.volume_api.create(
context, size, volume.get('display_name'),
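The removed request parameter is superseded by multiattach-capable volume types. A hedged sketch (not part of this change) of the recommended workflow with python-cinderclient, using placeholder credentials and hypothetical type/volume names:

from cinderclient import client
from keystoneauth1.identity import v3
from keystoneauth1 import session

# Hypothetical credentials and endpoint; substitute values for your cloud.
auth = v3.Password(auth_url='http://controller:5000/v3',
                   username='demo', password='secret', project_name='demo',
                   user_domain_name='Default', project_domain_name='Default')
sess = session.Session(auth=auth)
cinder = client.Client('3', session=sess)

# A volume type whose extra spec marks it as multiattach capable; the
# extra spec value matches what the scheduler filters on.
vtype = cinder.volume_types.create('multiattach-type')
vtype.set_keys({'multiattach': '<is> True'})

# Volumes created with this type may attach to more than one server.
volume = cinder.volumes.create(size=1, volume_type='multiattach-type')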
diff --git a/cinder/backup/api.py b/cinder/backup/api.py
index fb3155cdf..c53ac42c5 100644
--- a/cinder/backup/api.py
+++ b/cinder/backup/api.py
@@ -404,6 +404,7 @@ class API(base.Base):
"backup %(backup_id)s.",
{'size': size, 'backup_id': backup_id})
volume = self.volume_api.create(context, size, name, description)
+ volume_is_new = True
volume_id = volume['id']
while True:
@@ -419,6 +420,7 @@ class API(base.Base):
raise exception.InvalidVolume(reason=msg)
else:
volume = self.volume_api.get(context, volume_id)
+ volume_is_new = False
if volume['status'] != "available":
msg = _('Volume to be restored to must be available')
@@ -447,7 +449,7 @@ class API(base.Base):
'restoring-backup'})
self.backup_rpcapi.restore_backup(context, backup.host, backup,
- volume_id)
+ volume_id, volume_is_new)
d = {'backup_id': backup_id,
'volume_id': volume_id,
diff --git a/cinder/backup/chunkeddriver.py b/cinder/backup/chunkeddriver.py
index 73e4f2eea..6d28fc399 100644
--- a/cinder/backup/chunkeddriver.py
+++ b/cinder/backup/chunkeddriver.py
@@ -66,6 +66,26 @@ CONF = cfg.CONF
CONF.register_opts(backup_opts)
+def _write_nonzero(volume_file, volume_offset, content):
+ """Write non-zero parts of `content` into `volume_file`."""
+ chunk_length = 1024 * 1024
+ for chunk_offset in range(0, len(content), chunk_length):
+ chunk_end = chunk_offset + chunk_length
+ chunk = content[chunk_offset:chunk_end]
+ # The len(chunk) may be smaller than chunk_length. It's okay.
+ if not volume_utils.is_all_zero(chunk):
+ volume_file.seek(volume_offset + chunk_offset)
+ volume_file.write(chunk)
+
+
+def _write_volume(volume_is_new, volume_file, volume_offset, content):
+ if volume_is_new:
+ _write_nonzero(volume_file, volume_offset, content)
+ else:
+ volume_file.seek(volume_offset)
+ volume_file.write(content)
+
+
# Object writer and reader returned by inheriting classes must not have any
# logging calls, as well as the compression libraries, as eventlet has a bug
# (https://github.com/eventlet/eventlet/issues/432) that would result in
@@ -666,7 +686,7 @@ class ChunkedBackupDriver(driver.BackupDriver, metaclass=abc.ABCMeta):
self._finalize_backup(backup, container, object_meta, object_sha256)
def _restore_v1(self, backup, volume_id, metadata, volume_file,
- requested_backup):
+ volume_is_new, requested_backup):
"""Restore a v1 volume backup.
Raises BackupRestoreCancel on any requested_backup status change, we
@@ -717,16 +737,17 @@ class ChunkedBackupDriver(driver.BackupDriver, metaclass=abc.ABCMeta):
body = reader.read()
compression_algorithm = metadata_object[object_name]['compression']
decompressor = self._get_compressor(compression_algorithm)
- volume_file.seek(obj['offset'])
if decompressor is not None:
LOG.debug('decompressing data using %s algorithm',
compression_algorithm)
decompressed = decompressor.decompress(body)
body = None # Allow Python to free it
- volume_file.write(decompressed)
+ _write_volume(volume_is_new,
+ volume_file, obj['offset'], decompressed)
decompressed = None # Allow Python to free it
else:
- volume_file.write(body)
+ _write_volume(volume_is_new,
+ volume_file, obj['offset'], body)
body = None # Allow Python to free it
# force flush every write to avoid long blocking write on close
@@ -748,7 +769,7 @@ class ChunkedBackupDriver(driver.BackupDriver, metaclass=abc.ABCMeta):
LOG.debug('v1 volume backup restore of %s finished.',
backup_id)
- def restore(self, backup, volume_id, volume_file):
+ def restore(self, backup, volume_id, volume_file, volume_is_new):
"""Restore the given volume backup from backup repository.
Raises BackupRestoreCancel on any backup status change.
@@ -757,13 +778,15 @@ class ChunkedBackupDriver(driver.BackupDriver, metaclass=abc.ABCMeta):
container = backup['container']
object_prefix = backup['service_metadata']
LOG.debug('starting restore of backup %(object_prefix)s '
- 'container: %(container)s, to volume %(volume_id)s, '
+ 'container: %(container)s, '
+ 'to %(new)s volume %(volume_id)s, '
'backup: %(backup_id)s.',
{
'object_prefix': object_prefix,
'container': container,
'volume_id': volume_id,
'backup_id': backup_id,
+ 'new': 'new' if volume_is_new else 'existing',
})
metadata = self._read_metadata(backup)
metadata_version = metadata['version']
@@ -794,7 +817,8 @@ class ChunkedBackupDriver(driver.BackupDriver, metaclass=abc.ABCMeta):
backup1 = backup_list[index]
index = index - 1
metadata = self._read_metadata(backup1)
- restore_func(backup1, volume_id, metadata, volume_file, backup)
+ restore_func(backup1, volume_id, metadata, volume_file,
+ volume_is_new, backup)
volume_meta = metadata.get('volume_meta', None)
try:
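The helpers added above keep a freshly created destination volume sparse by skipping chunks that are entirely zero. A minimal standalone sketch of the same idea, assuming a naive zero test in place of volume_utils.is_all_zero():

CHUNK = 1024 * 1024


def is_all_zero(data: bytes) -> bool:
    # Comparing against a zero-filled buffer of the same length is a simple
    # way to detect a hole; the real volume_utils helper may differ.
    return data == bytes(len(data))


def write_sparse(volume_file, offset: int, content: bytes) -> None:
    """Write only the non-zero 1 MiB pieces, leaving holes elsewhere."""
    for start in range(0, len(content), CHUNK):
        piece = content[start:start + CHUNK]
        if not is_all_zero(piece):
            volume_file.seek(offset + start)
            volume_file.write(piece)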
diff --git a/cinder/backup/driver.py b/cinder/backup/driver.py
index f4dc24d69..32bbe0bf6 100644
--- a/cinder/backup/driver.py
+++ b/cinder/backup/driver.py
@@ -374,7 +374,7 @@ class BackupDriver(base.Base, metaclass=abc.ABCMeta):
return
@abc.abstractmethod
- def restore(self, backup, volume_id, volume_file):
+ def restore(self, backup, volume_id, volume_file, volume_is_new):
"""Restore a saved backup.
Some I/O operations may block greenthreads, so in order to prevent
diff --git a/cinder/backup/drivers/ceph.py b/cinder/backup/drivers/ceph.py
index d21e458bf..1cb5727c2 100644
--- a/cinder/backup/drivers/ceph.py
+++ b/cinder/backup/drivers/ceph.py
@@ -64,6 +64,7 @@ from cinder import interface
from cinder import objects
from cinder import utils
import cinder.volume.drivers.rbd as rbd_driver
+from cinder.volume import volume_utils
try:
import rados
@@ -350,7 +351,6 @@ class CephBackupDriver(driver.BackupDriver):
Incremental backups use a new base name so we support old and new style
format.
"""
- # Ensure no unicode
if not backup:
return "volume-%s.backup.base" % volume_id
@@ -414,7 +414,8 @@ class CephBackupDriver(driver.BackupDriver):
src_name: str,
dest: linuxrbd.RBDVolumeIOWrapper,
dest_name: str,
- length: int) -> None:
+ length: int,
+ discard_zeros: bool = False) -> None:
"""Transfer data between files (Python IO objects)."""
LOG.debug("Transferring data between '%(src)s' and '%(dest)s'",
{'src': src_name, 'dest': dest_name})
@@ -435,13 +436,17 @@ class CephBackupDriver(driver.BackupDriver):
return
- dest.write(data)
- dest.flush()
+ if (discard_zeros and volume_utils.is_all_zero(data)):
+ action = "Discarded"
+ else:
+ dest.write(data)
+ dest.flush()
+ action = "Transferred"
delta = (time.time() - before)
rate = (self.chunk_size / delta) / 1024
- LOG.debug("Transferred chunk %(chunk)s of %(chunks)s "
- "(%(rate)dK/s)",
- {'chunk': chunk + 1,
+ LOG.debug("%(action)s chunk %(chunk)s of %(chunks)s (%(rate)dK/s)",
+ {'action': action,
+ 'chunk': chunk + 1,
'chunks': chunks,
'rate': rate})
@@ -1076,6 +1081,7 @@ class CephBackupDriver(driver.BackupDriver):
dest_file,
dest_name: str,
length: int,
+ volume_is_new: bool,
src_snap=None) -> None:
"""Restore volume using full copy i.e. all extents.
@@ -1104,7 +1110,8 @@ class CephBackupDriver(driver.BackupDriver):
self._ceph_backup_conf)
rbd_fd = linuxrbd.RBDVolumeIOWrapper(rbd_meta)
self._transfer_data(eventlet.tpool.Proxy(rbd_fd), backup_name,
- dest_file, dest_name, length)
+ dest_file, dest_name, length,
+ discard_zeros=volume_is_new)
finally:
src_rbd.close()
@@ -1280,7 +1287,8 @@ class CephBackupDriver(driver.BackupDriver):
def _restore_volume(self,
backup: 'objects.Backup',
volume: 'objects.Volume',
- volume_file: linuxrbd.RBDVolumeIOWrapper) -> None:
+ volume_file: linuxrbd.RBDVolumeIOWrapper,
+ volume_is_new: bool) -> None:
"""Restore volume from backup using diff transfer if possible.
Attempts a differential restore and reverts to full copy if diff fails.
@@ -1314,7 +1322,7 @@ class CephBackupDriver(driver.BackupDriver):
# Otherwise full copy
LOG.debug("Running full restore.")
self._full_restore(backup, volume_file, volume.name,
- length, src_snap=restore_point)
+ length, volume_is_new, src_snap=restore_point)
def _restore_metadata(self,
backup: 'objects.Backup',
@@ -1341,18 +1349,21 @@ class CephBackupDriver(driver.BackupDriver):
def restore(self,
backup: 'objects.Backup',
volume_id: str,
- volume_file: linuxrbd.RBDVolumeIOWrapper) -> None:
+ volume_file: linuxrbd.RBDVolumeIOWrapper,
+ volume_is_new: bool) -> None:
"""Restore volume from backup in Ceph object store.
If volume metadata is available this will also be restored.
"""
target_volume = self.db.volume_get(self.context, volume_id)
LOG.debug('Starting restore from Ceph backup=%(src)s to '
- 'volume=%(dest)s',
- {'src': backup.id, 'dest': target_volume.name})
+ 'volume=%(dest)s new=%(new)s',
+ {'src': backup.id, 'dest': target_volume.name,
+ 'new': volume_is_new})
try:
- self._restore_volume(backup, target_volume, volume_file)
+ self._restore_volume(backup, target_volume, volume_file,
+ volume_is_new)
# Be tolerant of IO implementations that do not support fileno()
try:
diff --git a/cinder/backup/manager.py b/cinder/backup/manager.py
index a978b8d7a..0ac16102e 100644
--- a/cinder/backup/manager.py
+++ b/cinder/backup/manager.py
@@ -607,8 +607,15 @@ class BackupManager(manager.SchedulerDependentManager):
return False
@utils.limit_operations
- def restore_backup(self, context, backup, volume_id):
- """Restore volume backups from configured backup service."""
+ def restore_backup(self, context, backup, volume_id, volume_is_new):
+ """Restore volume backups from configured backup service.
+
+ :param context: RequestContext for the restore operation
+ :param backup: Backup that we're restoring
+ :param volume_id: The ID of the volume into which we're restoring
+ :param volume_is_new: The volume does not have stale data, so
+ sparse backups can be restored as such.
+ """
context.message_resource_id = backup.id
context.message_resource_type = message_field.Resource.VOLUME_BACKUP
context.message_action = message_field.Action.BACKUP_RESTORE
@@ -683,7 +690,7 @@ class BackupManager(manager.SchedulerDependentManager):
canceled = False
try:
- self._run_restore(context, backup, volume)
+ self._run_restore(context, backup, volume, volume_is_new)
except exception.BackupRestoreCancel:
canceled = True
except Exception:
@@ -725,7 +732,7 @@ class BackupManager(manager.SchedulerDependentManager):
'volume_id': volume_id})
self._notify_about_backup_usage(context, backup, "restore.end")
- def _run_restore(self, context, backup, volume):
+ def _run_restore(self, context, backup, volume, volume_is_new):
message_created = False
orig_key_id = volume.encryption_key_id
backup_service = self.service(context)
@@ -754,16 +761,19 @@ class BackupManager(manager.SchedulerDependentManager):
if secure_enabled:
with open(device_path, open_mode) as device_file:
backup_service.restore(backup, volume.id,
- tpool.Proxy(device_file))
+ tpool.Proxy(device_file),
+ volume_is_new)
else:
with utils.temporary_chown(device_path):
with open(device_path, open_mode) as device_file:
backup_service.restore(backup, volume.id,
- tpool.Proxy(device_file))
+ tpool.Proxy(device_file),
+ volume_is_new)
# device_path is already file-like so no need to open it
else:
backup_service.restore(backup, volume.id,
- tpool.Proxy(device_path))
+ tpool.Proxy(device_path),
+ volume_is_new)
except exception.BackupRestoreCancel:
raise
except Exception:
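The restore path wraps the device file in eventlet's tpool.Proxy so blocking reads and writes run in native threads rather than stalling greenthreads. An illustrative sketch of that pattern, with hypothetical file paths:

from eventlet import tpool


def copy_device(src_path, dst_path, chunk=1024 * 1024):
    # File objects proxied through tpool run their blocking read()/write()
    # calls in native threads, so other greenthreads keep running.
    with open(src_path, 'rb') as src, open(dst_path, 'wb') as dst:
        proxied_src = tpool.Proxy(src)
        proxied_dst = tpool.Proxy(dst)
        while True:
            data = proxied_src.read(chunk)
            if not data:
                break
            proxied_dst.write(data)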
diff --git a/cinder/backup/rpcapi.py b/cinder/backup/rpcapi.py
index 06a310458..cee45c487 100644
--- a/cinder/backup/rpcapi.py
+++ b/cinder/backup/rpcapi.py
@@ -48,9 +48,10 @@ class BackupAPI(rpc.RPCAPI):
2.1 - Adds set_log_levels and get_log_levels
2.2 - Adds publish_service_capabilities
2.3 - Adds continue_backup call
+ 2.4 - Add the volume_is_new flag to the restore_backup method
"""
- RPC_API_VERSION = '2.3'
+ RPC_API_VERSION = '2.4'
RPC_DEFAULT_VERSION = '2.0'
TOPIC = constants.BACKUP_TOPIC
BINARY = 'cinder-backup'
@@ -66,11 +67,16 @@ class BackupAPI(rpc.RPCAPI):
cctxt.cast(ctxt, 'continue_backup', backup=backup,
backup_device=backup_device)
- def restore_backup(self, ctxt, backup_host, backup, volume_id):
+ def restore_backup(self, ctxt, backup_host, backup, volume_id,
+ volume_is_new):
LOG.debug("restore_backup in rpcapi backup_id %s", backup.id)
cctxt = self._get_cctxt(server=backup_host)
- cctxt.cast(ctxt, 'restore_backup', backup=backup,
- volume_id=volume_id)
+ if self.client.can_send_version('2.4'):
+ cctxt.cast(ctxt, 'restore_backup', backup=backup,
+ volume_id=volume_id, volume_is_new=volume_is_new)
+ else:
+ cctxt.cast(ctxt, 'restore_backup', backup=backup,
+ volume_id=volume_id)
def delete_backup(self, ctxt, backup):
LOG.debug("delete_backup rpcapi backup_id %s", backup.id)
diff --git a/cinder/db/migrations/versions/daa98075b90d_add_resource_indexes.py b/cinder/db/migrations/versions/daa98075b90d_add_resource_indexes.py
new file mode 100644
index 000000000..b1135c6a7
--- /dev/null
+++ b/cinder/db/migrations/versions/daa98075b90d_add_resource_indexes.py
@@ -0,0 +1,60 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Add resource indexes
+
+Revision ID: daa98075b90d
+Revises: 9c74c1c6971f
+Create Date: 2021-11-26 10:26:41.883072
+"""
+
+from alembic import op
+from oslo_db.sqlalchemy import enginefacade
+from oslo_db.sqlalchemy import utils
+from oslo_log import log as logging
+
+
+LOG = logging.getLogger(__name__)
+
+
+# revision identifiers, used by Alembic.
+revision = 'daa98075b90d'
+down_revision = 'c92a3e68beed'
+branch_labels = None
+depends_on = None
+
+INDEXES = (
+ ('groups', 'groups_deleted_project_id_idx', ('deleted', 'project_id')),
+
+ ('group_snapshots', 'group_snapshots_deleted_project_id_idx',
+ ('deleted', 'project_id')),
+
+ ('volumes', 'volumes_deleted_project_id_idx', ('deleted', 'project_id')),
+ ('volumes', 'volumes_deleted_host_idx', ('deleted', 'host')),
+
+ ('backups', 'backups_deleted_project_id_idx', ('deleted', 'project_id')),
+
+ ('snapshots', 'snapshots_deleted_project_id_idx', ('deleted',
+ 'project_id')),
+)
+
+
+def upgrade():
+ engine = enginefacade.reader.get_engine()
+ is_mysql = engine.dialect.name == 'mysql'
+
+ for table, idx_name, fields in INDEXES:
+ # Skip creation in mysql if it already has the index
+ if is_mysql and utils.index_exists(engine, table, idx_name):
+ LOG.info('Skipping index %s, already exists', idx_name)
+ else:
+ op.create_index(idx_name, table, fields)
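After the migration runs (for example via cinder-manage db sync), the new indexes can be checked with SQLAlchemy's inspector; a hedged sketch, with index names taken from the INDEXES tuple above:

import sqlalchemy as sa


def has_index(engine, table_name, index_name):
    # True when the named index already exists on the table, e.g.
    # has_index(engine, 'volumes', 'volumes_deleted_project_id_idx')
    inspector = sa.inspect(engine)
    return any(ix['name'] == index_name
               for ix in inspector.get_indexes(table_name))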
diff --git a/cinder/db/sqlalchemy/models.py b/cinder/db/sqlalchemy/models.py
index b349ede24..5df343609 100644
--- a/cinder/db/sqlalchemy/models.py
+++ b/cinder/db/sqlalchemy/models.py
@@ -218,6 +218,11 @@ class Group(BASE, CinderBase):
"""Represents a generic volume group."""
__tablename__ = 'groups'
+ __table_args__ = (
+ # Speed up normal listings
+ sa.Index('groups_deleted_project_id_idx', 'deleted', 'project_id'),
+ CinderBase.__table_args__,
+ )
id = sa.Column(sa.String(36), primary_key=True)
@@ -272,6 +277,12 @@ class GroupSnapshot(BASE, CinderBase):
"""Represents a group snapshot."""
__tablename__ = 'group_snapshots'
+ __table_args__ = (
+ # Speed up normal listings
+ sa.Index('group_snapshots_deleted_project_id_idx',
+ 'deleted', 'project_id'),
+ CinderBase.__table_args__,
+ )
id = sa.Column(sa.String(36), primary_key=True)
@@ -304,6 +315,11 @@ class Volume(BASE, CinderBase):
__tablename__ = 'volumes'
__table_args__ = (
sa.Index('volumes_service_uuid_idx', 'deleted', 'service_uuid'),
+ # Speed up normal listings
+ sa.Index('volumes_deleted_project_id_idx', 'deleted', 'project_id'),
+ # Speed up service start, create volume from image when using direct
+ # urls, host REST API, and the cinder-manage update host cmd
+ sa.Index('volumes_deleted_host_idx', 'deleted', 'host'),
CinderBase.__table_args__,
)
@@ -894,6 +910,11 @@ class Snapshot(BASE, CinderBase):
"""Represents a snapshot of volume."""
__tablename__ = 'snapshots'
+ __table_args__ = (
+ # Speed up normal listings
+ sa.Index('snapshots_deleted_project_id_idx', 'deleted', 'project_id'),
+ CinderBase.__table_args__,
+ )
id = sa.Column(sa.String(36), primary_key=True)
# TODO: (Y release) Change nullable to False
@@ -996,6 +1017,11 @@ class Backup(BASE, CinderBase):
"""Represents a backup of a volume to Swift."""
__tablename__ = 'backups'
+ __table_args__ = (
+ # Speed up normal listings
+ sa.Index('backups_deleted_project_id_idx', 'deleted', 'project_id'),
+ CinderBase.__table_args__,
+ )
id = sa.Column(sa.String(36), primary_key=True)
# Backups don't have use_quota field since we don't have temporary backups
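These composite (deleted, project_id) indexes target the common soft-delete-aware listing query. A rough sketch of the shape of that query (simplified; the real DB API adds more filters and pagination, and assumes the soft-delete flag is boolean):

import sqlalchemy as sa

from cinder.db.sqlalchemy.models import Volume


def volumes_for_project(session, project_id):
    # Both filtered columns are covered by volumes_deleted_project_id_idx,
    # letting the database avoid a full table scan on large deployments.
    stmt = (sa.select(Volume)
            .where(Volume.deleted == sa.false())
            .where(Volume.project_id == project_id))
    return session.execute(stmt).scalars().all()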
diff --git a/cinder/opts.py b/cinder/opts.py
index 9ef804512..9549c9e2a 100644
--- a/cinder/opts.py
+++ b/cinder/opts.py
@@ -106,6 +106,8 @@ from cinder.volume.drivers.fusionstorage import dsware as \
cinder_volume_drivers_fusionstorage_dsware
from cinder.volume.drivers.hitachi import hbsd_common as \
cinder_volume_drivers_hitachi_hbsdcommon
+from cinder.volume.drivers.hitachi import hbsd_replication as \
+ cinder_volume_drivers_hitachi_hbsdreplication
from cinder.volume.drivers.hitachi import hbsd_rest as \
cinder_volume_drivers_hitachi_hbsdrest
from cinder.volume.drivers.hitachi import hbsd_rest_fc as \
@@ -283,6 +285,7 @@ def list_opts():
[cinder_volume_api.az_cache_time_opt],
cinder_volume_driver.volume_opts,
cinder_volume_driver.iser_opts,
+ cinder_volume_driver.nvmeof_opts,
cinder_volume_driver.nvmet_opts,
cinder_volume_driver.scst_opts,
cinder_volume_driver.backup_opts,
@@ -290,6 +293,17 @@ def list_opts():
cinder_volume_drivers_datera_dateraiscsi.d_opts,
cinder_volume_drivers_fungible_driver.fungible_opts,
cinder_volume_drivers_fusionstorage_dsware.volume_opts,
+ cinder_volume_drivers_hitachi_hbsdreplication._REP_OPTS,
+ cinder_volume_drivers_hitachi_hbsdreplication.
+ COMMON_MIRROR_OPTS,
+ cinder_volume_drivers_hitachi_hbsdreplication.
+ ISCSI_MIRROR_OPTS,
+ cinder_volume_drivers_hitachi_hbsdreplication.
+ REST_MIRROR_OPTS,
+ cinder_volume_drivers_hitachi_hbsdreplication.
+ REST_MIRROR_API_OPTS,
+ cinder_volume_drivers_hitachi_hbsdreplication.
+ REST_MIRROR_SSL_OPTS,
cinder_volume_drivers_infortrend_raidcmd_cli_commoncli.
infortrend_opts,
cinder_volume_drivers_inspur_as13000_as13000driver.
@@ -329,6 +343,7 @@ def list_opts():
itertools.chain(
cinder_volume_driver.volume_opts,
cinder_volume_driver.iser_opts,
+ cinder_volume_driver.nvmeof_opts,
cinder_volume_driver.nvmet_opts,
cinder_volume_driver.scst_opts,
cinder_volume_driver.image_opts,
@@ -354,7 +369,10 @@ def list_opts():
FJ_ETERNUS_DX_OPT_opts,
cinder_volume_drivers_hitachi_hbsdcommon.COMMON_VOLUME_OPTS,
cinder_volume_drivers_hitachi_hbsdcommon.COMMON_PORT_OPTS,
+ cinder_volume_drivers_hitachi_hbsdcommon.COMMON_PAIR_OPTS,
+ cinder_volume_drivers_hitachi_hbsdcommon.COMMON_NAME_OPTS,
cinder_volume_drivers_hitachi_hbsdrest.REST_VOLUME_OPTS,
+ cinder_volume_drivers_hitachi_hbsdrest.REST_PAIR_OPTS,
cinder_volume_drivers_hitachi_hbsdrestfc.FC_VOLUME_OPTS,
cinder_volume_drivers_hpe_hpe3parcommon.hpe3par_opts,
cinder_volume_drivers_hpe_nimble.nimble_opts,
diff --git a/cinder/privsep/targets/nvmet.py b/cinder/privsep/targets/nvmet.py
index b6a516bf3..186a6a42a 100644
--- a/cinder/privsep/targets/nvmet.py
+++ b/cinder/privsep/targets/nvmet.py
@@ -174,7 +174,6 @@ def privsep_setup(cls_name, *args, **kwargs):
###################
# Classes that don't currently have privsep support
-Namespace = nvmet.Namespace
Host = nvmet.Host
Referral = nvmet.Referral
ANAGroup = nvmet.ANAGroup
@@ -188,6 +187,18 @@ ANAGroup = nvmet.ANAGroup
NotFound = nvmet.nvme.CFSNotFound
+class Namespace(nvmet.Namespace):
+ def __init__(self, subsystem, nsid=None, mode='lookup'):
+ super().__init__(subsystem=subsystem, nsid=nsid, mode=mode)
+
+ @classmethod
+ def setup(cls, subsys, n, err_func=None):
+ privsep_setup(cls.__name__, serialize(subsys), n, err_func)
+
+ def delete(self):
+ do_privsep_call(serialize(self), 'delete')
+
+
class Subsystem(nvmet.Subsystem):
def __init__(self, nqn=None, mode='lookup'):
super().__init__(nqn=nqn, mode=mode)
@@ -199,6 +210,11 @@ class Subsystem(nvmet.Subsystem):
def delete(self):
do_privsep_call(serialize(self), 'delete')
+ @property
+ def namespaces(self):
+ for d in os.listdir(self.path + '/namespaces/'):
+ yield Namespace(self, os.path.basename(d))
+
class Port(nvmet.Port):
def __init__(self, portid, mode='lookup'):
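With the new namespaces property and the privsep-aware Namespace wrapper, callers can enumerate and remove a subsystem's namespaces without touching configfs directly. A hypothetical teardown helper built only on the wrappers above:

from cinder.privsep.targets import nvmet


def cleanup_subsystem(nqn):
    subsys = nvmet.Subsystem(nqn)       # default 'lookup' mode
    for ns in subsys.namespaces:        # privsep-aware Namespace objects
        ns.delete()                     # each delete runs under privsep
    subsys.delete()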
diff --git a/cinder/scheduler/filter_scheduler.py b/cinder/scheduler/filter_scheduler.py
index 32be969f8..db53196e8 100644
--- a/cinder/scheduler/filter_scheduler.py
+++ b/cinder/scheduler/filter_scheduler.py
@@ -335,19 +335,6 @@ class FilterScheduler(driver.Scheduler):
self.populate_filter_properties(request_spec,
filter_properties)
- # If multiattach is enabled on a volume, we need to add
- # multiattach to extra specs, so that the capability
- # filtering is enabled.
- multiattach = request_spec['volume_properties'].get('multiattach',
- False)
- if multiattach and 'multiattach' not in resource_type.get(
- 'extra_specs', {}):
- if 'extra_specs' not in resource_type:
- resource_type['extra_specs'] = {}
-
- resource_type['extra_specs'].update(
- multiattach='<is> True')
-
# Revert volume consumed capacity if it's a rescheduled request
retry = filter_properties.get('retry', {})
if retry.get('backends', []):
diff --git a/cinder/tests/unit/api/contrib/test_backups.py b/cinder/tests/unit/api/contrib/test_backups.py
index 7189cba0c..743742729 100644
--- a/cinder/tests/unit/api/contrib/test_backups.py
+++ b/cinder/tests/unit/api/contrib/test_backups.py
@@ -2006,7 +2006,7 @@ class BackupsAPITestCase(test.TestCase):
self.assertEqual(volume.id, res_dict['restore']['volume_id'])
self.assertEqual(volume_name, res_dict['restore']['volume_name'])
mock_restore_backup.assert_called_once_with(mock.ANY, 'testhost',
- mock.ANY, volume.id)
+ mock.ANY, volume.id, False)
# Manually check if restore_backup was called with appropriate backup.
self.assertEqual(backup.id, mock_restore_backup.call_args[0][2].id)
diff --git a/cinder/tests/unit/api/v2/test_snapshots.py b/cinder/tests/unit/api/v2/test_snapshots.py
index ce58abb40..d85ae2d33 100644
--- a/cinder/tests/unit/api/v2/test_snapshots.py
+++ b/cinder/tests/unit/api/v2/test_snapshots.py
@@ -528,7 +528,7 @@ class SnapshotApiTest(test.TestCase):
@mock.patch.object(db, 'snapshot_get_all_by_project')
@mock.patch('cinder.db.snapshot_metadata_get', return_value=dict())
- def test_list_snpashots_with_wrong_limit_and_offset(self,
+ def test_list_snapshots_with_wrong_limit_and_offset(self,
mock_metadata_get,
mock_snapshot_get_all):
"""Test list with negative and non numeric limit and offset."""
diff --git a/cinder/tests/unit/api/v2/test_volumes.py b/cinder/tests/unit/api/v2/test_volumes.py
index 39e3e0768..7139a15c4 100644
--- a/cinder/tests/unit/api/v2/test_volumes.py
+++ b/cinder/tests/unit/api/v2/test_volumes.py
@@ -158,8 +158,7 @@ class VolumeApiTest(test.TestCase):
consistencygroup_id=None,
volume_type=None,
image_ref=None,
- image_id=None,
- multiattach=False):
+ image_id=None):
vol = {"size": size,
"name": name,
"description": description,
@@ -168,7 +167,6 @@ class VolumeApiTest(test.TestCase):
"source_volid": source_volid,
"consistencygroup_id": consistencygroup_id,
"volume_type": volume_type,
- "multiattach": multiattach,
}
if image_id is not None:
@@ -240,7 +238,6 @@ class VolumeApiTest(test.TestCase):
'consistencygroup': None,
'availability_zone': availability_zone,
'scheduler_hints': None,
- 'multiattach': False,
}
@mock.patch.object(db.sqlalchemy.api, '_volume_type_get_full',
@@ -570,37 +567,6 @@ class VolumeApiTest(test.TestCase):
req,
body=body)
- def test_volume_create_with_invalid_multiattach(self):
- vol = self._vol_in_request_body(multiattach="InvalidBool")
- body = {"volume": vol}
- req = fakes.HTTPRequest.blank('/v3/volumes')
-
- self.assertRaises(exception.ValidationError,
- self.controller.create,
- req,
- body=body)
-
- @mock.patch.object(volume_api.API, 'create', autospec=True)
- @mock.patch.object(volume_api.API, 'get', autospec=True)
- @mock.patch.object(db.sqlalchemy.api, '_volume_type_get_full',
- autospec=True)
- def test_volume_create_with_valid_multiattach(self,
- volume_type_get,
- get, create):
- create.side_effect = v2_fakes.fake_volume_api_create
- get.side_effect = v2_fakes.fake_volume_get
- volume_type_get.side_effect = v2_fakes.fake_volume_type_get
-
- vol = self._vol_in_request_body(multiattach=True)
- body = {"volume": vol}
-
- ex = self._expected_vol_from_controller(multiattach=True)
-
- req = fakes.HTTPRequest.blank('/v3/volumes')
- res_dict = self.controller.create(req, body=body)
-
- self.assertEqual(ex, res_dict)
-
@ddt.data({'a' * 256: 'a'},
{'a': 'a' * 256},
{'': 'a'},
diff --git a/cinder/tests/unit/api/v3/test_volumes.py b/cinder/tests/unit/api/v3/test_volumes.py
index f20e61d65..52d5f3fc9 100644
--- a/cinder/tests/unit/api/v3/test_volumes.py
+++ b/cinder/tests/unit/api/v3/test_volumes.py
@@ -619,7 +619,6 @@ class VolumeApiTest(test.TestCase):
'consistencygroup': None,
'availability_zone': availability_zone,
'scheduler_hints': None,
- 'multiattach': False,
'group': test_group,
}
@@ -1189,3 +1188,25 @@ class VolumeApiTest(test.TestCase):
volumes = res_dict['volumes']
self.assertEqual(1, len(volumes))
self.assertEqual(vols[1].id, volumes[0]['id'])
+
+ def test_create_volume_with_multiattach_param(self):
+ """Tests creating a volume with multiattach=True but no multiattach
+
+ volume type.
+
+ This test verifies that providing the multiattach parameter will error
+ out the request since it is removed and the recommended way is to
+ create a multiattach volume using a multiattach volume type.
+ """
+ req = fakes.HTTPRequest.blank('/v3/volumes')
+
+ body = {'volume': {
+ 'name': 'test name',
+ 'description': 'test desc',
+ 'size': 1,
+ 'multiattach': True}}
+ exc = self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create,
+ req, body=body)
+ self.assertIn("multiattach parameter has been removed",
+ exc.explanation)
diff --git a/cinder/tests/unit/attachments/test_attachments_manager.py b/cinder/tests/unit/attachments/test_attachments_manager.py
index 37e75eded..6e5def3d3 100644
--- a/cinder/tests/unit/attachments/test_attachments_manager.py
+++ b/cinder/tests/unit/attachments/test_attachments_manager.py
@@ -195,12 +195,6 @@ class AttachmentManagerTestCase(test.TestCase):
mock_db_detached, mock_db_meta_delete, mock_get_attachment):
mock_elevated.return_value = self.context
mock_con_term.return_value = False
- mock_db_detached.return_value = (
- {'status': 'available',
- 'attach_status': fields.VolumeAttachStatus.DETACHED},
- {'attach_status': fields.VolumeAttachStatus.DETACHED,
- 'deleted': True}
- )
# test single attachment. This should call
# detach and remove_export
@@ -208,10 +202,8 @@ class AttachmentManagerTestCase(test.TestCase):
self.manager.attachment_delete(self.context, attachment1.id, vref)
- mock_db_detached.called_once_with(self.context, vref,
- attachment1.id)
- mock_db_meta_delete.called_once_with(self.context, vref.id,
- 'attached_mode')
+ mock_db_detached.assert_not_called()
+ mock_db_meta_delete.assert_not_called()
mock_rm_export.assert_called_once_with(self.context, vref)
# test more than 1 attachment. This should skip
@@ -226,10 +218,8 @@ class AttachmentManagerTestCase(test.TestCase):
self.manager.attachment_delete(self.context, attachment2.id, vref)
mock_rm_export.assert_not_called()
- mock_db_detached.called_once_with(self.context, vref,
- attachment2.id)
- mock_db_meta_delete.called_once_with(self.context, vref.id,
- 'attached_mode')
+ mock_db_detached.assert_not_called()
+ mock_db_meta_delete.assert_not_called()
_test()
def test_connection_terminate_no_connector_force_false(self):
diff --git a/cinder/tests/unit/backup/drivers/test_backup_ceph.py b/cinder/tests/unit/backup/drivers/test_backup_ceph.py
index 9bd350703..e01ca9c67 100644
--- a/cinder/tests/unit/backup/drivers/test_backup_ceph.py
+++ b/cinder/tests/unit/backup/drivers/test_backup_ceph.py
@@ -17,6 +17,7 @@
import hashlib
import json
import os
+import subprocess
import tempfile
import threading
from unittest import mock
@@ -136,8 +137,7 @@ class BackupCephTestCase(test.TestCase):
'user_foo', 'conf_foo')
return linuxrbd.RBDVolumeIOWrapper(rbd_meta)
- def _setup_mock_popen(self, mock_popen, retval=None, p1hook=None,
- p2hook=None):
+ def _setup_mock_popen(self, retval=None, p1hook=None, p2hook=None):
class MockPopen(object):
hooks = [p2hook, p1hook]
@@ -157,7 +157,7 @@ class BackupCephTestCase(test.TestCase):
self.callstack.append('communicate')
return retval
- mock_popen.side_effect = MockPopen
+ subprocess.Popen.side_effect = MockPopen
def setUp(self):
global RAISED_EXCEPTIONS
@@ -468,8 +468,7 @@ class BackupCephTestCase(test.TestCase):
@common_mocks
@mock.patch('fcntl.fcntl', spec=True)
- @mock.patch('subprocess.Popen', spec=True)
- def test_backup_volume_from_rbd(self, mock_popen, mock_fnctl):
+ def test_backup_volume_from_rbd(self, mock_fnctl):
"""Test full RBD backup generated successfully."""
backup_name = self.service._get_backup_base_name(self.volume_id,
self.alt_backup)
@@ -485,8 +484,7 @@ class BackupCephTestCase(test.TestCase):
self.callstack.append('read')
return self.volume_file.read(self.data_length)
- self._setup_mock_popen(mock_popen,
- ['out', 'err'],
+ self._setup_mock_popen(['out', 'err'],
p1hook=mock_read_data,
p2hook=mock_write_data)
@@ -647,8 +645,7 @@ class BackupCephTestCase(test.TestCase):
@common_mocks
@mock.patch('fcntl.fcntl', spec=True)
- @mock.patch('subprocess.Popen', spec=True)
- def test_backup_volume_from_rbd_fail(self, mock_popen, mock_fnctl):
+ def test_backup_volume_from_rbd_fail(self, mock_fnctl):
"""Test of when an exception occurs in an exception handler.
In _backup_rbd(), after an exception.BackupRBDOperationFailed
@@ -670,8 +667,7 @@ class BackupCephTestCase(test.TestCase):
self.callstack.append('read')
return self.volume_file.read(self.data_length)
- self._setup_mock_popen(mock_popen,
- ['out', 'err'],
+ self._setup_mock_popen(['out', 'err'],
p1hook=mock_read_data,
p2hook=mock_write_data)
@@ -726,8 +722,7 @@ class BackupCephTestCase(test.TestCase):
@common_mocks
@mock.patch('fcntl.fcntl', spec=True)
- @mock.patch('subprocess.Popen', spec=True)
- def test_backup_volume_from_rbd_fail2(self, mock_popen, mock_fnctl):
+ def test_backup_volume_from_rbd_fail2(self, mock_fnctl):
"""Test of when an exception occurs in an exception handler.
In backup(), after an exception.BackupOperationError occurs in
@@ -748,8 +743,7 @@ class BackupCephTestCase(test.TestCase):
self.callstack.append('read')
return self.volume_file.read(self.data_length)
- self._setup_mock_popen(mock_popen,
- ['out', 'err'],
+ self._setup_mock_popen(['out', 'err'],
p1hook=mock_read_data,
p2hook=mock_write_data)
@@ -922,7 +916,7 @@ class BackupCephTestCase(test.TestCase):
self.volume_file.seek(0)
self.service.restore(self.alt_backup, self.volume_id,
- test_file)
+ test_file, False)
checksum = hashlib.sha256()
test_file.seek(0)
@@ -1324,10 +1318,9 @@ class BackupCephTestCase(test.TestCase):
@common_mocks
@mock.patch('fcntl.fcntl', spec=True)
- @mock.patch('subprocess.Popen', spec=True)
- def test_piped_execute(self, mock_popen, mock_fcntl):
+ def test_piped_execute(self, mock_fcntl):
mock_fcntl.return_value = 0
- self._setup_mock_popen(mock_popen, ['out', 'err'])
+ self._setup_mock_popen(['out', 'err'])
self.service._piped_execute(['foo'], ['bar'])
self.assertEqual(['popen_init', 'popen_init',
'stdout_close', 'communicate'], self.callstack)
diff --git a/cinder/tests/unit/backup/drivers/test_backup_google.py b/cinder/tests/unit/backup/drivers/test_backup_google.py
index b9efb8b43..8788702a9 100644
--- a/cinder/tests/unit/backup/drivers/test_backup_google.py
+++ b/cinder/tests/unit/backup/drivers/test_backup_google.py
@@ -504,7 +504,7 @@ class GoogleBackupDriverTestCase(test.TestCase):
service = google_dr.GoogleBackupDriver(self.ctxt)
with tempfile.NamedTemporaryFile() as volume_file:
- service.restore(backup, volume_id, volume_file)
+ service.restore(backup, volume_id, volume_file, False)
@gcs_client
def test_restore_fail(self):
@@ -517,7 +517,7 @@ class GoogleBackupDriverTestCase(test.TestCase):
with tempfile.NamedTemporaryFile() as volume_file:
self.assertRaises(google_dr.GCSConnectionFailure,
service.restore,
- backup, volume_id, volume_file)
+ backup, volume_id, volume_file, False)
@gcs_client2
def test_restore_delta(self):
@@ -548,8 +548,7 @@ class GoogleBackupDriverTestCase(test.TestCase):
service2.backup(deltabackup, self.volume_file, True)
with tempfile.NamedTemporaryFile() as restored_file:
- service2.restore(deltabackup, volume_id,
- restored_file)
+ service2.restore(deltabackup, volume_id, restored_file, False)
self.assertTrue(filecmp.cmp(self.volume_file.name,
restored_file.name))
diff --git a/cinder/tests/unit/backup/drivers/test_backup_nfs.py b/cinder/tests/unit/backup/drivers/test_backup_nfs.py
index f033f7168..2ada9293c 100644
--- a/cinder/tests/unit/backup/drivers/test_backup_nfs.py
+++ b/cinder/tests/unit/backup/drivers/test_backup_nfs.py
@@ -698,7 +698,7 @@ class BackupNFSTestCase(test.TestCase):
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
backup.status = objects.fields.BackupStatus.RESTORING
backup.save()
- service.restore(backup, volume_id, restored_file)
+ service.restore(backup, volume_id, restored_file, False)
self.assertTrue(filecmp.cmp(self.volume_file.name,
restored_file.name))
@@ -721,7 +721,7 @@ class BackupNFSTestCase(test.TestCase):
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
backup.status = objects.fields.BackupStatus.RESTORING
backup.save()
- service.restore(backup, volume_id, restored_file)
+ service.restore(backup, volume_id, restored_file, False)
self.assertTrue(filecmp.cmp(self.volume_file.name,
restored_file.name))
@@ -747,7 +747,7 @@ class BackupNFSTestCase(test.TestCase):
with tempfile.NamedTemporaryFile() as restored_file:
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
- service.restore(backup, volume_id, restored_file)
+ service.restore(backup, volume_id, restored_file, False)
self.assertTrue(filecmp.cmp(self.volume_file.name,
restored_file.name))
@@ -773,7 +773,7 @@ class BackupNFSTestCase(test.TestCase):
with tempfile.NamedTemporaryFile() as restored_file:
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
- service.restore(backup, volume_id, restored_file)
+ service.restore(backup, volume_id, restored_file, False)
self.assertTrue(filecmp.cmp(self.volume_file.name,
restored_file.name))
@@ -842,7 +842,7 @@ class BackupNFSTestCase(test.TestCase):
self.assertRaises(exception.BackupRestoreCancel,
service.restore, backup, volume_id,
- restored_file)
+ restored_file, False)
def test_restore_delta(self):
volume_id = fake.VOLUME_ID
@@ -890,8 +890,7 @@ class BackupNFSTestCase(test.TestCase):
with tempfile.NamedTemporaryFile() as restored_file:
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP2_ID)
- service.restore(backup, volume_id,
- restored_file)
+ service.restore(backup, volume_id, restored_file, False)
self.assertTrue(filecmp.cmp(self.volume_file.name,
restored_file.name))
diff --git a/cinder/tests/unit/backup/drivers/test_backup_posix.py b/cinder/tests/unit/backup/drivers/test_backup_posix.py
index 794c1e555..1f098feff 100644
--- a/cinder/tests/unit/backup/drivers/test_backup_posix.py
+++ b/cinder/tests/unit/backup/drivers/test_backup_posix.py
@@ -16,14 +16,19 @@
import builtins
import os
+import shutil
+import tempfile
from unittest import mock
+import uuid
from cinder.backup.drivers import posix
+from cinder.common import config
from cinder import context
from cinder import objects
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import test
+CONF = config.CONF
FAKE_FILE_SIZE = 52428800
FAKE_SHA_BLOCK_SIZE_BYTES = 1024
@@ -177,3 +182,148 @@ class PosixBackupDriverTestCase(test.TestCase):
timestamp,
backup.id)
self.assertEqual(expected, res)
+
+
+class PosixBackupTestWithData(test.TestCase):
+
+ def _create_volume_db_entry(self, display_name='test_volume',
+ display_description='this is a test volume',
+ status='backing-up',
+ previous_status='available',
+ size=1,
+ host='testhost',
+ encryption_key_id=None,
+ project_id=None):
+ """Create a volume entry in the DB.
+
+ Return the entry ID
+ """
+ vol = {}
+ vol['size'] = size
+ vol['host'] = host
+ vol['user_id'] = fake.USER_ID
+ vol['project_id'] = project_id or fake.PROJECT_ID
+ vol['status'] = status
+ vol['display_name'] = display_name
+ vol['display_description'] = display_description
+ vol['attach_status'] = objects.fields.VolumeAttachStatus.DETACHED
+ vol['availability_zone'] = '1'
+ vol['previous_status'] = previous_status
+ vol['encryption_key_id'] = encryption_key_id
+ vol['volume_type_id'] = fake.VOLUME_TYPE_ID
+ volume = objects.Volume(context=self.ctxt, **vol)
+ volume.create()
+ return volume.id
+
+ def _create_backup_db_entry(self, volume_id=str(uuid.uuid4()),
+ restore_volume_id=None,
+ display_name='test_backup',
+ display_description='this is a test backup',
+ container='volumebackups',
+ status=objects.fields.BackupStatus.CREATING,
+ size=1,
+ object_count=0,
+ project_id=str(uuid.uuid4()),
+ service=None,
+ temp_volume_id=None,
+ temp_snapshot_id=None,
+ snapshot_id=None,
+ metadata=None,
+ parent_id=None,
+ encryption_key_id=None):
+ """Create a backup entry in the DB.
+
+ Return the entry ID
+ """
+ kwargs = {}
+ kwargs['volume_id'] = volume_id
+ kwargs['restore_volume_id'] = restore_volume_id
+ kwargs['user_id'] = str(uuid.uuid4())
+ kwargs['project_id'] = project_id
+ kwargs['host'] = 'testhost'
+ kwargs['availability_zone'] = '1'
+ kwargs['display_name'] = display_name
+ kwargs['display_description'] = display_description
+ kwargs['container'] = container
+ kwargs['status'] = status
+ kwargs['fail_reason'] = ''
+ kwargs['service'] = service or CONF.backup_driver
+ kwargs['snapshot_id'] = snapshot_id
+ kwargs['parent_id'] = parent_id
+ kwargs['size'] = size
+ kwargs['object_count'] = object_count
+ kwargs['temp_volume_id'] = temp_volume_id
+ kwargs['temp_snapshot_id'] = temp_snapshot_id
+ kwargs['metadata'] = metadata or {}
+ kwargs['encryption_key_id'] = encryption_key_id
+ backup = objects.Backup(context=self.ctxt, **kwargs)
+ backup.create()
+ return backup
+
+ def setUp(self):
+ super(PosixBackupTestWithData, self).setUp()
+
+ self.tempdir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, self.tempdir)
+
+ backup_path = os.path.join(self.tempdir, "backup-dir")
+ os.mkdir(backup_path)
+
+ self.ctxt = context.get_admin_context()
+
+ self.override_config('backup_file_size',
+ FAKE_FILE_SIZE)
+ self.override_config('backup_sha_block_size_bytes',
+ FAKE_SHA_BLOCK_SIZE_BYTES)
+ self.override_config('backup_enable_progress_timer',
+ FAKE_BACKUP_ENABLE_PROGRESS_TIMER)
+ self.override_config('backup_posix_path', backup_path)
+ self.mock_object(posix, 'LOG')
+
+ self.driver = posix.PosixBackupDriver(self.ctxt)
+
+ mock_volume_filename = "restore-volume"
+ self.vol_path = os.path.join(self.tempdir, mock_volume_filename)
+
+ def test_restore_backup_with_sparseness(self):
+ """Test a sparse backup restoration."""
+
+ vol_size = 1
+ vol_id = self._create_volume_db_entry(status='restoring-backup',
+ size=vol_size)
+
+ chunk_size = 1024 * 1024
+
+ obj_data = b'01234567890123456789'
+
+ backup = self._create_backup_db_entry(
+ volume_id=vol_id,
+ status=objects.fields.BackupStatus.RESTORING)
+
+ with tempfile.NamedTemporaryFile() as volume_file:
+
+ # First, we create a fake volume with a hole. Although we know that
+ # the driver only detects zeroes, we create a real file with a hole
+ # as a way to future-proof this a little. Also, it's easier.
+ # Miraculously, tmpfs supports files with actual holes.
+ volume_file.seek(3 * chunk_size)
+ volume_file.write(obj_data)
+
+ # And then, we immediately run a backup on the fake volume.
+ # We don't attempt to re-create the backup volume by hand.
+ volume_file.seek(0)
+ self.driver.backup(backup, volume_file)
+
+ # Next, we restore, exercising the code under test.
+ with open(self.vol_path, 'wb') as volume_file:
+ self.driver.restore(backup, vol_id, volume_file, True)
+
+ # Finally, we examine the fake volume into which we restored.
+ with open(self.vol_path, 'rb') as volume_file:
+ volume_file.seek(3 * chunk_size)
+ question_data = volume_file.read(len(obj_data))
+
+ self.assertEqual(obj_data, question_data)
+
+ statb = os.stat(self.vol_path)
+ self.assertLess(statb.st_blocks * 512, (3 * chunk_size + 512) / 512)
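The final assertion relies on st_blocks, which reports 512-byte units actually allocated, so a restored sparse file allocates far less space than its apparent size. A small helper in the same spirit, shown only for illustration:

import os


def allocated_bytes(path):
    # st_blocks counts 512-byte units actually allocated on disk, which is
    # much smaller than st_size for a file restored with holes preserved.
    return os.stat(path).st_blocks * 512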
diff --git a/cinder/tests/unit/backup/drivers/test_backup_s3.py b/cinder/tests/unit/backup/drivers/test_backup_s3.py
index 04d097bad..bf4c95e20 100644
--- a/cinder/tests/unit/backup/drivers/test_backup_s3.py
+++ b/cinder/tests/unit/backup/drivers/test_backup_s3.py
@@ -528,7 +528,7 @@ class BackupS3TestCase(test.TestCase):
service.backup(backup, self.volume_file)
with tempfile.NamedTemporaryFile() as volume_file:
- service.restore(backup, volume_id, volume_file)
+ service.restore(backup, volume_id, volume_file, False)
@mock_s3
def test_restore_delta(self):
@@ -555,8 +555,7 @@ class BackupS3TestCase(test.TestCase):
service2.backup(deltabackup, self.volume_file, True)
with tempfile.NamedTemporaryFile() as restored_file:
- service2.restore(deltabackup, volume_id,
- restored_file)
+ service2.restore(deltabackup, volume_id, restored_file, False)
self.assertTrue(filecmp.cmp(self.volume_file.name,
restored_file.name))
@@ -571,7 +570,7 @@ class BackupS3TestCase(test.TestCase):
with tempfile.NamedTemporaryFile() as volume_file:
self.assertRaises(s3_dr.S3ClientError,
service.restore,
- backup, volume_id, volume_file)
+ backup, volume_id, volume_file, False)
@s3_client
def test_restore_faili2(self):
@@ -584,7 +583,7 @@ class BackupS3TestCase(test.TestCase):
with tempfile.NamedTemporaryFile() as volume_file:
self.assertRaises(s3_dr.S3ConnectionFailure,
service.restore,
- backup, volume_id, volume_file)
+ backup, volume_id, volume_file, False)
@mock_s3
def test_backup_md5_validation(self):
@@ -618,4 +617,4 @@ class BackupS3TestCase(test.TestCase):
service.backup(backup, self.volume_file)
with tempfile.NamedTemporaryFile() as volume_file:
- service.restore(backup, volume_id, volume_file)
+ service.restore(backup, volume_id, volume_file, False)
diff --git a/cinder/tests/unit/backup/drivers/test_backup_swift.py b/cinder/tests/unit/backup/drivers/test_backup_swift.py
index 2eadfae23..750e7ba13 100644
--- a/cinder/tests/unit/backup/drivers/test_backup_swift.py
+++ b/cinder/tests/unit/backup/drivers/test_backup_swift.py
@@ -799,7 +799,7 @@ class BackupSwiftTestCase(test.TestCase):
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
backup.status = objects.fields.BackupStatus.RESTORING
backup.save()
- service.restore(backup, volume_id, volume_file)
+ service.restore(backup, volume_id, volume_file, False)
def test_restore_delta(self):
volume_id = '04d83506-bcf7-4ff5-9c65-00000051bd2e'
@@ -849,8 +849,7 @@ class BackupSwiftTestCase(test.TestCase):
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP2_ID)
backup.status = objects.fields.BackupStatus.RESTORING
backup.save()
- service.restore(backup, volume_id,
- restored_file)
+ service.restore(backup, volume_id, restored_file, False)
self.assertTrue(filecmp.cmp(self.volume_file.name,
restored_file.name))
@@ -865,7 +864,7 @@ class BackupSwiftTestCase(test.TestCase):
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
self.assertRaises(exception.SwiftConnectionFailed,
service.restore,
- backup, volume_id, volume_file)
+ backup, volume_id, volume_file, False)
def test_restore_unsupported_version(self):
volume_id = '390db8c1-32d3-42ca-82c9-00000010c703'
@@ -878,7 +877,7 @@ class BackupSwiftTestCase(test.TestCase):
backup = objects.Backup.get_by_id(self.ctxt, fake.BACKUP_ID)
self.assertRaises(exception.InvalidBackup,
service.restore,
- backup, volume_id, volume_file)
+ backup, volume_id, volume_file, False)
def test_delete(self):
volume_id = '9ab256c8-3175-4ad8-baa1-0000007f9d31'
diff --git a/cinder/tests/unit/backup/fake_service.py b/cinder/tests/unit/backup/fake_service.py
index fa6537403..bcc40a1ff 100644
--- a/cinder/tests/unit/backup/fake_service.py
+++ b/cinder/tests/unit/backup/fake_service.py
@@ -23,7 +23,7 @@ class FakeBackupService(driver.BackupDriver):
def backup(self, backup, volume_file):
pass
- def restore(self, backup, volume_id, volume_file):
+ def restore(self, backup, volume_id, volume_file, volume_is_new):
pass
def delete_backup(self, backup):
diff --git a/cinder/tests/unit/backup/test_backup.py b/cinder/tests/unit/backup/test_backup.py
index d63271e73..4e0c7b91d 100644
--- a/cinder/tests/unit/backup/test_backup.py
+++ b/cinder/tests/unit/backup/test_backup.py
@@ -1139,7 +1139,8 @@ class BackupTestCase(BaseBackupTest):
self.backup_mgr.restore_backup,
self.ctxt,
backup,
- vol_id)
+ vol_id,
+ False)
backup = db.backup_get(self.ctxt, backup.id)
vol = db.volume_get(self.ctxt, vol_id)
self.assertEqual('error_restoring', vol['status'])
@@ -1159,7 +1160,8 @@ class BackupTestCase(BaseBackupTest):
self.backup_mgr.restore_backup,
self.ctxt,
backup,
- vol_id)
+ vol_id,
+ False)
vol = db.volume_get(self.ctxt, vol_id)
self.assertEqual('error', vol['status'])
backup = db.backup_get(self.ctxt, backup.id)
@@ -1180,7 +1182,8 @@ class BackupTestCase(BaseBackupTest):
self.backup_mgr.restore_backup,
self.ctxt,
backup,
- vol_id)
+ vol_id,
+ False)
vol = db.volume_get(self.ctxt, vol_id)
self.assertEqual('error_restoring', vol['status'])
backup = db.backup_get(self.ctxt, backup.id)
@@ -1200,7 +1203,7 @@ class BackupTestCase(BaseBackupTest):
mock_run_restore.side_effect = exception.BackupRestoreCancel(
vol_id=vol_id, back_id=backup.id)
# We shouldn't raise an exception on the call, it's OK to cancel
- self.backup_mgr.restore_backup(self.ctxt, backup, vol_id)
+ self.backup_mgr.restore_backup(self.ctxt, backup, vol_id, False)
vol = objects.Volume.get_by_id(self.ctxt, vol_id)
self.assertEqual('error', vol.status)
backup.refresh()
@@ -1217,7 +1220,7 @@ class BackupTestCase(BaseBackupTest):
mock_run_restore = self.mock_object(
self.backup_mgr,
'_run_restore')
- self.backup_mgr.restore_backup(self.ctxt, backup, vol_id)
+ self.backup_mgr.restore_backup(self.ctxt, backup, vol_id, False)
vol = objects.Volume.get_by_id(self.ctxt, vol_id)
self.assertEqual(fields.VolumeStatus.AVAILABLE, vol.status)
self.assertIsNotNone(vol.launched_at)
@@ -1238,7 +1241,7 @@ class BackupTestCase(BaseBackupTest):
mock_run_restore.side_effect = exception.BackupRestoreCancel(
vol_id=vol_id, back_id=backup.id)
# We shouldn't raise an exception on the call, it's OK to cancel
- self.backup_mgr.restore_backup(self.ctxt, backup, vol_id)
+ self.backup_mgr.restore_backup(self.ctxt, backup, vol_id, False)
vol = objects.Volume.get_by_id(self.ctxt, vol_id)
self.assertEqual(fields.VolumeStatus.ERROR, vol.status)
backup.refresh()
@@ -1262,7 +1265,8 @@ class BackupTestCase(BaseBackupTest):
self.backup_mgr.restore_backup,
self.ctxt,
backup,
- vol_id)
+ vol_id,
+ False)
vol = db.volume_get(self.ctxt, vol_id)
self.assertEqual('error', vol['status'])
backup = db.backup_get(self.ctxt, backup.id)
@@ -1299,7 +1303,7 @@ class BackupTestCase(BaseBackupTest):
mock_attach_device.return_value = attach_info
with mock.patch('os.name', os_name):
- self.backup_mgr.restore_backup(self.ctxt, backup, vol_id)
+ self.backup_mgr.restore_backup(self.ctxt, backup, vol_id, False)
mock_open.assert_called_once_with('/dev/null', exp_open_mode)
mock_temporary_chown.assert_called_once_with('/dev/null')
@@ -1359,14 +1363,15 @@ class BackupTestCase(BaseBackupTest):
mock_attach_device.return_value = attach_info
with mock.patch('os.name', os_name):
- self.backup_mgr.restore_backup(self.ctxt, backup, new_vol_id)
+ self.backup_mgr.restore_backup(self.ctxt, backup, new_vol_id,
+ False)
backup.status = "restoring"
db.backup_update(self.ctxt, backup.id, {"status": "restoring"})
vol.status = 'available'
vol.obj_reset_changes()
with mock.patch('os.name', os_name):
- self.backup_mgr.restore_backup(self.ctxt, backup, vol2_id)
+ self.backup_mgr.restore_backup(self.ctxt, backup, vol2_id, False)
vol2.refresh()
old_src_backup_id = vol2.metadata["src_backup_id"]
@@ -1376,7 +1381,7 @@ class BackupTestCase(BaseBackupTest):
vol2.obj_reset_changes()
with mock.patch('os.name', os_name):
- self.backup_mgr.restore_backup(self.ctxt, backup2, vol2_id)
+ self.backup_mgr.restore_backup(self.ctxt, backup2, vol2_id, False)
vol2.status = 'available'
vol2.obj_reset_changes()
@@ -1399,7 +1404,7 @@ class BackupTestCase(BaseBackupTest):
status=fields.BackupStatus.RESTORING, volume_id=vol_id)
self.backup_mgr._run_restore = mock.Mock()
- self.backup_mgr.restore_backup(self.ctxt, backup, vol_id)
+ self.backup_mgr.restore_backup(self.ctxt, backup, vol_id, False)
self.assertEqual(2, notify.call_count)
@mock.patch('cinder.volume.volume_utils.clone_encryption_key')
@@ -1429,7 +1434,7 @@ class BackupTestCase(BaseBackupTest):
'_attach_device')
mock_attach_device.return_value = {'device': {'path': '/dev/null'}}
- self.backup_mgr.restore_backup(self.ctxt, backup, vol_id)
+ self.backup_mgr.restore_backup(self.ctxt, backup, vol_id, False)
volume = db.volume_get(self.ctxt, vol_id)
self.assertEqual(fake.UUID1, volume.encryption_key_id)
mock_clone_encryption_key.assert_not_called()
@@ -1470,13 +1475,13 @@ class BackupTestCase(BaseBackupTest):
# Mimic the driver's side effect where it updates the volume's
# metadata. For backups of encrypted volumes, this will essentially
# overwrite the volume's encryption key ID prior to the restore.
- def restore_side_effect(backup, volume_id, volume_file):
+ def restore_side_effect(backup, volume_id, volume_file, volume_is_new):
db.volume_update(self.ctxt,
volume_id,
{'encryption_key_id': fake.UUID4})
mock_backup_driver_restore.side_effect = restore_side_effect
- self.backup_mgr.restore_backup(self.ctxt, backup, vol_id)
+ self.backup_mgr.restore_backup(self.ctxt, backup, vol_id, False)
# Volume's original encryption key ID should be deleted
mock_delete_encryption_key.assert_called_once_with(self.ctxt,
@@ -1527,13 +1532,13 @@ class BackupTestCase(BaseBackupTest):
# Mimic the driver's side effect where it updates the volume's
# metadata. For backups of encrypted volumes, this will essentially
# overwrite the volume's encryption key ID prior to the restore.
- def restore_side_effect(backup, volume_id, volume_file):
+ def restore_side_effect(backup, volume_id, volume_file, volume_is_new):
db.volume_update(self.ctxt,
volume_id,
{'encryption_key_id': fake.UUID4})
mock_backup_driver_restore.side_effect = restore_side_effect
- self.backup_mgr.restore_backup(self.ctxt, backup, vol_id)
+ self.backup_mgr.restore_backup(self.ctxt, backup, vol_id, False)
# Volume's original encryption key ID should be deleted
mock_delete_encryption_key.assert_called_once_with(self.ctxt,
@@ -1931,7 +1936,7 @@ class BackupTestCase(BaseBackupTest):
backup = self._create_backup_db_entry(
volume_id=vol_id, status=fields.BackupStatus.RESTORING)
- self.backup_mgr.restore_backup(self.ctxt, backup, vol_id)
+ self.backup_mgr.restore_backup(self.ctxt, backup, vol_id, False)
self.assertEqual(1, mock_sem.__enter__.call_count)
self.assertEqual(1, mock_restore.call_count)
diff --git a/cinder/tests/unit/backup/test_backup_messages.py b/cinder/tests/unit/backup/test_backup_messages.py
index 136f4bfd0..3ed0e9ad2 100644
--- a/cinder/tests/unit/backup/test_backup_messages.py
+++ b/cinder/tests/unit/backup/test_backup_messages.py
@@ -379,7 +379,7 @@ class BackupUserMessagesTest(test.TestCase):
self.assertRaises(
exception.InvalidVolume, manager.restore_backup,
- fake_context, fake_backup, fake.VOLUME_ID)
+ fake_context, fake_backup, fake.VOLUME_ID, False)
mock_msg_create.assert_called_once_with(
fake_context,
action=message_field.Action.BACKUP_RESTORE,
@@ -409,7 +409,7 @@ class BackupUserMessagesTest(test.TestCase):
self.assertRaises(
exception.InvalidBackup, manager.restore_backup,
- fake_context, fake_backup, fake.VOLUME_ID)
+ fake_context, fake_backup, fake.VOLUME_ID, False)
self.assertEqual(message_field.Action.BACKUP_RESTORE,
fake_context.message_action)
self.assertEqual(message_field.Resource.VOLUME_BACKUP,
@@ -455,7 +455,7 @@ class BackupUserMessagesTest(test.TestCase):
self.assertRaises(
exception.InvalidBackup, manager.restore_backup,
- fake_context, fake_backup, fake.VOLUME_ID)
+ fake_context, fake_backup, fake.VOLUME_ID, False)
self.assertEqual(message_field.Action.BACKUP_RESTORE,
fake_context.message_action)
self.assertEqual(message_field.Resource.VOLUME_BACKUP,
@@ -505,7 +505,7 @@ class BackupUserMessagesTest(test.TestCase):
self.assertRaises(
exception.InvalidBackup, manager.restore_backup,
- fake_context, fake_backup, fake.VOLUME_ID)
+ fake_context, fake_backup, fake.VOLUME_ID, False)
self.assertEqual(message_field.Action.BACKUP_RESTORE,
fake_context.message_action)
self.assertEqual(message_field.Resource.VOLUME_BACKUP,
@@ -555,7 +555,7 @@ class BackupUserMessagesTest(test.TestCase):
self.assertRaises(
exception.InvalidBackup, manager.restore_backup,
- fake_context, fake_backup, fake.VOLUME_ID)
+ fake_context, fake_backup, fake.VOLUME_ID, False)
self.assertEqual(message_field.Action.BACKUP_RESTORE,
fake_context.message_action)
self.assertEqual(message_field.Resource.VOLUME_BACKUP,
diff --git a/cinder/tests/unit/backup/test_chunkeddriver.py b/cinder/tests/unit/backup/test_chunkeddriver.py
index 403deb7b5..3bf1ae2b6 100644
--- a/cinder/tests/unit/backup/test_chunkeddriver.py
+++ b/cinder/tests/unit/backup/test_chunkeddriver.py
@@ -465,7 +465,7 @@ class ChunkedDriverTestCase(test.TestCase):
self.volume, parent_id=self.backup.id)
with mock.patch.object(self.driver, 'put_metadata') as mock_put:
- self.driver.restore(backup, self.volume, volume_file)
+ self.driver.restore(backup, self.volume, volume_file, False)
self.assertEqual(2, mock_put.call_count)
restore_test.assert_called()
diff --git a/cinder/tests/unit/backup/test_rpcapi.py b/cinder/tests/unit/backup/test_rpcapi.py
index 144b3cd27..b92fdfe3c 100644
--- a/cinder/tests/unit/backup/test_rpcapi.py
+++ b/cinder/tests/unit/backup/test_rpcapi.py
@@ -31,6 +31,9 @@ class BackupRPCAPITestCase(test.RPCAPITestCase):
self.rpcapi = backup_rpcapi.BackupAPI
self.fake_backup_obj = fake_backup.fake_backup_obj(self.context)
+ self.can_send_version_mock = self.patch(
+ 'oslo_messaging.RPCClient.can_send_version', return_value=True)
+
def test_create_backup(self):
self._test_rpc_api('create_backup',
rpc_method='cast',
@@ -43,7 +46,18 @@ class BackupRPCAPITestCase(test.RPCAPITestCase):
server='fake_volume_host',
backup_host='fake_volume_host',
backup=self.fake_backup_obj,
- volume_id=fake.VOLUME_ID)
+ volume_id=fake.VOLUME_ID,
+ volume_is_new=True)
+
+ with mock.patch('cinder.rpc.LAST_RPC_VERSIONS',
+ {'cinder-backup': '2.0'}):
+ self._test_rpc_api('restore_backup',
+ rpc_method='cast',
+ server='fake_volume_host',
+ backup_host='fake_volume_host',
+ backup=self.fake_backup_obj,
+ volume_id=fake.VOLUME_ID,
+ volume_is_new=False)
def test_delete_backup(self):
self._test_rpc_api('delete_backup',
diff --git a/cinder/tests/unit/db/test_migrations.py b/cinder/tests/unit/db/test_migrations.py
index bf9a1038b..3c8ed6dbd 100644
--- a/cinder/tests/unit/db/test_migrations.py
+++ b/cinder/tests/unit/db/test_migrations.py
@@ -34,6 +34,7 @@ from sqlalchemy.engine import reflection
import cinder.db.legacy_migrations
from cinder.db import migration
+from cinder.db.sqlalchemy import api
from cinder.db.sqlalchemy import models
from cinder.tests import fixtures as cinder_fixtures
from cinder.tests.unit import utils as test_utils
@@ -58,6 +59,7 @@ class CinderModelsMigrationsSync(test_migrations.ModelsMigrationsSync):
self.useFixture(cinder_fixtures.StandardLogging())
self.engine = enginefacade.writer.get_engine()
+ self.patch(api, 'get_engine', self.get_engine)
def db_sync(self, engine):
migration.db_sync(engine=self.engine)
@@ -140,6 +142,7 @@ class MigrationsWalk(
def setUp(self):
super().setUp()
self.engine = enginefacade.writer.get_engine()
+ self.patch(api, 'get_engine', lambda: self.engine)
self.config = migration._find_alembic_conf()
self.init_version = migration.ALEMBIC_INIT_VERSION
@@ -213,6 +216,17 @@ class MigrationsWalk(
# But it's nullable
self.assertTrue(table.c.shared_targets.nullable)
+ def _check_daa98075b90d(self, connection):
+ """Test resources have indexes."""
+ for table in ('groups', 'group_snapshots', 'volumes', 'snapshots',
+ 'backups'):
+ self.assertTrue(db_utils.index_exists(connection,
+ table,
+ f'{table}_deleted_project_id_idx'))
+
+ self.assertTrue(db_utils.index_exists(connection,
+ 'volumes', 'volumes_deleted_host_idx'))
+
class TestMigrationsWalkSQLite(
MigrationsWalk,
diff --git a/cinder/tests/unit/privsep/targets/fake_nvmet_lib.py b/cinder/tests/unit/privsep/targets/fake_nvmet_lib.py
index 8b0ebd649..b652e264c 100644
--- a/cinder/tests/unit/privsep/targets/fake_nvmet_lib.py
+++ b/cinder/tests/unit/privsep/targets/fake_nvmet_lib.py
@@ -30,7 +30,8 @@ except ImportError:
Root=type('Root', (mock.Mock, ), {}),
Subsystem=type('Subsystem', (mock.Mock, ), {}),
Port=type('Port', (mock.Mock, ), {}),
- Namespace=type('Namespace', (mock.Mock, ), {}),
+ Namespace=type('Namespace', (mock.Mock, ),
+ {'MAX_NSID': 8192}),
Host=type('Host', (mock.Mock, ), {}),
ANAGroup=type('ANAGroup', (mock.Mock, ), {}),
Referral=type('Referral', (mock.Mock, ), {}),
diff --git a/cinder/tests/unit/privsep/targets/test_nvmet.py b/cinder/tests/unit/privsep/targets/test_nvmet.py
index 8503b8cb7..cadde585d 100644
--- a/cinder/tests/unit/privsep/targets/test_nvmet.py
+++ b/cinder/tests/unit/privsep/targets/test_nvmet.py
@@ -298,7 +298,7 @@ class TestPrivsep(test.TestCase):
@ddt.ddt
class TestNvmetClasses(test.TestCase):
- @ddt.data('Namespace', 'Host', 'Referral', 'ANAGroup')
+ @ddt.data('Host', 'Referral', 'ANAGroup')
def test_same_classes(self, cls_name):
self.assertEqual(getattr(nvmet, cls_name),
getattr(nvmet.nvmet, cls_name))
@@ -330,6 +330,22 @@ class TestNvmetClasses(test.TestCase):
mock_privsep.assert_called_once_with(mock_serialize.return_value,
'delete')
+ @mock.patch('os.listdir',
+ return_value=['/path/namespaces/1', '/path/namespaces/2'])
+ @mock.patch.object(nvmet, 'Namespace')
+ def test_subsystem_namespaces(self, mock_nss, mock_listdir):
+ subsys = nvmet.Subsystem(mock.sentinel.nqn)
+ subsys.path = '/path' # Set by the parent nvmet library Root class
+
+ res = list(subsys.namespaces)
+
+ self.assertEqual([mock_nss.return_value, mock_nss.return_value], res)
+
+ mock_listdir.assert_called_once_with('/path/namespaces/')
+ self.assertEqual(2, mock_nss.call_count)
+ mock_nss.assert_has_calls((mock.call(subsys, '1'),
+ mock.call(subsys, '2')))
+
def test_port_init(self):
port = nvmet.Port('portid')
self.assertIsInstance(port, nvmet.nvmet.Port)
@@ -397,3 +413,30 @@ class TestNvmetClasses(test.TestCase):
mock_listdir.assert_called_once_with('/path/ports/')
self.assertEqual(2, mock_port.call_count)
mock_port.assert_has_calls((mock.call('1'), mock.call('2')))
+
+ def test_namespace_init(self):
+ ns = nvmet.Namespace('subsystem', 'nsid')
+ self.assertIsInstance(ns, nvmet.nvmet.Namespace)
+ self.assertIsInstance(ns, nvmet.Namespace)
+ self.assertEqual('subsystem', ns.subsystem)
+ self.assertEqual('nsid', ns.nsid)
+ self.assertEqual('lookup', ns.mode)
+
+ @mock.patch.object(nvmet, 'serialize')
+ @mock.patch.object(nvmet, 'privsep_setup')
+ def test_namespace_setup(self, mock_setup, mock_serialize):
+ nvmet.Namespace.setup(mock.sentinel.subsys,
+ mock.sentinel.n)
+ mock_serialize.assert_called_once_with(mock.sentinel.subsys)
+ mock_setup.assert_called_once_with('Namespace',
+ mock_serialize.return_value,
+ mock.sentinel.n, None)
+
+ @mock.patch.object(nvmet, 'serialize')
+ @mock.patch.object(nvmet, 'do_privsep_call')
+ def test_namespace_delete(self, mock_privsep, mock_serialize):
+ ns = nvmet.Namespace('subsystem', 'nsid')
+ ns.delete()
+ mock_serialize.assert_called_once_with(ns)
+ mock_privsep.assert_called_once_with(mock_serialize.return_value,
+ 'delete')
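
For context, a hedged sketch of the Subsystem.namespaces behaviour exercised by
test_subsystem_namespaces above (the path and the Namespace stand-in are illustrative;
the real wrapper lives in cinder/privsep/targets/nvmet.py):

    import os

    class Namespace:  # stand-in for the privsep-aware wrapper
        def __init__(self, subsystem, nsid):
            self.subsystem, self.nsid = subsystem, nsid

    class Subsystem:
        path = '/sys/kernel/config/nvmet/subsystems/nqn.example'  # hypothetical

        @property
        def namespaces(self):
            # List the namespace directories and wrap each id, matching the
            # Namespace(subsys, '<nsid>') calls the test asserts.
            for entry in os.listdir(self.path + '/namespaces/'):
                yield Namespace(self, os.path.basename(entry))
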
diff --git a/cinder/tests/unit/targets/test_base_iscsi_driver.py b/cinder/tests/unit/targets/test_base_iscsi_driver.py
index 4536b3028..4d5e9544a 100644
--- a/cinder/tests/unit/targets/test_base_iscsi_driver.py
+++ b/cinder/tests/unit/targets/test_base_iscsi_driver.py
@@ -12,6 +12,7 @@
from unittest import mock
+import ddt
from oslo_config import cfg
from cinder import context
@@ -28,6 +29,7 @@ class FakeIncompleteDriver(iscsi.ISCSITarget):
pass
+@ddt.ddt
class TestBaseISCSITargetDriver(tf.TargetDriverFixture):
def setUp(self):
@@ -183,3 +185,15 @@ class TestBaseISCSITargetDriver(tf.TargetDriverFixture):
self.testvol))
self.target.db.volume_get.assert_called_once_with(
ctxt, self.testvol['id'])
+
+ def test_are_same_connector(self):
+ res = self.target.are_same_connector({'initiator': 'iqn'},
+ {'initiator': 'iqn'})
+ self.assertTrue(res)
+
+ @ddt.data(({}, {}), ({}, {'initiator': 'iqn'}), ({'initiator': 'iqn'}, {}),
+ ({'initiator': 'iqn1'}, {'initiator': 'iqn2'}))
+ @ddt.unpack
+ def test_are_same_connector_different(self, a_conn_props, b_conn_props):
+ res = self.target.are_same_connector(a_conn_props, b_conn_props)
+ self.assertFalse(bool(res))
diff --git a/cinder/tests/unit/targets/test_nvmeof_driver.py b/cinder/tests/unit/targets/test_nvmeof_driver.py
index 62d707c94..21d30b739 100644
--- a/cinder/tests/unit/targets/test_nvmeof_driver.py
+++ b/cinder/tests/unit/targets/test_nvmeof_driver.py
@@ -12,6 +12,7 @@
from unittest import mock
+import ddt
from oslo_utils import timeutils
from cinder import context
@@ -25,15 +26,11 @@ class FakeNVMeOFDriver(nvmeof.NVMeOF):
def __init__(self, *args, **kwargs):
super(FakeNVMeOFDriver, self).__init__(*args, **kwargs)
- def create_nvmeof_target(
- self, target_name, target_ip, target_port,
- transport_type, ns_id, volume_path):
- pass
-
def delete_nvmeof_target(self, target_name):
pass
+@ddt.ddt
class TestNVMeOFDriver(tf.TargetDriverFixture):
def setUp(self):
@@ -65,7 +62,7 @@ class TestNVMeOFDriver(tf.TargetDriverFixture):
"ngn.%s-%s" % (
self.nvmet_subsystem_name,
self.fake_volume_id),
- self.target_ip,
+ [self.target_ip],
self.target_port,
self.nvme_transport_type,
self.nvmet_ns_id
@@ -75,16 +72,18 @@ class TestNVMeOFDriver(tf.TargetDriverFixture):
'created_at': timeutils.utcnow(),
'host': 'fake_host@lvm#lvm'})
- def test_initialize_connection(self):
+ @mock.patch.object(nvmeof.NVMeOF, '_get_connection_properties_from_vol')
+ def test_initialize_connection(self, mock_get_conn):
mock_connector = {'initiator': 'fake_init'}
mock_testvol = self.testvol
expected_return = {
'driver_volume_type': 'nvmeof',
- 'data': self.target._get_connection_properties(mock_testvol)
+ 'data': mock_get_conn.return_value
}
self.assertEqual(expected_return,
self.target.initialize_connection(mock_testvol,
mock_connector))
+ mock_get_conn.assert_called_once_with(mock_testvol)
@mock.patch.object(FakeNVMeOFDriver, 'create_nvmeof_target')
def test_create_export(self, mock_create_nvme_target):
@@ -93,7 +92,7 @@ class TestNVMeOFDriver(tf.TargetDriverFixture):
mock_create_nvme_target.assert_called_once_with(
self.fake_volume_id,
self.configuration.target_prefix,
- self.target_ip,
+ [self.target_ip],
self.target_port,
self.nvme_transport_type,
self.nvmet_port_id,
@@ -109,17 +108,86 @@ class TestNVMeOFDriver(tf.TargetDriverFixture):
self.testvol
)
- def test_get_connection_properties(self):
+ @mock.patch.object(nvmeof.NVMeOF, '_get_nvme_uuid')
+ @mock.patch.object(nvmeof.NVMeOF, '_get_connection_properties')
+ def test__get_connection_properties(self, mock_get_conn_props, mock_uuid):
+ """Test connection properties from a volume."""
+ res = self.target._get_connection_properties_from_vol(self.testvol)
+ self.assertEqual(mock_get_conn_props.return_value, res)
+ mock_uuid.assert_called_once_with(self.testvol)
+ mock_get_conn_props.assert_called_once_with(
+ f'ngn.{self.nvmet_subsystem_name}-{self.fake_volume_id}',
+ [self.target_ip],
+ str(self.target_port),
+ self.nvme_transport_type,
+ str(self.nvmet_ns_id),
+ mock_uuid.return_value)
+
+ @mock.patch.object(nvmeof.NVMeOF, '_get_nvme_uuid')
+ @mock.patch.object(nvmeof.NVMeOF, '_get_connection_properties')
+ def test__get_connection_properties_multiple_addresses(
+ self, mock_get_conn_props, mock_uuid):
+ """Test connection properties from a volume with multiple ips."""
+ self.testvol['provider_location'] = self.target.get_nvmeof_location(
+ f"ngn.{self.nvmet_subsystem_name}-{self.fake_volume_id}",
+ [self.target_ip, '127.0.0.1'],
+ self.target_port,
+ self.nvme_transport_type,
+ self.nvmet_ns_id
+ )
+
+ res = self.target._get_connection_properties_from_vol(self.testvol)
+ self.assertEqual(mock_get_conn_props.return_value, res)
+ mock_uuid.assert_called_once_with(self.testvol)
+ mock_get_conn_props.assert_called_once_with(
+ f'ngn.{self.nvmet_subsystem_name}-{self.fake_volume_id}',
+ [self.target_ip, '127.0.0.1'],
+ str(self.target_port),
+ self.nvme_transport_type,
+ str(self.nvmet_ns_id),
+ mock_uuid.return_value)
+
+ def test__get_connection_properties_old(self):
+ """Test connection properties with the old NVMe-oF format."""
+ nqn = f'ngn.{self.nvmet_subsystem_name}-{self.fake_volume_id}'
expected_return = {
'target_portal': self.target_ip,
'target_port': str(self.target_port),
- 'nqn': "ngn.%s-%s" % (
- self.nvmet_subsystem_name, self.fake_volume_id),
+ 'nqn': nqn,
'transport_type': self.nvme_transport_type,
'ns_id': str(self.nvmet_ns_id)
}
- self.assertEqual(expected_return,
- self.target._get_connection_properties(self.testvol))
+ res = self.target._get_connection_properties(nqn,
+ [self.target_ip],
+ str(self.target_port),
+ self.nvme_transport_type,
+ str(self.nvmet_ns_id),
+ mock.sentinel.uuid)
+ self.assertEqual(expected_return, res)
+
+ @ddt.data(('rdma', 'RoCEv2'), ('tcp', 'tcp'))
+ @ddt.unpack
+ def test__get_connection_properties_new(
+ self, transport, expected_transport):
+ """Test connection properties with the new NVMe-oF format."""
+ nqn = f'ngn.{self.nvmet_subsystem_name}-{self.fake_volume_id}'
+ self.configuration.nvmeof_conn_info_version = 2
+
+ expected_return = {
+ 'target_nqn': nqn,
+ 'vol_uuid': mock.sentinel.uuid,
+ 'ns_id': str(self.nvmet_ns_id),
+ 'portals': [(self.target_ip,
+ str(self.target_port),
+ expected_transport)],
+ }
+ res = self.target._get_connection_properties(nqn,
+ [self.target_ip],
+ str(self.target_port),
+ transport,
+ str(self.nvmet_ns_id),
+ mock.sentinel.uuid)
+ self.assertEqual(expected_return, res)
def test_validate_connector(self):
mock_connector = {'initiator': 'fake_init'}
@@ -137,3 +205,47 @@ class TestNVMeOFDriver(tf.TargetDriverFixture):
FakeNVMeOFDriver,
root_helper=utils.get_root_helper(),
configuration=self.configuration)
+
+ def test_invalid_secondary_ips_old_conn_info_combination(self):
+ """Secondary IPS are only supported with new connection information."""
+ self.configuration.target_secondary_ip_addresses = ['127.0.0.1']
+ self.configuration.nvmeof_conn_info_version = 1
+ self.assertRaises(exception.InvalidConfigurationValue,
+ FakeNVMeOFDriver,
+ root_helper=utils.get_root_helper(),
+ configuration=self.configuration)
+
+ def test_valid_secondary_ips_new_conn_info_combination(self):
+ """Secondary IPs are supported with new connection information."""
+ self.configuration.target_secondary_ip_addresses = ['127.0.0.1']
+ self.configuration.nvmeof_conn_info_version = 2
+ FakeNVMeOFDriver(root_helper=utils.get_root_helper(),
+ configuration=self.configuration)
+
+ def test_are_same_connector(self):
+ res = self.target.are_same_connector({'nqn': 'nvme'}, {'nqn': 'nvme'})
+ self.assertTrue(res)
+
+ @ddt.data(({}, {}), ({}, {'nqn': 'nvmE'}), ({'nqn': 'nvmeE'}, {}),
+ ({'nqn': 'nvme1'}, {'nqn': 'nvme2'}))
+ @ddt.unpack
+ def test_are_same_connector_different(self, a_conn_props, b_conn_props):
+ res = self.target.are_same_connector(a_conn_props, b_conn_props)
+ self.assertFalse(bool(res))
+
+ def test_get_nvmeof_location(self):
+ """Serialize connection information into location."""
+ result = self.target.get_nvmeof_location(
+ 'ngn.subsys_name-vol_id', ['127.0.0.1'], 4420, 'tcp', 10)
+
+ expected = '127.0.0.1:4420 tcp ngn.subsys_name-vol_id 10'
+ self.assertEqual(expected, result)
+
+ def test_get_nvmeof_location_multiple_ips(self):
+ """Serialize connection information with multiple ips into location."""
+ result = self.target.get_nvmeof_location(
+ 'ngn.subsys_name-vol_id', ['127.0.0.1', '192.168.1.1'], 4420,
+ 'tcp', 10)
+
+ expected = '127.0.0.1,192.168.1.1:4420 tcp ngn.subsys_name-vol_id 10'
+ self.assertEqual(expected, result)
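
The two location tests above pin down the serialization format; a minimal sketch of a
function matching those expectations (the signature is assumed from the arguments the
tests pass) would be:

    def get_nvmeof_location(nqn, target_ips, target_port, transport_type,
                            nvmet_ns_id):
        """Serialize connection information, e.g.
        '127.0.0.1,192.168.1.1:4420 tcp ngn.subsys_name-vol_id 10'."""
        return '%(ips)s:%(port)s %(transport)s %(nqn)s %(nsid)s' % {
            'ips': ','.join(target_ips),
            'port': target_port,
            'transport': transport_type,
            'nqn': nqn,
            'nsid': nvmet_ns_id,
        }
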
diff --git a/cinder/tests/unit/targets/test_nvmet_driver.py b/cinder/tests/unit/targets/test_nvmet_driver.py
index 26241653e..abfd138b7 100644
--- a/cinder/tests/unit/targets/test_nvmet_driver.py
+++ b/cinder/tests/unit/targets/test_nvmet_driver.py
@@ -14,6 +14,7 @@ from unittest import mock
import ddt
+from cinder import exception
from cinder.tests.unit.privsep.targets import fake_nvmet_lib
from cinder.tests.unit.targets import targets_fixture as tf
from cinder import utils
@@ -32,141 +33,326 @@ class TestNVMETDriver(tf.TargetDriverFixture):
self.configuration.target_protocol = 'nvmet_rdma'
self.target = nvmet.NVMET(root_helper=utils.get_root_helper(),
configuration=self.configuration)
+ self.target.share_targets = False
fake_nvmet_lib.reset_mock()
- @mock.patch.object(nvmet.NVMET, 'create_nvmeof_target')
- def test_create_export(self, mock_create_target):
- """Test that the nvmeof class calls the nvmet method."""
- res = self.target.create_export(mock.sentinel.ctxt,
- self.testvol,
- mock.sentinel.volume_path)
- self.assertEqual(mock_create_target.return_value, res)
- mock_create_target.assert_called_once_with(
- self.testvol['id'],
- self.target.configuration.target_prefix,
- self.target.target_ip,
+ def test_supports_shared(self):
+ self.assertTrue(self.target.SHARED_TARGET_SUPPORT)
+
+ @mock.patch.object(nvmet.nvmeof.NVMeOF, 'initialize_connection')
+ @mock.patch.object(nvmet.NVMET, '_map_volume')
+ def test_initialize_connection_non_shared(self, mock_map, mock_init_conn):
+ """Non shared initialize doesn't do anything (calls NVMeOF)."""
+ res = self.target.initialize_connection(mock.sentinel.volume,
+ mock.sentinel.connector)
+ self.assertEqual(mock_init_conn.return_value, res)
+ mock_init_conn.assert_called_once_with(mock.sentinel.volume,
+ mock.sentinel.connector)
+ mock_map.assert_not_called()
+
+ @mock.patch.object(nvmet.NVMET, '_get_nvme_uuid')
+ @mock.patch('os.path.exists')
+ @mock.patch.object(nvmet.NVMET, '_get_connection_properties')
+ @mock.patch.object(nvmet.nvmeof.NVMeOF, 'initialize_connection')
+ @mock.patch.object(nvmet.NVMET, '_map_volume')
+ def test_initialize_connection_shared(
+ self, mock_map, mock_init_conn, mock_get_conn_props, mock_exists,
+ mock_uuid):
+ """When sharing, the initialization maps the volume."""
+ self.mock_object(self.target, 'share_targets', True)
+ mock_map.return_value = (mock.sentinel.nqn, mock.sentinel.nsid)
+ vol = mock.Mock()
+ res = self.target.initialize_connection(vol, mock.sentinel.connector)
+
+ expected = {'driver_volume_type': 'nvmeof',
+ 'data': mock_get_conn_props.return_value}
+ self.assertEqual(expected, res)
+
+ mock_init_conn.assert_not_called()
+ mock_exists.assert_called_once_with(vol.provider_location)
+ mock_map.assert_called_once_with(vol,
+ vol.provider_location,
+ mock.sentinel.connector)
+ mock_uuid.assert_called_once_with(vol)
+ mock_get_conn_props.assert_called_once_with(
+ mock.sentinel.nqn,
+ self.target.target_ips,
self.target.target_port,
self.target.nvme_transport_type,
- self.target.nvmet_port_id,
- self.target.nvmet_ns_id,
- mock.sentinel.volume_path)
+ mock.sentinel.nsid,
+ mock_uuid.return_value)
+
+ @mock.patch.object(nvmet.NVMET, '_get_nvme_uuid')
+ @mock.patch('os.path.exists', return_value=False)
+ @mock.patch.object(nvmet.NVMET, '_get_connection_properties')
+ @mock.patch.object(nvmet.nvmeof.NVMeOF, 'initialize_connection')
+ @mock.patch.object(nvmet.NVMET, '_map_volume')
+ def test_initialize_connection_shared_no_path(
+ self, mock_map, mock_init_conn, mock_get_conn_props, mock_exists,
+ mock_uuid):
+ """Fails if the provided path is not present in the system."""
+ self.mock_object(self.target, 'share_targets', True)
+ mock_map.return_value = (mock.sentinel.nqn, mock.sentinel.nsid)
+ vol = mock.Mock()
+ self.assertRaises(exception.InvalidConfigurationValue,
+ self.target.initialize_connection,
+ vol, mock.sentinel.connector)
+
+ mock_init_conn.assert_not_called()
+ mock_exists.assert_called_once_with(vol.provider_location)
+ mock_map.assert_not_called()
+ mock_uuid.assert_not_called()
+ mock_get_conn_props.assert_not_called()
@mock.patch.object(nvmet.NVMET, 'get_nvmeof_location')
- @mock.patch.object(nvmet.NVMET, '_ensure_port_exports')
- @mock.patch.object(nvmet.NVMET, '_ensure_subsystem_exists')
- @mock.patch.object(nvmet.NVMET, '_get_target_nqn')
- def test_create_nvmeof_target(self, mock_nqn, mock_subsys, mock_port,
- mock_location):
- """Normal create target execution."""
- mock_nqn.return_value = mock.sentinel.nqn
+ @mock.patch.object(nvmet.NVMET, '_map_volume')
+ def test_create_export(self, mock_map, mock_location):
+ """When not sharing, the export maps the volume."""
+ mock_map.return_value = (mock.sentinel.nqn, mock.sentinel.nsid)
- res = self.target.create_nvmeof_target(mock.sentinel.vol_id,
- mock.sentinel.target_prefix,
- mock.sentinel.target_ip,
- mock.sentinel.target_port,
- mock.sentinel.transport_type,
- mock.sentinel.port_id,
- mock.sentinel.ns_id,
- mock.sentinel.volume_path)
+ res = self.target.create_export(mock.sentinel.context,
+ mock.sentinel.vol,
+ mock.sentinel.volume_path)
self.assertEqual({'location': mock_location.return_value, 'auth': ''},
res)
- mock_nqn.assert_called_once_with(mock.sentinel.vol_id)
- mock_subsys.assert_called_once_with(mock.sentinel.nqn,
- mock.sentinel.ns_id,
- mock.sentinel.volume_path)
- mock_port.assert_called_once_with(mock.sentinel.nqn,
- mock.sentinel.target_ip,
- mock.sentinel.target_port,
- mock.sentinel.transport_type,
- mock.sentinel.port_id)
-
+ mock_map.assert_called_once_with(mock.sentinel.vol,
+ mock.sentinel.volume_path)
mock_location.assert_called_once_with(mock.sentinel.nqn,
- mock.sentinel.target_ip,
- mock.sentinel.target_port,
- mock.sentinel.transport_type,
- mock.sentinel.ns_id)
+ self.target.target_ips,
+ self.target.target_port,
+ self.target.nvme_transport_type,
+ mock.sentinel.nsid)
+
+ @mock.patch.object(nvmet.NVMET, 'get_nvmeof_location')
+ @mock.patch.object(nvmet.NVMET, '_map_volume')
+ def test_create_export_shared(self, mock_map, mock_location):
+ """When sharing, the export just stores the volume path as location."""
+ self.mock_object(self.target, 'share_targets', True)
+
+ res = self.target.create_export(mock.sentinel.context,
+ mock.sentinel.vol,
+ mock.sentinel.volume_path)
+
+ self.assertEqual({'location': mock.sentinel.volume_path, 'auth': ''},
+ res)
+ mock_map.assert_not_called()
+ mock_location.assert_not_called()
+
+ @mock.patch('oslo_concurrency.lockutils.lock')
+ @mock.patch.object(nvmet.NVMET, '_get_nvme_uuid')
+ @mock.patch.object(nvmet.NVMET, '_ensure_port_exports')
+ @mock.patch.object(nvmet.NVMET, '_ensure_subsystem_exists')
+ @mock.patch.object(nvmet.NVMET, '_get_target_nqn')
+ def test__map_volume(self, mock_nqn, mock_subsys, mock_port, mock_uuid,
+ mock_lock):
+ """Normal volume mapping."""
+ vol = mock.Mock()
+ res = self.target._map_volume(vol, mock.sentinel.volume_path,
+ mock.sentinel.connector)
+
+ expected = (mock_nqn.return_value, mock_subsys.return_value)
+ self.assertEqual(res, expected)
+
+ mock_nqn.assert_called_once_with(vol.id, mock.sentinel.connector)
+ mock_uuid.assert_called_once_with(vol)
+ mock_subsys.assert_called_once_with(mock_nqn.return_value,
+ mock.sentinel.volume_path,
+ mock_uuid.return_value)
+ mock_port.assert_called_once_with(mock_nqn.return_value,
+ self.target.target_ips,
+ self.target.target_port,
+ self.target.nvme_transport_type,
+ self.target.nvmet_port_id)
+ mock_lock.assert_called()
@ddt.data((ValueError, None), (None, IndexError))
@ddt.unpack
- @mock.patch.object(nvmet.NVMET, 'get_nvmeof_location')
+ @mock.patch('oslo_concurrency.lockutils.lock')
+ @mock.patch.object(nvmet.NVMET, '_get_nvme_uuid')
@mock.patch.object(nvmet.NVMET, '_ensure_port_exports')
@mock.patch.object(nvmet.NVMET, '_ensure_subsystem_exists')
@mock.patch.object(nvmet.NVMET, '_get_target_nqn')
- def test_create_nvmeof_target_error(self, subsys_effect, port_effect,
- mock_nqn, mock_subsys, mock_port,
- mock_location):
+ def test__map_volume_error(self, subsys_effect, port_effect, mock_nqn,
+ mock_subsys, mock_port, mock_uuid, mock_lock):
"""Failing create target executing subsystem or port creation."""
mock_subsys.side_effect = subsys_effect
mock_port.side_effect = port_effect
mock_nqn.return_value = mock.sentinel.nqn
+ mock_uuid.return_value = mock.sentinel.uuid
+ vol = mock.Mock()
self.assertRaises(nvmet.NVMETTargetAddError,
- self.target.create_nvmeof_target,
- mock.sentinel.vol_id,
- mock.sentinel.target_prefix,
- mock.sentinel.target_ip,
- mock.sentinel.target_port,
- mock.sentinel.transport_type,
- mock.sentinel.port_id,
- mock.sentinel.ns_id,
- mock.sentinel.volume_path)
-
- mock_nqn.assert_called_once_with(mock.sentinel.vol_id)
+ self.target._map_volume,
+ vol,
+ mock.sentinel.volume_path,
+ mock.sentinel.connector)
+
+ mock_nqn.assert_called_once_with(vol.id, mock.sentinel.connector)
+ mock_uuid.assert_called_once_with(vol)
mock_subsys.assert_called_once_with(mock.sentinel.nqn,
- mock.sentinel.ns_id,
- mock.sentinel.volume_path)
+ mock.sentinel.volume_path,
+ mock.sentinel.uuid)
if subsys_effect:
mock_port.assert_not_called()
else:
mock_port.assert_called_once_with(mock.sentinel.nqn,
- mock.sentinel.target_ip,
- mock.sentinel.target_port,
- mock.sentinel.transport_type,
- mock.sentinel.port_id)
- mock_location.assert_not_called()
+ self.target.target_ips,
+ self.target.target_port,
+ self.target.nvme_transport_type,
+ self.target.nvmet_port_id)
+ mock_lock.assert_called()
+ @mock.patch.object(nvmet.NVMET, '_ensure_namespace_exists')
@mock.patch.object(priv_nvmet, 'Subsystem')
- def test__ensure_subsystem_exists_already_exists(self, mock_subsys):
+ def test__ensure_subsystem_exists_already_exists(self, mock_subsys,
+ mock_namespace):
"""Skip subsystem creation if already exists."""
nqn = 'nqn.nvme-subsystem-1-uuid'
- self.target._ensure_subsystem_exists(nqn, mock.sentinel.ns_id,
- mock.sentinel.vol_path)
+ res = self.target._ensure_subsystem_exists(nqn,
+ mock.sentinel.vol_path,
+ mock.sentinel.uuid)
+ self.assertEqual(mock_namespace.return_value, res)
mock_subsys.assert_called_once_with(nqn)
mock_subsys.setup.assert_not_called()
+ mock_namespace.assert_called_once_with(mock_subsys.return_value,
+ mock.sentinel.vol_path,
+ mock.sentinel.uuid)
+ @mock.patch.object(nvmet.NVMET, '_ensure_namespace_exists')
@mock.patch('oslo_utils.uuidutils.generate_uuid')
@mock.patch.object(priv_nvmet, 'Subsystem')
- def test__ensure_subsystem_exists(self, mock_subsys, mock_uuid):
+ def test__ensure_subsystem_exists(self, mock_subsys, mock_uuid,
+ mock_namespace):
"""Create subsystem when it doesn't exist."""
mock_subsys.side_effect = priv_nvmet.NotFound
mock_uuid.return_value = 'uuid'
nqn = 'nqn.nvme-subsystem-1-uuid'
- self.target._ensure_subsystem_exists(nqn, mock.sentinel.ns_id,
- mock.sentinel.vol_path)
+ self.target._ensure_subsystem_exists(nqn,
+ mock.sentinel.vol_path,
+ mock.sentinel.uuid)
mock_subsys.assert_called_once_with(nqn)
expected_section = {
'allowed_hosts': [],
'attr': {'allow_any_host': '1'},
'namespaces': [{'device': {'nguid': 'uuid',
+ 'uuid': mock.sentinel.uuid,
'path': mock.sentinel.vol_path},
'enable': 1,
- 'nsid': mock.sentinel.ns_id}],
+ 'nsid': self.target.nvmet_ns_id}],
'nqn': nqn
}
mock_subsys.setup.assert_called_once_with(expected_section)
+ mock_namespace.assert_not_called()
+
+ @mock.patch('oslo_utils.uuidutils.generate_uuid')
+ def test__namespace_dict(self, mock_uuid):
+ """For not shared nguid is randomly generated."""
+ res = self.target._namespace_dict(mock.sentinel.uuid,
+ mock.sentinel.volume_path,
+ mock.sentinel.ns_id)
+ expected = {"device": {"nguid": str(mock_uuid.return_value),
+ "uuid": mock.sentinel.uuid,
+ "path": mock.sentinel.volume_path},
+ "enable": 1,
+ "nsid": mock.sentinel.ns_id}
+ self.assertEqual(expected, res)
+ mock_uuid.assert_called_once()
+
+ @mock.patch('oslo_utils.uuidutils.generate_uuid')
+ def test__namespace_dict_shared(self, mock_uuid):
+ """For shared uuid = nguid."""
+ self.mock_object(self.target, 'share_targets', True)
+ res = self.target._namespace_dict(mock.sentinel.uuid,
+ mock.sentinel.volume_path,
+ mock.sentinel.ns_id)
+ expected = {"device": {"nguid": mock.sentinel.uuid,
+ "uuid": mock.sentinel.uuid,
+ "path": mock.sentinel.volume_path},
+ "enable": 1,
+ "nsid": mock.sentinel.ns_id}
+ self.assertEqual(expected, res)
+ mock_uuid.assert_not_called()
+
+ def test__ensure_namespace_exist_exists(self):
+ """Nothing to do if the namespace is already mapped."""
+ base_path = '/dev/stack-volumes-lvmdriver-1/volume-'
+ volume_path = f'{base_path}uuid2'
+ subsys = mock.Mock()
+ ns_other = mock.Mock(**{'get_attr.return_value': f'{base_path}uuid1'})
+ ns_found = mock.Mock(**{'get_attr.return_value': volume_path})
+ # ns_other appears twice to confirm we stop when found
+ subsys.namespaces = [ns_other, ns_found, ns_other]
+ res = self.target._ensure_namespace_exists(subsys, volume_path,
+ mock.sentinel.uuid)
+ self.assertEqual(ns_found.nsid, res)
+ ns_other.get_attr.assert_called_once_with('device', 'path')
+ ns_found.get_attr.assert_called_once_with('device', 'path')
+
+ @mock.patch.object(priv_nvmet, 'Namespace')
+ @mock.patch.object(nvmet.NVMET, '_namespace_dict')
+ @mock.patch.object(nvmet.NVMET, '_get_available_namespace_id')
+ def test__ensure_namespace_exist_create(self, mock_get_nsid, mock_ns_dict,
+ mock_ns):
+ """Create the namespace when the path is not mapped yet."""
+ base_path = '/dev/stack-volumes-lvmdriver-1/volume-'
+ subsys = mock.Mock()
+ ns_other = mock.Mock(**{'get_attr.return_value': f'{base_path}uuid1'})
+ subsys.namespaces = [ns_other]
+ res = self.target._ensure_namespace_exists(subsys,
+ mock.sentinel.volume_path,
+ mock.sentinel.uuid)
+ self.assertEqual(mock_get_nsid.return_value, res)
+ ns_other.get_attr.assert_called_once_with('device', 'path')
+ mock_get_nsid.assert_called_once_with(subsys)
+ mock_ns_dict.assert_called_once_with(mock.sentinel.uuid,
+ mock.sentinel.volume_path,
+ mock_get_nsid.return_value)
+ mock_ns.setup.assert_called_once_with(subsys,
+ mock_ns_dict.return_value)
+
+ def test__get_available_namespace_id(self):
+ """For non shared we always return the value from the config."""
+ res = self.target._get_available_namespace_id(mock.Mock())
+ self.assertEqual(self.target.nvmet_ns_id, res)
+
+ def test__get_available_namespace_id_none_used(self):
+ """For shared, on empty subsystem return the configured value."""
+ self.mock_object(self.target, 'share_targets', True)
+ subsys = mock.Mock(namespaces=[])
+ res = self.target._get_available_namespace_id(subsys)
+ self.assertEqual(self.target.nvmet_ns_id, res)
+
+ def test__get_available_namespace_id_no_gaps(self):
+ """For shared, if there are no gaps in ids return next."""
+ self.mock_object(self.target, 'share_targets', True)
+ expected = self.target.nvmet_ns_id + 2
+ subsys = mock.Mock(namespaces=[mock.Mock(nsid=expected - 1),
+ mock.Mock(nsid=expected - 2)])
+ res = self.target._get_available_namespace_id(subsys)
+ self.assertEqual(expected, res)
+
+ def test__get_available_namespace_id_gap_value(self):
+ """For shared, if there is a gap any of them is valid."""
+ self.mock_object(self.target, 'share_targets', True)
+ lower = self.target.nvmet_ns_id
+ subsys = mock.Mock(namespaces=[mock.Mock(nsid=lower + 3),
+ mock.Mock(nsid=lower)])
+ res = self.target._get_available_namespace_id(subsys)
+ self.assertTrue(res in [lower + 2, lower + 1])
@mock.patch.object(priv_nvmet, 'Port')
def test__ensure_port_exports_already_does(self, mock_port):
"""Skips port creation and subsystem export since they both exist."""
nqn = 'nqn.nvme-subsystem-1-uuid'
+ port_id = 1
mock_port.return_value.subsystems = [nqn]
self.target._ensure_port_exports(nqn,
- mock.sentinel.addr,
+ [mock.sentinel.addr],
mock.sentinel.port,
mock.sentinel.transport,
- mock.sentinel.port_id)
- mock_port.assert_called_once_with(mock.sentinel.port_id)
+ port_id)
+ mock_port.assert_called_once_with(port_id)
mock_port.setup.assert_not_called()
mock_port.return_value.add_subsystem.assert_not_called()
@@ -174,13 +360,14 @@ class TestNVMETDriver(tf.TargetDriverFixture):
def test__ensure_port_exports_port_exists_not_exported(self, mock_port):
"""Skips port creation if exists but exports subsystem."""
nqn = 'nqn.nvme-subsystem-1-vol-2-uuid'
+ port_id = 1
mock_port.return_value.subsystems = ['nqn.nvme-subsystem-1-vol-1-uuid']
self.target._ensure_port_exports(nqn,
- mock.sentinel.addr,
+ [mock.sentinel.addr],
mock.sentinel.port,
mock.sentinel.transport,
- mock.sentinel.port_id)
- mock_port.assert_called_once_with(mock.sentinel.port_id)
+ port_id)
+ mock_port.assert_called_once_with(port_id)
mock_port.setup.assert_not_called()
mock_port.return_value.add_subsystem.assert_called_once_with(nqn)
@@ -188,80 +375,274 @@ class TestNVMETDriver(tf.TargetDriverFixture):
def test__ensure_port_exports_port(self, mock_port):
"""Creates the port and export the subsystem when they don't exist."""
nqn = 'nqn.nvme-subsystem-1-vol-2-uuid'
+ port_id = 1
mock_port.side_effect = priv_nvmet.NotFound
self.target._ensure_port_exports(nqn,
- mock.sentinel.addr,
+ [mock.sentinel.addr,
+ mock.sentinel.addr2],
mock.sentinel.port,
mock.sentinel.transport,
- mock.sentinel.port_id)
- mock_port.assert_called_once_with(mock.sentinel.port_id)
- new_port = {'addr': {'adrfam': 'ipv4',
- 'traddr': mock.sentinel.addr,
- 'treq': 'not specified',
- 'trsvcid': mock.sentinel.port,
- 'trtype': mock.sentinel.transport},
- 'portid': mock.sentinel.port_id,
- 'referrals': [],
- 'subsystems': [nqn]}
- mock_port.setup.assert_called_once_with(self.target._nvmet_root,
- new_port)
+ port_id)
+ new_port1 = {'addr': {'adrfam': 'ipv4',
+ 'traddr': mock.sentinel.addr,
+ 'treq': 'not specified',
+ 'trsvcid': mock.sentinel.port,
+ 'trtype': mock.sentinel.transport},
+ 'portid': port_id,
+ 'referrals': [],
+ 'subsystems': [nqn]}
+ new_port2 = new_port1.copy()
+ new_port2['portid'] = port_id + 1
+ new_port2['addr'] = new_port1['addr'].copy()
+ new_port2['addr']['traddr'] = mock.sentinel.addr2
+
+ self.assertEqual(2, mock_port.call_count)
+ self.assertEqual(2, mock_port.setup.call_count)
+ mock_port.assert_has_calls([
+ mock.call(port_id),
+ mock.call.setup(self.target._nvmet_root, new_port1),
+ mock.call(port_id + 1),
+ mock.call.setup(self.target._nvmet_root, new_port2)
+ ])
mock_port.return_value.assert_not_called()
- @mock.patch.object(nvmet.NVMET, 'delete_nvmeof_target')
- def test_remove_export(self, mock_delete_target):
- """Test that the nvmeof class calls the nvmet method."""
- res = self.target.remove_export(mock.sentinel.ctxt,
- mock.sentinel.volume)
- self.assertEqual(mock_delete_target.return_value, res)
- mock_delete_target.assert_called_once_with(mock.sentinel.volume)
+ @mock.patch.object(nvmet.NVMET, '_locked_unmap_volume')
+ def test_terminate_connection(self, mock_unmap):
+ """For non shared there's nothing to do."""
+ self.target.terminate_connection(mock.sentinel.vol,
+ mock.sentinel.connector)
+ mock_unmap.assert_not_called()
- @mock.patch.object(priv_nvmet, 'Subsystem')
+ @mock.patch.object(nvmet.NVMET, '_locked_unmap_volume')
+ def test_terminate_connection_shared(self, mock_unmap):
+ """For shared the volume must be unmapped."""
+ self.mock_object(self.target, 'share_targets', True)
+ vol = mock.Mock()
+ self.target.terminate_connection(vol,
+ mock.sentinel.connector)
+ mock_unmap.assert_called_once_with(vol,
+ mock.sentinel.connector)
+
+ @mock.patch.object(nvmet.NVMET, '_locked_unmap_volume')
+ def test_remove_export(self, mock_unmap):
+ """For non shared the volume must be unmapped."""
+ vol = mock.Mock()
+ self.target.remove_export(mock.sentinel.context,
+ vol)
+ mock_unmap.assert_called_once_with(vol)
+
+ @mock.patch.object(nvmet.NVMET, '_locked_unmap_volume')
+ def test_remove_export_shared(self, mock_unmap):
+ """For shared there's nothing to do."""
+ self.mock_object(self.target, 'share_targets', True)
+ self.target.remove_export(mock.sentinel.context,
+ mock.sentinel.vol)
+ mock_unmap.assert_not_called()
+
+ @mock.patch('oslo_concurrency.lockutils.lock')
+ @mock.patch.object(nvmet.NVMET, '_get_nqns_for_location', return_value=[])
@mock.patch.object(nvmet.NVMET, '_get_target_nqn')
- def test_delete_nvmeof_target_nothing_present(self, mock_nqn, mock_subsys):
- """Delete doesn't do anything because there is nothing to do."""
- mock_nqn.return_value = mock.sentinel.nqn
+ @mock.patch.object(nvmet.NVMET, '_unmap_volume')
+ def test__locked_unmap_volume_no_nqn(self, mock_unmap, mock_nqn, mock_nqns,
+ mock_lock):
+ """Nothing to do with no subsystem when sharing and no connector."""
+ self.mock_object(self.target, 'share_targets', True)
+
+ vol = mock.Mock()
+ self.target._locked_unmap_volume(vol, connector=None)
+
+ mock_lock.assert_called()
+ mock_nqn.assert_not_called()
+ mock_nqns.assert_called_once_with(vol.provider_location)
+ mock_unmap.assert_not_called()
+
+ @mock.patch('oslo_concurrency.lockutils.lock')
+ @mock.patch.object(nvmet.NVMET, '_get_nqns_for_location')
+ @mock.patch.object(nvmet.NVMET, '_get_target_nqn')
+ @mock.patch.object(nvmet.NVMET, '_unmap_volume')
+ def test__locked_unmap_volume_non_shared(self, mock_unmap, mock_nqn,
+ mock_nqns, mock_lock):
+ """Unmap locked with non sharing and no connector."""
+ vol = mock.Mock()
+ self.target._locked_unmap_volume(vol, connector=None)
+
+ mock_lock.assert_called()
+ mock_nqn.assert_called_once_with(vol.id, None)
+ mock_nqns.assert_not_called()
+ mock_unmap.assert_called_once_with(vol, mock_nqn.return_value)
+
+ @mock.patch('oslo_concurrency.lockutils.lock')
+ @mock.patch.object(nvmet.NVMET, '_get_nqns_for_location')
+ @mock.patch.object(nvmet.NVMET, '_get_target_nqn')
+ @mock.patch.object(nvmet.NVMET, '_unmap_volume')
+ def test__locked_unmap_volume_shared_multiple(self, mock_unmap, mock_nqn,
+ mock_nqns, mock_lock):
+ """Unmap locked with sharing and no connector, having multiple nqns."""
+ self.mock_object(self.target, 'share_targets', True)
+ vol = mock.Mock()
+ mock_nqns.return_value = [mock.sentinel.nqn1, mock.sentinel.nqn2]
+
+ self.target._locked_unmap_volume(vol, connector=None)
+
+ mock_lock.assert_called()
+ mock_nqn.assert_not_called()
+ mock_nqns.assert_called_once_with(vol.provider_location)
+
+ expected = [mock.call(vol, mock.sentinel.nqn1),
+ mock.call(vol, mock.sentinel.nqn2)]
+ mock_unmap.assert_has_calls(expected)
+ self.assertEqual(2, mock_unmap.call_count)
+
+ @mock.patch.object(nvmet.NVMET, '_get_target_nqn')
+ @mock.patch.object(priv_nvmet, 'Subsystem')
+ def test__unmap_volume_no_subsys(self, mock_subsys, mock_nqn):
+ """Nothing to do it there is no subsystem."""
mock_subsys.side_effect = priv_nvmet.NotFound
+ vol = mock.Mock()
+ # This port is used just to confirm we don't reach that part
+ port = mock.Mock(subsystems=[mock.sentinel.port])
+ self.mock_object(priv_nvmet.Root, 'ports', [port])
+
+ self.target._unmap_volume(vol, mock.sentinel.nqn)
+ mock_subsys.assert_called_once_with(mock.sentinel.nqn)
+
+ port.remove_subsystem.assert_not_called()
+
+ @mock.patch.object(priv_nvmet, 'Subsystem')
+ def test__unmap_volume_not_shared(self, mock_subsys):
+ """Non shared assumes the subsystem is empty."""
+ vol = mock.Mock()
+ # The ns is used to confirm we don't check it
+ ns = mock.Mock(**{'get_attr.return_value': vol.provider_location})
+ subsys = mock_subsys.return_value
+ subsys.nqn = mock.sentinel.nqn
+ subsys.namespaces = [ns]
+
+ port = mock.Mock(subsystems=[subsys.nqn])
+ self.mock_object(priv_nvmet.Root, 'ports', [port])
+
+ self.target._unmap_volume(vol, mock.sentinel.nqn)
- port1 = mock.Mock(subsystems=[])
- port2 = mock.Mock(subsystems=['subs1'])
- self.mock_object(priv_nvmet.Root, 'ports', [port1, port2])
+ mock_subsys.assert_called_once_with(mock.sentinel.nqn)
- volume = mock.Mock(id='vol-uuid')
- self.target.delete_nvmeof_target(volume)
+ ns.get_attr.assert_not_called()
+ ns.delete.assert_not_called()
- mock_nqn.assert_called_once_with(volume.id)
- port1.remove_subsystem.assert_not_called()
- port2.remove_subsystem.assert_not_called()
- mock_subsys.delete.assert_not_called()
+ port.remove_subsystem.assert_called_once_with(mock.sentinel.nqn)
+ subsys.delete.assert_called_once_with()
@mock.patch.object(priv_nvmet, 'Subsystem')
+ def test__unmap_volume_shared_more_ns(self, mock_subsys):
+ """For shared don't unexport subsys if there are other ns."""
+ self.mock_object(self.target, 'share_targets', True)
+ vol = mock.Mock()
+
+ ns = mock.Mock(**{'get_attr.return_value': vol.provider_location})
+ subsys = mock_subsys.return_value
+ subsys.namespaces = [ns]
+
+ # Use this port to confirm we don't reach that point
+ port = mock.Mock(subsystems=[subsys])
+ self.mock_object(priv_nvmet.Root, 'ports', [port])
+
+ self.target._unmap_volume(vol, mock.sentinel.nqn)
+
+ mock_subsys.assert_called_once_with(mock.sentinel.nqn)
+
+ ns.get_attr.assert_called_once_with('device', 'path')
+ ns.delete.assert_called_once_with()
+
+ port.remove_subsystem.assert_not_called()
+ mock_subsys.return_value.delete.assert_not_called()
+
+ @mock.patch('oslo_concurrency.lockutils.lock')
@mock.patch.object(nvmet.NVMET, '_get_target_nqn')
- def test_delete_nvmeof_target(self, mock_nqn, mock_subsys):
- """Delete removes subsystems from port and the subsystem."""
- mock_nqn.return_value = mock.sentinel.nqn
+ @mock.patch.object(priv_nvmet, 'Subsystem')
+ def test__unmap_volume_shared_last_ns(self, mock_subsys, mock_nqn,
+ mock_lock):
+ """For shared unexport subsys if there are no other ns."""
+ self.mock_object(self.target, 'share_targets', True)
+ vol = mock.Mock()
+
+ ns = mock.Mock(**{'get_attr.return_value': vol.provider_location})
+ nss = [ns]
+ ns.delete.side_effect = nss.clear
+ subsys = mock_subsys.return_value
+ subsys.nqn = mock.sentinel.nqn
+ subsys.namespaces = nss
- port1 = mock.Mock(subsystems=[])
- port2 = mock.Mock(subsystems=[mock.sentinel.nqn])
- port3 = mock.Mock(subsystems=['subs1'])
- self.mock_object(priv_nvmet.Root, 'ports', [port1, port2, port3])
+ port = mock.Mock(subsystems=[subsys.nqn])
+ self.mock_object(priv_nvmet.Root, 'ports', [port])
- volume = mock.Mock(id='vol-uuid')
- self.target.delete_nvmeof_target(volume)
+ self.target._unmap_volume(vol, mock.sentinel.nqn)
- mock_nqn.assert_called_once_with(volume.id)
- port1.remove_subsystem.assert_not_called()
- port2.remove_subsystem.assert_called_once_with(mock.sentinel.nqn)
- port3.remove_subsystem.assert_not_called()
mock_subsys.assert_called_once_with(mock.sentinel.nqn)
- mock_subsys.return_value.delete.assert_called_once_with()
- @mock.patch.object(priv_nvmet, 'Root')
- def test__get_available_nvmf_subsystems(self, mock_root):
- res = self.target._get_available_nvmf_subsystems()
- mock_dump = mock_root.return_value.dump
- self.assertEqual(mock_dump.return_value, res)
- mock_dump.assert_called_once_with()
+ ns.get_attr.assert_called_once_with('device', 'path')
+ ns.delete.assert_called_once_with()
+
+ port.remove_subsystem.assert_called_once_with(mock.sentinel.nqn)
+ mock_subsys.return_value.delete.assert_called_once_with()
def test__get_target_nqn(self):
- res = self.target._get_target_nqn('volume_id')
+ """Non shared uses volume id for subsystem name."""
+ res = self.target._get_target_nqn('volume_id', None)
self.assertEqual('nqn.nvme-subsystem-1-volume_id', res)
+
+ def test__get_target_nqn_shared(self):
+ """Shared uses connector's hostname for subsystem name."""
+ self.mock_object(self.target, 'share_targets', True)
+ res = self.target._get_target_nqn('volume_id', {'host': 'localhost'})
+ self.assertEqual('nqn.nvme-subsystem-1-localhost', res)
+
+ def test__get_nvme_uuid(self):
+ vol = mock.Mock()
+ res = self.target._get_nvme_uuid(vol)
+ self.assertEqual(vol.name_id, res)
+
+ def test__get_nqns_for_location_no_subsystems(self):
+ self.mock_object(self.target._nvmet_root, 'subsystems', iter([]))
+ res = self.target._get_nqns_for_location(mock.sentinel.location)
+ self.assertListEqual([], res)
+
+ def test__get_nqns_for_location_no_subsystems_found(self):
+ ns1 = mock.Mock(**{'get_attr.return_value': mock.sentinel.location1})
+ subsys1 = mock.Mock(namespaces=iter([ns1]))
+
+ ns2 = mock.Mock(**{'get_attr.return_value': mock.sentinel.location2})
+ subsys2 = mock.Mock(namespaces=iter([ns2]))
+
+ subsys = iter([subsys1, subsys2])
+ self.mock_object(self.target._nvmet_root, 'subsystems', subsys)
+
+ res = self.target._get_nqns_for_location(mock.sentinel.location3)
+
+ self.assertListEqual([], res)
+ ns1.get_attr.assert_called_once_with('device', 'path')
+ ns2.get_attr.assert_called_once_with('device', 'path')
+
+ def test__get_nqns_for_location_subsystems_found(self):
+ ns1 = mock.Mock(**{'get_attr.return_value': mock.sentinel.location1})
+ subsys1 = mock.Mock(namespaces=iter([ns1]))
+
+ ns2 = mock.Mock(**{'get_attr.return_value': mock.sentinel.location2})
+ ns1b = mock.Mock(**{'get_attr.return_value': mock.sentinel.location1})
+ ns3 = mock.Mock(**{'get_attr.return_value': mock.sentinel.location3})
+ subsys2 = mock.Mock(namespaces=iter([ns2, ns1b, ns3]))
+
+ ns4 = mock.Mock(**{'get_attr.return_value': mock.sentinel.location4})
+ subsys3 = mock.Mock(namespaces=iter([ns4]))
+
+ subsys4 = mock.Mock(namespaces=iter([]))
+
+ subsys = iter([subsys1, subsys2, subsys3, subsys4])
+ self.mock_object(self.target._nvmet_root, 'subsystems', subsys)
+
+ res = self.target._get_nqns_for_location(mock.sentinel.location1)
+
+ self.assertListEqual([subsys1.nqn, subsys2.nqn, subsys4.nqn], res)
+ ns1.get_attr.assert_called_once_with('device', 'path')
+ ns2.get_attr.assert_called_once_with('device', 'path')
+ ns1b.get_attr.assert_called_once_with('device', 'path')
+ ns3.get_attr.assert_not_called()
+ ns4.get_attr.assert_called_once_with('device', 'path')
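
The _get_available_namespace_id tests above fully describe the id selection; a hedged
sketch of equivalent logic (standalone function, names assumed) is:

    def pick_namespace_id(configured_id, share_targets, used_ids):
        """Pick an NVMe namespace id consistent with the tests above."""
        if not share_targets or not used_ids:
            # Non-shared targets (and brand-new shared subsystems) keep the
            # configured nvmet_ns_id.
            return configured_id
        # Shared targets reuse a gap if one exists, otherwise take the next id.
        candidates = set(range(configured_id, max(used_ids) + 2)) - set(used_ids)
        return min(candidates)
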
diff --git a/cinder/tests/unit/targets/test_spdknvmf.py b/cinder/tests/unit/targets/test_spdknvmf.py
index e99923991..d64d52759 100644
--- a/cinder/tests/unit/targets/test_spdknvmf.py
+++ b/cinder/tests/unit/targets/test_spdknvmf.py
@@ -349,6 +349,7 @@ class SpdkNvmfDriverTestCase(test.TestCase):
super(SpdkNvmfDriverTestCase, self).setUp()
self.configuration = mock.Mock(conf.Configuration)
self.configuration.target_ip_address = '192.168.0.1'
+ self.configuration.target_secondary_ip_addresses = []
self.configuration.target_port = '4420'
self.configuration.target_prefix = ""
self.configuration.nvmet_port_id = "1"
diff --git a/cinder/tests/unit/test_utils.py b/cinder/tests/unit/test_utils.py
index 6676f2181..29dbbd848 100644
--- a/cinder/tests/unit/test_utils.py
+++ b/cinder/tests/unit/test_utils.py
@@ -62,14 +62,14 @@ class ExecuteTestCase(test.TestCase):
run_as_root=True,
root_helper=mock_helper)
- @mock.patch('cinder.utils.get_root_helper')
- @mock.patch('cinder.utils.processutils.execute')
+ @mock.patch('cinder.utils.get_root_helper', autospec=True)
+ @mock.patch('cinder.utils.processutils.execute', autospec=True)
def test_execute_root_and_helper(self, mock_putils_exe, mock_get_helper):
- mock_helper = mock.Mock()
+ mock_helper = mock.sentinel
output = utils.execute('a', 1, foo='bar', run_as_root=True,
root_helper=mock_helper)
self.assertEqual(mock_putils_exe.return_value, output)
- self.assertFalse(mock_get_helper.called)
+ mock_get_helper.assert_not_called()
mock_putils_exe.assert_called_once_with('a', 1, foo='bar',
run_as_root=True,
root_helper=mock_helper)
diff --git a/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_masking.py b/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_masking.py
index ab83a9883..544676539 100644
--- a/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_masking.py
+++ b/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_masking.py
@@ -165,11 +165,18 @@ class PowerMaxMaskingTest(test.TestCase):
rest.PowerMaxRest, 'get_num_vols_in_sg', side_effect=[2, 1, 1])
def test_move_volume_between_storage_groups(
self, mock_num, mock_parent, mock_rm, mck_mod):
+ extra_specs = {'pool_name': u'Diamond+DSS+SRP_1+000197800123',
+ 'slo': 'Diamond',
+ 'workload': 'DSS',
+ 'srp': 'SRP_1',
+ 'array': '000197800123',
+ 'interval': 3,
+ 'retries': 120}
for x in range(0, 3):
self.driver.masking.move_volume_between_storage_groups(
self.data.array, self.data.device_id,
self.data.storagegroup_name_i, self.data.storagegroup_name_f,
- self.data.extra_specs)
+ extra_specs)
mock_rm.assert_called_once()
ref_payload = (
{"executionOption": "ASYNCHRONOUS",
diff --git a/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_rest.py b/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_rest.py
index 8de2dd2be..d3c1a2ae8 100644
--- a/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_rest.py
+++ b/cinder/tests/unit/volume/drivers/dell_emc/powermax/test_powermax_rest.py
@@ -1285,9 +1285,8 @@ class PowerMaxRestTest(test.TestCase):
target_id = self.data.volume_snap_vx[
'snapshotSrcs'][0]['linkedDevices'][0]['targetDevice']
snap_name = self.data.volume_snap_vx['snapshotSrcs'][0]['snapshotName']
- extra_specs = self.data.extra_specs
- if extra_specs.get(utils.FORCE_VOL_EDIT):
- del extra_specs[utils.FORCE_VOL_EDIT]
+ extra_specs = deepcopy(self.data.extra_specs)
+ extra_specs.pop(utils.FORCE_VOL_EDIT, None)
payload = {'deviceNameListSource': [{'name': source_id}],
'deviceNameListTarget': [
{'name': target_id}],
@@ -2554,3 +2553,28 @@ class PowerMaxRestTest(test.TestCase):
self.data.array, self.data.device_id,
self.data.test_snapshot_snap_name)
self.assertEqual('0', snap_id)
+
+ def test_check_force(self):
+ extra_specs = {'pool_name': 'Diamond+DSS+SRP_1+000197800123',
+ 'slo': 'Diamond',
+ 'srp': 'SRP_1',
+ 'array': '000123456789',
+ 'interval': 3,
+ 'retries': 120}
+ self.assertEqual(
+ 'false', self.rest._check_force(extra_specs))
+ self.assertEqual(
+ 'false', self.rest._check_force(
+ extra_specs, force_flag=False))
+ self.assertEqual(
+ 'true', self.rest._check_force(
+ extra_specs, force_flag=True))
+ extra_specs[utils.FORCE_VOL_EDIT] = True
+ self.assertEqual(
+ 'true', self.rest._check_force(extra_specs))
+ self.assertEqual(
+ 'true', self.rest._check_force(
+ extra_specs, force_flag=False))
+ self.assertEqual(
+ 'true', self.rest._check_force(
+ extra_specs, force_flag=True))
diff --git a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_mirror_fc.py b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_mirror_fc.py
new file mode 100644
index 000000000..e65c2fde7
--- /dev/null
+++ b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_mirror_fc.py
@@ -0,0 +1,1561 @@
+# Copyright (C) 2022, 2023, Hitachi, Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+"""Unit tests for Hitachi HBSD Driver."""
+
+import json
+from unittest import mock
+
+from oslo_config import cfg
+import requests
+
+from cinder import context as cinder_context
+from cinder.db.sqlalchemy import api as sqlalchemy_api
+from cinder import exception
+from cinder.objects import group_snapshot as obj_group_snap
+from cinder.objects import snapshot as obj_snap
+from cinder.tests.unit import fake_group
+from cinder.tests.unit import fake_group_snapshot
+from cinder.tests.unit import fake_snapshot
+from cinder.tests.unit import fake_volume
+from cinder.tests.unit import test
+from cinder.volume import configuration as conf
+from cinder.volume import driver
+from cinder.volume.drivers.hitachi import hbsd_common
+from cinder.volume.drivers.hitachi import hbsd_fc
+from cinder.volume.drivers.hitachi import hbsd_rest
+from cinder.volume.drivers.hitachi import hbsd_rest_api
+from cinder.volume.drivers.hitachi import hbsd_utils
+from cinder.volume import volume_types
+from cinder.volume import volume_utils
+from cinder.zonemanager import utils as fczm_utils
+
+# Configuration parameter values
+CONFIG_MAP = {
+ 'serial': '886000123456',
+ 'my_ip': '127.0.0.1',
+ 'rest_server_ip_addr': '172.16.18.108',
+ 'rest_server_ip_port': '23451',
+ 'port_id': 'CL1-A',
+ 'host_grp_name': 'HBSD-0123456789abcdef',
+ 'host_mode': 'LINUX/IRIX',
+ 'host_wwn': '0123456789abcdef',
+ 'target_wwn': '1111111123456789',
+ 'user_id': 'user',
+ 'user_pass': 'password',
+ 'pool_name': 'test_pool',
+ 'auth_user': 'auth_user',
+ 'auth_password': 'auth_password',
+}
+
+REMOTE_CONFIG_MAP = {
+ 'serial': '886000456789',
+ 'my_ip': '127.0.0.1',
+ 'rest_server_ip_addr': '172.16.18.107',
+ 'rest_server_ip_port': '334',
+ 'port_id': 'CL2-B',
+ 'host_grp_name': 'HBSD-0123456789abcdef',
+ 'host_mode': 'LINUX/IRIX',
+ 'host_wwn': '0123456789abcdef',
+ 'target_wwn': '2222222234567891',
+ 'user_id': 'remote-user',
+ 'user_pass': 'remote-password',
+ 'pool_name': 'remote_pool',
+ 'auth_user': 'remote_user',
+ 'auth_password': 'remote_password',
+}
+
+# Dummy response for FC zoning device mapping
+DEVICE_MAP = {
+ 'fabric_name': {
+ 'initiator_port_wwn_list': [CONFIG_MAP['host_wwn']],
+ 'target_port_wwn_list': [CONFIG_MAP['target_wwn']]}}
+
+REMOTE_DEVICE_MAP = {
+ 'fabric_name': {
+ 'initiator_port_wwn_list': [REMOTE_CONFIG_MAP['host_wwn']],
+ 'target_port_wwn_list': [REMOTE_CONFIG_MAP['target_wwn']]}}
+
+DEFAULT_CONNECTOR = {
+ 'host': 'host',
+ 'ip': CONFIG_MAP['my_ip'],
+ 'wwpns': [CONFIG_MAP['host_wwn']],
+ 'multipath': False,
+}
+
+REMOTE_DEFAULT_CONNECTOR = {
+ 'host': 'host',
+ 'ip': REMOTE_CONFIG_MAP['my_ip'],
+ 'wwpns': [REMOTE_CONFIG_MAP['host_wwn']],
+ 'multipath': False,
+}
+
+CTXT = cinder_context.get_admin_context()
+
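+# Test volumes: indexes 4-6 carry a remote-copy (mirror) provider_location,
+# index 3 has none, and indexes 2 and 6 are in-use.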
+TEST_VOLUME = []
+for i in range(7):
+ volume = {}
+ volume['id'] = '00000000-0000-0000-0000-{0:012d}'.format(i)
+ volume['name'] = 'test-volume{0:d}'.format(i)
+ volume['volume_type_id'] = '00000000-0000-0000-0000-{0:012d}'.format(i)
+ if i == 3:
+ volume['provider_location'] = None
+ elif i == 4:
+ volume['provider_location'] = json.dumps(
+ {'pldev': 4, 'sldev': 4,
+ 'remote-copy': hbsd_utils.MIRROR_ATTR})
+ elif i == 5:
+ volume['provider_location'] = json.dumps(
+ {'pldev': 5, 'sldev': 5,
+ 'remote-copy': hbsd_utils.MIRROR_ATTR})
+ elif i == 6:
+ volume['provider_location'] = json.dumps(
+ {'pldev': 6, 'sldev': 6,
+ 'remote-copy': hbsd_utils.MIRROR_ATTR})
+ else:
+ volume['provider_location'] = '{0:d}'.format(i)
+ volume['size'] = 128
+ if i == 2 or i == 6:
+ volume['status'] = 'in-use'
+ else:
+ volume['status'] = 'available'
+ volume = fake_volume.fake_volume_obj(CTXT, **volume)
+ volume.volume_type = fake_volume.fake_volume_type_obj(CTXT)
+ TEST_VOLUME.append(volume)
+
+
+def _volume_get(context, volume_id):
+ """Return predefined volume info."""
+ return TEST_VOLUME[int(volume_id.replace("-", ""))]
+
+
+TEST_SNAPSHOT = []
+snapshot = {}
+snapshot['id'] = '10000000-0000-0000-0000-{0:012d}'.format(0)
+snapshot['name'] = 'TEST_SNAPSHOT{0:d}'.format(0)
+snapshot['provider_location'] = '{0:d}'.format(1)
+snapshot['status'] = 'available'
+snapshot['volume_id'] = '00000000-0000-0000-0000-{0:012d}'.format(0)
+snapshot['volume'] = _volume_get(None, snapshot['volume_id'])
+snapshot['volume_name'] = 'test-volume{0:d}'.format(0)
+snapshot['volume_size'] = 128
+snapshot = obj_snap.Snapshot._from_db_object(
+ CTXT, obj_snap.Snapshot(),
+ fake_snapshot.fake_db_snapshot(**snapshot))
+TEST_SNAPSHOT.append(snapshot)
+
+TEST_GROUP = []
+for i in range(2):
+ group = {}
+ group['id'] = '20000000-0000-0000-0000-{0:012d}'.format(i)
+ group['status'] = 'available'
+ group = fake_group.fake_group_obj(CTXT, **group)
+ TEST_GROUP.append(group)
+
+TEST_GROUP_SNAP = []
+group_snapshot = {}
+group_snapshot['id'] = '30000000-0000-0000-0000-{0:012d}'.format(0)
+group_snapshot['status'] = 'available'
+group_snapshot = obj_group_snap.GroupSnapshot._from_db_object(
+ CTXT, obj_group_snap.GroupSnapshot(),
+ fake_group_snapshot.fake_db_group_snapshot(**group_snapshot))
+TEST_GROUP_SNAP.append(group_snapshot)
+
+# Dummy response for REST API
+POST_SESSIONS_RESULT = {
+ "token": "b74777a3-f9f0-4ea8-bd8f-09847fac48d3",
+ "sessionId": 0,
+}
+
+REMOTE_POST_SESSIONS_RESULT = {
+ "token": "b74777a3-f9f0-4ea8-bd8f-09847fac48d4",
+ "sessionId": 0,
+}
+
+GET_PORTS_RESULT = {
+ "data": [
+ {
+ "portId": CONFIG_MAP['port_id'],
+ "portType": "FIBRE",
+ "portAttributes": [
+ "TAR",
+ "MCU",
+ "RCU",
+ "ELUN"
+ ],
+ "fabricMode": True,
+ "portConnection": "PtoP",
+ "lunSecuritySetting": True,
+ "wwn": CONFIG_MAP['target_wwn'],
+ },
+ ],
+}
+
+REMOTE_GET_PORTS_RESULT = {
+ "data": [
+ {
+ "portId": REMOTE_CONFIG_MAP['port_id'],
+ "portType": "FIBRE",
+ "portAttributes": [
+ "TAR",
+ "MCU",
+ "RCU",
+ "ELUN"
+ ],
+ "fabricMode": True,
+ "portConnection": "PtoP",
+ "lunSecuritySetting": True,
+ "wwn": REMOTE_CONFIG_MAP['target_wwn'],
+ },
+ ],
+}
+
+GET_HOST_WWNS_RESULT = {
+ "data": [
+ {
+ "hostGroupNumber": 0,
+ "hostWwn": CONFIG_MAP['host_wwn'],
+ },
+ ],
+}
+
+REMOTE_GET_HOST_WWNS_RESULT = {
+ "data": [
+ {
+ "hostGroupNumber": 0,
+ "hostWwn": REMOTE_CONFIG_MAP['host_wwn'],
+ },
+ ],
+}
+
+COMPLETED_SUCCEEDED_RESULT = {
+ "status": "Completed",
+ "state": "Succeeded",
+ "affectedResources": ('a/b/c/1',),
+}
+
+REMOTE_COMPLETED_SUCCEEDED_RESULT = {
+ "status": "Completed",
+ "state": "Succeeded",
+ "affectedResources": ('a/b/c/2',),
+}
+
+COMPLETED_FAILED_RESULT_LU_DEFINED = {
+ "status": "Completed",
+ "state": "Failed",
+ "error": {
+ "errorCode": {
+ "SSB1": "B958",
+ "SSB2": "015A",
+ },
+ },
+}
+
+GET_LDEV_RESULT = {
+ "emulationType": "OPEN-V-CVS",
+ "blockCapacity": 2097152,
+ "attributes": ["CVS", "HDP"],
+ "status": "NML",
+ "poolId": 30,
+ "dataReductionStatus": "DISABLED",
+ "dataReductionMode": "disabled",
+}
+
+GET_LDEV_RESULT_MAPPED = {
+ "emulationType": "OPEN-V-CVS",
+ "blockCapacity": 2097152,
+ "attributes": ["CVS", "HDP"],
+ "status": "NML",
+ "ports": [
+ {
+ "portId": CONFIG_MAP['port_id'],
+ "hostGroupNumber": 0,
+ "hostGroupName": CONFIG_MAP['host_grp_name'],
+ "lun": 1
+ },
+ ],
+}
+
+REMOTE_GET_LDEV_RESULT_MAPPED = {
+ "emulationType": "OPEN-V-CVS",
+ "blockCapacity": 2097152,
+ "attributes": ["CVS", "HDP"],
+ "status": "NML",
+ "ports": [
+ {
+ "portId": REMOTE_CONFIG_MAP['port_id'],
+ "hostGroupNumber": 0,
+ "hostGroupName": REMOTE_CONFIG_MAP['host_grp_name'],
+ "lun": 1
+ },
+ ],
+}
+
+GET_LDEV_RESULT_PAIR = {
+ "emulationType": "OPEN-V-CVS",
+ "blockCapacity": 2097152,
+ "attributes": ["CVS", "HDP", "HTI"],
+ "status": "NML",
+}
+
+GET_LDEV_RESULT_REP = {
+ "emulationType": "OPEN-V-CVS",
+ "blockCapacity": 2097152,
+ "attributes": ["CVS", "HDP", "GAD"],
+ "status": "NML",
+ "numOfPorts": 1,
+}
+
+GET_POOL_RESULT = {
+ "availableVolumeCapacity": 480144,
+ "totalPoolCapacity": 507780,
+ "totalLocatedCapacity": 71453172,
+ "virtualVolumeCapacityRate": -1,
+}
+
+GET_POOLS_RESULT = {
+ "data": [
+ {
+ "poolId": 30,
+ "poolName": CONFIG_MAP['pool_name'],
+ "availableVolumeCapacity": 480144,
+ "totalPoolCapacity": 507780,
+ "totalLocatedCapacity": 71453172,
+ "virtualVolumeCapacityRate": -1,
+ },
+ ],
+}
+
+GET_SNAPSHOTS_RESULT = {
+ "data": [
+ {
+ "primaryOrSecondary": "S-VOL",
+ "status": "PSUS",
+ "pvolLdevId": 0,
+ "muNumber": 1,
+ "svolLdevId": 1,
+ },
+ ],
+}
+
+GET_SNAPSHOTS_RESULT_PAIR = {
+ "data": [
+ {
+ "primaryOrSecondary": "S-VOL",
+ "status": "PAIR",
+ "pvolLdevId": 0,
+ "muNumber": 1,
+ "svolLdevId": 1,
+ },
+ ],
+}
+
+GET_SNAPSHOTS_RESULT_BUSY = {
+ "data": [
+ {
+ "primaryOrSecondary": "P-VOL",
+ "status": "PSUP",
+ "pvolLdevId": 0,
+ "muNumber": 1,
+ "svolLdevId": 1,
+ },
+ ],
+}
+
+GET_LUNS_RESULT = {
+ "data": [
+ {
+ "ldevId": 0,
+ "lun": 1,
+ },
+ ],
+}
+
+GET_HOST_GROUP_RESULT = {
+ "hostGroupName": CONFIG_MAP['host_grp_name'],
+}
+
+GET_HOST_GROUPS_RESULT = {
+ "data": [
+ {
+ "hostGroupNumber": 0,
+ "portId": CONFIG_MAP['port_id'],
+ "hostGroupName": "HBSD-test",
+ },
+ ],
+}
+
+GET_HOST_GROUPS_RESULT_PAIR = {
+ "data": [
+ {
+ "hostGroupNumber": 1,
+ "portId": CONFIG_MAP['port_id'],
+ "hostGroupName": "HBSD-pair00",
+ },
+ ],
+}
+
+REMOTE_GET_HOST_GROUPS_RESULT_PAIR = {
+ "data": [
+ {
+ "hostGroupNumber": 1,
+ "portId": REMOTE_CONFIG_MAP['port_id'],
+ "hostGroupName": "HBSD-pair00",
+ },
+ ],
+}
+
+GET_LDEVS_RESULT = {
+ "data": [
+ {
+ "ldevId": 0,
+ "label": "15960cc738c94c5bb4f1365be5eeed44",
+ },
+ {
+ "ldevId": 1,
+ "label": "15960cc738c94c5bb4f1365be5eeed45",
+ },
+ ],
+}
+
+GET_REMOTE_MIRROR_COPYPAIR_RESULT = {
+ 'pvolLdevId': 4,
+ 'svolLdevId': 4,
+ 'pvolStatus': 'PAIR',
+ 'svolStatus': 'PAIR',
+ 'replicationType': hbsd_utils.MIRROR_ATTR,
+}
+
+GET_REMOTE_MIRROR_COPYPAIR_RESULT_SPLIT = {
+ 'pvolLdevId': 4,
+ 'svolLdevId': 4,
+ 'pvolStatus': 'PSUS',
+ 'svolStatus': 'SSUS',
+ 'replicationType': hbsd_utils.MIRROR_ATTR,
+}
+
+GET_REMOTE_MIRROR_COPYGROUP_RESULT = {
+ 'copyGroupName': 'HBSD-127.0.0.100U00',
+ 'copyPairs': [GET_REMOTE_MIRROR_COPYPAIR_RESULT],
+}
+
+GET_REMOTE_MIRROR_COPYGROUP_RESULT_ERROR = {
+ "errorSource": "<URL>",
+ "message": "<message>",
+ "solution": "<solution>",
+ "messageId": "aaa",
+ "errorCode": {
+ "SSB1": "",
+ "SSB2": "",
+ }
+}
+
+NOTFOUND_RESULT = {
+ "data": [],
+}
+
+ERROR_RESULT = {
+ "errorSource": "<URL>",
+ "message": "<message>",
+ "solution": "<solution>",
+ "messageId": "<messageId>",
+ "errorCode": {
+ "SSB1": "",
+ "SSB2": "",
+ }
+}
+
+
+def _brick_get_connector_properties(multipath=False, enforce_multipath=False):
+ """Return a predefined connector object."""
+ return DEFAULT_CONNECTOR
+
+
+class FakeLookupService():
+ """Dummy FC zoning mapping lookup service class."""
+
+ def get_device_mapping_from_network(self, initiator_wwns, target_wwns):
+ """Return predefined FC zoning mapping."""
+ return DEVICE_MAP
+
+
+class FakeResponse():
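+    """Minimal stand-in for a requests.Response object."""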
+
+ def __init__(self, status_code, data=None, headers=None):
+ self.status_code = status_code
+ self.data = data
+ self.text = data
+ self.content = data
+ self.headers = {'Content-Type': 'json'} if headers is None else headers
+
+ def json(self):
+ return self.data
+
+
+class HBSDMIRRORFCDriverTest(test.TestCase):
+ """Unit test class for HBSD MIRROR interface fibre channel module."""
+
+ test_existing_ref = {'source-id': '1'}
+ test_existing_ref_name = {
+ 'source-name': '15960cc7-38c9-4c5b-b4f1-365be5eeed45'}
+
+ def setUp(self):
+ """Set up the test environment."""
+ def _set_required(opts, required):
+ for opt in opts:
+ opt.required = required
+
+ # Initialize Cinder and avoid checking driver options.
+ rest_required_opts = [
+ opt for opt in hbsd_rest.REST_VOLUME_OPTS if opt.required]
+ common_required_opts = [
+ opt for opt in hbsd_common.COMMON_VOLUME_OPTS if opt.required]
+ _set_required(rest_required_opts, False)
+ _set_required(common_required_opts, False)
+ super(HBSDMIRRORFCDriverTest, self).setUp()
+ _set_required(rest_required_opts, True)
+ _set_required(common_required_opts, True)
+
+ self.configuration = conf.Configuration(None)
+ self.ctxt = cinder_context.get_admin_context()
+ self._setup_config()
+ self._setup_driver()
+
+ def _setup_config(self):
+ """Set configuration parameter values."""
+ self.configuration.config_group = "REST"
+
+ self.configuration.volume_backend_name = "RESTFC"
+ self.configuration.volume_driver = (
+ "cinder.volume.drivers.hitachi.hbsd_fc.HBSDFCDriver")
+ self.configuration.reserved_percentage = "0"
+ self.configuration.use_multipath_for_image_xfer = False
+ self.configuration.enforce_multipath_for_image_xfer = False
+ self.configuration.max_over_subscription_ratio = 500.0
+ self.configuration.driver_ssl_cert_verify = False
+
+ self.configuration.hitachi_storage_id = CONFIG_MAP['serial']
+ self.configuration.hitachi_pools = ["30"]
+ self.configuration.hitachi_snap_pool = None
+ self.configuration.hitachi_ldev_range = "0-1"
+ self.configuration.hitachi_target_ports = [CONFIG_MAP['port_id']]
+ self.configuration.hitachi_compute_target_ports\
+ = [CONFIG_MAP['port_id']]
+ self.configuration.hitachi_group_create = True
+ self.configuration.hitachi_group_delete = True
+ self.configuration.hitachi_copy_speed = 3
+ self.configuration.hitachi_copy_check_interval = 3
+ self.configuration.hitachi_async_copy_check_interval = 10
+
+ self.configuration.san_login = CONFIG_MAP['user_id']
+ self.configuration.san_password = CONFIG_MAP['user_pass']
+ self.configuration.san_ip = CONFIG_MAP[
+ 'rest_server_ip_addr']
+ self.configuration.san_api_port = CONFIG_MAP[
+ 'rest_server_ip_port']
+ self.configuration.hitachi_rest_disable_io_wait = True
+ self.configuration.hitachi_rest_tcp_keepalive = True
+ self.configuration.hitachi_discard_zero_page = True
+ self.configuration.hitachi_lun_timeout = hbsd_rest._LUN_TIMEOUT
+ self.configuration.hitachi_lun_retry_interval = (
+ hbsd_rest._LUN_RETRY_INTERVAL)
+ self.configuration.hitachi_restore_timeout = hbsd_rest._RESTORE_TIMEOUT
+ self.configuration.hitachi_state_transition_timeout = (
+ hbsd_rest._STATE_TRANSITION_TIMEOUT)
+ self.configuration.hitachi_lock_timeout = hbsd_rest_api._LOCK_TIMEOUT
+ self.configuration.hitachi_rest_timeout = hbsd_rest_api._REST_TIMEOUT
+ self.configuration.hitachi_extend_timeout = (
+ hbsd_rest_api._EXTEND_TIMEOUT)
+ self.configuration.hitachi_exec_retry_interval = (
+ hbsd_rest_api._EXEC_RETRY_INTERVAL)
+ self.configuration.hitachi_rest_connect_timeout = (
+ hbsd_rest_api._DEFAULT_CONNECT_TIMEOUT)
+ self.configuration.hitachi_rest_job_api_response_timeout = (
+ hbsd_rest_api._JOB_API_RESPONSE_TIMEOUT)
+ self.configuration.hitachi_rest_get_api_response_timeout = (
+ hbsd_rest_api._GET_API_RESPONSE_TIMEOUT)
+ self.configuration.hitachi_rest_server_busy_timeout = (
+ hbsd_rest_api._REST_SERVER_BUSY_TIMEOUT)
+ self.configuration.hitachi_rest_keep_session_loop_interval = (
+ hbsd_rest_api._KEEP_SESSION_LOOP_INTERVAL)
+ self.configuration.hitachi_rest_another_ldev_mapped_retry_timeout = (
+ hbsd_rest_api._ANOTHER_LDEV_MAPPED_RETRY_TIMEOUT)
+ self.configuration.hitachi_rest_tcp_keepidle = (
+ hbsd_rest_api._TCP_KEEPIDLE)
+ self.configuration.hitachi_rest_tcp_keepintvl = (
+ hbsd_rest_api._TCP_KEEPINTVL)
+ self.configuration.hitachi_rest_tcp_keepcnt = (
+ hbsd_rest_api._TCP_KEEPCNT)
+ self.configuration.hitachi_host_mode_options = []
+
+ self.configuration.hitachi_zoning_request = False
+
+ self.configuration.use_chap_auth = True
+ self.configuration.chap_username = CONFIG_MAP['auth_user']
+ self.configuration.chap_password = CONFIG_MAP['auth_password']
+
+ self.configuration.san_thin_provision = True
+ self.configuration.san_private_key = ''
+ self.configuration.san_clustername = ''
+ self.configuration.san_ssh_port = '22'
+ self.configuration.san_is_local = False
+ self.configuration.ssh_conn_timeout = '30'
+ self.configuration.ssh_min_pool_conn = '1'
+ self.configuration.ssh_max_pool_conn = '5'
+
+ self.configuration.hitachi_replication_status_check_short_interval = 5
+ self.configuration.hitachi_replication_status_check_long_interval\
+ = 10 * 60
+ self.configuration.hitachi_replication_status_check_timeout\
+ = 24 * 60 * 60
+
+ self.configuration.hitachi_replication_number = 0
+ self.configuration.hitachi_pair_target_number = 0
+ self.configuration.hitachi_rest_pair_target_ports\
+ = [CONFIG_MAP['port_id']]
+ self.configuration.hitachi_quorum_disk_id = 13
+ self.configuration.hitachi_mirror_copy_speed = 3
+ self.configuration.hitachi_mirror_storage_id\
+ = REMOTE_CONFIG_MAP['serial']
+ self.configuration.hitachi_mirror_pool = '40'
+ self.configuration.hitachi_mirror_snap_pool = None
+ self.configuration.hitachi_mirror_ldev_range = '2-3'
+ self.configuration.hitachi_mirror_target_ports\
+ = [REMOTE_CONFIG_MAP['port_id']]
+ self.configuration.hitachi_mirror_compute_target_ports\
+ = [REMOTE_CONFIG_MAP['port_id']]
+ self.configuration.hitachi_mirror_pair_target_number = 0
+ self.configuration.hitachi_mirror_rest_pair_target_ports\
+ = [REMOTE_CONFIG_MAP['port_id']]
+ self.configuration.hitachi_mirror_rest_user\
+ = REMOTE_CONFIG_MAP['user_id']
+ self.configuration.hitachi_mirror_rest_password\
+ = REMOTE_CONFIG_MAP['user_pass']
+ self.configuration.hitachi_mirror_rest_api_ip\
+ = REMOTE_CONFIG_MAP['rest_server_ip_addr']
+ self.configuration.hitachi_mirror_rest_api_port\
+ = REMOTE_CONFIG_MAP['rest_server_ip_port']
+ self.configuration.hitachi_set_mirror_reserve_attribute = True
+ self.configuration.hitachi_path_group_id = 0
+
+ self.configuration.hitachi_mirror_use_chap_auth = True
+ self.configuration.hitachi_mirror_chap_user = CONFIG_MAP['auth_user']
+ self.configuration.hitachi_mirror_chap_password\
+ = CONFIG_MAP['auth_password']
+
+ self.configuration.hitachi_mirror_ssl_cert_verify = False
+ self.configuration.hitachi_mirror_ssl_cert_path = '/root/path'
+
+ self.configuration.safe_get = self._fake_safe_get
+
+ CONF = cfg.CONF
+ CONF.my_ip = CONFIG_MAP['my_ip']
+
+ def _fake_safe_get(self, value):
+ """Retrieve a configuration value avoiding throwing an exception."""
+ try:
+ val = getattr(self.configuration, value)
+ except AttributeError:
+ val = None
+ return val
+
+ @mock.patch.object(requests.Session, "request")
+ @mock.patch.object(
+ volume_utils, 'brick_get_connector_properties',
+ side_effect=_brick_get_connector_properties)
+ def _setup_driver(
+ self, brick_get_connector_properties=None, request=None):
+ """Set up the driver environment."""
+ self.driver = hbsd_fc.HBSDFCDriver(
+ configuration=self.configuration)
+
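+        # Fake REST dispatcher: URLs containing the primary storage ID get
+        # the primary array's canned responses, all other URLs get the
+        # remote (secondary) array's; anything unmatched returns HTTP 500.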
+ def _request_side_effect(
+ method, url, params, json, headers, auth, timeout, verify):
+ if self.configuration.hitachi_storage_id in url:
+ if method == 'POST':
+ return FakeResponse(200, POST_SESSIONS_RESULT)
+ elif '/ports' in url:
+ return FakeResponse(200, GET_PORTS_RESULT)
+ elif '/host-wwns' in url:
+ return FakeResponse(200, GET_HOST_WWNS_RESULT)
+ elif '/host-groups' in url:
+ return FakeResponse(200, GET_HOST_GROUPS_RESULT_PAIR)
+ else:
+ if method == 'POST':
+ return FakeResponse(200, REMOTE_POST_SESSIONS_RESULT)
+ elif '/ports' in url:
+ return FakeResponse(200, REMOTE_GET_PORTS_RESULT)
+ elif '/host-wwns' in url:
+ return FakeResponse(200, REMOTE_GET_HOST_WWNS_RESULT)
+ elif '/host-groups' in url:
+ return FakeResponse(
+ 200, REMOTE_GET_HOST_GROUPS_RESULT_PAIR)
+ return FakeResponse(
+ 500, ERROR_RESULT, headers={'Content-Type': 'json'})
+ request.side_effect = _request_side_effect
+ self.driver.do_setup(None)
+ self.driver.check_for_setup_error()
+ self.driver.local_path(None)
+ self.driver.create_export(None, None, None)
+ self.driver.ensure_export(None, None)
+ self.driver.remove_export(None, None)
+ self.driver.create_export_snapshot(None, None, None)
+ self.driver.remove_export_snapshot(None, None)
+        # stop the LoopingCall started during do_setup
+ self.driver.common.rep_primary.client.keep_session_loop.stop()
+ self.driver.common.rep_secondary.client.keep_session_loop.stop()
+
+ def tearDown(self):
+ self.client = None
+ super(HBSDMIRRORFCDriverTest, self).tearDown()
+
+ # API test cases
+ @mock.patch.object(requests.Session, "request")
+ @mock.patch.object(
+ volume_utils, 'brick_get_connector_properties',
+ side_effect=_brick_get_connector_properties)
+ def test_do_setup(self, brick_get_connector_properties, request):
+ drv = hbsd_fc.HBSDFCDriver(
+ configuration=self.configuration)
+ self._setup_config()
+ self.configuration.hitachi_pair_target_number = 10
+ self.configuration.hitachi_mirror_pair_target_number = 20
+
+ def _request_side_effect(
+ method, url, params, json, headers, auth, timeout, verify):
+ if self.configuration.hitachi_storage_id in url:
+ if method == 'POST':
+ return FakeResponse(200, POST_SESSIONS_RESULT)
+ elif '/ports' in url:
+ return FakeResponse(200, GET_PORTS_RESULT)
+ elif '/host-wwns' in url:
+ return FakeResponse(200, GET_HOST_WWNS_RESULT)
+ elif '/host-groups' in url:
+ return FakeResponse(200, GET_HOST_GROUPS_RESULT_PAIR)
+ else:
+ if method == 'POST':
+ return FakeResponse(200, REMOTE_POST_SESSIONS_RESULT)
+ elif '/ports' in url:
+ return FakeResponse(200, REMOTE_GET_PORTS_RESULT)
+ elif '/host-wwns' in url:
+ return FakeResponse(200, REMOTE_GET_HOST_WWNS_RESULT)
+ elif '/host-groups' in url:
+ return FakeResponse(
+ 200, REMOTE_GET_HOST_GROUPS_RESULT_PAIR)
+ return FakeResponse(
+ 500, ERROR_RESULT, headers={'Content-Type': 'json'})
+ request.side_effect = _request_side_effect
+ drv.do_setup(None)
+ self.assertEqual(
+ {CONFIG_MAP['port_id']: CONFIG_MAP['target_wwn']},
+ drv.common.rep_primary.storage_info['wwns'])
+ self.assertEqual(
+ {REMOTE_CONFIG_MAP['port_id']: REMOTE_CONFIG_MAP['target_wwn']},
+ drv.common.rep_secondary.storage_info['wwns'])
+ self.assertEqual(2, brick_get_connector_properties.call_count)
+ self.assertEqual(10, request.call_count)
+ self.assertEqual(
+ "HBSD-pair%2d" % self.configuration.hitachi_pair_target_number,
+ drv.common.rep_primary._PAIR_TARGET_NAME)
+ self.assertEqual(
+ ("HBSD-pair%2d" %
+ self.configuration.hitachi_mirror_pair_target_number),
+ drv.common.rep_secondary._PAIR_TARGET_NAME)
+        # stop the LoopingCall started during do_setup
+ self.driver.common.rep_primary.client.keep_session_loop.stop()
+ self.driver.common.rep_primary.client.keep_session_loop.wait()
+ self.driver.common.rep_secondary.client.keep_session_loop.stop()
+ self.driver.common.rep_secondary.client.keep_session_loop.wait()
+ self._setup_config()
+
+ @mock.patch.object(requests.Session, "request")
+ @mock.patch.object(volume_types, 'get_volume_type_extra_specs')
+ def test_create_volume(self, get_volume_type_extra_specs, request):
+ extra_specs = {"test1": "aaa"}
+ get_volume_type_extra_specs.return_value = extra_specs
+ request.return_value = FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)
+ self.driver.common.rep_primary._stats = {}
+ self.driver.common.rep_primary._stats['pools'] = [
+ {'location_info': {'pool_id': 30}}]
+ self.driver.common.rep_secondary._stats = {}
+ self.driver.common.rep_secondary._stats['pools'] = [
+ {'location_info': {'pool_id': 40}}]
+ ret = self.driver.create_volume(fake_volume.fake_volume_obj(self.ctxt))
+ actual = {'provider_location': '1'}
+ self.assertEqual(actual, ret)
+ self.assertEqual(2, request.call_count)
+
+ @mock.patch.object(requests.Session, "request")
+ @mock.patch.object(volume_types, 'get_volume_type')
+ @mock.patch.object(volume_types, 'get_volume_type_extra_specs')
+ def test_create_volume_replication(
+ self, get_volume_type_extra_specs, get_volume_type, request):
+ extra_specs = {"test1": "aaa",
+ "hbsd:topology": "active_active_mirror_volume"}
+ get_volume_type_extra_specs.return_value = extra_specs
+ get_volume_type.return_value = {}
+
+ def _request_side_effect(
+ method, url, params, json, headers, auth, timeout, verify):
+ if self.configuration.hitachi_storage_id in url:
+ if method in ('POST', 'PUT'):
+ return FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)
+ elif method == 'GET':
+ if '/remote-mirror-copygroups' in url:
+ return FakeResponse(200, NOTFOUND_RESULT)
+ elif '/remote-mirror-copypairs/' in url:
+ return FakeResponse(
+ 200, GET_REMOTE_MIRROR_COPYPAIR_RESULT)
+ else:
+ if method in ('POST', 'PUT'):
+ return FakeResponse(202, REMOTE_COMPLETED_SUCCEEDED_RESULT)
+ elif method == 'GET':
+ if '/remote-mirror-copygroups' in url:
+ return FakeResponse(200, NOTFOUND_RESULT)
+ return FakeResponse(
+ 500, ERROR_RESULT, headers={'Content-Type': 'json'})
+ request.side_effect = _request_side_effect
+ self.driver.common.rep_primary._stats = {}
+ self.driver.common.rep_primary._stats['pools'] = [
+ {'location_info': {'pool_id': 30}}]
+ self.driver.common.rep_secondary._stats = {}
+ self.driver.common.rep_secondary._stats['pools'] = [
+ {'location_info': {'pool_id': 40}}]
+ ret = self.driver.create_volume(TEST_VOLUME[3])
+ actual = {
+ 'provider_location': json.dumps(
+ {'pldev': 1, 'sldev': 2,
+ 'remote-copy': hbsd_utils.MIRROR_ATTR})}
+ self.assertEqual(actual, ret)
+ self.assertEqual(14, request.call_count)
+
+ @mock.patch.object(requests.Session, "request")
+ def test_delete_volume(self, request):
+ request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
+ FakeResponse(200, GET_LDEV_RESULT),
+ FakeResponse(200, GET_LDEV_RESULT),
+ FakeResponse(200, GET_LDEV_RESULT),
+ FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
+ self.driver.delete_volume(TEST_VOLUME[0])
+ self.assertEqual(5, request.call_count)
+
+ @mock.patch.object(requests.Session, "request")
+ def test_delete_volume_replication(self, request):
+ self.copygroup_count = 0
+ self.ldev_count = 0
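+        # Counters shape the fake responses below: the first two copy-group
+        # lookups succeed and later ones fail, and only the first LDEV
+        # lookup reports a replication pair.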
+
+ def _request_side_effect(
+ method, url, params, json, headers, auth, timeout, verify):
+ if self.configuration.hitachi_storage_id in url:
+ if method in ('POST', 'PUT', 'DELETE'):
+ return FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)
+ elif method == 'GET':
+ if '/remote-mirror-copygroups/' in url:
+ if self.copygroup_count < 2:
+ self.copygroup_count = self.copygroup_count + 1
+ return FakeResponse(
+ 200, GET_REMOTE_MIRROR_COPYGROUP_RESULT)
+ else:
+ return FakeResponse(
+ 500, GET_REMOTE_MIRROR_COPYGROUP_RESULT_ERROR,
+ headers={'Content-Type': 'json'})
+ elif '/remote-mirror-copypairs/' in url:
+ return FakeResponse(
+ 200, GET_REMOTE_MIRROR_COPYPAIR_RESULT_SPLIT)
+ elif '/ldevs/' in url:
+ if self.ldev_count == 0:
+ self.ldev_count = self.ldev_count + 1
+ return FakeResponse(200, GET_LDEV_RESULT_REP)
+ else:
+ return FakeResponse(200, GET_LDEV_RESULT)
+ else:
+ if method in ('POST', 'PUT', 'DELETE'):
+ return FakeResponse(202, REMOTE_COMPLETED_SUCCEEDED_RESULT)
+ elif method == 'GET':
+ if '/ldevs/' in url:
+ return FakeResponse(200, GET_LDEV_RESULT)
+ return FakeResponse(
+ 500, ERROR_RESULT, headers={'Content-Type': 'json'})
+ request.side_effect = _request_side_effect
+ self.driver.delete_volume(TEST_VOLUME[4])
+ self.assertEqual(17, request.call_count)
+
+ @mock.patch.object(requests.Session, "request")
+ def test_extend_volume(self, request):
+ request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
+ FakeResponse(200, GET_LDEV_RESULT),
+ FakeResponse(200, GET_LDEV_RESULT),
+ FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
+ self.driver.extend_volume(TEST_VOLUME[0], 256)
+ self.assertEqual(4, request.call_count)
+
+ @mock.patch.object(requests.Session, "request")
+ def test_extend_volume_replication(self, request):
+ self.ldev_count = 0
+ self.copypair_count = 0
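+        # Counters shape the fake responses below: the first copy-pair
+        # lookup reports a split pair and later ones report PAIR, while the
+        # first two LDEV lookups report a replication pair.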
+
+ def _request_side_effect(
+ method, url, params, json, headers, auth, timeout, verify):
+ if self.configuration.hitachi_storage_id in url:
+ if method in ('POST', 'PUT', 'DELETE'):
+ return FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)
+ elif method == 'GET':
+ if '/remote-mirror-copygroups/' in url:
+ return FakeResponse(
+ 200, GET_REMOTE_MIRROR_COPYGROUP_RESULT)
+ elif '/remote-mirror-copygroups' in url:
+ return FakeResponse(200, NOTFOUND_RESULT)
+ elif '/remote-mirror-copypairs/' in url:
+ if self.copypair_count == 0:
+ self.copypair_count = self.copypair_count + 1
+ return FakeResponse(
+ 200, GET_REMOTE_MIRROR_COPYPAIR_RESULT_SPLIT)
+ else:
+ return FakeResponse(
+ 200, GET_REMOTE_MIRROR_COPYPAIR_RESULT)
+ elif '/ldevs/' in url:
+ if self.ldev_count < 2:
+ self.ldev_count = self.ldev_count + 1
+ return FakeResponse(200, GET_LDEV_RESULT_REP)
+ else:
+ return FakeResponse(200, GET_LDEV_RESULT)
+ else:
+ if method in ('POST', 'PUT', 'DELETE'):
+ return FakeResponse(202, REMOTE_COMPLETED_SUCCEEDED_RESULT)
+ elif method == 'GET':
+ if '/ldevs/' in url:
+ return FakeResponse(200, GET_LDEV_RESULT)
+ return FakeResponse(
+ 500, ERROR_RESULT, headers={'Content-Type': 'json'})
+ request.side_effect = _request_side_effect
+ self.driver.extend_volume(TEST_VOLUME[4], 256)
+ self.assertEqual(23, request.call_count)
+
+ @mock.patch.object(driver.FibreChannelDriver, "get_goodness_function")
+ @mock.patch.object(driver.FibreChannelDriver, "get_filter_function")
+ @mock.patch.object(requests.Session, "request")
+ def test_get_volume_stats(
+ self, request, get_filter_function, get_goodness_function):
+ request.return_value = FakeResponse(200, GET_POOLS_RESULT)
+ get_filter_function.return_value = None
+ get_goodness_function.return_value = None
+ stats = self.driver.get_volume_stats(True)
+ self.assertEqual('Hitachi', stats['vendor_name'])
+ self.assertTrue(stats["pools"][0]['multiattach'])
+ self.assertEqual(1, request.call_count)
+ self.assertEqual(1, get_filter_function.call_count)
+ self.assertEqual(1, get_goodness_function.call_count)
+
+ @mock.patch.object(requests.Session, "request")
+ @mock.patch.object(volume_types, 'get_volume_type_extra_specs')
+ @mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get)
+ def test_create_snapshot(
+ self, volume_get, get_volume_type_extra_specs, request):
+ extra_specs = {"test1": "aaa",
+ "hbsd:topology": "active_active_mirror_volume"}
+ get_volume_type_extra_specs.return_value = extra_specs
+ request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
+ FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
+ FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
+ FakeResponse(200, GET_SNAPSHOTS_RESULT)]
+ self.driver.common.rep_primary._stats = {}
+ self.driver.common.rep_primary._stats['pools'] = [
+ {'location_info': {'pool_id': 30}}]
+ self.driver.common.rep_secondary._stats = {}
+ self.driver.common.rep_secondary._stats['pools'] = [
+ {'location_info': {'pool_id': 40}}]
+ ret = self.driver.create_snapshot(TEST_SNAPSHOT[0])
+ actual = {'provider_location': '1'}
+ self.assertEqual(actual, ret)
+ self.assertEqual(4, request.call_count)
+
+ @mock.patch.object(requests.Session, "request")
+ def test_delete_snapshot(self, request):
+ request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR),
+ FakeResponse(200, NOTFOUND_RESULT),
+ FakeResponse(200, GET_SNAPSHOTS_RESULT),
+ FakeResponse(200, GET_SNAPSHOTS_RESULT),
+ FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
+ FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
+ FakeResponse(200, GET_LDEV_RESULT),
+ FakeResponse(200, GET_LDEV_RESULT),
+ FakeResponse(200, GET_LDEV_RESULT),
+ FakeResponse(200, GET_LDEV_RESULT),
+ FakeResponse(200, GET_LDEV_RESULT),
+ FakeResponse(200, GET_LDEV_RESULT),
+ FakeResponse(200, GET_LDEV_RESULT),
+ FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
+ self.driver.delete_snapshot(TEST_SNAPSHOT[0])
+ self.assertEqual(14, request.call_count)
+
+ @mock.patch.object(requests.Session, "request")
+ @mock.patch.object(volume_types, 'get_volume_type')
+ @mock.patch.object(volume_types, 'get_volume_type_extra_specs')
+ def test_create_cloned_volume(
+ self, get_volume_type_extra_specs, get_volume_type, request):
+ extra_specs = {"test1": "aaa"}
+ get_volume_type_extra_specs.return_value = extra_specs
+ get_volume_type.return_value = {}
+ request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
+ FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
+ FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
+ FakeResponse(200, GET_SNAPSHOTS_RESULT),
+ FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
+ self.driver.common.rep_primary._stats = {}
+ self.driver.common.rep_primary._stats['pools'] = [
+ {'location_info': {'pool_id': 30}}]
+ self.driver.common.rep_secondary._stats = {}
+ self.driver.common.rep_secondary._stats['pools'] = [
+ {'location_info': {'pool_id': 40}}]
+ ret = self.driver.create_cloned_volume(TEST_VOLUME[0], TEST_VOLUME[1])
+ actual = {'provider_location': '1'}
+ self.assertEqual(actual, ret)
+ self.assertEqual(5, request.call_count)
+
+ @mock.patch.object(requests.Session, "request")
+ @mock.patch.object(volume_types, 'get_volume_type')
+ @mock.patch.object(volume_types, 'get_volume_type_extra_specs')
+ def test_create_cloned_volume_replication(
+ self, get_volume_type_extra_specs, get_volume_type, request):
+ extra_specs = {"test1": "aaa",
+ "hbsd:topology": "active_active_mirror_volume"}
+ get_volume_type_extra_specs.return_value = extra_specs
+ get_volume_type.return_value = {}
+ self.snapshot_count = 0
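+        # Only the first snapshot query returns an existing pair; later
+        # queries return an empty result.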
+
+ def _request_side_effect(
+ method, url, params, json, headers, auth, timeout, verify):
+ if self.configuration.hitachi_storage_id in url:
+ if method in ('POST', 'PUT'):
+ return FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)
+ elif method == 'GET':
+ if '/remote-mirror-copygroups' in url:
+ return FakeResponse(200, NOTFOUND_RESULT)
+ elif '/remote-mirror-copypairs/' in url:
+ return FakeResponse(
+ 200, GET_REMOTE_MIRROR_COPYPAIR_RESULT)
+ elif '/ldevs/' in url:
+ return FakeResponse(200, GET_LDEV_RESULT_REP)
+ elif '/snapshots' in url:
+ if self.snapshot_count < 1:
+ self.snapshot_count = self.snapshot_count + 1
+ return FakeResponse(200, GET_SNAPSHOTS_RESULT)
+ else:
+ return FakeResponse(200, NOTFOUND_RESULT)
+ else:
+ if method in ('POST', 'PUT'):
+ return FakeResponse(202, REMOTE_COMPLETED_SUCCEEDED_RESULT)
+ elif method == 'GET':
+ if '/remote-mirror-copygroups' in url:
+ return FakeResponse(200, NOTFOUND_RESULT)
+ return FakeResponse(
+ 500, ERROR_RESULT, headers={'Content-Type': 'json'})
+ request.side_effect = _request_side_effect
+ self.driver.common.rep_primary._stats = {}
+ self.driver.common.rep_primary._stats['pools'] = [
+ {'location_info': {'pool_id': 30}}]
+ self.driver.common.rep_secondary._stats = {}
+ self.driver.common.rep_secondary._stats['pools'] = [
+ {'location_info': {'pool_id': 40}}]
+ ret = self.driver.create_cloned_volume(TEST_VOLUME[4], TEST_VOLUME[5])
+ actual = {
+ 'provider_location': json.dumps(
+ {'pldev': 1, 'sldev': 2,
+ 'remote-copy': hbsd_utils.MIRROR_ATTR})}
+ self.assertEqual(actual, ret)
+ self.assertEqual(23, request.call_count)
+
+ @mock.patch.object(requests.Session, "request")
+ @mock.patch.object(volume_types, 'get_volume_type')
+ @mock.patch.object(volume_types, 'get_volume_type_extra_specs')
+ def test_create_volume_from_snapshot(
+ self, get_volume_type_extra_specs, get_volume_type, request):
+ extra_specs = {"test1": "aaa"}
+ get_volume_type_extra_specs.return_value = extra_specs
+ get_volume_type.return_value = {}
+ request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
+ FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
+ FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
+ FakeResponse(200, GET_SNAPSHOTS_RESULT),
+ FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
+ self.driver.common.rep_primary._stats = {}
+ self.driver.common.rep_primary._stats['pools'] = [
+ {'location_info': {'pool_id': 30}}]
+ self.driver.common.rep_secondary._stats = {}
+ self.driver.common.rep_secondary._stats['pools'] = [
+ {'location_info': {'pool_id': 40}}]
+ ret = self.driver.create_volume_from_snapshot(
+ TEST_VOLUME[0], TEST_SNAPSHOT[0])
+ actual = {'provider_location': '1'}
+ self.assertEqual(actual, ret)
+ self.assertEqual(5, request.call_count)
+
+ @mock.patch.object(fczm_utils, "add_fc_zone")
+ @mock.patch.object(requests.Session, "request")
+ @mock.patch.object(volume_types, 'get_volume_type_extra_specs')
+ def test_initialize_connection(
+ self, get_volume_type_extra_specs, request, add_fc_zone):
+ self.driver.common.conf.hitachi_zoning_request = True
+ self.driver.common.rep_primary.lookup_service = FakeLookupService()
+ self.driver.common.rep_secondary.lookup_service = FakeLookupService()
+ extra_specs = {"test1": "aaa"}
+        get_volume_type_extra_specs.return_value = extra_specs
+
+        def _request_side_effect(
+ method, url, params, json, headers, auth, timeout, verify):
+ if self.configuration.hitachi_storage_id in url:
+ if method in ('POST', 'PUT'):
+ return FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)
+ elif method == 'GET':
+ return FakeResponse(200, GET_HOST_WWNS_RESULT)
+ else:
+ if method in ('POST', 'PUT'):
+ return FakeResponse(202, REMOTE_COMPLETED_SUCCEEDED_RESULT)
+ elif method == 'GET':
+ return FakeResponse(200, REMOTE_GET_HOST_WWNS_RESULT)
+ return FakeResponse(
+ 500, ERROR_RESULT, headers={'Content-Type': 'json'})
+ request.side_effect = _request_side_effect
+ ret = self.driver.initialize_connection(
+ TEST_VOLUME[4], DEFAULT_CONNECTOR)
+ self.assertEqual('fibre_channel', ret['driver_volume_type'])
+ self.assertEqual(
+ [CONFIG_MAP['target_wwn'], REMOTE_CONFIG_MAP['target_wwn']],
+ ret['data']['target_wwn'])
+ self.assertEqual(1, ret['data']['target_lun'])
+ self.assertEqual(4, request.call_count)
+ self.assertEqual(1, add_fc_zone.call_count)
+
+ @mock.patch.object(fczm_utils, "remove_fc_zone")
+ @mock.patch.object(requests.Session, "request")
+ def test_terminate_connection(self, request, remove_fc_zone):
+ self.driver.common.conf.hitachi_zoning_request = True
+ self.driver.common.rep_primary.lookup_service = FakeLookupService()
+        self.driver.common.rep_secondary.lookup_service = FakeLookupService()
+
+        def _request_side_effect(
+ method, url, params, json, headers, auth, timeout, verify):
+ if self.configuration.hitachi_storage_id in url:
+ if method in ('POST', 'PUT', 'DELETE'):
+ return FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)
+ elif method == 'GET':
+ if '/ldevs/' in url:
+ return FakeResponse(200, GET_LDEV_RESULT_MAPPED)
+ elif '/host-wwns' in url:
+ return FakeResponse(200, GET_HOST_WWNS_RESULT)
+ else:
+ return FakeResponse(200, NOTFOUND_RESULT)
+ else:
+ if method in ('POST', 'PUT', 'DELETE'):
+ return FakeResponse(202, REMOTE_COMPLETED_SUCCEEDED_RESULT)
+ elif method == 'GET':
+ if '/ldevs/' in url:
+ return FakeResponse(200, REMOTE_GET_LDEV_RESULT_MAPPED)
+ elif '/host-wwns' in url:
+ return FakeResponse(200, REMOTE_GET_HOST_WWNS_RESULT)
+ else:
+ return FakeResponse(200, NOTFOUND_RESULT)
+ return FakeResponse(
+ 500, ERROR_RESULT, headers={'Content-Type': 'json'})
+ request.side_effect = _request_side_effect
+ self.driver.terminate_connection(TEST_VOLUME[6], DEFAULT_CONNECTOR)
+ self.assertEqual(10, request.call_count)
+ self.assertEqual(1, remove_fc_zone.call_count)
+
+ @mock.patch.object(fczm_utils, "add_fc_zone")
+ @mock.patch.object(requests.Session, "request")
+ def test_initialize_connection_snapshot(self, request, add_fc_zone):
+ self.driver.common.rep_primary.conf.hitachi_zoning_request = True
+ self.driver.common.lookup_service = FakeLookupService()
+ request.side_effect = [FakeResponse(200, GET_HOST_WWNS_RESULT),
+ FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
+ ret = self.driver.initialize_connection_snapshot(
+ TEST_SNAPSHOT[0], DEFAULT_CONNECTOR)
+ self.assertEqual('fibre_channel', ret['driver_volume_type'])
+ self.assertEqual([CONFIG_MAP['target_wwn']], ret['data']['target_wwn'])
+ self.assertEqual(1, ret['data']['target_lun'])
+ self.assertEqual(2, request.call_count)
+ self.assertEqual(1, add_fc_zone.call_count)
+
+ @mock.patch.object(fczm_utils, "remove_fc_zone")
+ @mock.patch.object(requests.Session, "request")
+ def test_terminate_connection_snapshot(self, request, remove_fc_zone):
+ self.driver.common.rep_primary.conf.hitachi_zoning_request = True
+ self.driver.common.lookup_service = FakeLookupService()
+ request.side_effect = [FakeResponse(200, GET_HOST_WWNS_RESULT),
+ FakeResponse(200, GET_LDEV_RESULT_MAPPED),
+ FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
+ FakeResponse(200, NOTFOUND_RESULT),
+ FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
+ self.driver.terminate_connection_snapshot(
+ TEST_SNAPSHOT[0], DEFAULT_CONNECTOR)
+ self.assertEqual(5, request.call_count)
+ self.assertEqual(1, remove_fc_zone.call_count)
+
+ @mock.patch.object(requests.Session, "request")
+ @mock.patch.object(volume_types, 'get_volume_type')
+ def test_manage_existing(self, get_volume_type, request):
+ get_volume_type.return_value = {}
+ request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
+ FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
+ ret = self.driver.manage_existing(
+ TEST_VOLUME[0], self.test_existing_ref)
+ actual = {'provider_location': '1'}
+ self.assertEqual(actual, ret)
+ self.assertEqual(2, request.call_count)
+
+ @mock.patch.object(requests.Session, "request")
+ @mock.patch.object(volume_types, 'get_volume_type_extra_specs')
+ def test_manage_existing_get_size(
+ self, get_volume_type_extra_specs, request):
+ extra_specs = {"test1": "aaa"}
+ get_volume_type_extra_specs.return_value = extra_specs
+ request.return_value = FakeResponse(200, GET_LDEV_RESULT)
+ self.driver.manage_existing_get_size(
+ TEST_VOLUME[0], self.test_existing_ref)
+ self.assertEqual(1, request.call_count)
+
+ @mock.patch.object(requests.Session, "request")
+ def test_unmanage(self, request):
+ request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
+ FakeResponse(200, GET_LDEV_RESULT),
+ FakeResponse(200, GET_LDEV_RESULT)]
+ self.driver.unmanage(TEST_VOLUME[0])
+ self.assertEqual(3, request.call_count)
+
+ @mock.patch.object(requests.Session, "request")
+ def test_copy_image_to_volume(self, request):
+ image_service = 'fake_image_service'
+ image_id = 'fake_image_id'
+ request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
+ FakeResponse(200, COMPLETED_SUCCEEDED_RESULT)]
+ with mock.patch.object(driver.VolumeDriver, 'copy_image_to_volume') \
+ as mock_copy_image:
+ self.driver.copy_image_to_volume(
+ self.ctxt, TEST_VOLUME[0], image_service, image_id)
+ mock_copy_image.assert_called_with(
+ self.ctxt, TEST_VOLUME[0], image_service, image_id)
+ self.assertEqual(2, request.call_count)
+
+ @mock.patch.object(requests.Session, "request")
+ def test_update_migrated_volume(self, request):
+ request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
+ FakeResponse(200, COMPLETED_SUCCEEDED_RESULT)]
+ self.assertRaises(
+ NotImplementedError,
+ self.driver.update_migrated_volume,
+ self.ctxt,
+ TEST_VOLUME[0],
+ TEST_VOLUME[1],
+ "available")
+ self.assertEqual(2, request.call_count)
+
+ def test_unmanage_snapshot(self):
+ """The driver don't support unmange_snapshot."""
+ self.assertRaises(
+ NotImplementedError,
+ self.driver.unmanage_snapshot,
+ TEST_SNAPSHOT[0])
+
+ @mock.patch.object(requests.Session, "request")
+ @mock.patch.object(volume_types, 'get_volume_type_extra_specs')
+ @mock.patch.object(obj_snap.SnapshotList, 'get_all_for_volume')
+ def test_retype(self, get_all_for_volume,
+ get_volume_type_extra_specs, request):
+ extra_specs = {'test1': 'aaa',
+ 'hbsd:target_ports': 'CL2-A'}
+ get_volume_type_extra_specs.return_value = extra_specs
+ get_all_for_volume.return_value = True
+
+ request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
+ FakeResponse(200, GET_LDEV_RESULT)]
+
+ old_specs = {'hbsd:target_ports': 'CL1-A'}
+ new_specs = {'hbsd:target_ports': 'CL2-A'}
+ old_type_ref = volume_types.create(self.ctxt, 'old', old_specs)
+ new_type_ref = volume_types.create(self.ctxt, 'new', new_specs)
+ new_type = volume_types.get_volume_type(self.ctxt, new_type_ref['id'])
+
+ diff = volume_types.volume_types_diff(self.ctxt, old_type_ref['id'],
+ new_type_ref['id'])[0]
+ host = {
+ 'capabilities': {
+ 'location_info': {
+ 'pool_id': 30,
+ },
+ },
+ }
+
+ ret = self.driver.retype(
+ self.ctxt, TEST_VOLUME[0], new_type, diff, host)
+ self.assertEqual(2, request.call_count)
+ self.assertFalse(ret)
+
+ @mock.patch.object(requests.Session, "request")
+ @mock.patch.object(volume_types, 'get_volume_type_extra_specs')
+ def test_retype_replication(self, get_volume_type_extra_specs, request):
+ extra_specs = {'test1': 'aaa',
+ 'hbsd:topology': 'active_active_mirror_volume'}
+ get_volume_type_extra_specs.return_value = extra_specs
+
+ request.return_value = FakeResponse(200, GET_LDEV_RESULT)
+
+ new_type_ref = volume_types.create(self.ctxt, 'new', extra_specs)
+ new_type = volume_types.get_volume_type(self.ctxt, new_type_ref['id'])
+ diff = {}
+ host = {
+ 'capabilities': {
+ 'location_info': {
+ 'pool_id': 30,
+ },
+ },
+ }
+ ret = self.driver.retype(
+ self.ctxt, TEST_VOLUME[0], new_type, diff, host)
+ self.assertEqual(1, request.call_count)
+ self.assertFalse(ret)
+
+ @mock.patch.object(requests.Session, "request")
+ def test_migrate_volume(
+ self, request):
+ request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
+ FakeResponse(200, GET_LDEV_RESULT),
+ FakeResponse(200, GET_LDEV_RESULT)]
+ host = {
+ 'capabilities': {
+ 'location_info': {
+ 'storage_id': CONFIG_MAP['serial'],
+ 'pool_id': 30,
+ },
+ },
+ }
+ ret = self.driver.migrate_volume(self.ctxt, TEST_VOLUME[0], host)
+ self.assertEqual(3, request.call_count)
+ actual = (True, None)
+ self.assertTupleEqual(actual, ret)
+
+ @mock.patch.object(requests.Session, "request")
+ def test_revert_to_snapshot(self, request):
+ request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR),
+ FakeResponse(200, GET_LDEV_RESULT_PAIR),
+ FakeResponse(200, GET_LDEV_RESULT_PAIR),
+ FakeResponse(200, GET_LDEV_RESULT_PAIR),
+ FakeResponse(200, GET_SNAPSHOTS_RESULT),
+ FakeResponse(200, GET_SNAPSHOTS_RESULT),
+ FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
+ FakeResponse(200, GET_SNAPSHOTS_RESULT)]
+ self.driver.revert_to_snapshot(
+ self.ctxt, TEST_VOLUME[0], TEST_SNAPSHOT[0])
+ self.assertEqual(8, request.call_count)
+
+ def test_create_group(self):
+ ret = self.driver.create_group(self.ctxt, TEST_GROUP[0])
+ self.assertIsNone(ret)
+
+ @mock.patch.object(requests.Session, "request")
+ def test_delete_group(self, request):
+ request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
+ FakeResponse(200, GET_LDEV_RESULT),
+ FakeResponse(200, GET_LDEV_RESULT),
+ FakeResponse(200, GET_LDEV_RESULT),
+ FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
+ ret = self.driver.delete_group(
+ self.ctxt, TEST_GROUP[0], [TEST_VOLUME[0]])
+ self.assertEqual(5, request.call_count)
+ actual = (
+ {'status': TEST_GROUP[0]['status']},
+ [{'id': TEST_VOLUME[0]['id'], 'status': 'deleted'}]
+ )
+ self.assertTupleEqual(actual, ret)
+
+ @mock.patch.object(requests.Session, "request")
+ @mock.patch.object(volume_types, 'get_volume_type')
+ @mock.patch.object(volume_types, 'get_volume_type_extra_specs')
+ def test_create_group_from_src_volume(
+ self, get_volume_type_extra_specs, get_volume_type, request):
+ extra_specs = {"test1": "aaa"}
+ get_volume_type_extra_specs.return_value = extra_specs
+ get_volume_type.return_value = {}
+ request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
+ FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
+ FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
+ FakeResponse(200, GET_SNAPSHOTS_RESULT),
+ FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
+ self.driver.common.rep_primary._stats = {}
+ self.driver.common.rep_primary._stats['pools'] = [
+ {'location_info': {'pool_id': 30}}]
+ self.driver.common.rep_secondary._stats = {}
+ self.driver.common.rep_secondary._stats['pools'] = [
+ {'location_info': {'pool_id': 40}}]
+ ret = self.driver.create_group_from_src(
+ self.ctxt, TEST_GROUP[1], [TEST_VOLUME[1]],
+ source_group=TEST_GROUP[0], source_vols=[TEST_VOLUME[0]]
+ )
+ self.assertEqual(5, request.call_count)
+ actual = (
+ None,
+ [{'id': TEST_VOLUME[1]['id'],
+ 'provider_location': '1'}])
+ self.assertTupleEqual(actual, ret)
+
+ @mock.patch.object(requests.Session, "request")
+ @mock.patch.object(volume_types, 'get_volume_type')
+ @mock.patch.object(volume_types, 'get_volume_type_extra_specs')
+ def test_create_group_from_src_snapshot(
+ self, get_volume_type_extra_specs, get_volume_type, request):
+ extra_specs = {"test1": "aaa"}
+ get_volume_type_extra_specs.return_value = extra_specs
+ get_volume_type.return_value = {}
+ request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
+ FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
+ FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
+ FakeResponse(200, GET_SNAPSHOTS_RESULT),
+ FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
+ self.driver.common.rep_primary._stats = {}
+ self.driver.common.rep_primary._stats['pools'] = [
+ {'location_info': {'pool_id': 30}}]
+ self.driver.common.rep_secondary._stats = {}
+ self.driver.common.rep_secondary._stats['pools'] = [
+ {'location_info': {'pool_id': 40}}]
+ ret = self.driver.create_group_from_src(
+ self.ctxt, TEST_GROUP[0], [TEST_VOLUME[0]],
+ group_snapshot=TEST_GROUP_SNAP[0], snapshots=[TEST_SNAPSHOT[0]]
+ )
+ self.assertEqual(5, request.call_count)
+ actual = (
+ None,
+ [{'id': TEST_VOLUME[0]['id'],
+ 'provider_location': '1'}])
+ self.assertTupleEqual(actual, ret)
+
+ @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type')
+ def test_update_group(self, is_group_a_cg_snapshot_type):
+ is_group_a_cg_snapshot_type.return_value = False
+ ret = self.driver.update_group(
+ self.ctxt, TEST_GROUP[0], add_volumes=[TEST_VOLUME[0]])
+ self.assertTupleEqual((None, None, None), ret)
+
+ @mock.patch.object(requests.Session, "request")
+ @mock.patch.object(volume_types, 'get_volume_type_extra_specs')
+ @mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get)
+ @mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type')
+ def test_create_group_snapshot_non_cg(
+ self, is_group_a_cg_snapshot_type, volume_get,
+ get_volume_type_extra_specs, request):
+ is_group_a_cg_snapshot_type.return_value = False
+ extra_specs = {"test1": "aaa"}
+ get_volume_type_extra_specs.return_value = extra_specs
+ request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
+ FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
+ FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
+ FakeResponse(200, GET_SNAPSHOTS_RESULT)]
+ self.driver.common.rep_primary._stats = {}
+ self.driver.common.rep_primary._stats['pools'] = [
+ {'location_info': {'pool_id': 30}}]
+ self.driver.common.rep_secondary._stats = {}
+ self.driver.common.rep_secondary._stats['pools'] = [
+ {'location_info': {'pool_id': 40}}]
+ ret = self.driver.create_group_snapshot(
+ self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]]
+ )
+ self.assertEqual(4, request.call_count)
+ actual = (
+ {'status': 'available'},
+ [{'id': TEST_SNAPSHOT[0]['id'],
+ 'provider_location': '1',
+ 'status': 'available'}]
+ )
+ self.assertTupleEqual(actual, ret)
+
+ @mock.patch.object(requests.Session, "request")
+ def test_delete_group_snapshot(self, request):
+ request.side_effect = [FakeResponse(200, GET_LDEV_RESULT_PAIR),
+ FakeResponse(200, NOTFOUND_RESULT),
+ FakeResponse(200, GET_SNAPSHOTS_RESULT),
+ FakeResponse(200, GET_SNAPSHOTS_RESULT),
+ FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
+ FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
+ FakeResponse(200, GET_LDEV_RESULT),
+ FakeResponse(200, GET_LDEV_RESULT),
+ FakeResponse(200, GET_LDEV_RESULT),
+ FakeResponse(200, GET_LDEV_RESULT),
+ FakeResponse(200, GET_LDEV_RESULT),
+ FakeResponse(200, GET_LDEV_RESULT),
+ FakeResponse(200, GET_LDEV_RESULT),
+ FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
+ ret = self.driver.delete_group_snapshot(
+ self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]])
+ self.assertEqual(14, request.call_count)
+ actual = (
+ {'status': TEST_GROUP_SNAP[0]['status']},
+ [{'id': TEST_SNAPSHOT[0]['id'], 'status': 'deleted'}]
+ )
+ self.assertTupleEqual(actual, ret)
+
+ @mock.patch.object(requests.Session, "request")
+ @mock.patch.object(volume_types, 'get_volume_type')
+ @mock.patch.object(volume_types, 'get_volume_type_extra_specs')
+ def test_create_rep_ldev_and_pair_deduplication_compression(
+ self, get_volume_type_extra_specs, get_volume_type, request):
+ get_volume_type_extra_specs.return_value = {
+ 'hbsd:topology': 'active_active_mirror_volume',
+ 'hbsd:capacity_saving': 'deduplication_compression'}
+ get_volume_type.return_value = {}
+ self.snapshot_count = 0
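+        # The remote array rejects POST/PUT with HTTP 400, so creating the
+        # remote copy of the pair fails and the driver is expected to raise
+        # VolumeDriverException.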
+
+ def _request_side_effect(
+ method, url, params, json, headers, auth, timeout, verify):
+ if self.configuration.hitachi_storage_id in url:
+ if method in ('POST', 'PUT'):
+ return FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)
+ elif method == 'GET':
+ if ('/remote-mirror-copygroups' in url or
+ '/journals' in url):
+ return FakeResponse(200, NOTFOUND_RESULT)
+ elif '/remote-mirror-copypairs/' in url:
+ return FakeResponse(
+ 200, GET_REMOTE_MIRROR_COPYPAIR_RESULT)
+ elif '/ldevs/' in url:
+ return FakeResponse(200, GET_LDEV_RESULT_REP)
+ elif '/snapshots' in url:
+ if self.snapshot_count < 1:
+ self.snapshot_count = self.snapshot_count + 1
+ return FakeResponse(200, GET_SNAPSHOTS_RESULT)
+ else:
+ return FakeResponse(200, NOTFOUND_RESULT)
+ else:
+ if method in ('POST', 'PUT'):
+ return FakeResponse(400, REMOTE_COMPLETED_SUCCEEDED_RESULT)
+ elif method == 'GET':
+ if '/remote-mirror-copygroups' in url:
+ return FakeResponse(200, NOTFOUND_RESULT)
+ elif '/ldevs/' in url:
+ return FakeResponse(200, GET_LDEV_RESULT_REP)
+ if '/ldevs/' in url:
+ return FakeResponse(200, GET_LDEV_RESULT_REP)
+ else:
+ return FakeResponse(
+ 200, COMPLETED_SUCCEEDED_RESULT)
+ self.driver.common.rep_primary._stats = {}
+ self.driver.common.rep_primary._stats['pools'] = [
+ {'location_info': {'pool_id': 30}}]
+ self.driver.common.rep_secondary._stats = {}
+ self.driver.common.rep_secondary._stats['pools'] = [
+ {'location_info': {'pool_id': 40}}]
+ request.side_effect = _request_side_effect
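+        # With the secondary array rejecting write requests, cloning the
+        # replicated volume is expected to fail with VolumeDriverException.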
+ self.assertRaises(exception.VolumeDriverException,
+ self.driver.create_cloned_volume,
+ TEST_VOLUME[4],
+ TEST_VOLUME[5])
+ self.assertEqual(2, get_volume_type_extra_specs.call_count)
+ self.assertEqual(0, get_volume_type.call_count)
+ self.assertEqual(14, request.call_count)
diff --git a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_fc.py b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_fc.py
index 733eb8761..63e48b5bd 100644
--- a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_fc.py
+++ b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_fc.py
@@ -18,6 +18,7 @@ import functools
from unittest import mock
from oslo_config import cfg
+from oslo_utils import units
import requests
from requests import models
@@ -36,6 +37,7 @@ from cinder.volume import configuration as conf
from cinder.volume import driver
from cinder.volume.drivers.hitachi import hbsd_common
from cinder.volume.drivers.hitachi import hbsd_fc
+from cinder.volume.drivers.hitachi import hbsd_replication
from cinder.volume.drivers.hitachi import hbsd_rest
from cinder.volume.drivers.hitachi import hbsd_rest_api
from cinder.volume.drivers.hitachi import hbsd_rest_fc
@@ -182,6 +184,16 @@ GET_HOST_WWNS_RESULT = {
],
}
+GET_HOST_GROUPS_RESULT_TEST = {
+ "data": [
+ {
+ "hostGroupNumber": 0,
+ "portId": CONFIG_MAP['port_id'],
+ "hostGroupName": CONFIG_MAP['host_grp_name'],
+ },
+ ],
+}
+
COMPLETED_SUCCEEDED_RESULT = {
"status": "Completed",
"state": "Succeeded",
@@ -205,6 +217,8 @@ GET_LDEV_RESULT = {
"attributes": ["CVS", "HDP"],
"status": "NML",
"poolId": 30,
+ "dataReductionStatus": "DISABLED",
+ "dataReductionMode": "disabled",
}
GET_LDEV_RESULT_MAPPED = {
@@ -229,6 +243,16 @@ GET_LDEV_RESULT_PAIR = {
"status": "NML",
}
+GET_LDEV_RESULT_PAIR_STATUS_TEST = {
+ "emulationType": "OPEN-V-CVS",
+ "blockCapacity": 2097152,
+ "attributes": ["CVS", "HDP", "HTI"],
+ "status": "TEST",
+ "poolId": 30,
+ "dataReductionStatus": "REHYDRATING",
+ "dataReductionMode": "disabled"
+}
+
GET_POOL_RESULT = {
"availableVolumeCapacity": 480144,
"totalPoolCapacity": 507780,
@@ -307,6 +331,16 @@ GET_HOST_GROUPS_RESULT = {
],
}
+GET_HOST_GROUPS_RESULT_PAIR = {
+ "data": [
+ {
+ "hostGroupNumber": 1,
+ "portId": CONFIG_MAP['port_id'],
+ "hostGroupName": "HBSD-pair00",
+ },
+ ],
+}
+
GET_LDEVS_RESULT = {
"data": [
{
@@ -458,7 +492,7 @@ class HBSDRESTFCDriverTest(test.TestCase):
self.configuration.driver_ssl_cert_verify = False
self.configuration.hitachi_storage_id = CONFIG_MAP['serial']
- self.configuration.hitachi_pool = ["30"]
+ self.configuration.hitachi_pools = ["30"]
self.configuration.hitachi_snap_pool = None
self.configuration.hitachi_ldev_range = "0-1"
self.configuration.hitachi_target_ports = [CONFIG_MAP['port_id']]
@@ -470,6 +504,7 @@ class HBSDRESTFCDriverTest(test.TestCase):
self.configuration.hitachi_copy_check_interval = 3
self.configuration.hitachi_async_copy_check_interval = 10
self.configuration.hitachi_port_scheduler = False
+ self.configuration.hitachi_group_name_format = None
self.configuration.san_login = CONFIG_MAP['user_id']
self.configuration.san_password = CONFIG_MAP['user_pass']
@@ -480,7 +515,6 @@ class HBSDRESTFCDriverTest(test.TestCase):
self.configuration.hitachi_rest_disable_io_wait = True
self.configuration.hitachi_rest_tcp_keepalive = True
self.configuration.hitachi_discard_zero_page = True
- self.configuration.hitachi_rest_number = "0"
self.configuration.hitachi_lun_timeout = hbsd_rest._LUN_TIMEOUT
self.configuration.hitachi_lun_retry_interval = (
hbsd_rest._LUN_RETRY_INTERVAL)
@@ -528,6 +562,21 @@ class HBSDRESTFCDriverTest(test.TestCase):
self.configuration.chap_username = CONFIG_MAP['auth_user']
self.configuration.chap_password = CONFIG_MAP['auth_password']
+ self.configuration.hitachi_replication_number = 0
+ self.configuration.hitachi_pair_target_number = 0
+ self.configuration.hitachi_rest_pair_target_ports = []
+ self.configuration.hitachi_quorum_disk_id = ''
+ self.configuration.hitachi_mirror_copy_speed = ''
+ self.configuration.hitachi_mirror_storage_id = ''
+ self.configuration.hitachi_mirror_pool = ''
+ self.configuration.hitachi_mirror_ldev_range = ''
+ self.configuration.hitachi_mirror_target_ports = ''
+ self.configuration.hitachi_mirror_rest_user = ''
+ self.configuration.hitachi_mirror_rest_password = ''
+ self.configuration.hitachi_mirror_rest_api_ip = ''
+ self.configuration.hitachi_set_mirror_reserve_attribute = ''
+ self.configuration.hitachi_path_group_id = ''
+
self.configuration.safe_get = self._fake_safe_get
CONF = cfg.CONF
@@ -552,7 +601,8 @@ class HBSDRESTFCDriverTest(test.TestCase):
configuration=self.configuration)
request.side_effect = [FakeResponse(200, POST_SESSIONS_RESULT),
FakeResponse(200, GET_PORTS_RESULT),
- FakeResponse(200, GET_HOST_WWNS_RESULT)]
+ FakeResponse(200, GET_HOST_WWNS_RESULT),
+ FakeResponse(200, GET_HOST_GROUPS_RESULT_PAIR)]
self.driver.do_setup(None)
self.driver.check_for_setup_error()
self.driver.local_path(None)
@@ -579,13 +629,14 @@ class HBSDRESTFCDriverTest(test.TestCase):
self._setup_config()
request.side_effect = [FakeResponse(200, POST_SESSIONS_RESULT),
FakeResponse(200, GET_PORTS_RESULT),
- FakeResponse(200, GET_HOST_WWNS_RESULT)]
+ FakeResponse(200, GET_HOST_WWNS_RESULT),
+ FakeResponse(200, GET_HOST_GROUPS_RESULT_PAIR)]
drv.do_setup(None)
self.assertEqual(
{CONFIG_MAP['port_id']: CONFIG_MAP['target_wwn']},
drv.common.storage_info['wwns'])
self.assertEqual(1, brick_get_connector_properties.call_count)
- self.assertEqual(3, request.call_count)
+ self.assertEqual(4, request.call_count)
# stop the Loopingcall within the do_setup treatment
self.driver.common.client.keep_session_loop.stop()
self.driver.common.client.keep_session_loop.wait()
@@ -606,13 +657,43 @@ class HBSDRESTFCDriverTest(test.TestCase):
FakeResponse(200, NOTFOUND_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
- FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
+ FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
+ FakeResponse(200, GET_HOST_GROUPS_RESULT_PAIR)]
drv.do_setup(None)
self.assertEqual(
{CONFIG_MAP['port_id']: CONFIG_MAP['target_wwn']},
drv.common.storage_info['wwns'])
self.assertEqual(1, brick_get_connector_properties.call_count)
- self.assertEqual(8, request.call_count)
+ self.assertEqual(9, request.call_count)
+ # stop the Loopingcall within the do_setup treatment
+ self.driver.common.client.keep_session_loop.stop()
+ self.driver.common.client.keep_session_loop.wait()
+
+ @mock.patch.object(requests.Session, "request")
+ @mock.patch.object(
+ volume_utils, 'brick_get_connector_properties',
+ side_effect=_brick_get_connector_properties)
+ def test_do_setup_create_hg_format(
+ self, brick_get_connector_properties, request):
+ drv = hbsd_fc.HBSDFCDriver(configuration=self.configuration)
+ self._setup_config()
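+        # Use a custom host group name template; the trailing run of '@'
+        # characters presumably exercises the maximum-length handling of the
+        # generated host group name.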
+ self.configuration.hitachi_group_name_format = (
+ 'HBSD-{wwn}-{host}-_:.@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@')
+ request.side_effect = [FakeResponse(200, POST_SESSIONS_RESULT),
+ FakeResponse(200, GET_PORTS_RESULT),
+ FakeResponse(200, NOTFOUND_RESULT),
+ FakeResponse(200, NOTFOUND_RESULT),
+ FakeResponse(200, NOTFOUND_RESULT),
+ FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
+ FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
+ FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
+ FakeResponse(200, GET_HOST_GROUPS_RESULT_PAIR)]
+ drv.do_setup(None)
+ self.assertEqual(
+ {CONFIG_MAP['port_id']: CONFIG_MAP['target_wwn']},
+ drv.common.storage_info['wwns'])
+ self.assertEqual(1, brick_get_connector_properties.call_count)
+ self.assertEqual(9, request.call_count)
# stop the Loopingcall within the do_setup treatment
self.driver.common.client.keep_session_loop.stop()
self.driver.common.client.keep_session_loop.wait()
@@ -620,6 +701,26 @@ class HBSDRESTFCDriverTest(test.TestCase):
@mock.patch.object(requests.Session, "request")
@mock.patch.object(
volume_utils, 'brick_get_connector_properties',
+ side_effect=_brick_get_connector_properties)
+ def test_do_setup_create_hg_format_error(
+ self, brick_get_connector_properties, request):
+ drv = hbsd_fc.HBSDFCDriver(configuration=self.configuration)
+ self._setup_config()
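+        # A template without the leading 'HBSD-' prefix is presumably
+        # rejected, so do_setup should raise VolumeDriverException.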
+ self.configuration.hitachi_group_name_format = '{host}-{wwn}'
+ request.side_effect = [FakeResponse(200, POST_SESSIONS_RESULT),
+ FakeResponse(200, GET_PORTS_RESULT),
+ FakeResponse(200, NOTFOUND_RESULT),
+ FakeResponse(200, NOTFOUND_RESULT),
+ FakeResponse(200, NOTFOUND_RESULT),
+ FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
+ FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
+ FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
+
+ self.assertRaises(exception.VolumeDriverException, drv.do_setup, None)
+
+ @mock.patch.object(requests.Session, "request")
+ @mock.patch.object(
+ volume_utils, 'brick_get_connector_properties',
side_effect=_brick_get_connector_properties_multi_wwn)
def test_do_setup_create_hg_port_scheduler(
self, brick_get_connector_properties, request):
@@ -638,13 +739,14 @@ class HBSDRESTFCDriverTest(test.TestCase):
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
- FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
+ FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
+ FakeResponse(200, GET_HOST_GROUPS_RESULT_PAIR)]
drv.do_setup(None)
self.assertEqual(
{CONFIG_MAP['port_id']: CONFIG_MAP['target_wwn']},
drv.common.storage_info['wwns'])
self.assertEqual(1, brick_get_connector_properties.call_count)
- self.assertEqual(9, request.call_count)
+ self.assertEqual(10, request.call_count)
# stop the Loopingcall within the do_setup treatment
self.driver.common.client.keep_session_loop.stop()
self.driver.common.client.keep_session_loop.wait()
@@ -658,19 +760,20 @@ class HBSDRESTFCDriverTest(test.TestCase):
drv = hbsd_fc.HBSDFCDriver(
configuration=self.configuration)
self._setup_config()
- tmp_pool = self.configuration.hitachi_pool
- self.configuration.hitachi_pool = [CONFIG_MAP['pool_name']]
+ tmp_pools = self.configuration.hitachi_pools
+ self.configuration.hitachi_pools = [CONFIG_MAP['pool_name']]
request.side_effect = [FakeResponse(200, POST_SESSIONS_RESULT),
FakeResponse(200, GET_POOLS_RESULT),
FakeResponse(200, GET_PORTS_RESULT),
- FakeResponse(200, GET_HOST_WWNS_RESULT)]
+ FakeResponse(200, GET_HOST_WWNS_RESULT),
+ FakeResponse(200, GET_HOST_GROUPS_RESULT_PAIR)]
drv.do_setup(None)
self.assertEqual(
{CONFIG_MAP['port_id']: CONFIG_MAP['target_wwn']},
drv.common.storage_info['wwns'])
self.assertEqual(1, brick_get_connector_properties.call_count)
- self.assertEqual(4, request.call_count)
- self.configuration.hitachi_pool = tmp_pool
+ self.assertEqual(5, request.call_count)
+ self.configuration.hitachi_pools = tmp_pools
# stop the Loopingcall within the do_setup treatment
self.driver.common.client.keep_session_loop.stop()
self.driver.common.client.keep_session_loop.wait()
@@ -685,6 +788,25 @@ class HBSDRESTFCDriverTest(test.TestCase):
self.assertEqual('1', ret['provider_location'])
self.assertEqual(2, request.call_count)
+ @mock.patch.object(requests.Session, "request")
+ @mock.patch.object(volume_types, 'get_volume_type_extra_specs')
+ def test_create_volume_deduplication_compression(
+ self, get_volume_type_extra_specs, request):
+ extra_specs = {'hbsd:capacity_saving': 'deduplication_compression'}
+ get_volume_type_extra_specs.return_value = extra_specs
+ request.return_value = FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)
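+        # The volume type asks for deduplication_compression, so the create
+        # request body is expected to carry the matching dataReductionMode.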
+ self.driver.common._stats = {}
+ self.driver.common._stats['pools'] = [
+ {'location_info': {'pool_id': 30}}]
+ ret = self.driver.create_volume(TEST_VOLUME[3])
+ args, kwargs = request.call_args_list[0]
+ body = kwargs['json']
+ self.assertEqual(body.get('dataReductionMode'),
+ 'compression_deduplication')
+ self.assertEqual('1', ret['provider_location'])
+ self.assertEqual(1, get_volume_type_extra_specs.call_count)
+ self.assertEqual(2, request.call_count)
+
@reduce_retrying_time
@mock.patch.object(requests.Session, "request")
def test_create_volume_timeout(self, request):
@@ -756,23 +878,94 @@ class HBSDRESTFCDriverTest(test.TestCase):
get_goodness_function.return_value = None
stats = self.driver.get_volume_stats(True)
self.assertEqual('Hitachi', stats['vendor_name'])
+ self.assertEqual(self.configuration.volume_backend_name,
+ stats["pools"][0]['pool_name'])
+ self.assertEqual(self.configuration.reserved_percentage,
+ stats["pools"][0]['reserved_percentage'])
+ self.assertTrue(stats["pools"][0]['thin_provisioning_support'])
+ self.assertFalse(stats["pools"][0]['thick_provisioning_support'])
self.assertTrue(stats["pools"][0]['multiattach'])
+ self.assertTrue(stats["pools"][0]['consistencygroup_support'])
+ self.assertTrue(stats["pools"][0]['consistent_group_snapshot_enabled'])
+ self.assertEqual(self.configuration.max_over_subscription_ratio,
+ stats["pools"][0]['max_over_subscription_ratio'])
+ self.assertEqual(
+ GET_POOL_RESULT['totalPoolCapacity'] // units.Ki,
+ stats["pools"][0]['total_capacity_gb'])
+ self.assertEqual(
+ GET_POOL_RESULT['availableVolumeCapacity'] // units.Ki,
+ stats["pools"][0]['free_capacity_gb'])
+ self.assertEqual(
+ GET_POOL_RESULT['totalLocatedCapacity'] // units.Ki,
+ stats["pools"][0]['provisioned_capacity_gb'])
+ self.assertEqual('up', stats["pools"][0]['backend_state'])
self.assertEqual(1, request.call_count)
self.assertEqual(1, get_filter_function.call_count)
self.assertEqual(1, get_goodness_function.call_count)
+ @mock.patch.object(driver.FibreChannelDriver, "get_goodness_function")
+ @mock.patch.object(driver.FibreChannelDriver, "get_filter_function")
+ @mock.patch.object(hbsd_rest.HBSDREST, "get_pool_info")
+ def test_get_volume_stats_error(
+ self, get_pool_info, get_filter_function, get_goodness_function):
+ get_pool_info.side_effect = exception.VolumeDriverException(data='')
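+        # When pool information cannot be retrieved, the reported capacities
+        # fall back to 0 and the backend is reported as 'down'.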
+ get_filter_function.return_value = None
+ get_goodness_function.return_value = None
+ stats = self.driver.get_volume_stats(True)
+ self.assertEqual('Hitachi', stats['vendor_name'])
+ self.assertEqual(self.configuration.volume_backend_name,
+ stats["pools"][0]['pool_name'])
+ self.assertEqual(self.configuration.reserved_percentage,
+ stats["pools"][0]['reserved_percentage'])
+ self.assertTrue(stats["pools"][0]['thin_provisioning_support'])
+ self.assertFalse(stats["pools"][0]['thick_provisioning_support'])
+ self.assertTrue(stats["pools"][0]['multiattach'])
+ self.assertTrue(stats["pools"][0]['consistencygroup_support'])
+ self.assertTrue(stats["pools"][0]['consistent_group_snapshot_enabled'])
+ self.assertEqual(self.configuration.max_over_subscription_ratio,
+ stats["pools"][0]['max_over_subscription_ratio'])
+ self.assertEqual(0, stats["pools"][0]['total_capacity_gb'])
+ self.assertEqual(0, stats["pools"][0]['free_capacity_gb'])
+ self.assertEqual(0, stats["pools"][0]['provisioned_capacity_gb'])
+ self.assertEqual('down', stats["pools"][0]['backend_state'])
+ self.assertEqual(1, get_filter_function.call_count)
+ self.assertEqual(1, get_goodness_function.call_count)
+
@mock.patch.object(requests.Session, "request")
+ @mock.patch.object(volume_types, 'get_volume_type_extra_specs')
@mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get)
- def test_create_snapshot(self, volume_get, request):
+ def test_create_snapshot(
+ self, volume_get, get_volume_type_extra_specs, request):
request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
FakeResponse(200, GET_SNAPSHOTS_RESULT)]
+ get_volume_type_extra_specs.return_value = {}
self.driver.common._stats = {}
self.driver.common._stats['pools'] = [
{'location_info': {'pool_id': 30}}]
ret = self.driver.create_snapshot(TEST_SNAPSHOT[0])
self.assertEqual('1', ret['provider_location'])
+ self.assertEqual(1, get_volume_type_extra_specs.call_count)
+ self.assertEqual(4, request.call_count)
+
+ @mock.patch.object(requests.Session, "request")
+ @mock.patch.object(volume_types, 'get_volume_type_extra_specs')
+ @mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get)
+ def test_create_snapshot_dedup_false(
+ self, volume_get, get_volume_type_extra_specs, request):
+ request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
+ FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
+ FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
+ FakeResponse(200, GET_SNAPSHOTS_RESULT)]
+ get_volume_type_extra_specs.return_value = {'hbsd:capacity_saving':
+ 'disable'}
+ self.driver.common._stats = {}
+ self.driver.common._stats['pools'] = [
+ {'location_info': {'pool_id': 30}}]
+ ret = self.driver.create_snapshot(TEST_SNAPSHOT[0])
+ self.assertEqual('1', ret['provider_location'])
+ self.assertEqual(1, get_volume_type_extra_specs.call_count)
self.assertEqual(4, request.call_count)
@mock.patch.object(requests.Session, "request")
@@ -786,9 +979,13 @@ class HBSDRESTFCDriverTest(test.TestCase):
FakeResponse(200, GET_LDEV_RESULT),
FakeResponse(200, GET_LDEV_RESULT),
FakeResponse(200, GET_LDEV_RESULT),
+ FakeResponse(200, GET_LDEV_RESULT),
+ FakeResponse(200, GET_LDEV_RESULT),
+ FakeResponse(200, GET_LDEV_RESULT),
+ FakeResponse(200, GET_LDEV_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
self.driver.delete_snapshot(TEST_SNAPSHOT[0])
- self.assertEqual(10, request.call_count)
+ self.assertEqual(14, request.call_count)
@mock.patch.object(requests.Session, "request")
def test_delete_snapshot_no_pair(self, request):
@@ -801,32 +998,40 @@ class HBSDRESTFCDriverTest(test.TestCase):
self.assertEqual(4, request.call_count)
@mock.patch.object(requests.Session, "request")
- def test_create_cloned_volume(self, request):
+ @mock.patch.object(volume_types, 'get_volume_type_extra_specs')
+ def test_create_cloned_volume(
+ self, get_volume_type_extra_specs, request):
request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
FakeResponse(200, GET_SNAPSHOTS_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
+ get_volume_type_extra_specs.return_value = {}
self.driver.common._stats = {}
self.driver.common._stats['pools'] = [
{'location_info': {'pool_id': 30}}]
vol = self.driver.create_cloned_volume(TEST_VOLUME[0], TEST_VOLUME[1])
self.assertEqual('1', vol['provider_location'])
+ self.assertEqual(1, get_volume_type_extra_specs.call_count)
self.assertEqual(5, request.call_count)
@mock.patch.object(requests.Session, "request")
- def test_create_volume_from_snapshot(self, request):
+ @mock.patch.object(volume_types, 'get_volume_type_extra_specs')
+ def test_create_volume_from_snapshot(
+ self, get_volume_type_extra_specs, request):
request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
FakeResponse(200, GET_SNAPSHOTS_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
self.driver.common._stats = {}
+ get_volume_type_extra_specs.return_value = {}
self.driver.common._stats['pools'] = [
{'location_info': {'pool_id': 30}}]
vol = self.driver.create_volume_from_snapshot(
TEST_VOLUME[0], TEST_SNAPSHOT[0])
self.assertEqual('1', vol['provider_location'])
+ self.assertEqual(1, get_volume_type_extra_specs.call_count)
self.assertEqual(5, request.call_count)
@mock.patch.object(fczm_utils, "add_fc_zone")
@@ -897,6 +1102,32 @@ class HBSDRESTFCDriverTest(test.TestCase):
self.assertEqual(5, request.call_count)
self.assertEqual(1, add_fc_zone.call_count)
+ @mock.patch.object(fczm_utils, "add_fc_zone")
+ @mock.patch.object(requests.Session, "request")
+ @mock.patch.object(volume_types, 'get_volume_type_extra_specs')
+ def test_create_target_to_storage_return(
+ self, get_volume_type_extra_specs, request, add_fc_zone):
+ self.configuration.hitachi_zoning_request = True
+ self.driver.common._lookup_service = FakeLookupService()
+ extra_specs = {"hbsd:target_ports": "CL1-A"}
+ get_volume_type_extra_specs.return_value = extra_specs
+ request.side_effect = [
+ FakeResponse(200, NOTFOUND_RESULT),
+ FakeResponse(200, NOTFOUND_RESULT),
+ FakeResponse(200, GET_HOST_GROUPS_RESULT),
+ FakeResponse(200, NOTFOUND_RESULT),
+ FakeResponse(400, GET_HOST_GROUPS_RESULT),
+ FakeResponse(200, GET_HOST_GROUPS_RESULT_TEST),
+ FakeResponse(200, GET_HOST_GROUPS_RESULT_TEST),
+ ]
+ self.assertRaises(exception.VolumeDriverException,
+ self.driver.initialize_connection,
+ TEST_VOLUME[1],
+ DEFAULT_CONNECTOR)
+ self.assertEqual(1, get_volume_type_extra_specs.call_count)
+ self.assertEqual(10, request.call_count)
+ self.assertEqual(0, add_fc_zone.call_count)
+
@mock.patch.object(fczm_utils, "remove_fc_zone")
@mock.patch.object(requests.Session, "request")
def test_terminate_connection(self, request, remove_fc_zone):
@@ -1046,10 +1277,9 @@ class HBSDRESTFCDriverTest(test.TestCase):
@mock.patch.object(requests.Session, "request")
def test_retype(self, request):
- request.return_value = FakeResponse(200, GET_LDEV_RESULT)
- new_specs = {'hbsd:test': 'test'}
- new_type_ref = volume_types.create(self.ctxt, 'new', new_specs)
- diff = {}
+ request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
+ FakeResponse(200, GET_LDEV_RESULT),
+ FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
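+        # Retyping between capacity-saving settings presumably issues two
+        # LDEV lookups plus one request that changes the data reduction
+        # mode, hence the three mocked responses.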
host = {
'capabilities': {
'location_info': {
@@ -1057,9 +1287,17 @@ class HBSDRESTFCDriverTest(test.TestCase):
},
},
}
+ new_type = {'extra_specs': {
+ 'hbsd:capacity_saving': 'deduplication_compression'}}
+ old_specs = {'hbsd:capacity_saving': 'disable'}
+ new_specs = {'hbsd:capacity_saving': 'deduplication_compression'}
+ old_type_ref = volume_types.create(self.ctxt, 'old', old_specs)
+ new_type_ref = volume_types.create(self.ctxt, 'new', new_specs)
+ diff = volume_types.volume_types_diff(self.ctxt, old_type_ref['id'],
+ new_type_ref['id'])[0]
ret = self.driver.retype(
- self.ctxt, TEST_VOLUME[0], new_type_ref, diff, host)
- self.assertEqual(1, request.call_count)
+ self.ctxt, TEST_VOLUME[0], new_type, diff, host)
+ self.assertEqual(3, request.call_count)
self.assertTrue(ret)
@mock.patch.object(requests.Session, "request")
@@ -1079,7 +1317,10 @@ class HBSDRESTFCDriverTest(test.TestCase):
self.assertTupleEqual(actual, ret)
@mock.patch.object(requests.Session, "request")
- def test_migrate_volume_diff_pool(self, request):
+ @mock.patch.object(volume_types, 'get_volume_type_extra_specs')
+ def test_migrate_volume_diff_pool(
+ self, get_volume_type_extra_specs, request):
+ get_volume_type_extra_specs.return_value = {}
request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
FakeResponse(200, GET_LDEV_RESULT),
FakeResponse(200, GET_LDEV_RESULT),
@@ -1104,6 +1345,7 @@ class HBSDRESTFCDriverTest(test.TestCase):
},
}
ret = self.driver.migrate_volume(self.ctxt, TEST_VOLUME[0], host)
+ self.assertEqual(1, get_volume_type_extra_specs.call_count)
self.assertEqual(15, request.call_count)
actual = (True, {'provider_location': '1'})
self.assertTupleEqual(actual, ret)
@@ -1148,7 +1390,10 @@ class HBSDRESTFCDriverTest(test.TestCase):
self.assertTupleEqual(actual, ret)
@mock.patch.object(requests.Session, "request")
- def test_create_group_from_src_volume(self, request):
+ @mock.patch.object(volume_types, 'get_volume_type_extra_specs')
+ def test_create_group_from_src_volume(
+ self, get_volume_type_extra_specs, request):
+ get_volume_type_extra_specs.return_value = {}
request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
@@ -1161,13 +1406,17 @@ class HBSDRESTFCDriverTest(test.TestCase):
self.ctxt, TEST_GROUP[1], [TEST_VOLUME[1]],
source_group=TEST_GROUP[0], source_vols=[TEST_VOLUME[0]]
)
+ self.assertEqual(1, get_volume_type_extra_specs.call_count)
self.assertEqual(5, request.call_count)
actual = (
None, [{'id': TEST_VOLUME[1]['id'], 'provider_location': '1'}])
self.assertTupleEqual(actual, ret)
@mock.patch.object(requests.Session, "request")
- def test_create_group_from_src_snapshot(self, request):
+ @mock.patch.object(volume_types, 'get_volume_type_extra_specs')
+ def test_create_group_from_src_snapshot(
+ self, get_volume_type_extra_specs, request):
+ get_volume_type_extra_specs.return_value = {}
request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
@@ -1180,6 +1429,7 @@ class HBSDRESTFCDriverTest(test.TestCase):
self.ctxt, TEST_GROUP[0], [TEST_VOLUME[0]],
group_snapshot=TEST_GROUP_SNAP[0], snapshots=[TEST_SNAPSHOT[0]]
)
+ self.assertEqual(1, get_volume_type_extra_specs.call_count)
self.assertEqual(5, request.call_count)
actual = (
None, [{'id': TEST_VOLUME[0]['id'], 'provider_location': '1'}])
@@ -1210,10 +1460,13 @@ class HBSDRESTFCDriverTest(test.TestCase):
@mock.patch.object(requests.Session, "request")
@mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get)
+ @mock.patch.object(volume_types, 'get_volume_type_extra_specs')
@mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type')
def test_create_group_snapshot_non_cg(
- self, is_group_a_cg_snapshot_type, volume_get, request):
+ self, is_group_a_cg_snapshot_type, get_volume_type_extra_specs,
+ volume_get, request):
is_group_a_cg_snapshot_type.return_value = False
+ get_volume_type_extra_specs.return_value = {}
request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
@@ -1224,6 +1477,7 @@ class HBSDRESTFCDriverTest(test.TestCase):
ret = self.driver.create_group_snapshot(
self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]]
)
+ self.assertEqual(1, get_volume_type_extra_specs.call_count)
self.assertEqual(4, request.call_count)
actual = (
{'status': 'available'},
@@ -1235,10 +1489,13 @@ class HBSDRESTFCDriverTest(test.TestCase):
@mock.patch.object(requests.Session, "request")
@mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get)
+ @mock.patch.object(volume_types, 'get_volume_type_extra_specs')
@mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type')
def test_create_group_snapshot_cg(
- self, is_group_a_cg_snapshot_type, volume_get, request):
+ self, is_group_a_cg_snapshot_type, get_volume_type_extra_specs,
+ volume_get, request):
is_group_a_cg_snapshot_type.return_value = True
+ get_volume_type_extra_specs.return_value = {}
request.side_effect = [FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
FakeResponse(200, GET_SNAPSHOTS_RESULT_PAIR),
@@ -1250,6 +1507,7 @@ class HBSDRESTFCDriverTest(test.TestCase):
ret = self.driver.create_group_snapshot(
self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]]
)
+ self.assertEqual(1, get_volume_type_extra_specs.call_count)
self.assertEqual(5, request.call_count)
actual = (
None,
@@ -1270,10 +1528,14 @@ class HBSDRESTFCDriverTest(test.TestCase):
FakeResponse(200, GET_LDEV_RESULT),
FakeResponse(200, GET_LDEV_RESULT),
FakeResponse(200, GET_LDEV_RESULT),
+ FakeResponse(200, GET_LDEV_RESULT),
+ FakeResponse(200, GET_LDEV_RESULT),
+ FakeResponse(200, GET_LDEV_RESULT),
+ FakeResponse(200, GET_LDEV_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
ret = self.driver.delete_group_snapshot(
self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]])
- self.assertEqual(10, request.call_count)
+ self.assertEqual(14, request.call_count)
actual = (
{'status': TEST_GROUP_SNAP[0]['status']},
[{'id': TEST_SNAPSHOT[0]['id'], 'status': 'deleted'}]
@@ -1286,6 +1548,41 @@ class HBSDRESTFCDriverTest(test.TestCase):
ret = self.driver.get_driver_options()
actual = (hbsd_common.COMMON_VOLUME_OPTS +
hbsd_common.COMMON_PORT_OPTS +
+ hbsd_common.COMMON_PAIR_OPTS +
+ hbsd_common.COMMON_NAME_OPTS +
hbsd_rest.REST_VOLUME_OPTS +
- hbsd_rest_fc.FC_VOLUME_OPTS)
+ hbsd_rest.REST_PAIR_OPTS +
+ hbsd_rest_fc.FC_VOLUME_OPTS +
+ hbsd_replication._REP_OPTS +
+ hbsd_replication.COMMON_MIRROR_OPTS +
+ hbsd_replication.ISCSI_MIRROR_OPTS +
+ hbsd_replication.REST_MIRROR_OPTS +
+ hbsd_replication.REST_MIRROR_API_OPTS +
+ hbsd_replication.REST_MIRROR_SSL_OPTS)
self.assertEqual(actual, ret)
+
+ @mock.patch.object(requests.Session, "request")
+ def test_is_modifiable_dr_value_new_dr_mode_disabled(self, request):
+ request.side_effect = [
+ FakeResponse(200, GET_LDEV_RESULT_PAIR_STATUS_TEST),
+ FakeResponse(200, GET_LDEV_RESULT_PAIR_STATUS_TEST),
+ FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)
+ ]
+ host = {
+ 'capabilities': {
+ 'location_info': {
+ 'pool_id': 30,
+ },
+ },
+ }
+ new_type = {'extra_specs': {'hbsd:capacity_saving': 'disable'}}
+ old_specs = {'hbsd:capacity_saving': 'deduplication_compression'}
+ new_specs = {'hbsd:capacity_saving': 'disable'}
+ old_type_ref = volume_types.create(self.ctxt, 'old', old_specs)
+ new_type_ref = volume_types.create(self.ctxt, 'new', new_specs)
+ diff = volume_types.volume_types_diff(self.ctxt, old_type_ref['id'],
+ new_type_ref['id'])[0]
+ ret = self.driver.retype(
+ self.ctxt, TEST_VOLUME[0], new_type, diff, host)
+ self.assertEqual(3, request.call_count)
+ self.assertTrue(ret)
diff --git a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_iscsi.py b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_iscsi.py
index 65c6b1650..9d79a1031 100644
--- a/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_iscsi.py
+++ b/cinder/tests/unit/volume/drivers/hitachi/test_hitachi_hbsd_rest_iscsi.py
@@ -33,6 +33,7 @@ from cinder.volume import configuration as conf
from cinder.volume import driver
from cinder.volume.drivers.hitachi import hbsd_common
from cinder.volume.drivers.hitachi import hbsd_iscsi
+from cinder.volume.drivers.hitachi import hbsd_replication
from cinder.volume.drivers.hitachi import hbsd_rest
from cinder.volume.drivers.hitachi import hbsd_rest_api
from cinder.volume import volume_types
@@ -191,6 +192,8 @@ GET_LDEV_RESULT = {
"attributes": ["CVS", "HDP"],
"status": "NML",
"poolId": 30,
+ "dataReductionStatus": "DISABLED",
+ "dataReductionMode": "disabled",
}
GET_LDEV_RESULT_MAPPED = {
@@ -252,6 +255,16 @@ GET_SNAPSHOTS_RESULT_PAIR = {
],
}
+GET_HOST_GROUPS_RESULT_PAIR = {
+ "data": [
+ {
+ "hostGroupNumber": 1,
+ "portId": CONFIG_MAP['port_id'],
+ "hostGroupName": "HBSD-pair00",
+ },
+ ],
+}
+
GET_LDEVS_RESULT = {
"data": [
{
@@ -331,7 +344,7 @@ class HBSDRESTISCSIDriverTest(test.TestCase):
self.configuration.driver_ssl_cert_verify = False
self.configuration.hitachi_storage_id = CONFIG_MAP['serial']
- self.configuration.hitachi_pool = ['30']
+ self.configuration.hitachi_pools = ['30']
self.configuration.hitachi_snap_pool = None
self.configuration.hitachi_ldev_range = "0-1"
self.configuration.hitachi_target_ports = [CONFIG_MAP['port_id']]
@@ -343,6 +356,7 @@ class HBSDRESTISCSIDriverTest(test.TestCase):
self.configuration.hitachi_copy_check_interval = 3
self.configuration.hitachi_async_copy_check_interval = 10
self.configuration.hitachi_port_scheduler = False
+ self.configuration.hitachi_group_name_format = None
self.configuration.san_login = CONFIG_MAP['user_id']
self.configuration.san_password = CONFIG_MAP['user_pass']
@@ -353,7 +367,6 @@ class HBSDRESTISCSIDriverTest(test.TestCase):
self.configuration.hitachi_rest_disable_io_wait = True
self.configuration.hitachi_rest_tcp_keepalive = True
self.configuration.hitachi_discard_zero_page = True
- self.configuration.hitachi_rest_number = "0"
self.configuration.hitachi_lun_timeout = hbsd_rest._LUN_TIMEOUT
self.configuration.hitachi_lun_retry_interval = (
hbsd_rest._LUN_RETRY_INTERVAL)
@@ -399,6 +412,21 @@ class HBSDRESTISCSIDriverTest(test.TestCase):
self.configuration.ssh_min_pool_conn = '1'
self.configuration.ssh_max_pool_conn = '5'
+ self.configuration.hitachi_replication_number = 0
+ self.configuration.hitachi_pair_target_number = 0
+ self.configuration.hitachi_rest_pair_target_ports = []
+ self.configuration.hitachi_quorum_disk_id = ''
+ self.configuration.hitachi_mirror_copy_speed = ''
+ self.configuration.hitachi_mirror_storage_id = ''
+ self.configuration.hitachi_mirror_pool = ''
+ self.configuration.hitachi_mirror_ldev_range = ''
+ self.configuration.hitachi_mirror_target_ports = ''
+ self.configuration.hitachi_mirror_rest_user = ''
+ self.configuration.hitachi_mirror_rest_password = ''
+ self.configuration.hitachi_mirror_rest_api_ip = ''
+ self.configuration.hitachi_set_mirror_reserve_attribute = ''
+ self.configuration.hitachi_path_group_id = ''
+
self.configuration.safe_get = self._fake_safe_get
CONF = cfg.CONF
@@ -425,7 +453,8 @@ class HBSDRESTISCSIDriverTest(test.TestCase):
FakeResponse(200, GET_PORTS_RESULT),
FakeResponse(200, GET_PORT_RESULT),
FakeResponse(200, GET_HOST_ISCSIS_RESULT),
- FakeResponse(200, GET_HOST_GROUP_RESULT)]
+ FakeResponse(200, GET_HOST_GROUP_RESULT),
+ FakeResponse(200, GET_HOST_GROUPS_RESULT_PAIR)]
self.driver.do_setup(None)
self.driver.check_for_setup_error()
self.driver.local_path(None)
@@ -454,7 +483,8 @@ class HBSDRESTISCSIDriverTest(test.TestCase):
FakeResponse(200, GET_PORTS_RESULT),
FakeResponse(200, GET_PORT_RESULT),
FakeResponse(200, GET_HOST_ISCSIS_RESULT),
- FakeResponse(200, GET_HOST_GROUP_RESULT)]
+ FakeResponse(200, GET_HOST_GROUP_RESULT),
+ FakeResponse(200, GET_HOST_GROUPS_RESULT_PAIR)]
drv.do_setup(None)
self.assertEqual(
{CONFIG_MAP['port_id']:
@@ -463,7 +493,7 @@ class HBSDRESTISCSIDriverTest(test.TestCase):
'port': CONFIG_MAP['tcpPort']}},
drv.common.storage_info['portals'])
self.assertEqual(1, brick_get_connector_properties.call_count)
- self.assertEqual(5, request.call_count)
+ self.assertEqual(6, request.call_count)
# stop the Loopingcall within the do_setup treatment
self.driver.common.client.keep_session_loop.stop()
self.driver.common.client.keep_session_loop.wait()
@@ -484,7 +514,8 @@ class HBSDRESTISCSIDriverTest(test.TestCase):
FakeResponse(200, NOTFOUND_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
- FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
+ FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
+ FakeResponse(200, GET_HOST_GROUPS_RESULT_PAIR)]
drv.do_setup(None)
self.assertEqual(
{CONFIG_MAP['port_id']:
@@ -493,12 +524,63 @@ class HBSDRESTISCSIDriverTest(test.TestCase):
'port': CONFIG_MAP['tcpPort']}},
drv.common.storage_info['portals'])
self.assertEqual(1, brick_get_connector_properties.call_count)
- self.assertEqual(8, request.call_count)
+ self.assertEqual(9, request.call_count)
# stop the Loopingcall within the do_setup treatment
self.driver.common.client.keep_session_loop.stop()
self.driver.common.client.keep_session_loop.wait()
@mock.patch.object(requests.Session, "request")
+ @mock.patch.object(
+ volume_utils, 'brick_get_connector_properties',
+ side_effect=_brick_get_connector_properties)
+ def test_do_setup_create_hg_format(
+ self, brick_get_connector_properties, request):
+ drv = hbsd_iscsi.HBSDISCSIDriver(configuration=self.configuration)
+ self._setup_config()
+ self.configuration.hitachi_group_name_format = 'HBSD-{ip}@{host}-_:.'
+ request.side_effect = [FakeResponse(200, POST_SESSIONS_RESULT),
+ FakeResponse(200, GET_PORTS_RESULT),
+ FakeResponse(200, GET_PORT_RESULT),
+ FakeResponse(200, NOTFOUND_RESULT),
+ FakeResponse(200, NOTFOUND_RESULT),
+ FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
+ FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
+ FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
+ FakeResponse(200, GET_HOST_GROUPS_RESULT_PAIR)]
+ drv.do_setup(None)
+ self.assertEqual(
+ {CONFIG_MAP['port_id']:
+ '%(ip)s:%(port)s' % {
+ 'ip': CONFIG_MAP['ipv4Address'],
+ 'port': CONFIG_MAP['tcpPort']}},
+ drv.common.storage_info['portals'])
+ self.assertEqual(1, brick_get_connector_properties.call_count)
+ self.assertEqual(9, request.call_count)
+ # stop the Loopingcall within the do_setup treatment
+ self.driver.common.client.keep_session_loop.stop()
+ self.driver.common.client.keep_session_loop.wait()
+
+ @mock.patch.object(requests.Session, "request")
+ @mock.patch.object(
+ volume_utils, 'brick_get_connector_properties',
+ side_effect=_brick_get_connector_properties)
+ def test_do_setup_create_hg_format_error(
+ self, brick_get_connector_properties, request):
+ drv = hbsd_iscsi.HBSDISCSIDriver(configuration=self.configuration)
+ self._setup_config()
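+        # A host group name template that is presumably invalid for the
+        # iSCSI case (too long once expanded), so do_setup is expected to
+        # raise VolumeDriverException.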
+ self.configuration.hitachi_group_name_format = (
+ 'HBSD-{ip}@{host}ZZZZZZZZZZZ')
+ request.side_effect = [FakeResponse(200, POST_SESSIONS_RESULT),
+ FakeResponse(200, GET_PORTS_RESULT),
+ FakeResponse(200, GET_PORT_RESULT),
+ FakeResponse(200, NOTFOUND_RESULT),
+ FakeResponse(200, NOTFOUND_RESULT),
+ FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
+ FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
+ FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
+ self.assertRaises(exception.VolumeDriverException, drv.do_setup, None)
+
+ @mock.patch.object(requests.Session, "request")
def test_extend_volume(self, request):
request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
FakeResponse(200, GET_LDEV_RESULT),
@@ -541,8 +623,11 @@ class HBSDRESTISCSIDriverTest(test.TestCase):
self.assertEqual(4, request.call_count)
@mock.patch.object(requests.Session, "request")
+ @mock.patch.object(volume_types, 'get_volume_type_extra_specs')
@mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get)
- def test_create_snapshot(self, volume_get, request):
+ def test_create_snapshot(
+ self, volume_get, get_volume_type_extra_specs, request):
+ get_volume_type_extra_specs.return_value = {}
request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
@@ -552,6 +637,7 @@ class HBSDRESTISCSIDriverTest(test.TestCase):
{'location_info': {'pool_id': 30}}]
ret = self.driver.create_snapshot(TEST_SNAPSHOT[0])
self.assertEqual('1', ret['provider_location'])
+ self.assertEqual(1, get_volume_type_extra_specs.call_count)
self.assertEqual(4, request.call_count)
@mock.patch.object(requests.Session, "request")
@@ -564,32 +650,40 @@ class HBSDRESTISCSIDriverTest(test.TestCase):
self.assertEqual(4, request.call_count)
@mock.patch.object(requests.Session, "request")
- def test_create_cloned_volume(self, request):
+ @mock.patch.object(volume_types, 'get_volume_type_extra_specs')
+ def test_create_cloned_volume(
+ self, get_volume_type_extra_specs, request):
request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
FakeResponse(200, GET_SNAPSHOTS_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
+ get_volume_type_extra_specs.return_value = {}
self.driver.common._stats = {}
self.driver.common._stats['pools'] = [
{'location_info': {'pool_id': 30}}]
vol = self.driver.create_cloned_volume(TEST_VOLUME[0], TEST_VOLUME[1])
self.assertEqual('1', vol['provider_location'])
+ self.assertEqual(1, get_volume_type_extra_specs.call_count)
self.assertEqual(5, request.call_count)
@mock.patch.object(requests.Session, "request")
- def test_create_volume_from_snapshot(self, request):
+ @mock.patch.object(volume_types, 'get_volume_type_extra_specs')
+ def test_create_volume_from_snapshot(
+ self, get_volume_type_extra_specs, request):
request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
FakeResponse(200, GET_SNAPSHOTS_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
+ get_volume_type_extra_specs.return_value = {}
self.driver.common._stats = {}
self.driver.common._stats['pools'] = [
{'location_info': {'pool_id': 30}}]
vol = self.driver.create_volume_from_snapshot(
TEST_VOLUME[0], TEST_SNAPSHOT[0])
self.assertEqual('1', vol['provider_location'])
+ self.assertEqual(1, get_volume_type_extra_specs.call_count)
self.assertEqual(5, request.call_count)
@mock.patch.object(requests.Session, "request")
@@ -786,10 +880,9 @@ class HBSDRESTISCSIDriverTest(test.TestCase):
@mock.patch.object(requests.Session, "request")
def test_retype(self, request):
- request.return_value = FakeResponse(200, GET_LDEV_RESULT)
- new_specs = {'hbsd:test': 'test'}
- new_type_ref = volume_types.create(self.ctxt, 'new', new_specs)
- diff = {}
+ request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
+ FakeResponse(200, GET_LDEV_RESULT),
+ FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
host = {
'capabilities': {
'location_info': {
@@ -797,9 +890,17 @@ class HBSDRESTISCSIDriverTest(test.TestCase):
},
},
}
+ new_type = {'extra_specs': {
+ 'hbsd:capacity_saving': 'deduplication_compression'}}
+ old_specs = {'hbsd:capacity_saving': 'disable'}
+ new_specs = {'hbsd:capacity_saving': 'deduplication_compression'}
+ old_type_ref = volume_types.create(self.ctxt, 'old', old_specs)
+ new_type_ref = volume_types.create(self.ctxt, 'new', new_specs)
+ diff = volume_types.volume_types_diff(self.ctxt, old_type_ref['id'],
+ new_type_ref['id'])[0]
ret = self.driver.retype(
- self.ctxt, TEST_VOLUME[0], new_type_ref, diff, host)
- self.assertEqual(1, request.call_count)
+ self.ctxt, TEST_VOLUME[0], new_type, diff, host)
+ self.assertEqual(3, request.call_count)
self.assertTrue(ret)
@mock.patch.object(requests.Session, "request")
@@ -852,7 +953,10 @@ class HBSDRESTISCSIDriverTest(test.TestCase):
self.assertTupleEqual(actual, ret)
@mock.patch.object(requests.Session, "request")
- def test_create_group_from_src_volume(self, request):
+ @mock.patch.object(volume_types, 'get_volume_type_extra_specs')
+ def test_create_group_from_src_volume(
+ self, get_volume_type_extra_specs, request):
+ get_volume_type_extra_specs.return_value = {}
request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
@@ -865,13 +969,17 @@ class HBSDRESTISCSIDriverTest(test.TestCase):
self.ctxt, TEST_GROUP[1], [TEST_VOLUME[1]],
source_group=TEST_GROUP[0], source_vols=[TEST_VOLUME[0]]
)
+ self.assertEqual(1, get_volume_type_extra_specs.call_count)
self.assertEqual(5, request.call_count)
actual = (
None, [{'id': TEST_VOLUME[1]['id'], 'provider_location': '1'}])
self.assertTupleEqual(actual, ret)
@mock.patch.object(requests.Session, "request")
- def test_create_group_from_src_snapshot(self, request):
+ @mock.patch.object(volume_types, 'get_volume_type_extra_specs')
+ def test_create_group_from_src_snapshot(
+ self, get_volume_type_extra_specs, request):
+ get_volume_type_extra_specs.return_value = {}
request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
@@ -884,6 +992,7 @@ class HBSDRESTISCSIDriverTest(test.TestCase):
self.ctxt, TEST_GROUP[0], [TEST_VOLUME[0]],
group_snapshot=TEST_GROUP_SNAP[0], snapshots=[TEST_SNAPSHOT[0]]
)
+ self.assertEqual(1, get_volume_type_extra_specs.call_count)
self.assertEqual(5, request.call_count)
actual = (
None, [{'id': TEST_VOLUME[0]['id'], 'provider_location': '1'}])
@@ -914,10 +1023,13 @@ class HBSDRESTISCSIDriverTest(test.TestCase):
@mock.patch.object(requests.Session, "request")
@mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get)
+ @mock.patch.object(volume_types, 'get_volume_type_extra_specs')
@mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type')
def test_create_group_snapshot_non_cg(
- self, is_group_a_cg_snapshot_type, volume_get, request):
+ self, is_group_a_cg_snapshot_type, get_volume_type_extra_specs,
+ volume_get, request):
is_group_a_cg_snapshot_type.return_value = False
+ get_volume_type_extra_specs.return_value = {}
request.side_effect = [FakeResponse(200, GET_LDEV_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
@@ -928,6 +1040,7 @@ class HBSDRESTISCSIDriverTest(test.TestCase):
ret = self.driver.create_group_snapshot(
self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]]
)
+ self.assertEqual(1, get_volume_type_extra_specs.call_count)
self.assertEqual(4, request.call_count)
actual = (
{'status': 'available'},
@@ -939,10 +1052,13 @@ class HBSDRESTISCSIDriverTest(test.TestCase):
@mock.patch.object(requests.Session, "request")
@mock.patch.object(sqlalchemy_api, 'volume_get', side_effect=_volume_get)
+ @mock.patch.object(volume_types, 'get_volume_type_extra_specs')
@mock.patch.object(volume_utils, 'is_group_a_cg_snapshot_type')
def test_create_group_snapshot_cg(
- self, is_group_a_cg_snapshot_type, volume_get, request):
+ self, is_group_a_cg_snapshot_type, get_volume_type_extra_specs,
+ volume_get, request):
is_group_a_cg_snapshot_type.return_value = True
+ get_volume_type_extra_specs.return_value = {}
request.side_effect = [FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT),
FakeResponse(200, GET_SNAPSHOTS_RESULT_PAIR),
@@ -954,6 +1070,7 @@ class HBSDRESTISCSIDriverTest(test.TestCase):
ret = self.driver.create_group_snapshot(
self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]]
)
+ self.assertEqual(1, get_volume_type_extra_specs.call_count)
self.assertEqual(5, request.call_count)
actual = (
None,
@@ -974,10 +1091,14 @@ class HBSDRESTISCSIDriverTest(test.TestCase):
FakeResponse(200, GET_LDEV_RESULT),
FakeResponse(200, GET_LDEV_RESULT),
FakeResponse(200, GET_LDEV_RESULT),
+ FakeResponse(200, GET_LDEV_RESULT),
+ FakeResponse(200, GET_LDEV_RESULT),
+ FakeResponse(200, GET_LDEV_RESULT),
+ FakeResponse(200, GET_LDEV_RESULT),
FakeResponse(202, COMPLETED_SUCCEEDED_RESULT)]
ret = self.driver.delete_group_snapshot(
self.ctxt, TEST_GROUP_SNAP[0], [TEST_SNAPSHOT[0]])
- self.assertEqual(10, request.call_count)
+ self.assertEqual(14, request.call_count)
actual = (
{'status': TEST_GROUP_SNAP[0]['status']},
[{'id': TEST_SNAPSHOT[0]['id'], 'status': 'deleted'}]
@@ -989,5 +1110,14 @@ class HBSDRESTISCSIDriverTest(test.TestCase):
_get_oslo_driver_opts.return_value = []
ret = self.driver.get_driver_options()
actual = (hbsd_common.COMMON_VOLUME_OPTS +
- hbsd_rest.REST_VOLUME_OPTS)
+ hbsd_common.COMMON_PAIR_OPTS +
+ hbsd_common.COMMON_NAME_OPTS +
+ hbsd_rest.REST_VOLUME_OPTS +
+ hbsd_rest.REST_PAIR_OPTS +
+ hbsd_replication._REP_OPTS +
+ hbsd_replication.COMMON_MIRROR_OPTS +
+ hbsd_replication.ISCSI_MIRROR_OPTS +
+ hbsd_replication.REST_MIRROR_OPTS +
+ hbsd_replication.REST_MIRROR_API_OPTS +
+ hbsd_replication.REST_MIRROR_SSL_OPTS)
self.assertEqual(actual, ret)
diff --git a/cinder/tests/unit/volume/drivers/hpe/test_hpe3par.py b/cinder/tests/unit/volume/drivers/hpe/test_hpe3par.py
index d19824208..1d1e440cb 100644
--- a/cinder/tests/unit/volume/drivers/hpe/test_hpe3par.py
+++ b/cinder/tests/unit/volume/drivers/hpe/test_hpe3par.py
@@ -749,10 +749,7 @@ class HPE3PARBaseDriver(test.TestCase):
configuration.unique_fqdn_network = True
return configuration
- @mock.patch(
- 'hpe3parclient.client.HPE3ParClient',
- spec=True,
- )
+ @mock.patch('hpe3parclient.client.HPE3ParClient')
def setup_mock_client(self, _m_client, driver, conf=None, m_conf=None,
is_primera=False,
wsapi_version=wsapi_version_latest):
diff --git a/cinder/tests/unit/volume/drivers/hpe/xp/test_hpe_xp_rest_fc.py b/cinder/tests/unit/volume/drivers/hpe/xp/test_hpe_xp_rest_fc.py
index ff139ac73..1499964c3 100644
--- a/cinder/tests/unit/volume/drivers/hpe/xp/test_hpe_xp_rest_fc.py
+++ b/cinder/tests/unit/volume/drivers/hpe/xp/test_hpe_xp_rest_fc.py
@@ -426,7 +426,7 @@ class HPEXPRESTFCDriverTest(test.TestCase):
self.configuration.driver_ssl_cert_verify = False
self.configuration.hpexp_storage_id = CONFIG_MAP['serial']
- self.configuration.hpexp_pool = ["30"]
+ self.configuration.hpexp_pools = ["30"]
self.configuration.hpexp_snap_pool = None
self.configuration.hpexp_ldev_range = "0-1"
self.configuration.hpexp_target_ports = [CONFIG_MAP['port_id']]
@@ -593,8 +593,8 @@ class HPEXPRESTFCDriverTest(test.TestCase):
drv = hpe_xp_fc.HPEXPFCDriver(
configuration=self.configuration)
self._setup_config()
- tmp_pool = self.configuration.hitachi_pool
- self.configuration.hitachi_pool = [CONFIG_MAP['pool_name']]
+ tmp_pools = self.configuration.hitachi_pools
+ self.configuration.hitachi_pools = [CONFIG_MAP['pool_name']]
request.side_effect = [FakeResponse(200, POST_SESSIONS_RESULT),
FakeResponse(200, GET_POOLS_RESULT),
FakeResponse(200, GET_PORTS_RESULT),
@@ -605,7 +605,7 @@ class HPEXPRESTFCDriverTest(test.TestCase):
drv.common.storage_info['wwns'])
self.assertEqual(1, brick_get_connector_properties.call_count)
self.assertEqual(4, request.call_count)
- self.configuration.hitachi_pool = tmp_pool
+ self.configuration.hitachi_pools = tmp_pools
# stop the Loopingcall within the do_setup treatment
self.driver.common.client.keep_session_loop.stop()
self.driver.common.client.keep_session_loop.wait()
@@ -963,8 +963,9 @@ class HPEXPRESTFCDriverTest(test.TestCase):
@mock.patch.object(requests.Session, "request")
def test_retype(self, request):
request.return_value = FakeResponse(200, GET_LDEV_RESULT)
- new_specs = {'hbsd:test': 'test'}
+ new_specs = {'hpe_xp:test': 'test'}
new_type_ref = volume_types.create(self.ctxt, 'new', new_specs)
+ new_type = volume_types.get_volume_type(self.ctxt, new_type_ref['id'])
diff = {}
host = {
'capabilities': {
@@ -974,7 +975,7 @@ class HPEXPRESTFCDriverTest(test.TestCase):
},
}
ret = self.driver.retype(
- self.ctxt, TEST_VOLUME[0], new_type_ref, diff, host)
+ self.ctxt, TEST_VOLUME[0], new_type, diff, host)
self.assertEqual(1, request.call_count)
self.assertTrue(ret)
diff --git a/cinder/tests/unit/volume/drivers/hpe/xp/test_hpe_xp_rest_iscsi.py b/cinder/tests/unit/volume/drivers/hpe/xp/test_hpe_xp_rest_iscsi.py
index 5cc5e20aa..18e933d80 100644
--- a/cinder/tests/unit/volume/drivers/hpe/xp/test_hpe_xp_rest_iscsi.py
+++ b/cinder/tests/unit/volume/drivers/hpe/xp/test_hpe_xp_rest_iscsi.py
@@ -330,7 +330,7 @@ class HPEXPRESTISCSIDriverTest(test.TestCase):
self.configuration.driver_ssl_cert_verify = False
self.configuration.hpexp_storage_id = CONFIG_MAP['serial']
- self.configuration.hpexp_pool = ["30"]
+ self.configuration.hpexp_pools = ["30"]
self.configuration.hpexp_snap_pool = None
self.configuration.hpexp_ldev_range = "0-1"
self.configuration.hpexp_target_ports = [CONFIG_MAP['port_id']]
@@ -776,8 +776,9 @@ class HPEXPRESTISCSIDriverTest(test.TestCase):
@mock.patch.object(requests.Session, "request")
def test_retype(self, request):
request.return_value = FakeResponse(200, GET_LDEV_RESULT)
- new_specs = {'hbsd:test': 'test'}
+ new_specs = {'hpe_xp:test': 'test'}
new_type_ref = volume_types.create(self.ctxt, 'new', new_specs)
+ new_type = volume_types.get_volume_type(self.ctxt, new_type_ref['id'])
diff = {}
host = {
'capabilities': {
@@ -787,7 +788,7 @@ class HPEXPRESTISCSIDriverTest(test.TestCase):
},
}
ret = self.driver.retype(
- self.ctxt, TEST_VOLUME[0], new_type_ref, diff, host)
+ self.ctxt, TEST_VOLUME[0], new_type, diff, host)
self.assertEqual(1, request.call_count)
self.assertTrue(ret)
diff --git a/cinder/tests/unit/volume/drivers/ibm/test_storwize_svc.py b/cinder/tests/unit/volume/drivers/ibm/test_storwize_svc.py
index 79d69cd0c..0b16810d3 100644
--- a/cinder/tests/unit/volume/drivers/ibm/test_storwize_svc.py
+++ b/cinder/tests/unit/volume/drivers/ibm/test_storwize_svc.py
@@ -4321,7 +4321,6 @@ class StorwizeSVCFcDriverTestCase(test.TestCase):
self.assertIsNotNone(host_name)
def test_storwize_get_host_from_connector_with_lshost_failure(self):
- self.skipTest('Bug 1640205')
self._connector.pop('initiator')
helper = self.fc_driver._helpers
# Create two hosts. The first is not related to the connector and
@@ -4341,12 +4340,6 @@ class StorwizeSVCFcDriverTestCase(test.TestCase):
host_name = helper.get_host_from_connector(self._connector)
self.assertIsNotNone(host_name)
- # Need to assert that lshost was actually called. The way
- # we do that is check that the next simulator error for lshost
- # has been reset.
- self.assertEqual(self.sim._next_cmd_error['lshost'], '',
- "lshost was not called in the simulator. The "
- "queued error still remains.")
def test_storwize_get_host_from_connector_with_lshost_failure2(self):
self._connector.pop('initiator')
@@ -6527,8 +6520,6 @@ class StorwizeSVCCommonDriverTestCase(test.TestCase):
self.assertTrue(each_pool['multiattach'])
self.assertLessEqual(each_pool['free_capacity_gb'],
each_pool['total_capacity_gb'])
- self.assertLessEqual(each_pool['allocated_capacity_gb'],
- each_pool['total_capacity_gb'])
self.assertEqual(25, each_pool['reserved_percentage'])
self.assertEqual(is_thin_provisioning_enabled,
each_pool['thin_provisioning_support'])
@@ -6543,8 +6534,6 @@ class StorwizeSVCCommonDriverTestCase(test.TestCase):
self._def_flags['storwize_svc_volpool_name'])
self.assertAlmostEqual(3328.0, each_pool['total_capacity_gb'])
self.assertAlmostEqual(3287.5, each_pool['free_capacity_gb'])
- self.assertAlmostEqual(25.0,
- each_pool['allocated_capacity_gb'])
if is_thin_provisioning_enabled:
self.assertAlmostEqual(
1576.96, each_pool['provisioned_capacity_gb'])
@@ -7257,7 +7246,7 @@ class StorwizeSVCCommonDriverTestCase(test.TestCase):
self.assertEqual(fields.GroupStatus.AVAILABLE,
model_update['status'])
# Delete Volume Group
- model_update = self.driver.delete_group(self.ctxt, volumegroup, None)
+ model_update = self.driver.delete_group(self.ctxt, volumegroup, [])
self.assertTrue(delete_volumegroup.called)
self.assertEqual(fields.GroupStatus.DELETED,
model_update[0]['status'])
@@ -7374,7 +7363,7 @@ class StorwizeSVCCommonDriverTestCase(test.TestCase):
# Delete Volume Group
model_update = self.driver.delete_group(self.ctxt, volumegroup,
- None)
+ [])
self.assertTrue(delete_volumegroup.called)
self.assertEqual(fields.GroupStatus.DELETED,
model_update[0]['status'])
@@ -7463,6 +7452,66 @@ class StorwizeSVCCommonDriverTestCase(test.TestCase):
@mock.patch.object(storwize_svc_common.StorwizeHelpers,
'get_system_info')
+ def test_storwize_delete_volumegroup_with_delete_volumes(self,
+ get_system_info):
+ """Test volume group creation and deletion"""
+
+ fake_system_info = {'code_level': (8, 5, 1, 0),
+ 'system_name': 'storwize-svc-sim',
+ 'system_id': '0123456789ABCDEF'}
+ get_system_info.return_value = fake_system_info
+ self.driver.do_setup(None)
+
+        # Setting the storwize_volume_group flag to True
+ self._set_flag('storwize_volume_group', True)
+
+ # Create volumegroup type
+ volumegroup_spec = {'volume_group_enabled': '<is> True'}
+ volumegroup_type_ref = group_types.create(self.ctxt,
+ 'volumegroup_type',
+ volumegroup_spec)
+ volumegroup_type = objects.GroupType.get_by_id(
+ self.ctxt, volumegroup_type_ref['id'])
+
+ # Create source volume
+ vol_type_ref = volume_types.create(self.ctxt, 'non_rep_type', {})
+ vol_type = objects.VolumeType.get_by_id(self.ctxt,
+ vol_type_ref['id'])
+ source_vol = self._generate_vol_info(vol_type)
+ self.driver.create_volume(source_vol)
+
+ # Create source volumegroup
+ source_volumegroup = testutils.create_group(
+ self.ctxt, group_type_id=volumegroup_type.id,
+ volume_type_ids=[vol_type_ref['id']])
+
+ model_update = self.driver.create_group(self.ctxt, source_volumegroup)
+ self.assertEqual(fields.GroupStatus.AVAILABLE,
+ model_update['status'])
+
+ # Add source volumes to source volumegroup
+ (model_update, add_volumes_update, remove_volumes_update) = (
+ self.driver.update_group(self.ctxt, source_volumegroup,
+ [source_vol], []))
+
+ self.assertEqual(fields.GroupStatus.AVAILABLE,
+ model_update['status'])
+ source_volumegroup_name = self.driver._get_volumegroup_name(
+ source_volumegroup)
+ self.assertEqual(source_volumegroup_name,
+ source_vol.metadata['Volume Group Name'])
+
+ # Delete Volume Group
+ model_update = self.driver.delete_group(self.ctxt,
+ source_volumegroup,
+ [source_vol])
+ self.assertEqual(fields.GroupStatus.DELETED,
+ model_update[0]['status'])
+ for volume in model_update[1]:
+ self.assertEqual('deleted', volume['status'])
+
+ @mock.patch.object(storwize_svc_common.StorwizeHelpers,
+ 'get_system_info')
@mock.patch.object(cinder.volume.volume_utils,
'is_group_a_type')
@mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type')
diff --git a/cinder/tests/unit/volume/drivers/nec/v/test_internal_nec_rest_fc.py b/cinder/tests/unit/volume/drivers/nec/v/test_internal_nec_rest_fc.py
index 5ef009f20..43b3bfb92 100644
--- a/cinder/tests/unit/volume/drivers/nec/v/test_internal_nec_rest_fc.py
+++ b/cinder/tests/unit/volume/drivers/nec/v/test_internal_nec_rest_fc.py
@@ -420,7 +420,7 @@ class VStorageRESTFCDriverTest(test.TestCase):
self.configuration.driver_ssl_cert_verify = False
self.configuration.nec_v_storage_id = CONFIG_MAP['serial']
- self.configuration.nec_v_pool = ["30"]
+ self.configuration.nec_v_pools = ["30"]
self.configuration.nec_v_snap_pool = None
self.configuration.nec_v_ldev_range = "0-1"
self.configuration.nec_v_target_ports = [CONFIG_MAP['port_id']]
@@ -587,8 +587,8 @@ class VStorageRESTFCDriverTest(test.TestCase):
drv = nec_v_fc.VStorageFCDriver(
configuration=self.configuration)
self._setup_config()
- tmp_pool = self.configuration.hitachi_pool
- self.configuration.hitachi_pool = [CONFIG_MAP['pool_name']]
+ tmp_pools = self.configuration.hitachi_pools
+ self.configuration.hitachi_pools = [CONFIG_MAP['pool_name']]
request.side_effect = [FakeResponse(200, POST_SESSIONS_RESULT),
FakeResponse(200, GET_POOLS_RESULT),
FakeResponse(200, GET_PORTS_RESULT),
@@ -599,7 +599,7 @@ class VStorageRESTFCDriverTest(test.TestCase):
drv.common.storage_info['wwns'])
self.assertEqual(1, brick_get_connector_properties.call_count)
self.assertEqual(4, request.call_count)
- self.configuration.hitachi_pool = tmp_pool
+ self.configuration.hitachi_pools = tmp_pools
# stop the Loopingcall within the do_setup treatment
self.driver.common.client.keep_session_loop.stop()
self.driver.common.client.keep_session_loop.wait()
@@ -959,6 +959,7 @@ class VStorageRESTFCDriverTest(test.TestCase):
request.return_value = FakeResponse(200, GET_LDEV_RESULT)
new_specs = {'nec:test': 'test'}
new_type_ref = volume_types.create(self.ctxt, 'new', new_specs)
+ new_type = volume_types.get_volume_type(self.ctxt, new_type_ref['id'])
diff = {}
host = {
'capabilities': {
@@ -968,7 +969,7 @@ class VStorageRESTFCDriverTest(test.TestCase):
},
}
ret = self.driver.retype(
- self.ctxt, TEST_VOLUME[0], new_type_ref, diff, host)
+ self.ctxt, TEST_VOLUME[0], new_type, diff, host)
self.assertEqual(1, request.call_count)
self.assertTrue(ret)
diff --git a/cinder/tests/unit/volume/drivers/nec/v/test_internal_nec_rest_iscsi.py b/cinder/tests/unit/volume/drivers/nec/v/test_internal_nec_rest_iscsi.py
index 05f0faa52..b5baa149a 100644
--- a/cinder/tests/unit/volume/drivers/nec/v/test_internal_nec_rest_iscsi.py
+++ b/cinder/tests/unit/volume/drivers/nec/v/test_internal_nec_rest_iscsi.py
@@ -331,7 +331,7 @@ class VStorageRESTISCSIDriverTest(test.TestCase):
self.configuration.driver_ssl_cert_verify = False
self.configuration.nec_v_storage_id = CONFIG_MAP['serial']
- self.configuration.nec_v_pool = ["30"]
+ self.configuration.nec_v_pools = ["30"]
self.configuration.nec_v_snap_pool = None
self.configuration.nec_v_ldev_range = "0-1"
self.configuration.nec_v_target_ports = [CONFIG_MAP['port_id']]
@@ -817,6 +817,7 @@ class VStorageRESTISCSIDriverTest(test.TestCase):
request.return_value = FakeResponse(200, GET_LDEV_RESULT)
new_specs = {'nec:test': 'test'}
new_type_ref = volume_types.create(self.ctxt, 'new', new_specs)
+ new_type = volume_types.get_volume_type(self.ctxt, new_type_ref['id'])
diff = {}
host = {
'capabilities': {
@@ -826,7 +827,7 @@ class VStorageRESTISCSIDriverTest(test.TestCase):
},
}
ret = self.driver.retype(
- self.ctxt, TEST_VOLUME[0], new_type_ref, diff, host)
+ self.ctxt, TEST_VOLUME[0], new_type, diff, host)
self.assertEqual(1, request.call_count)
self.assertTrue(ret)
diff --git a/cinder/tests/unit/volume/drivers/nec/v/test_nec_rest_fc.py b/cinder/tests/unit/volume/drivers/nec/v/test_nec_rest_fc.py
index 75fd90eba..dadbd1f1d 100644
--- a/cinder/tests/unit/volume/drivers/nec/v/test_nec_rest_fc.py
+++ b/cinder/tests/unit/volume/drivers/nec/v/test_nec_rest_fc.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2021 NEC corporation
+# Copyright (C) 2021, 2023, NEC corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -158,7 +158,7 @@ class VStorageRESTFCDriverTest(test.TestCase):
self.configuration.driver_ssl_cert_verify = False
self.configuration.nec_v_storage_id = CONFIG_MAP['serial']
- self.configuration.nec_v_pool = ["30"]
+ self.configuration.nec_v_pools = ["30"]
self.configuration.nec_v_snap_pool = None
self.configuration.nec_v_ldev_range = "0-1"
self.configuration.nec_v_target_ports = [CONFIG_MAP['port_id']]
@@ -268,8 +268,8 @@ class VStorageRESTFCDriverTest(test.TestCase):
configuration=self.configuration, db=db)
self.assertEqual(drv.configuration.hitachi_storage_id,
drv.configuration.nec_v_storage_id)
- self.assertEqual(drv.configuration.hitachi_pool,
- drv.configuration.nec_v_pool)
+ self.assertEqual(drv.configuration.hitachi_pools,
+ drv.configuration.nec_v_pools)
self.assertEqual(drv.configuration.hitachi_snap_pool,
drv.configuration.nec_v_snap_pool)
self.assertEqual(drv.configuration.hitachi_ldev_range,
diff --git a/cinder/tests/unit/volume/drivers/nec/v/test_nec_rest_iscsi.py b/cinder/tests/unit/volume/drivers/nec/v/test_nec_rest_iscsi.py
index c779105c3..2edb42a56 100644
--- a/cinder/tests/unit/volume/drivers/nec/v/test_nec_rest_iscsi.py
+++ b/cinder/tests/unit/volume/drivers/nec/v/test_nec_rest_iscsi.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2021 NEC corporation
+# Copyright (C) 2021, 2023, NEC corporation
#
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -180,7 +180,7 @@ class VStorageRESTISCSIDriverTest(test.TestCase):
self.configuration.driver_ssl_cert_verify = False
self.configuration.nec_v_storage_id = CONFIG_MAP['serial']
- self.configuration.nec_v_pool = ["30"]
+ self.configuration.nec_v_pools = ["30"]
self.configuration.nec_v_snap_pool = None
self.configuration.nec_v_ldev_range = "0-1"
self.configuration.nec_v_target_ports = [CONFIG_MAP['port_id']]
@@ -290,8 +290,8 @@ class VStorageRESTISCSIDriverTest(test.TestCase):
configuration=self.configuration, db=db)
self.assertEqual(drv.configuration.hitachi_storage_id,
drv.configuration.nec_v_storage_id)
- self.assertEqual(drv.configuration.hitachi_pool,
- drv.configuration.nec_v_pool)
+ self.assertEqual(drv.configuration.hitachi_pools,
+ drv.configuration.nec_v_pools)
self.assertEqual(drv.configuration.hitachi_snap_pool,
drv.configuration.nec_v_snap_pool)
self.assertEqual(drv.configuration.hitachi_ldev_range,
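The singular-to-plural rename exercised in these NEC tests mirrors the hitachi_pool to hitachi_pools change later in this patch, which keeps the old name usable through oslo.config's deprecated_name mechanism. A minimal sketch of that pattern, using the option definition shown further down:

from oslo_config import cfg

# Old configs that still set the singular name continue to load,
# but emit a deprecation warning pointing at the new name.
pool_opts = [
    cfg.ListOpt('hitachi_pools',
                default=[],
                deprecated_name='hitachi_pool',
                help='Pool number[s] or pool name[s] of the DP pool.'),
]

CONF = cfg.CONF
CONF.register_opts(pool_opts)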
diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/test_nfs_base.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/test_nfs_base.py
index 550da728a..9c6e686cd 100644
--- a/cinder/tests/unit/volume/drivers/netapp/dataontap/test_nfs_base.py
+++ b/cinder/tests/unit/volume/drivers/netapp/dataontap/test_nfs_base.py
@@ -16,7 +16,6 @@
"""Unit tests for the NetApp NFS storage driver."""
import copy
import os
-import threading
import time
from unittest import mock
@@ -620,36 +619,6 @@ class NetAppNfsDriverTestCase(test.TestCase):
os.path.exists.assert_called_once_with(
'dir/' + fake.CLONE_DESTINATION_NAME)
- def test__spawn_clean_cache_job_clean_job_setup(self):
- self.driver.cleaning = True
- mock_debug_log = self.mock_object(nfs_base.LOG, 'debug')
- self.mock_object(utils, 'synchronized', return_value=lambda f: f)
-
- retval = self.driver._spawn_clean_cache_job()
-
- self.assertIsNone(retval)
- self.assertEqual(1, mock_debug_log.call_count)
-
- def test__spawn_clean_cache_job_new_clean_job(self):
-
- class FakeTimer(object):
- def start(self):
- pass
-
- fake_timer = FakeTimer()
- self.mock_object(utils, 'synchronized', return_value=lambda f: f)
- self.mock_object(fake_timer, 'start')
- self.mock_object(nfs_base.LOG, 'debug')
- self.mock_object(self.driver, '_clean_image_cache')
- self.mock_object(threading, 'Timer', return_value=fake_timer)
-
- retval = self.driver._spawn_clean_cache_job()
-
- self.assertIsNone(retval)
- threading.Timer.assert_called_once_with(
- 0, self.driver._clean_image_cache)
- fake_timer.start.assert_called_once_with()
-
def test_cleanup_volume_on_failure(self):
path = '%s/%s' % (fake.NFS_SHARE, fake.NFS_VOLUME['name'])
mock_local_path = self.mock_object(self.driver, 'local_path')
@@ -1077,13 +1046,22 @@ class NetAppNfsDriverTestCase(test.TestCase):
self.driver, '_delete_snapshots_marked_for_deletion')
mock_call_ems_logging = self.mock_object(
self.driver, '_handle_ems_logging')
+ mock_call_clean_image_cache = self.mock_object(
+ self.driver, '_clean_image_cache')
+
+ # the image cache cleanup task can be configured with a custom interval
+ cache_cleanup_interval = loopingcalls.ONE_HOUR
+ self.driver.configuration.netapp_nfs_image_cache_cleanup_interval = (
+ cache_cleanup_interval)
self.driver._add_looping_tasks()
mock_add_task.assert_has_calls([
mock.call(mock_call_snap_cleanup, loopingcalls.ONE_MINUTE,
loopingcalls.ONE_MINUTE),
- mock.call(mock_call_ems_logging, loopingcalls.ONE_HOUR)])
+ mock.call(mock_call_ems_logging, loopingcalls.ONE_HOUR),
+ mock.call(mock_call_clean_image_cache, cache_cleanup_interval)
+ ])
def test__clone_from_cache(self):
image_id = 'fake_image_id'
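A minimal sketch of the registration the updated test above verifies, assuming _add_looping_tasks simply forwards each periodic job to loopingcalls.add_task with its interval. The helper name _clean_image_cache and the netapp_nfs_image_cache_cleanup_interval option come from the test; the surrounding structure is illustrative.

# Illustrative sketch of the add_task calls asserted above.
def add_looping_tasks_sketch(driver, loopingcalls):
    driver.loopingcalls.add_task(
        driver._delete_snapshots_marked_for_deletion,
        loopingcalls.ONE_MINUTE, loopingcalls.ONE_MINUTE)
    driver.loopingcalls.add_task(
        driver._handle_ems_logging, loopingcalls.ONE_HOUR)
    # New: image cache cleanup on an operator-configurable interval.
    interval = driver.configuration.netapp_nfs_image_cache_cleanup_interval
    driver.loopingcalls.add_task(driver._clean_image_cache, interval)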
diff --git a/cinder/tests/unit/volume/drivers/netapp/dataontap/test_nfs_cmode.py b/cinder/tests/unit/volume/drivers/netapp/dataontap/test_nfs_cmode.py
index d08d8f2dd..1f986a2f6 100644
--- a/cinder/tests/unit/volume/drivers/netapp/dataontap/test_nfs_cmode.py
+++ b/cinder/tests/unit/volume/drivers/netapp/dataontap/test_nfs_cmode.py
@@ -137,7 +137,6 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
mock_debug_log = self.mock_object(nfs_cmode.LOG, 'debug')
self.mock_object(self.driver, 'get_filter_function')
self.mock_object(self.driver, 'get_goodness_function')
- self.mock_object(self.driver, '_spawn_clean_cache_job')
self.driver.zapi_client = mock.Mock()
self.mock_object(self.driver, '_get_pool_stats', return_value={})
expected_stats = {
@@ -153,7 +152,6 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
retval = self.driver._update_volume_stats()
self.assertIsNone(retval)
- self.assertTrue(self.driver._spawn_clean_cache_job.called)
self.assertEqual(1, mock_debug_log.call_count)
self.assertEqual(expected_stats, self.driver._stats)
diff --git a/cinder/tests/unit/volume/drivers/test_lvm_driver.py b/cinder/tests/unit/volume/drivers/test_lvm_driver.py
index 12bfa00d4..f899a4829 100644
--- a/cinder/tests/unit/volume/drivers/test_lvm_driver.py
+++ b/cinder/tests/unit/volume/drivers/test_lvm_driver.py
@@ -17,6 +17,7 @@ from unittest import mock
import ddt
from oslo_concurrency import processutils
from oslo_config import cfg
+from oslo_utils import importutils
from cinder.brick.local_dev import lvm as brick_lvm
from cinder import db
@@ -47,6 +48,71 @@ class LVMVolumeDriverTestCase(test_driver.BaseDriverTestCase):
FAKE_VOLUME = {'name': 'test1',
'id': 'test1'}
+ def test___init___share_target_not_supported(self):
+ """Fail to use shared targets if target driver doesn't support it."""
+ original_import = importutils.import_object
+
+ def wrap_target_as_no_shared_support(*args, **kwargs):
+ res = original_import(*args, **kwargs)
+ self.mock_object(res, 'SHARED_TARGET_SUPPORT', False)
+ return res
+
+ self.patch('oslo_utils.importutils.import_object',
+ side_effect=wrap_target_as_no_shared_support)
+
+ self.configuration.lvm_share_target = True
+ self.assertRaises(exception.InvalidConfigurationValue,
+ lvm.LVMVolumeDriver,
+ configuration=self.configuration)
+
+ def test___init___secondary_ips_not_supported(self):
+ """Fail to use secondary ips if target driver doesn't support it."""
+ original_import = importutils.import_object
+
+ def wrap_target_as_no_secondary_ips_support(*args, **kwargs):
+ res = original_import(*args, **kwargs)
+ self.mock_object(res, 'SECONDARY_IP_SUPPORT', False)
+ return res
+
+ self.patch('oslo_utils.importutils.import_object',
+ side_effect=wrap_target_as_no_secondary_ips_support)
+
+ self.configuration.target_secondary_ip_addresses = True
+ self.assertRaises(exception.InvalidConfigurationValue,
+ lvm.LVMVolumeDriver,
+ configuration=self.configuration)
+
+ def test___init___share_target_supported(self):
+ """OK to use shared targets if target driver supports it."""
+ original_import = importutils.import_object
+
+ def wrap_target_as_no_shared_support(*args, **kwargs):
+ res = original_import(*args, **kwargs)
+ self.mock_object(res, 'SHARED_TARGET_SUPPORT', True)
+ return res
+
+ self.patch('oslo_utils.importutils.import_object',
+ side_effect=wrap_target_as_no_shared_support)
+
+ self.configuration.lvm_share_target = True
+ lvm.LVMVolumeDriver(configuration=self.configuration)
+
+ @ddt.data(True, False)
+ def test___init___share_target_not_requested(self, supports_shared):
+ """For non shared it works regardless of target driver support."""
+ original_import = importutils.import_object
+
+ def wrap_target_as_no_shared_support(*args, **kwargs):
+ res = original_import(*args, **kwargs)
+ self.mock_object(res, 'SHARED_TARGET_SUPPORT', supports_shared)
+ return res
+
+ self.patch('oslo_utils.importutils.import_object',
+ side_effect=wrap_target_as_no_shared_support)
+
+ self.configuration.lvm_share_target = False
+ lvm.LVMVolumeDriver(configuration=self.configuration)
+
@mock.patch.object(os.path, 'exists', return_value=True)
@mock.patch.object(fake_driver.FakeLoggingVolumeDriver, 'create_export')
def test_delete_volume_invalid_parameter(self, _mock_create_export,
@@ -1029,6 +1095,10 @@ class LVMISCSITestCase(test_driver.BaseDriverTestCase):
lvm_driver = lvm.LVMVolumeDriver(
configuration=self.configuration, vg_obj=vg_obj)
+ mock_same = self.mock_object(
+ lvm_driver.target_driver, 'are_same_connector',
+ side_effect=lvm_driver.target_driver.are_same_connector)
+
with mock.patch.object(lvm_driver.target_driver,
'terminate_connection') as mock_term_conn:
@@ -1037,14 +1107,31 @@ class LVMISCSITestCase(test_driver.BaseDriverTestCase):
self.assertTrue(lvm_driver.terminate_connection(vol,
host1_connector))
mock_term_conn.assert_not_called()
+ self.assertEqual(3, mock_same.call_count)
+ mock_same.assert_has_calls((
+ mock.call(host1_connector, host1_connector),
+ mock.call(host1_connector, host1_connector),
+ mock.call(host2_connector, host1_connector)))
+ mock_same.reset_mock()
# Verify that terminate_connection is called against either host
# when only one active attachment per host is present.
vol.volume_attachment.objects.remove(host1_attachment1)
self.assertTrue(lvm_driver.terminate_connection(vol,
host1_connector))
+ self.assertEqual(2, mock_same.call_count)
+ mock_same.assert_has_calls((
+ mock.call(host1_connector, host1_connector),
+ mock.call(host2_connector, host1_connector)))
+ mock_same.reset_mock()
+
self.assertTrue(lvm_driver.terminate_connection(vol,
host2_connector))
+ self.assertEqual(2, mock_same.call_count)
+ mock_same.assert_has_calls((
+ mock.call(host1_connector, host2_connector),
+ mock.call(host2_connector, host2_connector)))
+ mock_same.reset_mock()
mock_term_conn.assert_has_calls([mock.call(vol, host1_connector),
mock.call(vol, host2_connector)])
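A condensed sketch of the configuration check the first group of new LVM tests exercises: if shared targets (or secondary target IPs) are requested but the loaded target driver does not advertise support, driver initialization fails. The attribute and option names come from the tests; the function itself is illustrative, not the driver's code.

from cinder import exception

# Illustrative: reject unsupported combinations at driver init time.
def check_target_capabilities(configuration, target_driver):
    if (configuration.lvm_share_target
            and not getattr(target_driver, 'SHARED_TARGET_SUPPORT', False)):
        raise exception.InvalidConfigurationValue(
            option='lvm_share_target', value=True)
    if (configuration.target_secondary_ip_addresses
            and not getattr(target_driver, 'SECONDARY_IP_SUPPORT', False)):
        raise exception.InvalidConfigurationValue(
            option='target_secondary_ip_addresses',
            value=configuration.target_secondary_ip_addresses)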
diff --git a/cinder/tests/unit/volume/drivers/test_rbd.py b/cinder/tests/unit/volume/drivers/test_rbd.py
index 21645886f..57dae3af1 100644
--- a/cinder/tests/unit/volume/drivers/test_rbd.py
+++ b/cinder/tests/unit/volume/drivers/test_rbd.py
@@ -1969,6 +1969,51 @@ class RBDTestCase(test.TestCase):
])
self.assertEqual((free_capacity, total_capacity), result)
+ @ddt.data(
+ # Normal case, no quota and dynamic total
+ {'free_capacity': 27.0, 'total_capacity': 28.44},
+ # No quota and static total
+ {'dynamic_total': False,
+ 'free_capacity': 27.0, 'total_capacity': 59.96},
+ # Quota and dynamic total
+ {'quota_max_bytes': 3221225472, 'max_avail': 1073741824,
+ 'free_capacity': 1, 'total_capacity': 2.44},
+ # Quota and static total
+ {'quota_max_bytes': 3221225472, 'max_avail': 1073741824,
+ 'dynamic_total': False,
+ 'free_capacity': 1, 'total_capacity': 3.00},
+ # Quota and dynamic total when free would be negative
+ {'quota_max_bytes': 1073741824,
+ 'free_capacity': 0, 'total_capacity': 1.44},
+ )
+ @ddt.unpack
+ @common_mocks
+ def test_get_pool_nautilus(self, free_capacity, total_capacity,
+ max_avail=28987613184, quota_max_bytes=0,
+ dynamic_total=True):
+ client = self.mock_client.return_value
+ client.__enter__.return_value = client
+ client.cluster.mon_command.side_effect = [
+ (0, '{"stats":{"total_bytes":64385286144,'
+ '"total_used_bytes":3289628672,"total_avail_bytes":61095657472},'
+ '"pools":[{"name":"rbd","id":2,"stats":{"kb_used":1510197,'
+ '"stored":1546440971,"bytes_used":4639322913,"max_avail":%s,'
+ '"objects":412}},{"name":"volumes","id":3,"stats":{"kb_used":0,'
+ '"bytes_used":0,"max_avail":28987613184,"objects":0}}]}\n' %
+ max_avail, ''),
+ (0, '{"pool_name":"volumes","pool_id":4,"quota_max_objects":0,'
+ '"quota_max_bytes":%s}\n' % quota_max_bytes, ''),
+ ]
+ with mock.patch.object(self.driver.configuration, 'safe_get',
+ return_value=dynamic_total):
+ result = self.driver._get_pool_stats()
+ client.cluster.mon_command.assert_has_calls([
+ mock.call('{"prefix":"df", "format":"json"}', b''),
+ mock.call('{"prefix":"osd pool get-quota", "pool": "rbd",'
+ ' "format":"json"}', b''),
+ ])
+ self.assertEqual((free_capacity, total_capacity), result)
+
@common_mocks
def test_get_pool_bytes(self):
"""Test for mon_commands returning bytes instead of strings."""
diff --git a/cinder/tests/unit/volume/drivers/test_spdk.py b/cinder/tests/unit/volume/drivers/test_spdk.py
index 5a6fa9df0..cf725ffba 100644
--- a/cinder/tests/unit/volume/drivers/test_spdk.py
+++ b/cinder/tests/unit/volume/drivers/test_spdk.py
@@ -502,8 +502,10 @@ class SpdkDriverTestCase(test.TestCase):
self.configuration = mock.Mock(conf.Configuration)
self.configuration.target_helper = ""
self.configuration.target_ip_address = "192.168.0.1"
+ self.configuration.target_secondary_ip_addresses = []
self.configuration.target_port = 4420
self.configuration.target_prefix = "nqn.2014-08.io.spdk"
+ self.configuration.nvmeof_conn_info_version = 1
self.configuration.nvmet_port_id = "1"
self.configuration.nvmet_ns_id = "fake_id"
self.configuration.nvmet_subsystem_name = "2014-08.io.spdk"
@@ -514,6 +516,7 @@ class SpdkDriverTestCase(test.TestCase):
mock_safe_get = mock.Mock()
mock_safe_get.return_value = 'spdk-nvmeof'
self.configuration.safe_get = mock_safe_get
+ self.configuration.lvm_share_target = False
self.jsonrpcclient = JSONRPCClient()
self.driver = spdk_driver.SPDKDriver(configuration=
self.configuration)
@@ -794,7 +797,7 @@ class SpdkDriverTestCase(test.TestCase):
self.configuration.nvmet_subsystem_name,
self.driver.target_driver._get_first_free_node()
),
- self.configuration.target_ip_address,
+ [self.configuration.target_ip_address],
self.configuration.target_port, "rdma",
self.configuration.nvmet_ns_id
),
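The updated call above now passes a list of portal addresses rather than a single IP. A minimal sketch of how the primary and secondary addresses could be combined, assuming the target driver simply prepends the primary address; the helper name is illustrative.

def portal_addresses(configuration):
    # Primary address first, followed by any configured secondaries.
    return ([configuration.target_ip_address]
            + list(configuration.target_secondary_ip_addresses))

# With the test configuration above this yields ['192.168.0.1'].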
diff --git a/cinder/tests/unit/volume/test_volume.py b/cinder/tests/unit/volume/test_volume.py
index 0b9ac4504..fa0f1da04 100644
--- a/cinder/tests/unit/volume/test_volume.py
+++ b/cinder/tests/unit/volume/test_volume.py
@@ -776,19 +776,6 @@ class VolumeTestCase(base.BaseVolumeTestCase):
self.assertEqual(foo['id'], vol['volume_type_id'])
self.assertTrue(vol['multiattach'])
- def test_create_volume_with_multiattach_flag(self):
- """Tests creating a volume with multiattach=True but no special type.
-
- This tests the pre 3.50 microversion behavior of being able to create
- a volume with the multiattach request parameter regardless of a
- multiattach-capable volume type.
- """
- volume_api = cinder.volume.api.API()
- volume = volume_api.create(
- self.context, 1, 'name', 'description', multiattach=True,
- volume_type=self.vol_type)
- self.assertTrue(volume.multiattach)
-
def _fail_multiattach_policy_authorize(self, policy):
if policy == vol_policy.MULTIATTACH_POLICY:
raise exception.PolicyNotAuthorized(action='Test')
@@ -813,16 +800,6 @@ class VolumeTestCase(base.BaseVolumeTestCase):
1, 'admin-vol', 'description',
volume_type=foo)
- def test_create_volume_with_multiattach_flag_not_authorized(self):
- """Test policy unauthorized create with multiattach flag."""
- volume_api = cinder.volume.api.API()
-
- with mock.patch.object(self.context, 'authorize') as mock_auth:
- mock_auth.side_effect = self._fail_multiattach_policy_authorize
- self.assertRaises(exception.PolicyNotAuthorized,
- volume_api.create, self.context, 1, 'name',
- 'description', multiattach=True)
-
@mock.patch.object(key_manager, 'API', fake_keymgr.fake_api)
def test_create_volume_with_encrypted_volume_type_multiattach(self):
ctxt = context.get_admin_context()
diff --git a/cinder/tests/unit/volume/test_volume_usage_audit.py b/cinder/tests/unit/volume/test_volume_usage_audit.py
index bb9270d70..c2bdb75dd 100644
--- a/cinder/tests/unit/volume/test_volume_usage_audit.py
+++ b/cinder/tests/unit/volume/test_volume_usage_audit.py
@@ -187,9 +187,8 @@ class GetActiveByWindowTestCase(base.BaseVolumeTestCase):
datetime.datetime(1, 4, 1, 1, 1, 1),
project_id=fake.PROJECT_ID)
self.assertEqual(3, len(volumes))
- self.assertEqual(fake.VOLUME2_ID, volumes[0].id)
- self.assertEqual(fake.VOLUME3_ID, volumes[1].id)
- self.assertEqual(fake.VOLUME4_ID, volumes[2].id)
+ self.assertEqual({fake.VOLUME2_ID, fake.VOLUME3_ID, fake.VOLUME4_ID},
+ {v.id for v in volumes})
def test_snapshot_get_all_active_by_window(self):
# Find all all snapshots valid within a timeframe window.
@@ -229,12 +228,11 @@ class GetActiveByWindowTestCase(base.BaseVolumeTestCase):
datetime.datetime(1, 3, 1, 1, 1, 1),
datetime.datetime(1, 4, 1, 1, 1, 1)).objects
self.assertEqual(3, len(snapshots))
- self.assertEqual(snap2.id, snapshots[0].id)
- self.assertEqual(fake.VOLUME_ID, snapshots[0].volume_id)
- self.assertEqual(snap3.id, snapshots[1].id)
- self.assertEqual(fake.VOLUME_ID, snapshots[1].volume_id)
- self.assertEqual(snap4.id, snapshots[2].id)
- self.assertEqual(fake.VOLUME_ID, snapshots[2].volume_id)
+
+ self.assertEqual({snap2.id, snap3.id, snap4.id},
+ {s.id for s in snapshots})
+ self.assertEqual({fake.VOLUME_ID},
+ {s.volume_id for s in snapshots})
def test_backup_get_all_active_by_window(self):
# Find all backups valid within a timeframe window.
@@ -266,6 +264,5 @@ class GetActiveByWindowTestCase(base.BaseVolumeTestCase):
project_id=fake.PROJECT_ID
)
self.assertEqual(3, len(backups))
- self.assertEqual(fake.BACKUP2_ID, backups[0].id)
- self.assertEqual(fake.BACKUP3_ID, backups[1].id)
- self.assertEqual(fake.BACKUP4_ID, backups[2].id)
+ self.assertEqual({fake.BACKUP2_ID, fake.BACKUP3_ID, fake.BACKUP4_ID},
+ {b.id for b in backups})
diff --git a/cinder/tests/unit/windows/test_iscsi.py b/cinder/tests/unit/windows/test_iscsi.py
index 282557fca..166def263 100644
--- a/cinder/tests/unit/windows/test_iscsi.py
+++ b/cinder/tests/unit/windows/test_iscsi.py
@@ -82,7 +82,7 @@ class TestWindowsISCSIDriver(test.TestCase):
self._driver.configuration = mock.Mock()
self._driver.configuration.target_port = iscsi_port
self._driver.configuration.target_ip_address = requested_ips[0]
- self._driver.configuration.iscsi_secondary_ip_addresses = (
+ self._driver.configuration.target_secondary_ip_addresses = (
requested_ips[1:])
self._driver._tgt_utils.get_portal_locations.return_value = (
diff --git a/cinder/volume/api.py b/cinder/volume/api.py
index ff98c9401..480f4dc95 100644
--- a/cinder/volume/api.py
+++ b/cinder/volume/api.py
@@ -230,7 +230,6 @@ class API(base.Base):
source_replica=None,
consistencygroup: Optional[objects.ConsistencyGroup] = None,
cgsnapshot: Optional[objects.CGSnapshot] = None,
- multiattach: Optional[bool] = False,
source_cg=None,
group: Optional[objects.Group] = None,
group_snapshot=None,
@@ -339,7 +338,6 @@ class API(base.Base):
'optional_args': {'is_quota_committed': False},
'consistencygroup': consistencygroup,
'cgsnapshot': cgsnapshot,
- 'raw_multiattach': multiattach,
'group': group,
'group_snapshot': group_snapshot,
'source_group': source_group,
diff --git a/cinder/volume/driver.py b/cinder/volume/driver.py
index 96c76faed..f623a39a0 100644
--- a/cinder/volume/driver.py
+++ b/cinder/volume/driver.py
@@ -57,7 +57,8 @@ volume_opts = [
default='$my_ip',
help='The IP address that the iSCSI/NVMEoF daemon is '
'listening on'),
- cfg.ListOpt('iscsi_secondary_ip_addresses',
+ cfg.ListOpt('target_secondary_ip_addresses',
+ deprecated_name='iscsi_secondary_ip_addresses',
default=[],
help='The list of secondary IP addresses of the '
'iSCSI/NVMEoF daemon'),
@@ -263,14 +264,27 @@ iser_opts = [
help='The name of the iSER target user-land tool to use'),
]
+nvmeof_opts = [
+ cfg.IntOpt('nvmeof_conn_info_version',
+ default=1,
+ min=1, max=2,
+ help='NVMe os-brick connector has 2 different connection info '
+ 'formats; this allows some NVMe-oF drivers that use the '
+ 'original format (version 1), such as spdk and LVM-nvmet, '
+ 'to send the newer format.'),
+]
+
nvmet_opts = [
cfg.PortOpt('nvmet_port_id',
default=1,
- help='The port that the NVMe target is listening on.'),
+ help='The id of the NVMe target port definition when not '
+ 'sharing targets. The starting port id value when '
+ 'sharing, incremented for each secondary ip address.'),
cfg.IntOpt('nvmet_ns_id',
default=10,
- help='The namespace id associated with the subsystem '
- 'that will be created with the path for the LVM volume.'),
+ help='Namespace id for the subsystem for the LVM volume when '
+ 'not sharing targets. The minimum id value when sharing. '
+ 'Maximum supported value in Linux is 8192.')
]
scst_opts = [
@@ -348,11 +362,13 @@ fqdn_opts = [
CONF = cfg.CONF
CONF.register_opts(volume_opts, group=configuration.SHARED_CONF_GROUP)
CONF.register_opts(iser_opts, group=configuration.SHARED_CONF_GROUP)
+CONF.register_opts(nvmeof_opts, group=configuration.SHARED_CONF_GROUP)
CONF.register_opts(nvmet_opts, group=configuration.SHARED_CONF_GROUP)
CONF.register_opts(scst_opts, group=configuration.SHARED_CONF_GROUP)
CONF.register_opts(image_opts, group=configuration.SHARED_CONF_GROUP)
CONF.register_opts(volume_opts)
CONF.register_opts(iser_opts)
+CONF.register_opts(nvmeof_opts)
CONF.register_opts(nvmet_opts)
CONF.register_opts(scst_opts)
CONF.register_opts(backup_opts)
@@ -414,6 +430,7 @@ class BaseVD(object, metaclass=abc.ABCMeta):
if self.configuration:
self.configuration.append_config_values(volume_opts)
self.configuration.append_config_values(iser_opts)
+ self.configuration.append_config_values(nvmeof_opts)
self.configuration.append_config_values(nvmet_opts)
self.configuration.append_config_values(scst_opts)
self.configuration.append_config_values(backup_opts)
diff --git a/cinder/volume/drivers/dell_emc/powerflex/driver.py b/cinder/volume/drivers/dell_emc/powerflex/driver.py
index 206752f62..3ec57ca10 100644
--- a/cinder/volume/drivers/dell_emc/powerflex/driver.py
+++ b/cinder/volume/drivers/dell_emc/powerflex/driver.py
@@ -868,6 +868,12 @@ class PowerFlexDriver(driver.VolumeDriver):
connection_properties["scaleIO_volume_id"] = vol_or_snap.provider_id
connection_properties["config_group"] = self.configuration.config_group
connection_properties["failed_over"] = self._is_failed_over
+ connection_properties["verify_certificate"] = (
+ self._get_client().verify_certificate
+ )
+ connection_properties["certificate_path"] = (
+ self._get_client().certificate_path
+ )
if vol_size is not None:
extra_specs = self._get_volumetype_extraspecs(vol_or_snap)
diff --git a/cinder/volume/drivers/dell_emc/powermax/rest.py b/cinder/volume/drivers/dell_emc/powermax/rest.py
index 43a9dd50e..2131bc2d5 100644
--- a/cinder/volume/drivers/dell_emc/powermax/rest.py
+++ b/cinder/volume/drivers/dell_emc/powermax/rest.py
@@ -1257,7 +1257,7 @@ class PowerMaxRest(object):
if not isinstance(device_id, list):
device_id = [device_id]
- force_add = "true" if force else "false"
+ force_add = self._check_force(extra_specs, force)
payload = ({"executionOption": "ASYNCHRONOUS",
"editStorageGroupActionParam": {
@@ -1282,8 +1282,7 @@ class PowerMaxRest(object):
:param extra_specs: the extra specifications
"""
- force_vol_edit = (
- "true" if utils.FORCE_VOL_EDIT in extra_specs else "false")
+ force_vol_edit = self._check_force(extra_specs)
if not isinstance(device_id, list):
device_id = [device_id]
payload = ({"executionOption": "ASYNCHRONOUS",
@@ -1410,7 +1409,7 @@ class PowerMaxRest(object):
:param extra_specs: extra specifications
:param force: force flag (necessary on a detach)
"""
- force_flag = "true" if force else "false"
+ force_flag = self._check_force(extra_specs, force)
payload = ({"executionOption": "ASYNCHRONOUS",
"editStorageGroupActionParam": {
"moveVolumeToStorageGroupParam": {
@@ -2200,8 +2199,7 @@ class PowerMaxRest(object):
"""
action, operation, payload = '', '', {}
copy = 'true' if copy else 'false'
- force = (
- "true" if utils.FORCE_VOL_EDIT in extra_specs else "false")
+ force = self._check_force(extra_specs)
if link:
action = "Link"
@@ -3562,3 +3560,18 @@ class PowerMaxRest(object):
return (self.ucode_major_level >= utils.UCODE_5978 and
self.ucode_minor_level >= utils.UCODE_5978_HICKORY) or (
self.ucode_major_level >= utils.UCODE_6079)
+
+ @staticmethod
+ def _check_force(extra_specs, force_flag=False):
+ """Determine whether force should be used
+
+ Returns 'true' if force_flag is True or FORCE_VOL_EDIT is set in
+ extra_specs, otherwise returns 'false'.
+
+ :param extra_specs: extra specs dict
+ :param force_flag: force flag boolean
+
+ :returns: str (true or false)
+ """
+ return "true" if force_flag else (
+ "true" if utils.FORCE_VOL_EDIT in extra_specs else "false")
diff --git a/cinder/volume/drivers/hitachi/hbsd_common.py b/cinder/volume/drivers/hitachi/hbsd_common.py
index 5a49228eb..79e604da8 100644
--- a/cinder/volume/drivers/hitachi/hbsd_common.py
+++ b/cinder/volume/drivers/hitachi/hbsd_common.py
@@ -14,6 +14,7 @@
#
"""Common module for Hitachi HBSD Driver."""
+import json
import re
from oslo_config import cfg
@@ -28,8 +29,25 @@ from cinder.volume.drivers.hitachi import hbsd_utils as utils
from cinder.volume import volume_types
from cinder.volume import volume_utils
-_STR_VOLUME = 'volume'
-_STR_SNAPSHOT = 'snapshot'
+_GROUP_NAME_FORMAT_DEFAULT_FC = utils.TARGET_PREFIX + '{wwn}'
+_GROUP_NAME_FORMAT_DEFAULT_ISCSI = utils.TARGET_PREFIX + '{ip}'
+_GROUP_NAME_MAX_LEN_FC = 64
+_GROUP_NAME_MAX_LEN_ISCSI = 32
+
+GROUP_NAME_ALLOWED_CHARS = 'a-zA-Z0-9.@_:-'
+GROUP_NAME_VAR_WWN = '{wwn}'
+GROUP_NAME_VAR_IP = '{ip}'
+GROUP_NAME_VAR_HOST = '{host}'
+
+_GROUP_NAME_VAR_WWN_LEN = 16
+_GROUP_NAME_VAR_IP_LEN = 15
+_GROUP_NAME_VAR_HOST_LEN = 1
+_GROUP_NAME_VAR_LEN = {GROUP_NAME_VAR_WWN: _GROUP_NAME_VAR_WWN_LEN,
+ GROUP_NAME_VAR_IP: _GROUP_NAME_VAR_IP_LEN,
+ GROUP_NAME_VAR_HOST: _GROUP_NAME_VAR_HOST_LEN}
+
+STR_VOLUME = 'volume'
+STR_SNAPSHOT = 'snapshot'
_INHERITED_VOLUME_OPTS = [
'volume_backend_name',
@@ -49,8 +67,9 @@ COMMON_VOLUME_OPTS = [
default=None,
help='Product number of the storage system.'),
cfg.ListOpt(
- 'hitachi_pool',
+ 'hitachi_pools',
default=[],
+ deprecated_name='hitachi_pool',
help='Pool number[s] or pool name[s] of the DP pool.'),
cfg.StrOpt(
'hitachi_snap_pool',
@@ -114,15 +133,52 @@ COMMON_PORT_OPTS = [
'WWNs are registered to ports in a round-robin fashion.'),
]
+COMMON_PAIR_OPTS = [
+ cfg.IntOpt(
+ 'hitachi_pair_target_number',
+ default=0, min=0, max=99,
+ help='Pair target name of the host group or iSCSI target'),
+]
+
+COMMON_NAME_OPTS = [
+ cfg.StrOpt(
+ 'hitachi_group_name_format',
+ default=None,
+ help='Format of host groups, iSCSI targets, and server objects.'),
+]
+
+_GROUP_NAME_FORMAT = {
+ 'FC': {
+ 'group_name_max_len': _GROUP_NAME_MAX_LEN_FC,
+ 'group_name_var_cnt': {
+ GROUP_NAME_VAR_WWN: [1],
+ GROUP_NAME_VAR_IP: [0],
+ GROUP_NAME_VAR_HOST: [0, 1],
+ },
+ 'group_name_format_default': _GROUP_NAME_FORMAT_DEFAULT_FC,
+ },
+ 'iSCSI': {
+ 'group_name_max_len': _GROUP_NAME_MAX_LEN_ISCSI,
+ 'group_name_var_cnt': {
+ GROUP_NAME_VAR_WWN: [0],
+ GROUP_NAME_VAR_IP: [1],
+ GROUP_NAME_VAR_HOST: [0, 1],
+ },
+ 'group_name_format_default': _GROUP_NAME_FORMAT_DEFAULT_ISCSI,
+ }
+}
+
CONF = cfg.CONF
CONF.register_opts(COMMON_VOLUME_OPTS, group=configuration.SHARED_CONF_GROUP)
CONF.register_opts(COMMON_PORT_OPTS, group=configuration.SHARED_CONF_GROUP)
+CONF.register_opts(COMMON_PAIR_OPTS, group=configuration.SHARED_CONF_GROUP)
+CONF.register_opts(COMMON_NAME_OPTS, group=configuration.SHARED_CONF_GROUP)
LOG = logging.getLogger(__name__)
MSG = utils.HBSDMsg
-def _str2int(num):
+def str2int(num):
"""Convert a string into an integer."""
if not num:
return None
@@ -156,12 +212,32 @@ class HBSDCommon():
'ldev_range': [],
'controller_ports': [],
'compute_ports': [],
+ 'pair_ports': [],
'wwns': {},
'portals': {},
}
+ self.storage_id = None
+ self.group_name_format = _GROUP_NAME_FORMAT[driverinfo['proto']]
+ self.format_info = {
+ 'group_name_format': self.group_name_format[
+ 'group_name_format_default'],
+ 'group_name_format_without_var_len': (
+ len(re.sub('|'.join([GROUP_NAME_VAR_WWN,
+ GROUP_NAME_VAR_IP, GROUP_NAME_VAR_HOST]), '',
+ self.group_name_format['group_name_format_default']))),
+ 'group_name_var_cnt': {
+ GROUP_NAME_VAR_WWN: self.group_name_format[
+ 'group_name_format_default'].count(GROUP_NAME_VAR_WWN),
+ GROUP_NAME_VAR_IP: self.group_name_format[
+ 'group_name_format_default'].count(GROUP_NAME_VAR_IP),
+ GROUP_NAME_VAR_HOST: self.group_name_format[
+ 'group_name_format_default'].count(GROUP_NAME_VAR_HOST),
+ }
+ }
+
self._required_common_opts = [
self.driver_info['param_prefix'] + '_storage_id',
- self.driver_info['param_prefix'] + '_pool',
+ self.driver_info['param_prefix'] + '_pools',
]
self.port_index = {}
@@ -175,7 +251,7 @@ class HBSDCommon():
return pool['location_info']['pool_id']
return None
- def create_ldev(self, size, pool_id, ldev_range):
+ def create_ldev(self, size, extra_specs, pool_id, ldev_range):
"""Create an LDEV and return its LDEV number."""
raise NotImplementedError()
@@ -185,13 +261,15 @@ class HBSDCommon():
def create_volume(self, volume):
"""Create a volume and return its properties."""
+ extra_specs = self.get_volume_extra_specs(volume)
pool_id = self.get_pool_id_of_volume(volume)
ldev_range = self.storage_info['ldev_range']
try:
- ldev = self.create_ldev(volume['size'], pool_id, ldev_range)
+ ldev = self.create_ldev(
+ volume['size'], extra_specs, pool_id, ldev_range)
except Exception:
with excutils.save_and_reraise_exception():
- utils.output_log(MSG.CREATE_LDEV_FAILED)
+ self.output_log(MSG.CREATE_LDEV_FAILED)
self.modify_ldev_name(ldev, volume['id'].replace("-", ""))
return {
'provider_location': str(ldev),
@@ -211,44 +289,48 @@ class HBSDCommon():
raise NotImplementedError()
def copy_on_storage(
- self, pvol, size, pool_id, snap_pool_id, ldev_range,
- is_snapshot=False, sync=False):
+ self, pvol, size, extra_specs, pool_id, snap_pool_id, ldev_range,
+ is_snapshot=False, sync=False, is_rep=False):
"""Create a copy of the specified LDEV on the storage."""
ldev_info = self.get_ldev_info(['status', 'attributes'], pvol)
if ldev_info['status'] != 'NML':
- msg = utils.output_log(MSG.INVALID_LDEV_STATUS_FOR_COPY, ldev=pvol)
+ msg = self.output_log(MSG.INVALID_LDEV_STATUS_FOR_COPY, ldev=pvol)
self.raise_error(msg)
- svol = self.create_ldev(size, pool_id, ldev_range)
+ svol = self.create_ldev(size, extra_specs, pool_id, ldev_range)
try:
self.create_pair_on_storage(
pvol, svol, snap_pool_id, is_snapshot=is_snapshot)
- if sync:
+ if sync or is_rep:
self.wait_copy_completion(pvol, svol)
except Exception:
with excutils.save_and_reraise_exception():
try:
self.delete_ldev(svol)
except exception.VolumeDriverException:
- utils.output_log(MSG.DELETE_LDEV_FAILED, ldev=svol)
+ self.output_log(MSG.DELETE_LDEV_FAILED, ldev=svol)
return svol
- def create_volume_from_src(self, volume, src, src_type):
+ def create_volume_from_src(self, volume, src, src_type, is_rep=False):
"""Create a volume from a volume or snapshot and return its properties.
"""
- ldev = utils.get_ldev(src)
+ ldev = self.get_ldev(src)
if ldev is None:
- msg = utils.output_log(
+ msg = self.output_log(
MSG.INVALID_LDEV_FOR_VOLUME_COPY, type=src_type, id=src['id'])
self.raise_error(msg)
size = volume['size']
+ extra_specs = self.get_volume_extra_specs(volume)
pool_id = self.get_pool_id_of_volume(volume)
snap_pool_id = self.storage_info['snap_pool_id']
ldev_range = self.storage_info['ldev_range']
- new_ldev = self.copy_on_storage(
- ldev, size, pool_id, snap_pool_id, ldev_range)
+ new_ldev = self.copy_on_storage(ldev, size, extra_specs, pool_id,
+ snap_pool_id, ldev_range,
+ is_rep=is_rep)
self.modify_ldev_name(new_ldev, volume['id'].replace("-", ""))
+ if is_rep:
+ self.delete_pair(new_ldev)
return {
'provider_location': str(new_ldev),
@@ -256,11 +338,11 @@ class HBSDCommon():
def create_cloned_volume(self, volume, src_vref):
"""Create a clone of the specified volume and return its properties."""
- return self.create_volume_from_src(volume, src_vref, _STR_VOLUME)
+ return self.create_volume_from_src(volume, src_vref, STR_VOLUME)
def create_volume_from_snapshot(self, volume, snapshot):
"""Create a volume from a snapshot and return its properties."""
- return self.create_volume_from_src(volume, snapshot, _STR_SNAPSHOT)
+ return self.create_volume_from_src(volume, snapshot, STR_SNAPSHOT)
def delete_pair_based_on_svol(self, pvol, svol_info):
"""Disconnect all volume pairs to which the specified S-VOL belongs."""
@@ -276,7 +358,7 @@ class HBSDCommon():
if not pair_info:
return
if pair_info['pvol'] == ldev:
- utils.output_log(
+ self.output_log(
MSG.UNABLE_TO_DELETE_PAIR, pvol=pair_info['pvol'])
self.raise_busy()
else:
@@ -311,9 +393,9 @@ class HBSDCommon():
def delete_volume(self, volume):
"""Delete the specified volume."""
- ldev = utils.get_ldev(volume)
+ ldev = self.get_ldev(volume)
if ldev is None:
- utils.output_log(
+ self.output_log(
MSG.INVALID_LDEV_FOR_DELETION,
method='delete_volume', id=volume['id'])
return
@@ -328,27 +410,29 @@ class HBSDCommon():
def create_snapshot(self, snapshot):
"""Create a snapshot from a volume and return its properties."""
src_vref = snapshot.volume
- ldev = utils.get_ldev(src_vref)
+ ldev = self.get_ldev(src_vref)
if ldev is None:
- msg = utils.output_log(
+ msg = self.output_log(
MSG.INVALID_LDEV_FOR_VOLUME_COPY,
type='volume', id=src_vref['id'])
self.raise_error(msg)
size = snapshot['volume_size']
+ extra_specs = self.get_volume_extra_specs(snapshot['volume'])
pool_id = self.get_pool_id_of_volume(snapshot['volume'])
snap_pool_id = self.storage_info['snap_pool_id']
ldev_range = self.storage_info['ldev_range']
new_ldev = self.copy_on_storage(
- ldev, size, pool_id, snap_pool_id, ldev_range, is_snapshot=True)
+ ldev, size, extra_specs, pool_id, snap_pool_id, ldev_range,
+ is_snapshot=True)
return {
'provider_location': str(new_ldev),
}
def delete_snapshot(self, snapshot):
"""Delete the specified snapshot."""
- ldev = utils.get_ldev(snapshot)
+ ldev = self.get_ldev(snapshot)
if ldev is None:
- utils.output_log(
+ self.output_log(
MSG.INVALID_LDEV_FOR_DELETION, method='delete_snapshot',
id=snapshot['id'])
return
@@ -379,28 +463,30 @@ class HBSDCommon():
pool_name=pool_name,
reserved_percentage=self.conf.safe_get('reserved_percentage'),
QoS_support=False,
+ thin_provisioning_support=True,
thick_provisioning_support=False,
multiattach=True,
consistencygroup_support=True,
consistent_group_snapshot_enabled=True,
+ max_over_subscription_ratio=(
+ volume_utils.get_max_over_subscription_ratio(
+ self.conf.safe_get('max_over_subscription_ratio'),
+ True)),
location_info=location_info
))
if cap_data is None:
single_pool.update(dict(
+ total_capacity_gb=0,
+ free_capacity_gb=0,
provisioned_capacity_gb=0,
backend_state='down'))
- utils.output_log(MSG.POOL_INFO_RETRIEVAL_FAILED, pool=pool_name)
+ self.output_log(MSG.POOL_INFO_RETRIEVAL_FAILED, pool=pool_name)
return single_pool
total_capacity, free_capacity, provisioned_capacity = cap_data
single_pool.update(dict(
total_capacity_gb=total_capacity,
free_capacity_gb=free_capacity,
- provisioned_capacity_gb=provisioned_capacity,
- max_over_subscription_ratio=(
- volume_utils.get_max_over_subscription_ratio(
- self.conf.safe_get('max_over_subscription_ratio'),
- True)),
- thin_provisioning_support=True
+ provisioned_capacity_gb=provisioned_capacity
))
single_pool.update(dict(backend_state='up'))
return single_pool
@@ -418,10 +504,10 @@ class HBSDCommon():
'pools': [],
}
for pool_id, pool_name, cap_data in zip(
- self.storage_info['pool_id'], self.conf.hitachi_pool,
+ self.storage_info['pool_id'], self.conf.hitachi_pools,
self.get_pool_infos(self.storage_info['pool_id'])):
single_pool = self._create_single_pool_data(
- pool_id, pool_name if len(self.conf.hitachi_pool) > 1 else
+ pool_id, pool_name if len(self.conf.hitachi_pools) > 1 else
data['volume_backend_name'], cap_data)
data['pools'].append(single_pool)
LOG.debug("Updating volume status. (%s)", data)
@@ -442,14 +528,14 @@ class HBSDCommon():
def extend_volume(self, volume, new_size):
"""Extend the specified volume to the specified size."""
- ldev = utils.get_ldev(volume)
+ ldev = self.get_ldev(volume)
if ldev is None:
- msg = utils.output_log(MSG.INVALID_LDEV_FOR_EXTENSION,
- volume_id=volume['id'])
+ msg = self.output_log(MSG.INVALID_LDEV_FOR_EXTENSION,
+ volume_id=volume['id'])
self.raise_error(msg)
if self.check_pair_svol(ldev):
- msg = utils.output_log(MSG.INVALID_VOLUME_TYPE_FOR_EXTEND,
- volume_id=volume['id'])
+ msg = self.output_log(MSG.INVALID_VOLUME_TYPE_FOR_EXTEND,
+ volume_id=volume['id'])
self.raise_error(msg)
self.delete_pair(ldev)
self.extend_ldev(ldev, volume['size'], new_size)
@@ -468,7 +554,7 @@ class HBSDCommon():
ldev = self.get_ldev_by_name(
existing_ref.get('source-name').replace('-', ''))
elif 'source-id' in existing_ref:
- ldev = _str2int(existing_ref.get('source-id'))
+ ldev = str2int(existing_ref.get('source-id'))
self.check_ldev_manageability(ldev, existing_ref)
self.modify_ldev_name(ldev, volume['id'].replace("-", ""))
return {
@@ -479,29 +565,29 @@ class HBSDCommon():
"""Return the size[GB] of the specified LDEV."""
raise NotImplementedError()
- def manage_existing_get_size(self, existing_ref):
+ def manage_existing_get_size(self, volume, existing_ref):
"""Return the size[GB] of the specified volume."""
ldev = None
if 'source-name' in existing_ref:
ldev = self.get_ldev_by_name(
existing_ref.get('source-name').replace("-", ""))
elif 'source-id' in existing_ref:
- ldev = _str2int(existing_ref.get('source-id'))
+ ldev = str2int(existing_ref.get('source-id'))
if ldev is None:
- msg = utils.output_log(MSG.INVALID_LDEV_FOR_MANAGE)
+ msg = self.output_log(MSG.INVALID_LDEV_FOR_MANAGE)
raise exception.ManageExistingInvalidReference(
existing_ref=existing_ref, reason=msg)
return self.get_ldev_size_in_gigabyte(ldev, existing_ref)
def unmanage(self, volume):
"""Prepare the volume for removing it from Cinder management."""
- ldev = utils.get_ldev(volume)
+ ldev = self.get_ldev(volume)
if ldev is None:
- utils.output_log(MSG.INVALID_LDEV_FOR_DELETION, method='unmanage',
- id=volume['id'])
+ self.output_log(MSG.INVALID_LDEV_FOR_DELETION, method='unmanage',
+ id=volume['id'])
return
if self.check_pair_svol(ldev):
- utils.output_log(
+ self.output_log(
MSG.INVALID_LDEV_TYPE_FOR_UNMANAGE, volume_id=volume['id'],
volume_type=utils.NORMAL_LDEV_TYPE)
raise exception.VolumeIsBusy(volume_name=volume['name'])
@@ -515,10 +601,10 @@ class HBSDCommon():
def _range2list(self, param):
"""Analyze a 'xxx-xxx' string and return a list of two integers."""
- values = [_str2int(value) for value in
+ values = [str2int(value) for value in
self.conf.safe_get(param).split('-')]
if len(values) != 2 or None in values or values[0] > values[1]:
- msg = utils.output_log(MSG.INVALID_PARAMETER, param=param)
+ msg = self.output_log(MSG.INVALID_PARAMETER, param=param)
self.raise_error(msg)
return values
@@ -530,63 +616,116 @@ class HBSDCommon():
self.check_opts(self.conf, COMMON_PORT_OPTS)
if (self.conf.hitachi_port_scheduler and
not self.conf.hitachi_group_create):
- msg = utils.output_log(
+ msg = self.output_log(
MSG.INVALID_PARAMETER,
param=self.driver_info['param_prefix'] + '_port_scheduler')
self.raise_error(msg)
if (self._lookup_service is None and
self.conf.hitachi_port_scheduler):
- msg = utils.output_log(MSG.ZONE_MANAGER_IS_NOT_AVAILABLE)
+ msg = self.output_log(MSG.ZONE_MANAGER_IS_NOT_AVAILABLE)
self.raise_error(msg)
def check_param_iscsi(self):
"""Check iSCSI-related parameter values and consistency among them."""
if self.conf.use_chap_auth:
if not self.conf.chap_username:
- msg = utils.output_log(MSG.INVALID_PARAMETER,
- param='chap_username')
+ msg = self.output_log(MSG.INVALID_PARAMETER,
+ param='chap_username')
self.raise_error(msg)
if not self.conf.chap_password:
- msg = utils.output_log(MSG.INVALID_PARAMETER,
- param='chap_password')
+ msg = self.output_log(MSG.INVALID_PARAMETER,
+ param='chap_password')
self.raise_error(msg)
def check_param(self):
"""Check parameter values and consistency among them."""
- utils.check_opt_value(self.conf, _INHERITED_VOLUME_OPTS)
+ self.check_opt_value(self.conf, _INHERITED_VOLUME_OPTS)
self.check_opts(self.conf, COMMON_VOLUME_OPTS)
+ if hasattr(
+ self.conf,
+ self.driver_info['param_prefix'] + '_pair_target_number'):
+ self.check_opts(self.conf, COMMON_PAIR_OPTS)
+ if hasattr(
+ self.conf,
+ self.driver_info['param_prefix'] + '_group_name_format'):
+ self.check_opts(self.conf, COMMON_NAME_OPTS)
if self.conf.hitachi_ldev_range:
self.storage_info['ldev_range'] = self._range2list(
self.driver_info['param_prefix'] + '_ldev_range')
if (not self.conf.hitachi_target_ports and
not self.conf.hitachi_compute_target_ports):
- msg = utils.output_log(
+ msg = self.output_log(
MSG.INVALID_PARAMETER,
param=self.driver_info['param_prefix'] + '_target_ports or ' +
self.driver_info['param_prefix'] + '_compute_target_ports')
self.raise_error(msg)
+ self._check_param_group_name_format()
if (self.conf.hitachi_group_delete and
not self.conf.hitachi_group_create):
- msg = utils.output_log(
+ msg = self.output_log(
MSG.INVALID_PARAMETER,
param=self.driver_info['param_prefix'] + '_group_delete or '
+ self.driver_info['param_prefix'] + '_group_create')
self.raise_error(msg)
for opt in self._required_common_opts:
if not self.conf.safe_get(opt):
- msg = utils.output_log(MSG.INVALID_PARAMETER, param=opt)
+ msg = self.output_log(MSG.INVALID_PARAMETER, param=opt)
self.raise_error(msg)
- for pool in self.conf.hitachi_pool:
+ for pool in self.conf.hitachi_pools:
if len(pool) == 0:
- msg = utils.output_log(
+ msg = self.output_log(
MSG.INVALID_PARAMETER,
- param=self.driver_info['param_prefix'] + '_pool')
+ param=self.driver_info['param_prefix'] + '_pools')
self.raise_error(msg)
if self.storage_info['protocol'] == 'FC':
self.check_param_fc()
if self.storage_info['protocol'] == 'iSCSI':
self.check_param_iscsi()
+ def _check_param_group_name_format(self):
+ if not hasattr(
+ self.conf,
+ self.driver_info['param_prefix'] + '_group_name_format'):
+ return
+ if self.conf.hitachi_group_name_format is not None:
+ error_flag = False
+ if re.match(
+ utils.TARGET_PREFIX + '(' + GROUP_NAME_VAR_WWN + '|' +
+ GROUP_NAME_VAR_IP + '|' + GROUP_NAME_VAR_HOST + '|' + '[' +
+ GROUP_NAME_ALLOWED_CHARS + '])+$',
+ self.conf.hitachi_group_name_format) is None:
+ error_flag = True
+ if not error_flag:
+ for var in _GROUP_NAME_VAR_LEN:
+ self.format_info['group_name_var_cnt'][var] = (
+ self.conf.hitachi_group_name_format.count(var))
+ if (self.format_info[
+ 'group_name_var_cnt'][var] not in
+ self.group_name_format['group_name_var_cnt'][var]):
+ error_flag = True
+ break
+ if not error_flag:
+ group_name_var_replaced = self.conf.hitachi_group_name_format
+ for var, length in _GROUP_NAME_VAR_LEN.items():
+ group_name_var_replaced = (
+ group_name_var_replaced.replace(var, '_' * length))
+ if len(group_name_var_replaced) > self.group_name_format[
+ 'group_name_max_len']:
+ error_flag = True
+ if error_flag:
+ msg = self.output_log(
+ MSG.INVALID_PARAMETER,
+ param=self.driver_info['param_prefix'] +
+ '_group_name_format')
+ self.raise_error(msg)
+ self.format_info['group_name_format'] = (
+ self.conf.hitachi_group_name_format)
+ self.format_info['group_name_format_without_var_len'] = (
+ len(re.sub('|'.join(
+ [GROUP_NAME_VAR_WWN, GROUP_NAME_VAR_IP,
+ GROUP_NAME_VAR_HOST]), '',
+ self.format_info['group_name_format'])))
+
def need_client_setup(self):
"""Check if the making of the communication client is necessary."""
raise NotImplementedError()
@@ -600,14 +739,14 @@ class HBSDCommon():
pass
def check_pool_id(self):
- """Check the pool id of hitachi_pool and hitachi_snap_pool."""
+ """Check the pool id of hitachi_pools and hitachi_snap_pool."""
raise NotImplementedError()
def connect_storage(self):
"""Prepare for using the storage."""
self.check_pool_id()
- utils.output_log(MSG.SET_CONFIG_VALUE, object='DP Pool ID',
- value=self.storage_info['pool_id'])
+ self.output_log(MSG.SET_CONFIG_VALUE, object='DP Pool ID',
+ value=self.storage_info['pool_id'])
self.storage_info['controller_ports'] = []
self.storage_info['compute_ports'] = []
@@ -619,8 +758,8 @@ class HBSDCommon():
"""Return the HBA ID stored in the connector."""
if self.driver_info['hba_id'] in connector:
return connector[self.driver_info['hba_id']]
- msg = utils.output_log(MSG.RESOURCE_NOT_FOUND,
- resource=self.driver_info['hba_id_type'])
+ msg = self.output_log(MSG.RESOURCE_NOT_FOUND,
+ resource=self.driver_info['hba_id_type'])
self.raise_error(msg)
def set_device_map(self, targets, hba_ids, volume):
@@ -646,7 +785,7 @@ class HBSDCommon():
for target_port, target_gid in targets['list']:
if target_port == port:
return target_gid
- msg = utils.output_log(MSG.NO_CONNECTED_TARGET)
+ msg = self.output_log(MSG.NO_CONNECTED_TARGET)
self.raise_error(msg)
def set_target_mode(self, port, gid):
@@ -669,7 +808,7 @@ class HBSDCommon():
if port not in targets['info'] or not targets['info'][port]:
target_name, gid = self.create_target_to_storage(
port, connector, hba_ids)
- utils.output_log(
+ self.output_log(
MSG.OBJECT_CREATED,
object='a target',
details='port: %(port)s, gid: %(gid)s, target_name: '
@@ -708,14 +847,14 @@ class HBSDCommon():
self.create_target(
targets, port, connector, active_hba_ids)
except exception.VolumeDriverException:
- utils.output_log(
+ self.output_log(
self.driver_info['msg_id']['target'], port=port)
# When other threads created a host group at same time, need to
# re-find targets.
if not targets['list']:
self.find_targets_from_storage(
- targets, connector, targets['info'].keys())
+ targets, connector, list(targets['info'].keys()))
def get_port_index_to_be_used(self, ports, network_name):
backend_name = self.conf.safe_get('volume_backend_name')
@@ -767,21 +906,22 @@ class HBSDCommon():
"""Check if available storage ports exist."""
if (self.conf.hitachi_target_ports and
not self.storage_info['controller_ports']):
- msg = utils.output_log(MSG.RESOURCE_NOT_FOUND,
- resource="Target ports")
+ msg = self.output_log(MSG.RESOURCE_NOT_FOUND,
+ resource="Target ports")
self.raise_error(msg)
if (self.conf.hitachi_compute_target_ports and
not self.storage_info['compute_ports']):
- msg = utils.output_log(MSG.RESOURCE_NOT_FOUND,
- resource="Compute target ports")
+ msg = self.output_log(MSG.RESOURCE_NOT_FOUND,
+ resource="Compute target ports")
self.raise_error(msg)
- utils.output_log(MSG.SET_CONFIG_VALUE, object='target port list',
- value=self.storage_info['controller_ports'])
- utils.output_log(MSG.SET_CONFIG_VALUE,
- object='compute target port list',
- value=self.storage_info['compute_ports'])
-
- def attach_ldev(self, volume, ldev, connector, is_snapshot, targets):
+ self.output_log(MSG.SET_CONFIG_VALUE, object='target port list',
+ value=self.storage_info['controller_ports'])
+ self.output_log(MSG.SET_CONFIG_VALUE,
+ object='compute target port list',
+ value=self.storage_info['compute_ports'])
+
+ def attach_ldev(
+ self, volume, ldev, connector, is_snapshot, targets, lun=None):
"""Initialize connection between the server and the volume."""
raise NotImplementedError()
@@ -839,7 +979,8 @@ class HBSDCommon():
@coordination.synchronized(
'{self.driver_info[driver_file_prefix]}-host-'
'{self.conf.hitachi_storage_id}-{connector[host]}')
- def initialize_connection(self, volume, connector, is_snapshot=False):
+ def initialize_connection(
+ self, volume, connector, is_snapshot=False, lun=None):
"""Initialize connection between the server and the volume."""
targets = {
'info': {},
@@ -848,14 +989,14 @@ class HBSDCommon():
'iqns': {},
'target_map': {},
}
- ldev = utils.get_ldev(volume)
+ ldev = self.get_ldev(volume)
if ldev is None:
- msg = utils.output_log(MSG.INVALID_LDEV_FOR_CONNECTION,
- volume_id=volume['id'])
+ msg = self.output_log(MSG.INVALID_LDEV_FOR_CONNECTION,
+ volume_id=volume['id'])
self.raise_error(msg)
target_lun = self.attach_ldev(
- volume, ldev, connector, is_snapshot, targets)
+ volume, ldev, connector, is_snapshot, targets, lun)
return {
'driver_volume_type': self.driver_info['volume_type'],
@@ -883,10 +1024,10 @@ class HBSDCommon():
def terminate_connection(self, volume, connector):
"""Terminate connection between the server and the volume."""
- ldev = utils.get_ldev(volume)
+ ldev = self.get_ldev(volume)
if ldev is None:
- utils.output_log(MSG.INVALID_LDEV_FOR_UNMAPPING,
- volume_id=volume['id'])
+ self.output_log(MSG.INVALID_LDEV_FOR_UNMAPPING,
+ volume_id=volume['id'])
return
# If a fake connector is generated by nova when the host
# is down, then the connector will not have a host property,
@@ -895,7 +1036,7 @@ class HBSDCommon():
if 'host' not in connector:
port_hostgroup_map = self.get_port_hostgroup_map(ldev)
if not port_hostgroup_map:
- utils.output_log(MSG.NO_LUN, ldev=ldev)
+ self.output_log(MSG.NO_LUN, ldev=ldev)
return
self.set_terminate_target(connector, port_hostgroup_map)
@@ -918,21 +1059,17 @@ class HBSDCommon():
'data': {'target_wwn': target_wwn}}
return inner(self, volume, connector)
- def get_volume_extra_specs(self, volume):
- if volume is None:
- return {}
- type_id = volume.get('volume_type_id')
- if type_id is None:
- return {}
-
- return volume_types.get_volume_type_extra_specs(type_id)
-
def filter_target_ports(self, target_ports, volume, is_snapshot=False):
specs = self.get_volume_extra_specs(volume) if volume else None
if not specs:
return target_ports
if self.driver_info.get('driver_dir_name'):
- tps_name = self.driver_info['driver_dir_name'] + ':target_ports'
+ if getattr(self, 'is_secondary', False):
+ tps_name = self.driver_info[
+ 'driver_dir_name'] + ':remote_target_ports'
+ else:
+ tps_name = self.driver_info[
+ 'driver_dir_name'] + ':target_ports'
else:
return target_ports
@@ -946,7 +1083,7 @@ class HBSDCommon():
volume = volume['volume']
for port in tpsset:
if port not in target_ports:
- utils.output_log(
+ self.output_log(
MSG.INVALID_EXTRA_SPEC_KEY_PORT,
port=port, target_ports_param=tps_name,
volume_type=volume['volume_type']['name'])
@@ -958,7 +1095,7 @@ class HBSDCommon():
def unmanage_snapshot(self, snapshot):
"""Output error message and raise NotImplementedError."""
- utils.output_log(
+ self.output_log(
MSG.SNAPSHOT_UNMANAGE_FAILED, snapshot_id=snapshot['id'])
raise NotImplementedError()
@@ -980,8 +1117,8 @@ class HBSDCommon():
def revert_to_snapshot(self, volume, snapshot):
"""Rollback the specified snapshot."""
- pvol = utils.get_ldev(volume)
- svol = utils.get_ldev(snapshot)
+ pvol = self.get_ldev(volume)
+ svol = self.get_ldev(snapshot)
if (pvol is not None and
svol is not None and
self.has_snap_pair(pvol, svol)):
@@ -1008,20 +1145,65 @@ class HBSDCommon():
def delete_group_snapshot(self, group_snapshot, snapshots):
raise NotImplementedError()
+ def output_log(self, msg_enum, **kwargs):
+ if self.storage_id is not None:
+ return utils.output_log(
+ msg_enum, storage_id=self.storage_id, **kwargs)
+ else:
+ return utils.output_log(msg_enum, **kwargs)
+
+ def get_ldev(self, obj, both=False):
+ if not obj:
+ return None
+ provider_location = obj.get('provider_location')
+ if not provider_location:
+ return None
+ if provider_location.isdigit():
+ return int(provider_location)
+ if provider_location.startswith('{'):
+ loc = json.loads(provider_location)
+ if isinstance(loc, dict):
+ if getattr(self, 'is_primary', False) or (
+ hasattr(self, 'primary_storage_id') and not both):
+ return None if 'pldev' not in loc else int(loc['pldev'])
+ elif getattr(self, 'is_secondary', False):
+ return None if 'sldev' not in loc else int(loc['sldev'])
+ if hasattr(self, 'primary_storage_id'):
+ return {key: loc.get(key) for key in ['pldev', 'sldev']}
+ return None
+
+ def check_opt_value(self, conf, names):
+ """Check if the parameter names and values are valid."""
+ for name in names:
+ try:
+ getattr(conf, name)
+ except (cfg.NoSuchOptError, cfg.ConfigFileValueError):
+ with excutils.save_and_reraise_exception():
+ self.output_log(MSG.INVALID_PARAMETER, param=name)
+
def check_opts(self, conf, opts):
"""Check if the specified configuration is valid."""
names = []
for opt in opts:
if opt.required and not conf.safe_get(opt.name):
- msg = utils.output_log(MSG.INVALID_PARAMETER, param=opt.name)
+ msg = self.output_log(MSG.INVALID_PARAMETER, param=opt.name)
self.raise_error(msg)
names.append(opt.name)
- utils.check_opt_value(conf, names)
+ self.check_opt_value(conf, names)
+
+ def get_volume_extra_specs(self, volume):
+ if volume is None:
+ return {}
+ type_id = volume.get('volume_type_id', None)
+ if type_id is None:
+ return {}
+
+ return volume_types.get_volume_type_extra_specs(type_id)
def require_target_existed(self, targets):
"""Check if the target list includes one or more members."""
if not targets['list']:
- msg = utils.output_log(MSG.NO_CONNECTED_TARGET)
+ msg = self.output_log(MSG.NO_CONNECTED_TARGET)
self.raise_error(msg)
def raise_error(self, msg):
diff --git a/cinder/volume/drivers/hitachi/hbsd_fc.py b/cinder/volume/drivers/hitachi/hbsd_fc.py
index 658f982e5..4ebee7ae2 100644
--- a/cinder/volume/drivers/hitachi/hbsd_fc.py
+++ b/cinder/volume/drivers/hitachi/hbsd_fc.py
@@ -21,6 +21,7 @@ from oslo_utils import excutils
from cinder import interface
from cinder.volume import driver
from cinder.volume.drivers.hitachi import hbsd_common as common
+from cinder.volume.drivers.hitachi import hbsd_replication as replication
from cinder.volume.drivers.hitachi import hbsd_rest as rest
from cinder.volume.drivers.hitachi import hbsd_rest_fc as rest_fc
from cinder.volume.drivers.hitachi import hbsd_utils as utils
@@ -51,6 +52,8 @@ _DRIVER_INFO = {
'nvol_ldev_type': utils.NVOL_LDEV_TYPE,
'target_iqn_suffix': utils.TARGET_IQN_SUFFIX,
'pair_attr': utils.PAIR_ATTR,
+ 'mirror_attr': utils.MIRROR_ATTR,
+ 'driver_impl_class': rest_fc.HBSDRESTFC,
}
@@ -73,6 +76,10 @@ class HBSDFCDriver(driver.FibreChannelDriver):
2.2.3 - Add port scheduler.
2.3.0 - Support multi pool.
2.3.1 - Update retype and support storage assisted migration.
+    2.3.2 - Support specifying the name format of host groups/iSCSI targets.
+ 2.3.3 - Add GAD volume support.
+ 2.3.4 - Support data deduplication and compression.
+ 2.3.5 - Fix key error when backend is down.
"""
@@ -81,6 +88,8 @@ class HBSDFCDriver(driver.FibreChannelDriver):
# ThirdPartySystems wiki page
CI_WIKI_NAME = utils.CI_WIKI_NAME
+ driver_info = dict(_DRIVER_INFO)
+
def __init__(self, *args, **kwargs):
"""Initialize instance variables."""
utils.output_log(MSG.DRIVER_INITIALIZATION_START,
@@ -89,13 +98,25 @@ class HBSDFCDriver(driver.FibreChannelDriver):
super(HBSDFCDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(common.COMMON_VOLUME_OPTS)
+ self.configuration.append_config_values(common.COMMON_PAIR_OPTS)
self.configuration.append_config_values(common.COMMON_PORT_OPTS)
+ self.configuration.append_config_values(common.COMMON_NAME_OPTS)
self.configuration.append_config_values(rest_fc.FC_VOLUME_OPTS)
+ self.configuration.append_config_values(
+ replication.COMMON_MIRROR_OPTS)
os.environ['LANG'] = 'C'
- self.common = self._init_common(self.configuration, kwargs.get('db'))
-
- def _init_common(self, conf, db):
- return rest_fc.HBSDRESTFC(conf, _DRIVER_INFO, db)
+ kwargs.setdefault('driver_info', _DRIVER_INFO)
+ self.driver_info = dict(kwargs['driver_info'])
+ self.driver_info['driver_class'] = self.__class__
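+        # Choose the backend implementation: the replication class when a
+        # secondary (mirror) storage is configured, otherwise the
+        # protocol-specific REST class, or a subclass-provided _init_common
+        # for backward compatibility.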
+ if self.configuration.safe_get('hitachi_mirror_storage_id'):
+ self.common = replication.HBSDREPLICATION(
+ self.configuration, self.driver_info, kwargs.get('db'))
+ elif not hasattr(self, '_init_common'):
+ self.common = self.driver_info['driver_impl_class'](
+ self.configuration, self.driver_info, kwargs.get('db'))
+ else:
+ self.common = self._init_common(
+ self.configuration, kwargs.get('db'))
@staticmethod
def get_driver_options():
@@ -106,8 +127,17 @@ class HBSDFCDriver(driver.FibreChannelDriver):
'san_api_port', ]))
return (common.COMMON_VOLUME_OPTS +
common.COMMON_PORT_OPTS +
+ common.COMMON_PAIR_OPTS +
+ common.COMMON_NAME_OPTS +
rest.REST_VOLUME_OPTS +
+ rest.REST_PAIR_OPTS +
rest_fc.FC_VOLUME_OPTS +
+ replication._REP_OPTS +
+ replication.COMMON_MIRROR_OPTS +
+ replication.ISCSI_MIRROR_OPTS +
+ replication.REST_MIRROR_OPTS +
+ replication.REST_MIRROR_API_OPTS +
+ replication.REST_MIRROR_SSL_OPTS +
additional_opts)
def check_for_setup_error(self):
@@ -184,7 +214,7 @@ class HBSDFCDriver(driver.FibreChannelDriver):
@volume_utils.trace
def manage_existing_get_size(self, volume, existing_ref):
"""Return the size[GB] of the specified volume."""
- return self.common.manage_existing_get_size(existing_ref)
+ return self.common.manage_existing_get_size(volume, existing_ref)
@volume_utils.trace
def unmanage(self, volume):
diff --git a/cinder/volume/drivers/hitachi/hbsd_iscsi.py b/cinder/volume/drivers/hitachi/hbsd_iscsi.py
index d29d01ec5..d06f665cd 100644
--- a/cinder/volume/drivers/hitachi/hbsd_iscsi.py
+++ b/cinder/volume/drivers/hitachi/hbsd_iscsi.py
@@ -21,6 +21,7 @@ from oslo_utils import excutils
from cinder import interface
from cinder.volume import driver
from cinder.volume.drivers.hitachi import hbsd_common as common
+from cinder.volume.drivers.hitachi import hbsd_replication as replication
from cinder.volume.drivers.hitachi import hbsd_rest as rest
from cinder.volume.drivers.hitachi import hbsd_rest_iscsi as rest_iscsi
from cinder.volume.drivers.hitachi import hbsd_utils as utils
@@ -51,6 +52,8 @@ _DRIVER_INFO = {
'nvol_ldev_type': utils.NVOL_LDEV_TYPE,
'target_iqn_suffix': utils.TARGET_IQN_SUFFIX,
'pair_attr': utils.PAIR_ATTR,
+ 'mirror_attr': utils.MIRROR_ATTR,
+ 'driver_impl_class': rest_iscsi.HBSDRESTISCSI,
}
@@ -73,6 +76,10 @@ class HBSDISCSIDriver(driver.ISCSIDriver):
2.2.3 - Add port scheduler.
2.3.0 - Support multi pool.
2.3.1 - Update retype and support storage assisted migration.
+    2.3.2 - Support specifying the name format of host groups/iSCSI targets.
+ 2.3.3 - Add GAD volume support.
+ 2.3.4 - Support data deduplication and compression.
+ 2.3.5 - Fix key error when backend is down.
"""
@@ -81,6 +88,8 @@ class HBSDISCSIDriver(driver.ISCSIDriver):
# ThirdPartySystems wiki page
CI_WIKI_NAME = utils.CI_WIKI_NAME
+ driver_info = dict(_DRIVER_INFO)
+
def __init__(self, *args, **kwargs):
"""Initialize instance variables."""
utils.output_log(MSG.DRIVER_INITIALIZATION_START,
@@ -89,11 +98,23 @@ class HBSDISCSIDriver(driver.ISCSIDriver):
super(HBSDISCSIDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(common.COMMON_VOLUME_OPTS)
+ self.configuration.append_config_values(common.COMMON_PAIR_OPTS)
+ self.configuration.append_config_values(common.COMMON_NAME_OPTS)
+ self.configuration.append_config_values(
+ replication.COMMON_MIRROR_OPTS)
os.environ['LANG'] = 'C'
- self.common = self._init_common(self.configuration, kwargs.get('db'))
-
- def _init_common(self, conf, db):
- return rest_iscsi.HBSDRESTISCSI(conf, _DRIVER_INFO, db)
+ kwargs.setdefault('driver_info', _DRIVER_INFO)
+ self.driver_info = dict(kwargs['driver_info'])
+ self.driver_info['driver_class'] = self.__class__
+ if self.configuration.safe_get('hitachi_mirror_storage_id'):
+ self.common = replication.HBSDREPLICATION(
+ self.configuration, self.driver_info, kwargs.get('db'))
+ elif not hasattr(self, '_init_common'):
+ self.common = self.driver_info['driver_impl_class'](
+ self.configuration, self.driver_info, kwargs.get('db'))
+ else:
+ self.common = self._init_common(
+ self.configuration, kwargs.get('db'))
@staticmethod
def get_driver_options():
@@ -103,7 +124,16 @@ class HBSDISCSIDriver(driver.ISCSIDriver):
['driver_ssl_cert_verify', 'driver_ssl_cert_path',
'san_api_port', ]))
return (common.COMMON_VOLUME_OPTS +
+ common.COMMON_PAIR_OPTS +
+ common.COMMON_NAME_OPTS +
rest.REST_VOLUME_OPTS +
+ rest.REST_PAIR_OPTS +
+ replication._REP_OPTS +
+ replication.COMMON_MIRROR_OPTS +
+ replication.ISCSI_MIRROR_OPTS +
+ replication.REST_MIRROR_OPTS +
+ replication.REST_MIRROR_API_OPTS +
+ replication.REST_MIRROR_SSL_OPTS +
additional_opts)
def check_for_setup_error(self):
@@ -180,7 +210,7 @@ class HBSDISCSIDriver(driver.ISCSIDriver):
@volume_utils.trace
def manage_existing_get_size(self, volume, existing_ref):
"""Return the size[GB] of the specified volume."""
- return self.common.manage_existing_get_size(existing_ref)
+ return self.common.manage_existing_get_size(volume, existing_ref)
@volume_utils.trace
def unmanage(self, volume):
diff --git a/cinder/volume/drivers/hitachi/hbsd_replication.py b/cinder/volume/drivers/hitachi/hbsd_replication.py
new file mode 100644
index 000000000..b296d8537
--- /dev/null
+++ b/cinder/volume/drivers/hitachi/hbsd_replication.py
@@ -0,0 +1,989 @@
+# Copyright (C) 2022, 2023, Hitachi, Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+"""replication module for Hitachi HBSD Driver."""
+
+import json
+
+from eventlet import greenthread
+from oslo_config import cfg
+from oslo_log import log as logging
+from oslo_utils import excutils
+from oslo_utils import timeutils
+
+from cinder import exception
+from cinder.volume.drivers.hitachi import hbsd_common as common
+from cinder.volume.drivers.hitachi import hbsd_rest as rest
+from cinder.volume.drivers.hitachi import hbsd_utils as utils
+from cinder.zonemanager import utils as fczm_utils
+
+_REP_STATUS_CHECK_SHORT_INTERVAL = 5
+_REP_STATUS_CHECK_LONG_INTERVAL = 10 * 60
+_REP_STATUS_CHECK_TIMEOUT = 24 * 60 * 60
+
+_WAIT_PAIR = 1
+_WAIT_PSUS = 2
+
+_REP_OPTS = [
+ cfg.IntOpt(
+ 'hitachi_replication_status_check_short_interval',
+ default=_REP_STATUS_CHECK_SHORT_INTERVAL,
+ help='Initial interval at which remote replication pair status is '
+ 'checked'),
+ cfg.IntOpt(
+ 'hitachi_replication_status_check_long_interval',
+ default=_REP_STATUS_CHECK_LONG_INTERVAL,
+ help='Interval at which remote replication pair status is checked. '
+ 'This parameter is applied if the status has not changed to the '
+ 'expected status after the time indicated by this parameter has '
+ 'elapsed.'),
+ cfg.IntOpt(
+ 'hitachi_replication_status_check_timeout',
+ default=_REP_STATUS_CHECK_TIMEOUT,
+ help='Maximum wait time before the remote replication pair status '
+ 'changes to the expected status'),
+ cfg.IntOpt(
+ 'hitachi_path_group_id',
+ default=0, min=0, max=255,
+ help='Path group ID assigned to the remote connection for remote '
+ 'replication'),
+ cfg.IntOpt(
+ 'hitachi_quorum_disk_id',
+ min=0, max=31,
+ help='ID of the Quorum disk used for global-active device'),
+ cfg.IntOpt(
+ 'hitachi_replication_copy_speed',
+ min=1, max=15, default=3,
+ help='Remote copy speed of storage system. 1 or 2 indicates '
+ 'low speed, 3 indicates middle speed, and a value between 4 and '
+ '15 indicates high speed.'),
+ cfg.BoolOpt(
+ 'hitachi_set_mirror_reserve_attribute',
+ default=True,
+ help='Whether or not to set the mirror reserve attribute'),
+ cfg.IntOpt(
+ 'hitachi_replication_number',
+ default=0, min=0, max=255,
+ help='Instance number for REST API'),
+]
+
+COMMON_MIRROR_OPTS = [
+ cfg.StrOpt(
+ 'hitachi_mirror_storage_id',
+ default=None,
+ help='ID of secondary storage system'),
+ cfg.StrOpt(
+ 'hitachi_mirror_pool',
+ default=None,
+ help='Pool of secondary storage system'),
+ cfg.StrOpt(
+ 'hitachi_mirror_snap_pool',
+ default=None,
+ help='Thin pool of secondary storage system'),
+ cfg.StrOpt(
+ 'hitachi_mirror_ldev_range',
+ default=None,
+ help='Logical device range of secondary storage system'),
+ cfg.ListOpt(
+ 'hitachi_mirror_target_ports',
+ default=[],
+ help='Target port names for host group or iSCSI target'),
+ cfg.ListOpt(
+ 'hitachi_mirror_compute_target_ports',
+ default=[],
+ help=(
+ 'Target port names of compute node '
+ 'for host group or iSCSI target')),
+ cfg.IntOpt(
+ 'hitachi_mirror_pair_target_number',
+ min=0, max=99, default=0,
+        help='Pair target number of the host group or iSCSI target'),
+]
+
+ISCSI_MIRROR_OPTS = [
+ cfg.BoolOpt(
+ 'hitachi_mirror_use_chap_auth',
+ default=False,
+ help='Whether or not to use iSCSI authentication'),
+ cfg.StrOpt(
+ 'hitachi_mirror_auth_user',
+ default=None,
+ help='iSCSI authentication username'),
+ cfg.StrOpt(
+ 'hitachi_mirror_auth_password',
+ default=None,
+ secret=True,
+ help='iSCSI authentication password'),
+]
+
+REST_MIRROR_OPTS = [
+ cfg.ListOpt(
+ 'hitachi_mirror_rest_pair_target_ports',
+ default=[],
+        help='Target port names for the pair host group or iSCSI target'),
+]
+
+REST_MIRROR_API_OPTS = [
+ cfg.StrOpt(
+ 'hitachi_mirror_rest_user',
+ default=None,
+ help='Username of secondary storage system for REST API'),
+ cfg.StrOpt(
+ 'hitachi_mirror_rest_password',
+ default=None,
+ secret=True,
+ help='Password of secondary storage system for REST API'),
+ cfg.StrOpt(
+ 'hitachi_mirror_rest_api_ip',
+ default=None,
+ help='IP address of REST API server'),
+ cfg.PortOpt(
+ 'hitachi_mirror_rest_api_port',
+ default=443,
+ help='Port number of REST API server'),
+]
+
+REST_MIRROR_SSL_OPTS = [
+ cfg.BoolOpt('hitachi_mirror_ssl_cert_verify',
+ default=False,
+ help='If set to True the http client will validate the SSL '
+ 'certificate of the backend endpoint.'),
+ cfg.StrOpt('hitachi_mirror_ssl_cert_path',
+ help='Can be used to specify a non default path to a '
+ 'CA_BUNDLE file or directory with certificates of '
+ 'trusted CAs, which will be used to validate the backend'),
+]
+
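+# Illustrative cinder.conf snippet for a mirrored backend; all values below
+# are examples only and must match the actual secondary storage system:
+#
+#   hitachi_mirror_storage_id = 886000123456
+#   hitachi_mirror_pool = mirror_pool
+#   hitachi_mirror_rest_api_ip = 192.0.2.10
+#   hitachi_mirror_rest_user = rest-user
+#   hitachi_mirror_rest_password = <password>
+#   hitachi_mirror_target_ports = CL1-A,CL2-A
+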
+CONF = cfg.CONF
+CONF.register_opts(_REP_OPTS)
+CONF.register_opts(COMMON_MIRROR_OPTS)
+CONF.register_opts(ISCSI_MIRROR_OPTS)
+CONF.register_opts(REST_MIRROR_OPTS)
+CONF.register_opts(REST_MIRROR_API_OPTS)
+CONF.register_opts(REST_MIRROR_SSL_OPTS)
+
+LOG = logging.getLogger(__name__)
+
+MSG = utils.HBSDMsg
+
+
+def _pack_rep_provider_location(pldev=None, sldev=None, rep_type=None):
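+    # Build the JSON stored in provider_location for replicated volumes,
+    # e.g. '{"pldev": 10, "sldev": 20, "remote-copy": "<mirror attr>"}';
+    # keys whose value is None are omitted (values illustrative).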
+ provider_location = {}
+ if pldev is not None:
+ provider_location['pldev'] = pldev
+ if sldev is not None:
+ provider_location['sldev'] = sldev
+ if rep_type is not None:
+ provider_location['remote-copy'] = rep_type
+ return json.dumps(provider_location)
+
+
+def _delays(short_interval, long_interval, timeout):
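+    # Yield loop indices, sleeping between iterations: the short interval is
+    # used until long_interval has elapsed since the start, then the long
+    # interval, until timeout expires.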
+ start_time = timeutils.utcnow()
+ watch = timeutils.StopWatch()
+ i = 0
+ while True:
+ watch.restart()
+ yield i
+ if utils.timed_out(start_time, timeout):
+            # Returning here ends the generator; raising StopIteration inside
+            # a generator would be turned into RuntimeError by PEP 479.
+            return
+ watch.stop()
+ interval = long_interval if utils.timed_out(
+ start_time, long_interval) else short_interval
+ idle = max(interval - watch.elapsed(), 0)
+ greenthread.sleep(idle)
+ i += 1
+
+
+class HBSDREPLICATION(rest.HBSDREST):
+
+ def __init__(self, conf, driverinfo, db):
+ super(HBSDREPLICATION, self).__init__(conf, driverinfo, db)
+ conf.append_config_values(_REP_OPTS)
+ if driverinfo['proto'] == 'iSCSI':
+ conf.append_config_values(ISCSI_MIRROR_OPTS)
+ conf.append_config_values(REST_MIRROR_OPTS)
+ conf.append_config_values(REST_MIRROR_API_OPTS)
+ conf.append_config_values(REST_MIRROR_SSL_OPTS)
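+        # Two protocol-specific driver instances are created, one bound to
+        # the primary (local) array and one to the secondary (mirror) array;
+        # replication operations delegate to one or both of them.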
+ driver_impl_class = self.driver_info['driver_impl_class']
+ self.primary = driver_impl_class(conf, driverinfo, db)
+ self.rep_primary = self.primary
+ self.rep_primary.is_primary = True
+ self.rep_primary.storage_id = conf.safe_get(
+ self.driver_info['param_prefix'] + '_storage_id') or ''
+ self.primary_storage_id = self.rep_primary.storage_id
+ self.secondary = driver_impl_class(conf, driverinfo, db)
+ self.rep_secondary = self.secondary
+ self.rep_secondary.is_secondary = True
+ self.rep_secondary.storage_id = (
+ conf.safe_get(
+ self.driver_info['param_prefix'] + '_mirror_storage_id') or '')
+ self.secondary_storage_id = self.rep_secondary.storage_id
+ self.instances = self.rep_primary, self.rep_secondary
+ self._LDEV_NAME = self.driver_info['driver_prefix'] + '-LDEV-%d-%d'
+
+ def update_mirror_conf(self, conf, opts):
+ for opt in opts:
+ name = opt.name.replace('hitachi_mirror_', 'hitachi_')
+ try:
+ setattr(conf, name, getattr(conf, opt.name))
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ self.rep_secondary.output_log(
+ MSG.INVALID_PARAMETER, param=opt.name)
+
+ def _replace_with_mirror_conf(self):
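+        # Give the secondary instance a configuration copy in which the
+        # hitachi_mirror_* values override their hitachi_* counterparts and
+        # the REST/SAN endpoint settings point at the secondary array.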
+ conf = self.conf
+ new_conf = utils.Config(conf)
+ self.rep_secondary.conf = new_conf
+ self.update_mirror_conf(new_conf, COMMON_MIRROR_OPTS)
+ self.update_mirror_conf(new_conf, REST_MIRROR_OPTS)
+ if self.rep_secondary.driver_info['volume_type'] == 'iscsi':
+ self.update_mirror_conf(new_conf, ISCSI_MIRROR_OPTS)
+ new_conf.san_login = (
+ conf.safe_get(self.driver_info['param_prefix'] +
+ '_mirror_rest_user'))
+ new_conf.san_password = (
+ conf.safe_get(self.driver_info['param_prefix'] +
+ '_mirror_rest_password'))
+ new_conf.san_ip = (
+ conf.safe_get(self.driver_info['param_prefix'] +
+ '_mirror_rest_api_ip'))
+ new_conf.san_api_port = (
+ conf.safe_get(self.driver_info['param_prefix'] +
+ '_mirror_rest_api_port'))
+ new_conf.driver_ssl_cert_verify = (
+ conf.safe_get(self.driver_info['param_prefix'] +
+ '_mirror_ssl_cert_verify'))
+ new_conf.driver_ssl_cert_path = (
+ conf.safe_get(self.driver_info['param_prefix'] +
+ '_mirror_ssl_cert_path'))
+
+ def do_setup(self, context):
+ """Prepare for the startup of the driver."""
+ self.rep_primary = self.primary
+ self.rep_secondary = self.secondary
+ self.ctxt = context
+ try:
+ self.rep_primary.do_setup(context)
+ self.client = self.rep_primary.client
+ except Exception:
+ self.rep_primary.output_log(
+ MSG.SITE_INITIALIZATION_FAILED, site='primary')
+ self.rep_primary = None
+ try:
+ self._replace_with_mirror_conf()
+ self.rep_secondary.do_setup(context)
+ except Exception:
+ self.rep_secondary.output_log(
+ MSG.SITE_INITIALIZATION_FAILED, site='secondary')
+ if not self.rep_primary:
+ raise
+ self.rep_secondary = None
+
+ def update_volume_stats(self):
+ """Update properties, capabilities and current states of the driver."""
+ if self.rep_primary:
+ data = self.rep_primary.update_volume_stats()
+ else:
+ data = self.rep_secondary.update_volume_stats()
+ return data
+
+ def _require_rep_primary(self):
+ if not self.rep_primary:
+ msg = utils.output_log(
+ MSG.SITE_NOT_INITIALIZED, storage_id=self.primary_storage_id,
+ site='primary')
+ self.raise_error(msg)
+
+ def _require_rep_secondary(self):
+ if not self.rep_secondary:
+ msg = utils.output_log(
+ MSG.SITE_NOT_INITIALIZED, storage_id=self.secondary_storage_id,
+ site='secondary')
+ self.raise_error(msg)
+
+ def _is_mirror_spec(self, extra_specs):
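+        # Mirroring is requested through the volume type extra spec
+        # '<driver_dir_name>:topology' set to 'active_active_mirror_volume'.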
+ topology = None
+ if not extra_specs:
+ return False
+ if self.driver_info.get('driver_dir_name'):
+ topology = extra_specs.get(
+ self.driver_info['driver_dir_name'] + ':topology')
+ if topology is None:
+ return False
+ elif topology == 'active_active_mirror_volume':
+ return True
+ else:
+ msg = self.rep_primary.output_log(
+ MSG.INVALID_EXTRA_SPEC_KEY,
+ key=self.driver_info['driver_dir_name'] + ':topology',
+ value=topology)
+ self.raise_error(msg)
+
+ def _create_rep_ldev(self, volume, extra_specs, rep_type, pvol=None):
+ """Create a primary volume and a secondary volume."""
+ pool_id = self.rep_secondary.storage_info['pool_id'][0]
+ ldev_range = self.rep_secondary.storage_info['ldev_range']
+ thread = greenthread.spawn(
+ self.rep_secondary.create_ldev, volume.size, extra_specs,
+ pool_id, ldev_range)
+ if pvol is None:
+ try:
+ pool_id = self.rep_primary.get_pool_id_of_volume(volume)
+ ldev_range = self.rep_primary.storage_info['ldev_range']
+ pvol = self.rep_primary.create_ldev(volume.size,
+ extra_specs,
+ pool_id, ldev_range)
+ except exception.VolumeDriverException:
+ self.rep_primary.output_log(MSG.CREATE_LDEV_FAILED)
+ try:
+ svol = thread.wait()
+ except Exception:
+ self.rep_secondary.output_log(MSG.CREATE_LDEV_FAILED)
+ svol = None
+ if pvol is None or svol is None:
+ for vol, type_, instance in zip((pvol, svol), ('P-VOL', 'S-VOL'),
+ self.instances):
+ if vol is None:
+ msg = instance.output_log(
+ MSG.CREATE_REPLICATION_VOLUME_FAILED,
+ type=type_, rep_type=rep_type,
+ volume_id=volume.id,
+ volume_type=volume.volume_type.name, size=volume.size)
+ else:
+ instance.delete_ldev(vol)
+ self.raise_error(msg)
+ thread = greenthread.spawn(
+ self.rep_secondary.modify_ldev_name,
+ svol, volume['id'].replace("-", ""))
+ try:
+ self.rep_primary.modify_ldev_name(
+ pvol, volume['id'].replace("-", ""))
+ finally:
+ thread.wait()
+ return pvol, svol
+
+ def _create_rep_copy_group_name(self, ldev):
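+        # Copy group names combine the driver target prefix, this node's IP,
+        # the hitachi_replication_number and the LDEV number divided by 1024,
+        # so LDEVs in the same block of 1024 share a copy group.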
+ return self.driver_info['target_prefix'] + '%s%02XU%02d' % (
+ CONF.my_ip, self.conf.hitachi_replication_number, ldev >> 10)
+
+ def _get_rep_copy_speed(self):
+ rep_copy_speed = self.rep_primary.conf.safe_get(
+ self.driver_info['param_prefix'] + '_replication_copy_speed')
+ if rep_copy_speed:
+ return rep_copy_speed
+ else:
+ return self.rep_primary.conf.hitachi_copy_speed
+
+ def _get_wait_pair_status_change_params(self, wait_type):
+ """Get a replication pair status information."""
+ _wait_pair_status_change_params = {
+ _WAIT_PAIR: {
+ 'instance': self.rep_primary,
+ 'remote_client': self.rep_secondary.client,
+ 'is_secondary': False,
+ 'transitional_status': ['COPY'],
+ 'expected_status': ['PAIR', 'PFUL'],
+ 'msgid': MSG.CREATE_REPLICATION_PAIR_FAILED,
+ 'status_keys': ['pvolStatus', 'svolStatus'],
+ },
+ _WAIT_PSUS: {
+ 'instance': self.rep_primary,
+ 'remote_client': self.rep_secondary.client,
+ 'is_secondary': False,
+ 'transitional_status': ['PAIR', 'PFUL'],
+ 'expected_status': ['PSUS', 'SSUS'],
+ 'msgid': MSG.SPLIT_REPLICATION_PAIR_FAILED,
+ 'status_keys': ['pvolStatus', 'svolStatus'],
+ }
+ }
+ return _wait_pair_status_change_params[wait_type]
+
+ def _wait_pair_status_change(self, copy_group_name, pvol, svol,
+ rep_type, wait_type):
+ """Wait until the replication pair status changes to the specified
+
+ status.
+ """
+ for _ in _delays(
+ self.conf.hitachi_replication_status_check_short_interval,
+ self.conf.hitachi_replication_status_check_long_interval,
+ self.conf.hitachi_replication_status_check_timeout):
+ params = self._get_wait_pair_status_change_params(wait_type)
+ status = params['instance'].client.get_remote_copypair(
+ params['remote_client'], copy_group_name, pvol, svol,
+ is_secondary=params['is_secondary'])
+ statuses = [status.get(status_key) for status_key in
+ params['status_keys']]
+ unexpected_status_set = (set(statuses) -
+ set(params['expected_status']))
+ if not unexpected_status_set:
+ break
+ if unexpected_status_set.issubset(
+ set(params['transitional_status'])):
+ continue
+ msg = params['instance'].output_log(
+ params['msgid'], rep_type=rep_type, pvol=pvol, svol=svol,
+ copy_group=copy_group_name, status='/'.join(statuses))
+ self.raise_error(msg)
+ else:
+ status = params['instance'].client.get_remote_copypair(
+ params['remote_client'], copy_group_name, pvol, svol,
+ is_secondary=params['is_secondary'])
+ msg = params['instance'].output_log(
+ MSG.PAIR_CHANGE_TIMEOUT,
+ rep_type=rep_type, pvol=pvol, svol=svol,
+ copy_group=copy_group_name, current_status='/'.join(statuses),
+ expected_status=str(params['expected_status']),
+ timeout=self.conf.hitachi_replication_status_check_timeout)
+ self.raise_error(msg)
+
+ def _create_rep_pair(self, volume, pvol, svol, rep_type,
+ do_initialcopy=True):
+ """Create a replication pair."""
+ copy_group_name = self._create_rep_copy_group_name(pvol)
+
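+        # 'inner' runs under a lock keyed on the copy group
+        # (utils.synchronized_on_copy_group); its first argument is the
+        # primary REST client, so the pair is created from the primary array
+        # against the remote array identified by secondary_storage_id.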
+ @utils.synchronized_on_copy_group()
+ def inner(self, remote_client, copy_group_name, secondary_storage_id,
+ conf, copyPace, parent):
+ is_new_copy_grp = True
+ result = self.get_remote_copy_grps(remote_client)
+ if result:
+ for data in result:
+ if copy_group_name == data['copyGroupName']:
+ is_new_copy_grp = False
+ break
+ body = {
+ 'copyGroupName': copy_group_name,
+ 'copyPairName': parent._LDEV_NAME % (pvol, svol),
+ 'replicationType': rep_type,
+ 'remoteStorageDeviceId': secondary_storage_id,
+ 'pvolLdevId': pvol,
+ 'svolLdevId': svol,
+ 'pathGroupId': conf.hitachi_path_group_id,
+ 'localDeviceGroupName': copy_group_name + 'P',
+ 'remoteDeviceGroupName': copy_group_name + 'S',
+ 'isNewGroupCreation': is_new_copy_grp,
+ 'doInitialCopy': do_initialcopy,
+ 'isDataReductionForceCopy': False
+ }
+ if rep_type == parent.driver_info['mirror_attr']:
+ body['quorumDiskId'] = conf.hitachi_quorum_disk_id
+ body['copyPace'] = copyPace
+ if is_new_copy_grp:
+ body['muNumber'] = 0
+ self.add_remote_copypair(remote_client, body)
+
+ inner(
+ self.rep_primary.client, self.rep_secondary.client,
+ copy_group_name, self.rep_secondary.storage_id,
+ self.rep_secondary.conf, self._get_rep_copy_speed(),
+ self)
+ self._wait_pair_status_change(
+ copy_group_name, pvol, svol, rep_type, _WAIT_PAIR)
+
+ def _create_rep_ldev_and_pair(
+ self, volume, extra_specs, rep_type, pvol=None):
+ """Create volume and Replication pair."""
+ capacity_saving = None
+ if self.driver_info.get('driver_dir_name'):
+ capacity_saving = extra_specs.get(
+ self.driver_info['driver_dir_name'] + ':capacity_saving')
+ if capacity_saving == 'deduplication_compression':
+ msg = self.output_log(
+ MSG.DEDUPLICATION_IS_ENABLED,
+ rep_type=rep_type, volume_id=volume.id,
+ volume_type=volume.volume_type.name, size=volume.size)
+ if pvol is not None:
+ self.rep_primary.delete_ldev(pvol)
+ self.raise_error(msg)
+ svol = None
+ pvol, svol = self._create_rep_ldev(volume, extra_specs, rep_type, pvol)
+ try:
+ thread = greenthread.spawn(
+ self.rep_secondary.initialize_pair_connection, svol)
+ try:
+ self.rep_primary.initialize_pair_connection(pvol)
+ finally:
+ thread.wait()
+ if self.rep_primary.conf.\
+ hitachi_set_mirror_reserve_attribute:
+ self.rep_secondary.client.assign_virtual_ldevid(svol)
+ self._create_rep_pair(volume, pvol, svol, rep_type)
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ if svol is not None:
+ self.rep_secondary.terminate_pair_connection(svol)
+ if self.rep_primary.conf.\
+ hitachi_set_mirror_reserve_attribute:
+ self.rep_secondary.client.unassign_virtual_ldevid(
+ svol)
+ self.rep_secondary.delete_ldev(svol)
+ if pvol is not None:
+ self.rep_primary.terminate_pair_connection(pvol)
+ self.rep_primary.delete_ldev(pvol)
+ return pvol, svol
+
+ def create_volume(self, volume):
+ """Create a volume from a volume or snapshot and return its properties.
+
+ """
+ self._require_rep_primary()
+ extra_specs = self.rep_primary.get_volume_extra_specs(volume)
+ if self._is_mirror_spec(extra_specs):
+ self._require_rep_secondary()
+ rep_type = self.driver_info['mirror_attr']
+ pldev, sldev = self._create_rep_ldev_and_pair(
+ volume, extra_specs, rep_type)
+ provider_location = _pack_rep_provider_location(
+ pldev, sldev, rep_type)
+ return {
+ 'provider_location': provider_location
+ }
+ return self.rep_primary.create_volume(volume)
+
+ def _has_rep_pair(self, ldev):
+ ldev_info = self.rep_primary.get_ldev_info(
+ ['status', 'attributes'], ldev)
+ return (ldev_info['status'] == rest.NORMAL_STS and
+ self.driver_info['mirror_attr'] in ldev_info['attributes'])
+
+ def _get_rep_pair_info(self, pldev):
+ """Return replication pair info."""
+ pair_info = {}
+ if not self._has_rep_pair(pldev):
+ return pair_info
+ self._require_rep_secondary()
+ copy_group_name = self._create_rep_copy_group_name(pldev)
+ pairs = self.rep_primary.client.get_remote_copy_grp(
+ self.rep_secondary.client,
+ copy_group_name).get('copyPairs', [])
+ for pair in pairs:
+ if (pair.get('replicationType') in
+ [self.driver_info['mirror_attr']] and
+ pair['pvolLdevId'] == pldev):
+ break
+ else:
+ return pair_info
+ pair_info['pvol'] = pldev
+ pair_info['svol_info'] = [{
+ 'ldev': pair.get('svolLdevId'),
+ 'rep_type': pair.get('replicationType'),
+ 'is_psus': pair.get('svolStatus') in ['SSUS', 'PFUS'],
+ 'pvol_status': pair.get('pvolStatus'),
+ 'svol_status': pair.get('svolStatus')}]
+ return pair_info
+
+ def _split_rep_pair(self, pvol, svol):
+ copy_group_name = self._create_rep_copy_group_name(pvol)
+ rep_type = self.driver_info['mirror_attr']
+ self.rep_primary.client.split_remote_copypair(
+ self.rep_secondary.client, copy_group_name, pvol, svol, rep_type)
+ self._wait_pair_status_change(
+ copy_group_name, pvol, svol, rep_type, _WAIT_PSUS)
+
+ def _delete_rep_pair(self, pvol, svol):
+ """Delete a replication pair."""
+ copy_group_name = self._create_rep_copy_group_name(pvol)
+ self._split_rep_pair(pvol, svol)
+ self.rep_primary.client.delete_remote_copypair(
+ self.rep_secondary.client, copy_group_name, pvol, svol)
+
+ def delete_volume(self, volume):
+ """Delete the specified volume."""
+ self._require_rep_primary()
+ ldev = self.rep_primary.get_ldev(volume)
+ if ldev is None:
+ self.rep_primary.output_log(
+ MSG.INVALID_LDEV_FOR_DELETION, method='delete_volume',
+ id=volume.id)
+ return
+ pair_info = self._get_rep_pair_info(ldev)
+ if pair_info:
+ self._delete_rep_pair(
+ pair_info['pvol'], pair_info['svol_info'][0]['ldev'])
+ thread = greenthread.spawn(
+ self.rep_secondary.delete_volume, volume)
+ try:
+ self.rep_primary.delete_volume(volume)
+ finally:
+ thread.wait()
+ else:
+ self.rep_primary.delete_volume(volume)
+
+ def delete_ldev(self, ldev):
+ self._require_rep_primary()
+ pair_info = self._get_rep_pair_info(ldev)
+ if pair_info:
+ self._delete_rep_pair(ldev, pair_info['svol_info'][0]['ldev'])
+ th = greenthread.spawn(self.rep_secondary.delete_ldev,
+ pair_info['svol_info'][0]['ldev'])
+ try:
+ self.rep_primary.delete_ldev(ldev)
+ finally:
+ th.wait()
+ else:
+ self.rep_primary.delete_ldev(ldev)
+
+ def _create_rep_volume_from_src(
+ self, volume, extra_specs, src, src_type, operation):
+ """Create a replication volume from a volume or snapshot and return
+
+ its properties.
+ """
+ rep_type = self.driver_info['mirror_attr']
+ data = self.rep_primary.create_volume_from_src(
+ volume, src, src_type, is_rep=True)
+ new_ldev = self.rep_primary.get_ldev(data)
+ sldev = self._create_rep_ldev_and_pair(
+ volume, extra_specs, rep_type, new_ldev)[1]
+ provider_location = _pack_rep_provider_location(
+ new_ldev, sldev, rep_type)
+ return {
+ 'provider_location': provider_location,
+ }
+
+ def _create_volume_from_src(self, volume, src, src_type):
+ """Create a volume from a volume or snapshot and return its properties.
+
+ """
+ self._require_rep_primary()
+ operation = ('create a volume from a %s' % src_type)
+ extra_specs = self.rep_primary.get_volume_extra_specs(volume)
+ if self._is_mirror_spec(extra_specs):
+ self._require_rep_secondary()
+ return self._create_rep_volume_from_src(
+ volume, extra_specs, src, src_type, operation)
+ return self.rep_primary.create_volume_from_src(volume, src, src_type)
+
+ def create_cloned_volume(self, volume, src_vref):
+ """Create a clone of the specified volume and return its properties."""
+ return self._create_volume_from_src(
+ volume, src_vref, common.STR_VOLUME)
+
+ def create_volume_from_snapshot(self, volume, snapshot):
+ """Create a volume from a snapshot and return its properties."""
+ return self._create_volume_from_src(
+ volume, snapshot, common.STR_SNAPSHOT)
+
+ def create_snapshot(self, snapshot):
+ """Create a snapshot from a volume and return its properties."""
+ self._require_rep_primary()
+ return self.rep_primary.create_snapshot(snapshot)
+
+ def delete_snapshot(self, snapshot):
+ """Delete the specified snapshot."""
+ self._require_rep_primary()
+ self.rep_primary.delete_snapshot(snapshot)
+
+ def _get_remote_copy_mode(self, vol):
+ provider_location = vol.get('provider_location')
+ if not provider_location:
+ return None
+ if provider_location.startswith('{'):
+ loc = json.loads(provider_location)
+ if isinstance(loc, dict):
+ return loc.get('remote-copy')
+ return None
+
+ def _merge_properties(self, prop1, prop2):
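+        # Merge the connection properties returned by the primary and
+        # secondary arrays so the host sees a single multipath device:
+        # LUN, WWN, portal and IQN lists are concatenated, as are the
+        # per-initiator target maps.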
+ if prop1 is None:
+ if prop2 is None:
+ return []
+ return prop2
+ elif prop2 is None:
+ return prop1
+ d = dict(prop1)
+ for key in ('target_luns', 'target_wwn', 'target_portals',
+ 'target_iqns'):
+ if key in d:
+ d[key] = d[key] + prop2[key]
+ if 'initiator_target_map' in d:
+ for key2 in d['initiator_target_map']:
+ d['initiator_target_map'][key2] = (
+ d['initiator_target_map'][key2]
+ + prop2['initiator_target_map'][key2])
+ return d
+
+ def initialize_connection_mirror(self, volume, connector):
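+        # Attach the volume on both arrays; for Fibre Channel the LUN
+        # assigned by the primary is reused on the secondary so that
+        # multipath sees one consistent device.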
+ lun = None
+ prop1 = None
+ prop2 = None
+ if self.rep_primary:
+ try:
+ conn_info1 = (
+ self.rep_primary.initialize_connection(
+ volume, connector, is_mirror=True))
+ except Exception as ex:
+ self.rep_primary.output_log(
+ MSG.REPLICATION_VOLUME_OPERATION_FAILED,
+ operation='attach', type='P-VOL',
+ volume_id=volume.id, reason=str(ex))
+ else:
+ prop1 = conn_info1['data']
+ if self.driver_info['volume_type'] == 'fibre_channel':
+ if 'target_lun' in prop1:
+ lun = prop1['target_lun']
+ else:
+ lun = prop1['target_luns'][0]
+ if self.rep_secondary:
+ try:
+ conn_info2 = (
+ self.rep_secondary.initialize_connection(
+ volume, connector, lun=lun, is_mirror=True))
+ except Exception as ex:
+ self.rep_secondary.output_log(
+ MSG.REPLICATION_VOLUME_OPERATION_FAILED,
+ operation='attach', type='S-VOL',
+ volume_id=volume.id, reason=str(ex))
+ if prop1 is None:
+ raise ex
+ else:
+ prop2 = conn_info2['data']
+ conn_info = {
+ 'driver_volume_type': self.driver_info['volume_type'],
+ 'data': self._merge_properties(prop1, prop2),
+ }
+ return conn_info
+
+ def initialize_connection(self, volume, connector, is_snapshot=False):
+ """Initialize connection between the server and the volume."""
+ if (self._get_remote_copy_mode(volume) ==
+ self.driver_info['mirror_attr']):
+ conn_info = self.initialize_connection_mirror(volume, connector)
+ if self.driver_info['volume_type'] == 'fibre_channel':
+ fczm_utils.add_fc_zone(conn_info)
+ return conn_info
+ else:
+ self._require_rep_primary()
+ return self.rep_primary.initialize_connection(
+ volume, connector, is_snapshot)
+
+ def terminate_connection_mirror(self, volume, connector):
+ prop1 = None
+ prop2 = None
+ if self.rep_primary:
+ try:
+ conn_info1 = self.rep_primary.terminate_connection(
+ volume, connector, is_mirror=True)
+ except Exception as ex:
+ self.rep_primary.output_log(
+ MSG.REPLICATION_VOLUME_OPERATION_FAILED,
+ operation='detach', type='P-VOL',
+ volume_id=volume.id, reason=str(ex))
+ raise ex
+ else:
+ if conn_info1:
+ prop1 = conn_info1['data']
+ if self.rep_secondary:
+ try:
+ conn_info2 = self.rep_secondary.terminate_connection(
+ volume, connector, is_mirror=True)
+ except Exception as ex:
+ self.rep_secondary.output_log(
+ MSG.REPLICATION_VOLUME_OPERATION_FAILED,
+ operation='detach', type='S-VOL',
+ volume_id=volume.id, reason=str(ex))
+ raise ex
+ else:
+ if conn_info2:
+ prop2 = conn_info2['data']
+ conn_info = {
+ 'driver_volume_type': self.driver_info['volume_type'],
+ 'data': self._merge_properties(prop1, prop2),
+ }
+ return conn_info
+
+ def terminate_connection(self, volume, connector):
+ """Terminate connection between the server and the volume."""
+ if (self._get_remote_copy_mode(volume) ==
+ self.driver_info['mirror_attr']):
+ conn_info = self.terminate_connection_mirror(volume, connector)
+ if self.driver_info['volume_type'] == 'fibre_channel':
+ fczm_utils.remove_fc_zone(conn_info)
+ return conn_info
+ else:
+ self._require_rep_primary()
+ return self.rep_primary.terminate_connection(volume, connector)
+
+ def _extend_pair_volume(self, volume, new_size, ldev, pair_info):
+ """Extend the specified replication volume to the specified size."""
+ rep_type = self.driver_info['mirror_attr']
+ pvol_info = self.rep_primary.get_ldev_info(
+ ['numOfPorts'], pair_info['pvol'])
+ if pvol_info['numOfPorts'] > 1:
+ msg = self.rep_primary.output_log(
+ MSG.EXTEND_REPLICATION_VOLUME_ERROR,
+ rep_type=rep_type, volume_id=volume.id, ldev=ldev,
+ source_size=volume.size, destination_size=new_size,
+ pvol=pair_info['pvol'], svol='',
+ pvol_num_of_ports=pvol_info['numOfPorts'],
+ svol_num_of_ports='')
+ self.raise_error(msg)
+ self._delete_rep_pair(
+ ldev, pair_info['svol_info'][0]['ldev'])
+ thread = greenthread.spawn(
+ self.rep_secondary.extend_volume, volume, new_size)
+ try:
+ self.rep_primary.extend_volume(volume, new_size)
+ finally:
+ thread.wait()
+ self._create_rep_pair(
+ volume, pair_info['pvol'], pair_info['svol_info'][0]['ldev'],
+ rep_type, do_initialcopy=False)
+
+ def extend_volume(self, volume, new_size):
+ """Extend the specified volume to the specified size."""
+ self._require_rep_primary()
+ ldev = self.rep_primary.get_ldev(volume)
+ if ldev is None:
+ msg = self.rep_primary.output_log(
+ MSG.INVALID_LDEV_FOR_EXTENSION, volume_id=volume.id)
+ self.raise_error(msg)
+ pair_info = self._get_rep_pair_info(ldev)
+ if pair_info:
+ self._extend_pair_volume(volume, new_size, ldev, pair_info)
+ else:
+ self.rep_primary.extend_volume(volume, new_size)
+
+ def manage_existing(self, volume, existing_ref):
+ """Return volume properties which Cinder needs to manage the volume."""
+ self._require_rep_primary()
+ return self.rep_primary.manage_existing(volume, existing_ref)
+
+ def manage_existing_get_size(self, volume, existing_ref):
+ """Return the size[GB] of the specified volume."""
+ self._require_rep_primary()
+ return self.rep_primary.manage_existing_get_size(volume, existing_ref)
+
+ def unmanage(self, volume):
+ """Prepare the volume for removing it from Cinder management."""
+ self._require_rep_primary()
+ ldev = self.rep_primary.get_ldev(volume)
+ if ldev is None:
+ self.rep_primary.output_log(
+ MSG.INVALID_LDEV_FOR_DELETION,
+ method='unmanage', id=volume.id)
+ return
+ if self._has_rep_pair(ldev):
+ msg = self.rep_primary.output_log(
+ MSG.REPLICATION_PAIR_ERROR,
+ operation='unmanage a volume', volume=volume.id,
+ snapshot_info='', ldev=ldev)
+ self.raise_error(msg)
+ self.rep_primary.unmanage(volume)
+
+ def discard_zero_page(self, volume):
+ self._require_rep_primary()
+ ldev = self.rep_primary.get_ldev(volume)
+ if self._has_rep_pair(ldev):
+ self._require_rep_secondary()
+ th = greenthread.spawn(
+ self.rep_secondary.discard_zero_page, volume)
+ try:
+ self.rep_primary.discard_zero_page(volume)
+ finally:
+ th.wait()
+ else:
+ self.rep_primary.discard_zero_page(volume)
+
+ def unmanage_snapshot(self, snapshot):
+ if not self.rep_primary:
+ return self.rep_secondary.unmanage_snapshot(snapshot)
+ else:
+ return self.rep_primary.unmanage_snapshot(snapshot)
+
+ def retype(self, ctxt, volume, new_type, diff, host):
+ self._require_rep_primary()
+ ldev = self.rep_primary.get_ldev(volume)
+ if ldev is None:
+ msg = self.rep_primary.output_log(
+ MSG.INVALID_LDEV_FOR_VOLUME_COPY,
+ type='volume', id=volume.id)
+ self.raise_error(msg)
+ if (self._has_rep_pair(ldev) or
+ self._is_mirror_spec(new_type['extra_specs'])):
+ return False
+ return self.rep_primary.retype(
+ ctxt, volume, new_type, diff, host)
+
+ def migrate_volume(self, volume, host):
+ self._require_rep_primary()
+ ldev = self.rep_primary.get_ldev(volume)
+ if ldev is None:
+ msg = self.rep_primary.output_log(
+ MSG.INVALID_LDEV_FOR_VOLUME_COPY,
+ type='volume', id=volume.id)
+ self.raise_error(msg)
+ if self._get_rep_pair_info(ldev):
+ return False, None
+ else:
+ return self.rep_primary.migrate_volume(volume, host)
+
+ def _resync_rep_pair(self, pvol, svol):
+ copy_group_name = self._create_rep_copy_group_name(pvol)
+ rep_type = self.driver_info['mirror_attr']
+ self.rep_primary.client.resync_remote_copypair(
+ self.rep_secondary.client, copy_group_name, pvol, svol,
+ rep_type, copy_speed=self._get_rep_copy_speed())
+ self._wait_pair_status_change(
+ copy_group_name, pvol, svol, rep_type, _WAIT_PAIR)
+
+ def revert_to_snapshot(self, volume, snapshot):
+ """Rollback the specified snapshot."""
+ self._require_rep_primary()
+ ldev = self.rep_primary.get_ldev(volume)
+ svol = self.rep_primary.get_ldev(snapshot)
+ if None in (ldev, svol):
+ raise NotImplementedError()
+ pair_info = self._get_rep_pair_info(ldev)
+ is_snap = self.rep_primary.has_snap_pair(ldev, svol)
+ if pair_info and is_snap:
+ self._split_rep_pair(pair_info['pvol'],
+ pair_info['svol_info'][0]['ldev'])
+ try:
+ self.rep_primary.revert_to_snapshot(volume, snapshot)
+ finally:
+ if pair_info and is_snap:
+ self._resync_rep_pair(pair_info['pvol'],
+ pair_info['svol_info'][0]['ldev'])
+
+ def create_group(self):
+ self._require_rep_primary()
+ return self.rep_primary.create_group()
+
+ def delete_group(self, group, volumes):
+ self._require_rep_primary()
+ return super(HBSDREPLICATION, self).delete_group(group, volumes)
+
+ def create_group_from_src(
+ self, context, group, volumes, snapshots=None, source_vols=None):
+ self._require_rep_primary()
+ return super(HBSDREPLICATION, self).create_group_from_src(
+ context, group, volumes, snapshots, source_vols)
+
+ def update_group(self, group, add_volumes=None):
+ self._require_rep_primary()
+ return self.rep_primary.update_group(group, add_volumes)
+
+ def create_group_snapshot(self, context, group_snapshot, snapshots):
+ self._require_rep_primary()
+ return self.rep_primary.create_group_snapshot(
+ context, group_snapshot, snapshots)
+
+ def delete_group_snapshot(self, group_snapshot, snapshots):
+ self._require_rep_primary()
+ return self.rep_primary.delete_group_snapshot(
+ group_snapshot, snapshots)
diff --git a/cinder/volume/drivers/hitachi/hbsd_rest.py b/cinder/volume/drivers/hitachi/hbsd_rest.py
index 6b31bc7fe..ac7eb2c86 100644
--- a/cinder/volume/drivers/hitachi/hbsd_rest.py
+++ b/cinder/volume/drivers/hitachi/hbsd_rest.py
@@ -15,6 +15,7 @@
"""REST interface module for Hitachi HBSD Driver."""
from collections import defaultdict
+import re
from oslo_config import cfg
from oslo_config import types
@@ -34,6 +35,9 @@ from cinder.volume.drivers.hitachi import hbsd_utils as utils
from cinder.volume.drivers.san import san
from cinder.volume import volume_utils
+_GROUP_NAME_PROHIBITED_CHAR_PATTERN = re.compile(
+ '[^' + common.GROUP_NAME_ALLOWED_CHARS + ']')
+
_LU_PATH_DEFINED = ('B958', '015A')
NORMAL_STS = 'NML'
_LUN_TIMEOUT = 50
@@ -87,6 +91,28 @@ _MAX_COPY_GROUP_NAME = 29
_MAX_CTG_COUNT_EXCEEDED_ADD_SNAPSHOT = ('2E10', '2302')
_MAX_PAIR_COUNT_IN_CTG_EXCEEDED_ADD_SNAPSHOT = ('2E13', '9900')
+_PAIR_TARGET_NAME_BODY_DEFAULT = 'pair00'
+
+_DR_VOL_PATTERN = {
+ 'disabled': ('REHYDRATING',),
+ 'compression_deduplication': ('ENABLED',),
+ None: ('DELETING',),
+}
+_DISABLE_ABLE_DR_STATUS = {
+ 'disabled': ('DISABLED', 'ENABLING', 'REHYDRATING'),
+ 'compression_deduplication': ('ENABLED', 'ENABLING'),
+}
+_DEDUPCOMP_ABLE_DR_STATUS = {
+ 'disabled': ('DISABLED', 'ENABLING'),
+ 'compression_deduplication': ('ENABLED', 'ENABLING'),
+}
+_CAPACITY_SAVING_DR_MODE = {
+ 'disable': 'disabled',
+ 'deduplication_compression': 'compression_deduplication',
+ '': 'disabled',
+ None: 'disabled',
+}
+
REST_VOLUME_OPTS = [
cfg.BoolOpt(
'hitachi_rest_disable_io_wait',
@@ -186,6 +212,13 @@ REST_VOLUME_OPTS = [
help='Host mode option for host group or iSCSI target.'),
]
+REST_PAIR_OPTS = [
+ cfg.ListOpt(
+ 'hitachi_rest_pair_target_ports',
+ default=[],
+        help='Target port names for the pair host group or iSCSI target'),
+]
+
_REQUIRED_REST_OPTS = [
'san_login',
'san_password',
@@ -194,21 +227,26 @@ _REQUIRED_REST_OPTS = [
CONF = cfg.CONF
CONF.register_opts(REST_VOLUME_OPTS, group=configuration.SHARED_CONF_GROUP)
+CONF.register_opts(REST_PAIR_OPTS, group=configuration.SHARED_CONF_GROUP)
LOG = logging.getLogger(__name__)
MSG = utils.HBSDMsg
-def _is_valid_target(self, target, target_name, target_ports):
+def _is_valid_target(self, target, target_name, target_ports, is_pair):
"""Check if the specified target is valid."""
+ if is_pair:
+ return (target[:utils.PORT_ID_LENGTH] in target_ports and
+ target_name == self._PAIR_TARGET_NAME)
return (target[:utils.PORT_ID_LENGTH] in target_ports and
- target_name.startswith(self.driver_info['target_prefix']))
+ target_name.startswith(self.driver_info['target_prefix']) and
+ target_name != self._PAIR_TARGET_NAME)
def _check_ldev_manageability(self, ldev_info, ldev, existing_ref):
"""Check if the LDEV meets the criteria for being managed."""
if ldev_info['status'] != NORMAL_STS:
- msg = utils.output_log(MSG.INVALID_LDEV_FOR_MANAGE)
+ msg = self.output_log(MSG.INVALID_LDEV_FOR_MANAGE)
raise exception.ManageExistingInvalidReference(
existing_ref=existing_ref, reason=msg)
attributes = set(ldev_info['attributes'])
@@ -217,20 +255,20 @@ def _check_ldev_manageability(self, ldev_info, ldev, existing_ref):
not attributes.issubset(
set(['CVS', self.driver_info['hdp_vol_attr'],
self.driver_info['hdt_vol_attr']]))):
- msg = utils.output_log(MSG.INVALID_LDEV_ATTR_FOR_MANAGE, ldev=ldev,
- ldevtype=self.driver_info['nvol_ldev_type'])
+ msg = self.output_log(MSG.INVALID_LDEV_ATTR_FOR_MANAGE, ldev=ldev,
+ ldevtype=self.driver_info['nvol_ldev_type'])
raise exception.ManageExistingInvalidReference(
existing_ref=existing_ref, reason=msg)
if ldev_info['numOfPorts']:
- msg = utils.output_log(MSG.INVALID_LDEV_PORT_FOR_MANAGE, ldev=ldev)
+ msg = self.output_log(MSG.INVALID_LDEV_PORT_FOR_MANAGE, ldev=ldev)
raise exception.ManageExistingInvalidReference(
existing_ref=existing_ref, reason=msg)
-def _check_ldev_size(ldev_info, ldev, existing_ref):
+def _check_ldev_size(self, ldev_info, ldev, existing_ref):
"""Hitachi storage calculates volume sizes in a block unit, 512 bytes."""
if ldev_info['blockCapacity'] % utils.GIGABYTE_PER_BLOCK_SIZE:
- msg = utils.output_log(MSG.INVALID_LDEV_SIZE_FOR_MANAGE, ldev=ldev)
+ msg = self.output_log(MSG.INVALID_LDEV_SIZE_FOR_MANAGE, ldev=ldev)
raise exception.ManageExistingInvalidReference(
existing_ref=existing_ref, reason=msg)
@@ -242,10 +280,24 @@ class HBSDREST(common.HBSDCommon):
"""Initialize instance variables."""
super(HBSDREST, self).__init__(conf, storage_protocol, db)
self.conf.append_config_values(REST_VOLUME_OPTS)
+ self.conf.append_config_values(REST_PAIR_OPTS)
self.conf.append_config_values(san.san_opts)
self.client = None
+ def do_setup(self, context):
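+        # Derive the name of the dedicated pair host group/iSCSI target from
+        # the configured pair target number, e.g. '<target prefix>pair00' by
+        # default.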
+ if hasattr(
+ self.conf,
+ self.driver_info['param_prefix'] + '_pair_target_number'):
+ self._PAIR_TARGET_NAME_BODY = 'pair%02d' % (
+ self.conf.safe_get(self.driver_info['param_prefix'] +
+ '_pair_target_number'))
+ else:
+ self._PAIR_TARGET_NAME_BODY = _PAIR_TARGET_NAME_BODY_DEFAULT
+ self._PAIR_TARGET_NAME = (self.driver_info['target_prefix'] +
+ self._PAIR_TARGET_NAME_BODY)
+ super(HBSDREST, self).do_setup(context)
+
def setup_client(self):
"""Initialize RestApiClient."""
verify = self.conf.driver_ssl_cert_verify
@@ -254,6 +306,9 @@ class HBSDREST(common.HBSDCommon):
if verify_path:
verify = verify_path
self.verify = verify
+ is_rep = False
+ if self.storage_id is not None:
+ is_rep = True
self.client = rest_api.RestApiClient(
self.conf,
self.conf.san_ip,
@@ -263,7 +318,8 @@ class HBSDREST(common.HBSDCommon):
self.conf.san_password,
self.driver_info['driver_prefix'],
tcp_keepalive=self.conf.hitachi_rest_tcp_keepalive,
- verify=verify)
+ verify=verify,
+ is_rep=is_rep)
self.client.login()
def need_client_setup(self):
@@ -275,22 +331,39 @@ class HBSDREST(common.HBSDCommon):
if self.client is not None:
self.client.enter_keep_session()
- def _create_ldev_on_storage(self, size, pool_id, ldev_range):
+ def _set_dr_mode(self, body, capacity_saving):
+ dr_mode = _CAPACITY_SAVING_DR_MODE.get(capacity_saving)
+ if not dr_mode:
+ msg = self.output_log(
+ MSG.INVALID_EXTRA_SPEC_KEY,
+ key=self.driver_info['driver_dir_name'] + ':capacity_saving',
+ value=capacity_saving)
+ self.raise_error(msg)
+ body['dataReductionMode'] = dr_mode
+
+ def _create_ldev_on_storage(self, size, extra_specs, pool_id, ldev_range):
"""Create an LDEV on the storage system."""
body = {
'byteFormatCapacity': '%sG' % size,
'poolId': pool_id,
'isParallelExecutionEnabled': True,
}
+ capacity_saving = None
+ if self.driver_info.get('driver_dir_name'):
+ capacity_saving = extra_specs.get(
+ self.driver_info['driver_dir_name'] + ':capacity_saving')
+ if capacity_saving:
+ self._set_dr_mode(body, capacity_saving)
if self.storage_info['ldev_range']:
min_ldev, max_ldev = self.storage_info['ldev_range'][:2]
body['startLdevId'] = min_ldev
body['endLdevId'] = max_ldev
return self.client.add_ldev(body, no_log=True)
- def create_ldev(self, size, pool_id, ldev_range):
+ def create_ldev(self, size, extra_specs, pool_id, ldev_range):
"""Create an LDEV of the specified size and the specified type."""
- ldev = self._create_ldev_on_storage(size, pool_id, ldev_range)
+ ldev = self._create_ldev_on_storage(
+ size, extra_specs, pool_id, ldev_range)
LOG.debug('Created logical device. (LDEV: %s)', ldev)
return ldev
@@ -301,12 +374,23 @@ class HBSDREST(common.HBSDCommon):
def delete_ldev_from_storage(self, ldev):
"""Delete the specified LDEV from the storage."""
- result = self.client.get_ldev(ldev)
+ result = self.get_ldev_info(['emulationType',
+ 'dataReductionMode',
+ 'dataReductionStatus'], ldev)
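+        # Deleting a deduplicated/compressed LDEV may need the force option
+        # while data reduction is enabled or being rehydrated; a FAILED
+        # data-reduction status means consistency cannot be guaranteed, so
+        # deletion is aborted.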
+ if result['dataReductionStatus'] == 'FAILED':
+ msg = self.output_log(
+ MSG.CONSISTENCY_NOT_GUARANTEE, ldev=ldev)
+ self.raise_error(msg)
+ if result['dataReductionStatus'] in _DR_VOL_PATTERN.get(
+ result['dataReductionMode'], ()):
+ body = {'isDataReductionDeleteForceExecute': True}
+ else:
+ body = None
if result['emulationType'] == 'NOT DEFINED':
- utils.output_log(MSG.LDEV_NOT_EXIST, ldev=ldev)
+ self.output_log(MSG.LDEV_NOT_EXIST, ldev=ldev)
return
self.client.delete_ldev(
- ldev,
+ ldev, body,
timeout_message=(MSG.LDEV_DELETION_WAIT_TIMEOUT, {'ldev': ldev}))
def _get_snap_pool_id(self, pvol):
@@ -348,7 +432,7 @@ class HBSDREST(common.HBSDCommon):
_wait_for_copy_pair_status, timeutils.utcnow(),
ldev, status, timeout)
if not loop.start(interval=interval).wait():
- msg = utils.output_log(
+ msg = self.output_log(
MSG.PAIR_STATUS_WAIT_TIMEOUT, svol=ldev)
self.raise_error(msg)
@@ -371,7 +455,7 @@ class HBSDREST(common.HBSDCommon):
if (utils.safe_get_err_code(ex.kwargs.get('errobj')) ==
rest_api.INVALID_SNAPSHOT_POOL and
not self.conf.hitachi_snap_pool):
- msg = utils.output_log(
+ msg = self.output_log(
MSG.INVALID_PARAMETER,
param=self.driver_info['param_prefix'] + '_snap_pool')
self.raise_error(msg)
@@ -384,7 +468,7 @@ class HBSDREST(common.HBSDCommon):
try:
self._delete_pair_from_storage(pvol, svol)
except exception.VolumeDriverException:
- utils.output_log(
+ self.output_log(
MSG.DELETE_PAIR_FAILED, pvol=pvol, svol=svol)
def _create_clone_pair(self, pvol, svol, snap_pool_id):
@@ -413,7 +497,7 @@ class HBSDREST(common.HBSDCommon):
if (utils.safe_get_err_code(ex.kwargs.get('errobj')) ==
rest_api.INVALID_SNAPSHOT_POOL and
not self.conf.hitachi_snap_pool):
- msg = utils.output_log(
+ msg = self.output_log(
MSG.INVALID_PARAMETER,
param=self.driver_info['param_prefix'] + '_snap_pool')
self.raise_error(msg)
@@ -426,7 +510,7 @@ class HBSDREST(common.HBSDCommon):
try:
self._delete_pair_from_storage(pvol, svol)
except exception.VolumeDriverException:
- utils.output_log(
+ self.output_log(
MSG.DELETE_PAIR_FAILED, pvol=pvol, svol=svol)
def create_pair_on_storage(
@@ -464,7 +548,7 @@ class HBSDREST(common.HBSDCommon):
loop = loopingcall.FixedIntervalLoopingCall(
_wait_for_copy_pair_smpl, timeutils.utcnow(), ldev)
if not loop.start(interval=interval).wait():
- msg = utils.output_log(
+ msg = self.output_log(
MSG.PAIR_STATUS_WAIT_TIMEOUT, svol=ldev)
self.raise_error(msg)
@@ -485,27 +569,65 @@ class HBSDREST(common.HBSDCommon):
pvol, mun, ignore_return_code=ignore_return_code)
self._wait_copy_pair_deleting(svol)
+ def _get_pair_ports(self):
+ return (self.storage_info['pair_ports'] or
+ self.storage_info['controller_ports'])
+
+ def terminate_pair_connection(self, ldev):
+ targets = {
+ 'list': [],
+ }
+ ldev_info = self.get_ldev_info(['status', 'attributes'], ldev)
+ if (ldev_info['status'] == NORMAL_STS and
+ self.driver_info['mirror_attr'] in ldev_info['attributes']):
+ LOG.debug(
+ 'The specified LDEV has replication pair. '
+ 'Therefore, unmapping operation was skipped. '
+ '(LDEV: %(ldev)s, vol_attr: %(info)s)',
+ {'ldev': ldev, 'info': ldev_info['attributes']})
+ return
+ self._find_mapped_targets_from_storage(
+ targets, ldev, self._get_pair_ports(), is_pair=True)
+ self.unmap_ldev(targets, ldev)
+
def delete_pair_based_on_svol(self, pvol, svol_info):
"""Disconnect all volume pairs to which the specified S-VOL belongs."""
# If the pair status does not satisfy the execution condition,
if not (svol_info['is_psus'] or
_STATUS_TABLE.get(svol_info['status']) == SMPP):
- utils.output_log(
+ self.output_log(
MSG.UNABLE_TO_DELETE_PAIR, pvol=pvol, svol=svol_info['ldev'])
self.raise_busy()
self._delete_pair_from_storage(pvol, svol_info['ldev'])
+ if hasattr(
+ self.conf,
+ self.driver_info['param_prefix'] + '_rest_pair_target_ports'):
+ self.terminate_pair_connection(svol_info['ldev'])
+ self.terminate_pair_connection(pvol)
def check_param(self):
"""Check parameter values and consistency among them."""
super(HBSDREST, self).check_param()
self.check_opts(self.conf, REST_VOLUME_OPTS)
self.check_opts(self.conf, san.san_opts)
+ if hasattr(
+ self.conf,
+ self.driver_info['param_prefix'] + '_rest_pair_target_ports'):
+ self.check_opts(self.conf, REST_PAIR_OPTS)
+ if (not self.conf.hitachi_target_ports and
+ not self.conf.hitachi_rest_pair_target_ports):
+ msg = self.output_log(
+ MSG.INVALID_PARAMETER,
+ param=self.driver_info['param_prefix'] +
+ '_target_ports or ' + self.driver_info['param_prefix'] +
+ '_rest_pair_target_ports')
+ self.raise_error(msg)
LOG.debug(
'Setting ldev_range: %s', self.storage_info['ldev_range'])
for opt in _REQUIRED_REST_OPTS:
if not self.conf.safe_get(opt):
- msg = utils.output_log(MSG.INVALID_PARAMETER, param=opt)
+ msg = self.output_log(MSG.INVALID_PARAMETER, param=opt)
self.raise_error(msg)
if not self.conf.safe_get('san_api_port'):
self.conf.san_api_port = _REST_DEFAULT_PORT
@@ -540,8 +662,8 @@ class HBSDREST(common.HBSDCommon):
else:
lun = assigned_lun
elif err_code == rest_api.ANOTHER_LDEV_MAPPED:
- utils.output_log(MSG.MAP_LDEV_FAILED,
- ldev=ldev, port=port, id=gid, lun=lun)
+ self.output_log(MSG.MAP_LDEV_FAILED,
+ ldev=ldev, port=port, id=gid, lun=lun)
return None
LOG.debug(
'Created logical unit path to the specified logical device. '
@@ -550,12 +672,18 @@ class HBSDREST(common.HBSDCommon):
{'ldev': ldev, 'port': port, 'gid': gid, 'lun': lun})
return lun
- def map_ldev(self, targets, ldev):
+ def map_ldev(self, targets, ldev, lun=None):
"""Create the path between the server and the LDEV and return LUN."""
- port, gid = targets['list'][0]
- lun = self._run_add_lun(ldev, port, gid)
- targets['lun'][port] = True
- for port, gid in targets['list'][1:]:
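+        # When a specific LUN is requested (typically the secondary attach of
+        # a mirrored volume), the same LUN must be mapped on every target
+        # port, and the attach fails if no port accepts it.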
+ raise_err = False
+ if lun is not None:
+ head = 0
+ raise_err = True
+ else:
+ head = 1
+ port, gid = targets['list'][0]
+ lun = self._run_add_lun(ldev, port, gid)
+ targets['lun'][port] = True
+ for port, gid in targets['list'][head:]:
# When multipath is configured, Nova compute expects that
# target_lun define the same value in all storage target.
# Therefore, it should use same value of lun in other target.
@@ -563,12 +691,19 @@ class HBSDREST(common.HBSDCommon):
lun2 = self._run_add_lun(ldev, port, gid, lun=lun)
if lun2 is not None:
targets['lun'][port] = True
+ raise_err = False
except exception.VolumeDriverException:
- utils.output_log(MSG.MAP_LDEV_FAILED, ldev=ldev,
- port=port, id=gid, lun=lun)
+ self.output_log(MSG.MAP_LDEV_FAILED, ldev=ldev,
+ port=port, id=gid, lun=lun)
+ if raise_err:
+ msg = self.output_log(
+ MSG.CONNECT_VOLUME_FAILED,
+ ldev=ldev, reason='Failed to attach in all ports.')
+ self.raise_error(msg)
return lun
- def attach_ldev(self, volume, ldev, connector, is_snapshot, targets):
+ def attach_ldev(
+ self, volume, ldev, connector, is_snapshot, targets, lun=None):
"""Initialize connection between the server and the volume."""
target_ports = self.get_target_ports(connector)
target_ports = self.filter_target_ports(target_ports, volume,
@@ -583,9 +718,10 @@ class HBSDREST(common.HBSDCommon):
targets['list'].sort()
for port in target_ports:
targets['lun'][port] = False
- return int(self.map_ldev(targets, ldev))
+ return int(self.map_ldev(targets, ldev, lun))
- def _find_mapped_targets_from_storage(self, targets, ldev, target_ports):
+ def _find_mapped_targets_from_storage(
+ self, targets, ldev, target_ports, is_pair=False):
"""Update port-gid list for the specified LDEV."""
ldev_info = self.get_ldev_info(['ports'], ldev)
if not ldev_info['ports']:
@@ -593,7 +729,7 @@ class HBSDREST(common.HBSDCommon):
for port_info in ldev_info['ports']:
if _is_valid_target(self, port_info['portId'],
port_info['hostGroupName'],
- target_ports):
+ target_ports, is_pair):
targets['list'].append(port_info)
def _get_unmap_targets_list(self, target_list, mapped_list):
@@ -645,7 +781,7 @@ class HBSDREST(common.HBSDCommon):
self.client.delete_host_grp(port, gid)
result = 0
except exception.VolumeDriverException:
- utils.output_log(MSG.DELETE_TARGET_FAILED, port=port, id=gid)
+ self.output_log(MSG.DELETE_TARGET_FAILED, port=port, id=gid)
else:
LOG.debug(
'Deleted target. (port: %(port)s, gid: %(gid)s)',
@@ -713,7 +849,7 @@ class HBSDREST(common.HBSDCommon):
rest_api.MSGID_SPECIFIED_OBJECT_DOES_NOT_EXIST])
if 'errorSource' in result:
- msg = utils.output_log(MSG.POOL_NOT_FOUND, pool=pool_id)
+ msg = self.output_log(MSG.POOL_NOT_FOUND, pool=pool_id)
self.raise_error(msg)
tp_cap = result['totalPoolCapacity'] // units.Ki
@@ -727,7 +863,7 @@ class HBSDREST(common.HBSDCommon):
try:
result = self.client.get_pools()
except exception.VolumeDriverException:
- utils.output_log(MSG.POOL_INFO_RETRIEVAL_FAILED, pool='all')
+ self.output_log(MSG.POOL_INFO_RETRIEVAL_FAILED, pool='all')
pool_infos = []
for pool_id in pool_ids:
for pool_data in result:
@@ -735,7 +871,7 @@ class HBSDREST(common.HBSDCommon):
cap_data = self.get_pool_info(pool_id, pool_data)
break
else:
- utils.output_log(MSG.POOL_NOT_FOUND, pool=pool_id)
+ self.output_log(MSG.POOL_NOT_FOUND, pool=pool_id)
cap_data = None
pool_infos.append(cap_data)
return pool_infos
@@ -743,11 +879,11 @@ class HBSDREST(common.HBSDCommon):
def discard_zero_page(self, volume):
"""Return the volume's no-data pages to the storage pool."""
if self.conf.hitachi_discard_zero_page:
- ldev = utils.get_ldev(volume)
+ ldev = self.get_ldev(volume)
try:
self.client.discard_zero_page(ldev)
except exception.VolumeDriverException:
- utils.output_log(MSG.DISCARD_ZERO_PAGE_FAILED, ldev=ldev)
+ self.output_log(MSG.DISCARD_ZERO_PAGE_FAILED, ldev=ldev)
def _get_copy_pair_info(self, ldev):
"""Return info of the copy pair."""
@@ -828,7 +964,7 @@ class HBSDREST(common.HBSDCommon):
"""Return the size[GB] of the specified LDEV."""
ldev_info = self.get_ldev_info(
_CHECK_LDEV_SIZE_KEYS, ldev)
- _check_ldev_size(ldev_info, ldev, existing_ref)
+ _check_ldev_size(self, ldev_info, ldev, existing_ref)
return ldev_info['blockCapacity'] / utils.GIGABYTE_PER_BLOCK_SIZE
def _get_pool_id(self, pool_list, pool_name_or_id):
@@ -840,15 +976,15 @@ class HBSDREST(common.HBSDCommon):
for pool_data in pool_list['pool_list']:
if pool_data['poolName'] == pool_name_or_id:
return pool_data['poolId']
- msg = utils.output_log(MSG.POOL_NOT_FOUND, pool=pool_name_or_id)
+ msg = self.output_log(MSG.POOL_NOT_FOUND, pool=pool_name_or_id)
self.raise_error(msg)
def check_pool_id(self):
- """Check the pool id of hitachi_pool and hitachi_snap_pool."""
+ """Check the pool id of hitachi_pools and hitachi_snap_pool."""
pool_id_list = []
pool_list = {'pool_list': None}
- for pool in self.conf.hitachi_pool:
+ for pool in self.conf.hitachi_pools:
pool_id_list.append(self._get_pool_id(pool_list, pool))
snap_pool = self.conf.hitachi_snap_pool
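
check_pool_id now walks the hitachi_pools list, resolving each entry to a pool ID while fetching the pool inventory from the array at most once; entries may be numeric IDs or pool names. A rough sketch of that lazy name-or-ID lookup, with fetch_pools standing in for the REST client call:

def resolve_pool_ids(pool_names_or_ids, fetch_pools):
    """Resolve pool names or numeric IDs to pool IDs, fetching at most once."""
    cache = {'pools': None}

    def _resolve(name_or_id):
        if name_or_id.isdigit():
            return int(name_or_id)
        if cache['pools'] is None:
            cache['pools'] = fetch_pools()   # one backend call for all entries
        for pool in cache['pools']:
            if pool['poolName'] == name_or_id:
                return pool['poolId']
        raise LookupError('pool not found: %s' % name_or_id)

    return [_resolve(entry) for entry in pool_names_or_ids]


# Example with canned data instead of a storage backend:
pools = [{'poolId': 5, 'poolName': 'gold'}, {'poolId': 7, 'poolName': 'silver'}]
print(resolve_pool_ids(['gold', '7'], lambda: pools))   # [5, 7]
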
@@ -938,11 +1074,11 @@ class HBSDREST(common.HBSDCommon):
obj_update['status'] = 'available' if isinstance(
exc, (exception.VolumeIsBusy,
exception.SnapshotIsBusy)) else 'error'
- utils.output_log(
+ self.output_log(
MSG.GROUP_OBJECT_DELETE_FAILED,
obj='snapshot' if is_snapshot else 'volume',
group='group snapshot' if is_snapshot else 'group',
- group_id=group.id, obj_id=obj.id, ldev=utils.get_ldev(obj),
+ group_id=group.id, obj_id=obj.id, ldev=self.get_ldev(obj),
reason=exc.msg)
raise loopingcall.LoopingCallDone(obj_update)
@@ -973,9 +1109,9 @@ class HBSDREST(common.HBSDCommon):
def _create_group_volume_from_src(context, volume, src, from_snapshot):
volume_model_update = {'id': volume.id}
try:
- ldev = utils.get_ldev(src)
+ ldev = self.get_ldev(src)
if ldev is None:
- msg = utils.output_log(
+ msg = self.output_log(
MSG.INVALID_LDEV_FOR_VOLUME_COPY,
type='snapshot' if from_snapshot else 'volume',
id=src.id)
@@ -1005,7 +1141,7 @@ class HBSDREST(common.HBSDCommon):
msg = volume_model_update['msg']
else:
volumes_model_update.append(volume_model_update)
- ldev = utils.get_ldev(volume_model_update)
+ ldev = self.get_ldev(volume_model_update)
if ldev is not None:
new_ldevs.append(ldev)
if not is_success:
@@ -1016,18 +1152,18 @@ class HBSDREST(common.HBSDCommon):
try:
self.delete_ldev(new_ldev)
except exception.VolumeDriverException:
- utils.output_log(MSG.DELETE_LDEV_FAILED, ldev=new_ldev)
+ self.output_log(MSG.DELETE_LDEV_FAILED, ldev=new_ldev)
return None, volumes_model_update
def update_group(self, group, add_volumes=None):
if add_volumes and volume_utils.is_group_a_cg_snapshot_type(group):
for volume in add_volumes:
- ldev = utils.get_ldev(volume)
+ ldev = self.get_ldev(volume)
if ldev is None:
- msg = utils.output_log(MSG.LDEV_NOT_EXIST_FOR_ADD_GROUP,
- volume_id=volume.id,
- group='consistency group',
- group_id=group.id)
+ msg = self.output_log(MSG.LDEV_NOT_EXIST_FOR_ADD_GROUP,
+ volume_id=volume.id,
+ group='consistency group',
+ group_id=group.id)
self.raise_error(msg)
return None, None, None
@@ -1044,7 +1180,7 @@ class HBSDREST(common.HBSDCommon):
fields.SnapshotStatus.AVAILABLE)
except Exception:
snapshot_model_update['status'] = fields.SnapshotStatus.ERROR
- utils.output_log(
+ self.output_log(
MSG.GROUP_SNAPSHOT_CREATE_FAILED,
group=group_snapshot.group_id,
group_snapshot=group_snapshot.id,
@@ -1080,8 +1216,8 @@ class HBSDREST(common.HBSDCommon):
try:
self._delete_pair_from_storage(pair['pvol'], pair['svol'])
except exception.VolumeDriverException:
- utils.output_log(MSG.DELETE_PAIR_FAILED, pvol=pair['pvol'],
- svol=pair['svol'])
+ self.output_log(MSG.DELETE_PAIR_FAILED, pvol=pair['pvol'],
+ svol=pair['svol'])
def _create_ctg_snap_pair(self, pairs):
snapshotgroup_name = self._create_ctg_snapshot_group_name(
@@ -1103,12 +1239,12 @@ class HBSDREST(common.HBSDCommon):
_MAX_CTG_COUNT_EXCEEDED_ADD_SNAPSHOT) or
(utils.safe_get_err_code(ex.kwargs.get('errobj')) ==
_MAX_PAIR_COUNT_IN_CTG_EXCEEDED_ADD_SNAPSHOT)):
- msg = utils.output_log(MSG.FAILED_CREATE_CTG_SNAPSHOT)
+ msg = self.output_log(MSG.FAILED_CREATE_CTG_SNAPSHOT)
self.raise_error(msg)
elif (utils.safe_get_err_code(ex.kwargs.get('errobj')) ==
rest_api.INVALID_SNAPSHOT_POOL and
not self.conf.hitachi_snap_pool):
- msg = utils.output_log(
+ msg = self.output_log(
MSG.INVALID_PARAMETER,
param=self.driver_info['param_prefix'] +
'_snap_pool')
@@ -1130,25 +1266,27 @@ class HBSDREST(common.HBSDCommon):
def _create_cgsnapshot_volume(snapshot):
pair = {'snapshot': snapshot}
try:
- pair['pvol'] = utils.get_ldev(snapshot.volume)
+ pair['pvol'] = self.get_ldev(snapshot.volume)
if pair['pvol'] is None:
- msg = utils.output_log(
+ msg = self.output_log(
MSG.INVALID_LDEV_FOR_VOLUME_COPY,
type='volume', id=snapshot.volume_id)
self.raise_error(msg)
size = snapshot.volume_size
pool_id = self.get_pool_id_of_volume(snapshot.volume)
ldev_range = self.storage_info['ldev_range']
- pair['svol'] = self.create_ldev(size, pool_id, ldev_range)
+ extra_specs = self.get_volume_extra_specs(snapshot.volume)
+ pair['svol'] = self.create_ldev(size, extra_specs,
+ pool_id, ldev_range)
except Exception as exc:
pair['msg'] = utils.get_exception_msg(exc)
raise loopingcall.LoopingCallDone(pair)
try:
for snapshot in snapshots:
- ldev = utils.get_ldev(snapshot.volume)
+ ldev = self.get_ldev(snapshot.volume)
if ldev is None:
- msg = utils.output_log(
+ msg = self.output_log(
MSG.INVALID_LDEV_FOR_VOLUME_COPY, type='volume',
id=snapshot.volume_id)
self.raise_error(msg)
@@ -1173,7 +1311,7 @@ class HBSDREST(common.HBSDCommon):
try:
self.delete_ldev(pair['svol'])
except exception.VolumeDriverException:
- utils.output_log(
+ self.output_log(
MSG.DELETE_LDEV_FAILED, ldev=pair['svol'])
model_update = {'status': fields.GroupSnapshotStatus.ERROR}
for snapshot in snapshots:
@@ -1195,15 +1333,87 @@ class HBSDREST(common.HBSDCommon):
else:
return self._create_non_cgsnapshot(group_snapshot, snapshots)
+ def _init_pair_targets(self, targets_info):
+ self._pair_targets = []
+ for port in targets_info.keys():
+ if not targets_info[port]:
+ continue
+ params = {'portId': port}
+ host_grp_list = self.client.get_host_grps(params)
+ gid = None
+ for host_grp_data in host_grp_list:
+ if host_grp_data['hostGroupName'] == self._PAIR_TARGET_NAME:
+ gid = host_grp_data['hostGroupNumber']
+ break
+ if not gid:
+ try:
+ connector = {
+ 'ip': self._PAIR_TARGET_NAME_BODY,
+ 'wwpns': [self._PAIR_TARGET_NAME_BODY],
+ }
+ target_name, gid = self.create_target_to_storage(
+ port, connector, None)
+ LOG.debug(
+ 'Created host group for pair operation. '
+ '(port: %(port)s, gid: %(gid)s)',
+ {'port': port, 'gid': gid})
+ except exception.VolumeDriverException:
+ self.output_log(MSG.CREATE_HOST_GROUP_FAILED, port=port)
+ continue
+ self._pair_targets.append((port, gid))
+
+ if not self._pair_targets:
+ msg = self.output_log(MSG.PAIR_TARGET_FAILED)
+ self.raise_error(msg)
+ self._pair_targets.sort(reverse=True)
+ LOG.debug('Setting pair_targets: %s', self._pair_targets)
+
+ def init_cinder_hosts(self, **kwargs):
+ targets = {
+ 'info': {},
+ 'list': [],
+ 'iqns': {},
+ 'target_map': {},
+ }
+ super(HBSDREST, self).init_cinder_hosts(targets=targets)
+ if self.storage_info['pair_ports']:
+ targets['info'] = {}
+ ports = self._get_pair_ports()
+ for port in ports:
+ targets['info'][port] = True
+ if hasattr(
+ self.conf,
+ self.driver_info['param_prefix'] + '_rest_pair_target_ports'):
+ self._init_pair_targets(targets['info'])
+
+ def initialize_pair_connection(self, ldev):
+ port, gid = None, None
+
+ for port, gid in self._pair_targets:
+ try:
+ targets = {
+ 'info': {},
+ 'list': [(port, gid)],
+ 'lun': {},
+ }
+ return self.map_ldev(targets, ldev)
+ except exception.VolumeDriverException:
+ self.output_log(
+ MSG.MAP_LDEV_FAILED, ldev=ldev, port=port, id=gid,
+ lun=None)
+
+ msg = self.output_log(MSG.MAP_PAIR_TARGET_FAILED, ldev=ldev)
+ self.raise_error(msg)
+
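
initialize_pair_connection iterates the prepared _pair_targets list and returns the LUN from the first target that accepts the mapping, raising only after every pair target has failed. The same first-success loop in isolation; map_fn is a placeholder for the real map call:

def first_successful_mapping(pair_targets, ldev, map_fn):
    """Return the result of the first target that accepts the mapping.

    ``map_fn(port, gid, ldev)`` is a stand-in for the real map call and is
    expected to raise on failure.
    """
    errors = []
    for port, gid in pair_targets:
        try:
            return map_fn(port, gid, ldev)
        except Exception as exc:       # the driver catches its own exception type
            errors.append((port, gid, exc))
    raise RuntimeError('no pair target accepted LDEV %s: %s' % (ldev, errors))
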
def migrate_volume(self, volume, host, new_type=None):
"""Migrate the specified volume."""
attachments = volume.volume_attachment
if attachments:
return False, None
- pvol = utils.get_ldev(volume)
+ pvol = self.get_ldev(volume)
if pvol is None:
- msg = utils.output_log(
+ msg = self.output_log(
MSG.INVALID_LDEV_FOR_VOLUME_COPY, type='volume', id=volume.id)
self.raise_error(msg)
@@ -1222,7 +1432,7 @@ class HBSDREST(common.HBSDCommon):
(pvol, svol, copy_method, status)
for svol, copy_method, status in
zip(svols, copy_methods, svol_statuses)]
- msg = utils.output_log(
+ msg = self.output_log(
MSG.MIGRATE_VOLUME_FAILED,
volume=volume.id, ldev=pvol,
pair_info=', '.join(pair_info))
@@ -1235,7 +1445,7 @@ class HBSDREST(common.HBSDCommon):
pair_info = '(%s, %s, %s, %s)' % (
pair_info['pvol'], svol_info['ldev'],
utils.THIN, svol_info['status'])
- msg = utils.output_log(
+ msg = self.output_log(
MSG.MIGRATE_VOLUME_FAILED,
volume=volume.id, ldev=svol_info['ldev'],
pair_info=pair_info)
@@ -1255,20 +1465,21 @@ class HBSDREST(common.HBSDCommon):
if (new_type or old_pool_id != new_pool_id or
(ldev_range and
(pvol < ldev_range[0] or ldev_range[1] < pvol))):
-
+ extra_specs = self.get_volume_extra_specs(volume)
snap_pool_id = host['capabilities']['location_info'].get(
'snap_pool_id')
ldev_range = host['capabilities']['location_info'].get(
'ldev_range')
svol = self.copy_on_storage(
- pvol, volume.size, new_pool_id, snap_pool_id, ldev_range,
+ pvol, volume.size, extra_specs, new_pool_id,
+ snap_pool_id, ldev_range,
is_snapshot=False, sync=True)
self.modify_ldev_name(svol, volume['id'].replace("-", ""))
try:
self.delete_ldev(pvol)
except exception.VolumeDriverException:
- utils.output_log(MSG.DELETE_LDEV_FAILED, ldev=pvol)
+ self.output_log(MSG.DELETE_LDEV_FAILED, ldev=pvol)
return True, {
'provider_location': str(svol),
@@ -1276,32 +1487,90 @@ class HBSDREST(common.HBSDCommon):
return True, None
+ def _is_modifiable_dr_value(self, dr_mode, dr_status, new_dr_mode, volume):
+ if (dr_status == 'REHYDRATING' and
+ new_dr_mode == 'compression_deduplication'):
+ self.output_log(MSG.VOLUME_IS_BEING_REHYDRATED,
+ volume_id=volume['id'],
+ volume_type=volume['volume_type']['name'])
+ return False
+ elif dr_status == 'FAILED':
+ self.output_log(MSG.INCONSISTENCY_DEDUPLICATION_SYSTEM_VOLUME,
+ volume_id=volume['id'],
+ volume_type=volume['volume_type']['name'])
+ return False
+ elif new_dr_mode == 'disabled':
+ return dr_status in _DISABLE_ABLE_DR_STATUS.get(dr_mode, ())
+ elif new_dr_mode == 'compression_deduplication':
+ return dr_status in _DEDUPCOMP_ABLE_DR_STATUS.get(dr_mode, ())
+ return False
+
+ def _modify_capacity_saving(self, ldev, capacity_saving):
+ body = {'dataReductionMode': capacity_saving}
+ self.client.modify_ldev(
+ ldev, body,
+ timeout_message=(
+ MSG.NOT_COMPLETED_CHANGE_VOLUME_TYPE, {'ldev': ldev}))
+
def retype(self, ctxt, volume, new_type, diff, host):
"""Retype the specified volume."""
+ diff_items = []
- def _check_specs_diff(diff):
+ def _check_specs_diff(diff, allowed_extra_specs):
for specs_key, specs_val in diff.items():
for diff_key, diff_val in specs_val.items():
+ if (specs_key == 'extra_specs' and
+ diff_key in allowed_extra_specs):
+ diff_items.append(diff_key)
+ continue
if diff_val[0] != diff_val[1]:
return False
return True
- ldev = utils.get_ldev(volume)
+ extra_specs_capacity_saving = None
+ new_capacity_saving = None
+ allowed_extra_specs = []
+ if self.driver_info.get('driver_dir_name'):
+ extra_specs_capacity_saving = (
+ self.driver_info['driver_dir_name'] + ':capacity_saving')
+ new_capacity_saving = (
+ new_type['extra_specs'].get(extra_specs_capacity_saving))
+ allowed_extra_specs.append(extra_specs_capacity_saving)
+ new_dr_mode = _CAPACITY_SAVING_DR_MODE.get(new_capacity_saving)
+ if not new_dr_mode:
+ msg = self.output_log(
+ MSG.FAILED_CHANGE_VOLUME_TYPE,
+ key=extra_specs_capacity_saving,
+ value=new_capacity_saving)
+ self.raise_error(msg)
+ ldev = self.get_ldev(volume)
if ldev is None:
- msg = utils.output_log(
+ msg = self.output_log(
MSG.INVALID_LDEV_FOR_VOLUME_COPY, type='volume',
id=volume['id'])
self.raise_error(msg)
ldev_info = self.get_ldev_info(
- ['poolId'], ldev)
+ ['dataReductionMode', 'dataReductionStatus', 'poolId'], ldev)
old_pool_id = ldev_info['poolId']
new_pool_id = host['capabilities']['location_info'].get('pool_id')
- if not _check_specs_diff(diff) or new_pool_id != old_pool_id:
+ if (not _check_specs_diff(diff, allowed_extra_specs)
+ or new_pool_id != old_pool_id):
snaps = SnapshotList.get_all_for_volume(ctxt, volume.id)
if not snaps:
return self.migrate_volume(volume, host, new_type)
return False
+ if (extra_specs_capacity_saving
+ and extra_specs_capacity_saving in diff_items):
+ ldev_info = self.get_ldev_info(
+ ['dataReductionMode', 'dataReductionStatus'], ldev)
+ if not self._is_modifiable_dr_value(
+ ldev_info['dataReductionMode'],
+ ldev_info['dataReductionStatus'], new_dr_mode, volume):
+ return False
+
+ self._modify_capacity_saving(ldev, new_dr_mode)
+
return True
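
The extended retype stays in place only when every changed extra spec is on the allow-list (currently the vendor capacity_saving key) and the pool is unchanged; anything else still falls back to migration. When only capacity saving changed, the data-reduction mode is modified directly, but only from states the status tables permit. A condensed sketch of both checks; the table contents below are illustrative, not the driver's real status sets:

# Illustrative status tables; the real sets live in hbsd_rest.py.
DISABLE_OK = {'compression_deduplication': {'ENABLED'}}
DEDUPCOMP_OK = {'disabled': {'DISABLED'}}


def can_retype_in_place(diff, allowed_keys, pool_changed):
    """True if the only differences are allow-listed extra specs."""
    if pool_changed:
        return False
    for group, changes in diff.items():
        for key, (old, new) in changes.items():
            if group == 'extra_specs' and key in allowed_keys:
                continue                       # handled by modifying the LDEV
            if old != new:
                return False                   # needs migration instead
    return True


def dr_mode_change_allowed(current_mode, current_status, new_mode):
    """Check the data-reduction state machine before modifying the LDEV."""
    if new_mode == 'disabled':
        return current_status in DISABLE_OK.get(current_mode, ())
    if new_mode == 'compression_deduplication':
        return current_status in DEDUPCOMP_OK.get(current_mode, ())
    return False
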
def wait_copy_completion(self, pvol, svol):
@@ -1309,6 +1578,26 @@ class HBSDREST(common.HBSDCommon):
self._wait_copy_pair_status(svol, set([SMPL, PSUE]))
status = self._get_copy_pair_status(svol)
if status == PSUE:
- msg = utils.output_log(
- MSG.VOLUME_COPY_FAILED, pvol=pvol, svol=svol)
+ msg = self.output_log(MSG.VOLUME_COPY_FAILED, pvol=pvol, svol=svol)
self.raise_error(msg)
+
+ def create_target_name(self, connector):
+ if ('ip' in connector and connector['ip']
+ == self._PAIR_TARGET_NAME_BODY):
+ return self._PAIR_TARGET_NAME
+ wwn = (min(self.get_hba_ids_from_connector(connector)) if
+ self.format_info['group_name_var_cnt'][
+ common.GROUP_NAME_VAR_WWN] else '')
+ ip = (connector['ip'] if self.format_info[
+ 'group_name_var_cnt'][common.GROUP_NAME_VAR_IP] else '')
+ if not self.format_info['group_name_var_cnt'][
+ common.GROUP_NAME_VAR_HOST]:
+ return self.format_info['group_name_format'].format(wwn=wwn, ip=ip)
+ host = connector['host'] if 'host' in connector else ''
+ max_host_len = (self.group_name_format['group_name_max_len'] -
+ self.format_info['group_name_format_without_var_len'] -
+ len(wwn) - len(ip))
+ host = _GROUP_NAME_PROHIBITED_CHAR_PATTERN.sub(
+ '_', host[:max_host_len])
+ return self.format_info['group_name_format'].format(
+ host=host, wwn=wwn, ip=ip)
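
create_target_name fills a configurable host-group name template that may reference {host}, {wwn} and {ip}, replaces prohibited characters and truncates the host part so the result stays within the storage's name-length limit. A self-contained approximation; the character pattern, 64-character limit and template below are assumptions made for the sketch:

import re

PROHIBITED = re.compile(r'[^A-Za-z0-9._-]')   # assumption for the sketch
MAX_LEN = 64                                  # assumption: storage name limit


def build_target_name(fmt, host, wwn='', ip=''):
    """Fill a host-group name template, truncating the host field to fit."""
    fixed_len = len(fmt.format(host='', wwn=wwn, ip=ip))
    budget = MAX_LEN - fixed_len
    safe_host = PROHIBITED.sub('_', host)[:max(budget, 0)]
    return fmt.format(host=safe_host, wwn=wwn, ip=ip)


print(build_target_name('HBSD-{host}-{wwn}', 'compute-node-01.example.com',
                        wwn='0123456789abcdef'))
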
diff --git a/cinder/volume/drivers/hitachi/hbsd_rest_api.py b/cinder/volume/drivers/hitachi/hbsd_rest_api.py
index 118b9db6d..93583396d 100644
--- a/cinder/volume/drivers/hitachi/hbsd_rest_api.py
+++ b/cinder/volume/drivers/hitachi/hbsd_rest_api.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2020, 2021, Hitachi, Ltd.
+# Copyright (C) 2020, 2022, Hitachi, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -26,8 +26,6 @@ from oslo_service import loopingcall
from oslo_utils import timeutils
import requests
from requests.adapters import HTTPAdapter
-from requests.packages.urllib3.connection import HTTPConnection
-from requests.packages.urllib3.poolmanager import PoolManager
from cinder import exception
from cinder.i18n import _
@@ -46,13 +44,18 @@ _REST_SERVER_RESTART_TIMEOUT = 10 * 60
_REST_SERVER_ERROR_TIMEOUT = 10 * 60
_KEEP_SESSION_LOOP_INTERVAL = 3 * 60
_ANOTHER_LDEV_MAPPED_RETRY_TIMEOUT = 10 * 60
+_LOCK_RESOURCE_GROUP_TIMEOUT = 3 * 60
_TCP_KEEPIDLE = 60
_TCP_KEEPINTVL = 15
_TCP_KEEPCNT = 4
+_MIRROR_RESERVED_VIRTUAL_LDEV_ID = 65535
+
_HTTPS = 'https://'
+_NOT_SPECIFIED = 'NotSpecified'
+
_REST_LOCKED_ERRORS = [
('2E11', '2205'),
('2E11', '2207'),
@@ -90,6 +93,13 @@ LOG = logging.getLogger(__name__)
MSG = utils.HBSDMsg
+def _get_device_group_name(remote_client, copy_group_name, is_secondary,
+ is_remote=False):
+ if remote_client is None and is_remote:
+ return _NOT_SPECIFIED
+ return copy_group_name + ('S' if is_secondary ^ is_remote else 'P')
+
+
def _build_base_url(ip_addr, ip_port):
return '%(https)s%(ip)s:%(port)s/ConfigurationManager' % {
'https': _HTTPS,
@@ -101,7 +111,8 @@ def _build_base_url(ip_addr, ip_port):
class KeepAliveAdapter(HTTPAdapter):
def __init__(self, conf):
- self.options = HTTPConnection.default_socket_options + [
+ self.socket_options = [
+ (socket.IPPROTO_TCP, socket.TCP_NODELAY, 1),
(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),
(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE,
conf.hitachi_rest_tcp_keepidle),
@@ -113,11 +124,9 @@ class KeepAliveAdapter(HTTPAdapter):
super(KeepAliveAdapter, self).__init__()
- def init_poolmanager(self, connections, maxsize, block=False):
- self.poolmanager = PoolManager(num_pools=connections,
- maxsize=maxsize,
- block=block,
- socket_options=self.options)
+ def init_poolmanager(self, *args, **kwargs):
+ kwargs['socket_options'] = self.socket_options
+ super(KeepAliveAdapter, self).init_poolmanager(*args, **kwargs)
class ResponseData(dict):
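
The adapter change drops the private requests.packages.urllib3 imports and instead hands socket_options to the parent init_poolmanager, which urllib3 applies to every new connection. A minimal standalone adapter along the same lines; the keepalive values are examples, and TCP_KEEPIDLE/TCP_KEEPINTVL/TCP_KEEPCNT are Linux-specific constants:

import socket

import requests
from requests.adapters import HTTPAdapter


class KeepAliveAdapter(HTTPAdapter):
    """HTTPAdapter that enables TCP keepalive on every pooled connection."""

    def __init__(self, idle=60, interval=15, count=4):
        self._socket_options = [
            (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),
            (socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, idle),      # Linux only
            (socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, interval),
            (socket.IPPROTO_TCP, socket.TCP_KEEPCNT, count),
        ]
        super().__init__()

    def init_poolmanager(self, *args, **kwargs):
        # urllib3's PoolManager forwards socket_options to each new connection.
        kwargs['socket_options'] = self._socket_options
        super().init_poolmanager(*args, **kwargs)


session = requests.Session()
session.mount('https://', KeepAliveAdapter())
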
@@ -226,7 +235,7 @@ class RestApiClient():
def __init__(self, conf, ip_addr, ip_port, storage_device_id,
user_id, user_pass, driver_prefix, tcp_keepalive=False,
- verify=False):
+ verify=False, is_rep=False):
"""Initialize instance variables."""
self.conf = conf
self.ip_addr = ip_addr
@@ -238,9 +247,12 @@ class RestApiClient():
self.tcp_keepalive = tcp_keepalive
self.verify = verify
self.connect_timeout = self.conf.hitachi_rest_connect_timeout
+ self.is_rep = is_rep
self.login_lock = threading.Lock()
self.keep_session_loop = loopingcall.FixedIntervalLoopingCall(
self._keep_session)
+ self.nested_count = 0
+ self.resource_lock = threading.Lock()
self.base_url = _build_base_url(ip_addr, self.ip_port)
self.object_url = '%(base_url)s/v1/objects/storages/%(storage_id)s' % {
@@ -295,6 +307,10 @@ class RestApiClient():
else:
read_timeout = self.conf.hitachi_rest_get_api_response_timeout
+ remote_auth = kwargs.get('remote_auth')
+ if remote_auth:
+ headers["Remote-Authorization"] = 'Session ' + remote_auth.token
+
auth_data = kwargs.get('auth', self.get_my_session())
timeout = (self.connect_timeout, read_timeout)
@@ -320,7 +336,7 @@ class RestApiClient():
verify=self.verify)
except Exception as e:
- msg = utils.output_log(
+ msg = self.output_log(
MSG.REST_SERVER_CONNECT_FAILED,
exception=type(e), message=e,
method=method, url=url, params=params, body=body)
@@ -361,11 +377,11 @@ class RestApiClient():
if (kwargs['no_retry'] or
utils.timed_out(
start_time, self.conf.hitachi_lock_timeout)):
- msg = utils.output_log(MSG.REST_API_FAILED,
- no_log=kwargs['no_log'],
- method=method, url=url,
- params=params, body=body,
- **response.get_errobj())
+ msg = self.output_log(MSG.REST_API_FAILED,
+ no_log=kwargs['no_log'],
+ method=method, url=url,
+ params=params, body=body,
+ **response.get_errobj())
if kwargs['do_raise']:
message = _(
'%(prefix)s error occurred. %(msg)s' % {
@@ -409,27 +425,27 @@ class RestApiClient():
retry = False
elif retry and utils.timed_out(start_time, kwargs['timeout']):
if kwargs['timeout_message']:
- utils.output_log(kwargs['timeout_message'][0],
- **kwargs['timeout_message'][1])
+ self.output_log(kwargs['timeout_message'][0],
+ **kwargs['timeout_message'][1])
if response.is_json():
- msg = utils.output_log(MSG.REST_API_TIMEOUT,
- no_log=kwargs['no_log'],
- method=method, url=url,
- params=params, body=body,
- **response.get_job_result())
+ msg = self.output_log(MSG.REST_API_TIMEOUT,
+ no_log=kwargs['no_log'],
+ method=method, url=url,
+ params=params, body=body,
+ **response.get_job_result())
if errobj:
- msg = utils.output_log(MSG.REST_API_FAILED,
- no_log=kwargs['no_log'],
- method=method, url=url,
- params=params, body=body,
- **response.get_errobj())
+ msg = self.output_log(MSG.REST_API_FAILED,
+ no_log=kwargs['no_log'],
+ method=method, url=url,
+ params=params, body=body,
+ **response.get_errobj())
else:
- msg = utils.output_log(MSG.REST_API_HTTP_ERROR,
- no_log=kwargs['no_log'],
- status_code=response['status_code'],
- response_body=rsp_body,
- method=method, url=url,
- params=params, body=body)
+ msg = self.output_log(MSG.REST_API_HTTP_ERROR,
+ no_log=kwargs['no_log'],
+ status_code=response['status_code'],
+ response_body=rsp_body,
+ method=method, url=url,
+ params=params, body=body)
if kwargs['do_raise']:
message = _(
'%(prefix)s error occurred. %(msg)s' % {
@@ -448,18 +464,18 @@ class RestApiClient():
if not retry:
if response.is_json():
- msg = utils.output_log(MSG.REST_API_FAILED,
- no_log=kwargs['no_log'],
- method=method, url=url,
- params=params, body=body,
- **response.get_errobj())
+ msg = self.output_log(MSG.REST_API_FAILED,
+ no_log=kwargs['no_log'],
+ method=method, url=url,
+ params=params, body=body,
+ **response.get_errobj())
else:
- msg = utils.output_log(MSG.REST_API_HTTP_ERROR,
- no_log=kwargs['no_log'],
- status_code=response['status_code'],
- response_body=rsp_body,
- method=method, url=url,
- params=params, body=body)
+ msg = self.output_log(MSG.REST_API_HTTP_ERROR,
+ no_log=kwargs['no_log'],
+ status_code=response['status_code'],
+ response_body=rsp_body,
+ method=method, url=url,
+ params=params, body=body)
if kwargs['do_raise']:
message = _(
'%(prefix)s error occurred. %(msg)s' % {
@@ -471,6 +487,39 @@ class RestApiClient():
message, errobj=errobj)
return retry, rsp_body, errobj
+ def lock_resource_group(self, waittime=_LOCK_RESOURCE_GROUP_TIMEOUT):
+ """Lock resources.
+
+ Lock resources of a resource group allocated to the user who
+ executes API requests, preventing other users from performing
+ operations on the resources.
+ """
+ with self.resource_lock:
+ if self.nested_count <= 0:
+ url = '%(url)s/resource-group-service/actions/%(action)s' % {
+ 'url': self.service_url,
+ 'action': 'lock',
+ } + '/invoke'
+ if waittime:
+ body = {"parameters": {"waitTime": waittime}}
+ self._invoke(url, body=body, timeout=waittime)
+ else:
+ self._invoke(url)
+ self.nested_count += 1
+
+ def unlock_resource_group(self):
+ """If the lock is already released, there is no need to unlock."""
+ with self.resource_lock:
+ if self.nested_count == 0:
+ return
+ self.nested_count -= 1
+ if self.nested_count <= 0:
+ url = '%(url)s/resource-group-service/actions/%(action)s' % {
+ 'url': self.service_url,
+ 'action': 'unlock',
+ } + '/invoke'
+ self._invoke(url)
+
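
lock_resource_group and unlock_resource_group keep a nesting counter behind a thread lock, so the REST lock request is sent only on the outermost acquire and the unlock only when the counter falls back to zero. The same count-based pattern as a small generic sketch; invoke_lock and invoke_unlock stand in for the REST calls:

import threading


class CountedLock:
    """Issue an expensive external lock only on the outermost acquire."""

    def __init__(self, invoke_lock, invoke_unlock):
        self._invoke_lock = invoke_lock
        self._invoke_unlock = invoke_unlock
        self._count = 0
        self._guard = threading.Lock()

    def acquire(self):
        with self._guard:
            if self._count <= 0:
                self._invoke_lock()          # one external call per outer lock
            self._count += 1

    def release(self):
        with self._guard:
            if self._count == 0:
                return                       # already released; nothing to do
            self._count -= 1
            if self._count <= 0:
                self._invoke_unlock()
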
def set_my_session(self, session):
self.session = session
@@ -527,7 +576,7 @@ class RestApiClient():
LOG.debug("Trying to re-login.")
retry = self._login(do_raise=False)
if not retry:
- utils.output_log(
+ self.output_log(
MSG.REST_LOGIN_FAILED,
no_log=no_log, user=self.user_id)
return retry
@@ -623,13 +672,13 @@ class RestApiClient():
}
self._delete_object(url, body=body, **kwargs)
- def modify_ldev(self, ldev_id, body):
+ def modify_ldev(self, ldev_id, body, **kwargs):
"""Modify a ldev information."""
url = '%(url)s/ldevs/%(id)s' % {
'url': self.object_url,
'id': ldev_id,
}
- self._invoke(url, body=body)
+ self._invoke(url, body=body, **kwargs)
def extend_ldev(self, ldev_id, body):
"""Expand a ldev size."""
@@ -838,3 +887,171 @@ class RestApiClient():
'action': 'discard-zero-page',
}
self._invoke(url)
+
+ def get_remote_copy_grps(self, remote_client):
+ url = '%(url)s/remote-mirror-copygroups' % {
+ 'url': self.object_url,
+ }
+ params = {"remoteStorageDeviceId": remote_client.storage_id}
+ with RemoteSession(remote_client) as session:
+ return self._get_objects(url, params=params, remote_auth=session)
+
+ def get_remote_copy_grp(self, remote_client, copy_group_name, **kwargs):
+ url = '%(url)s/remote-mirror-copygroups/%(id)s' % {
+ 'url': self.object_url,
+ 'id': self._remote_copygroup_id(remote_client, copy_group_name),
+ }
+ with RemoteSession(remote_client) as session:
+ return self._get_object(url, remote_auth=session, **kwargs)
+
+ def get_remote_copypair(self, remote_client, copy_group_name,
+ pvol_ldev_id, svol_ldev_id, is_secondary=False,
+ **kwargs):
+ url = '%(url)s/remote-mirror-copypairs/%(id)s' % {
+ 'url': self.object_url,
+ 'id': self._remote_copypair_id(
+ remote_client, copy_group_name, pvol_ldev_id, svol_ldev_id,
+ is_secondary),
+ }
+ if remote_client:
+ with RemoteSession(remote_client) as session:
+ return self._get_object(url, remote_auth=session, **kwargs)
+ return self._get_object(url, **kwargs)
+
+ def add_remote_copypair(self, remote_client, body):
+ url = '%(url)s/remote-mirror-copypairs' % {
+ 'url': self.object_url,
+ }
+ if self.storage_id > remote_client.storage_id:
+ client1, client2 = self, remote_client
+ else:
+ client1, client2 = remote_client, self
+ with ResourceGroupLock(client1):
+ with ResourceGroupLock(client2):
+ session = remote_client.get_my_session()
+ return self._add_object(url, body=body,
+ no_relogin=True,
+ remote_auth=session,
+ job_nowait=True)[0]
+
+ @utils.synchronized_on_copy_group()
+ def split_remote_copypair(self, remote_client, copy_group_name,
+ pvol_ldev_id, svol_ldev_id, rep_type):
+ body = {"parameters": {"replicationType": rep_type}}
+ url = '%(url)s/remote-mirror-copypairs/%(id)s/actions/%(action)s' % {
+ 'url': self.object_url,
+ 'id': self._remote_copypair_id(remote_client, copy_group_name,
+ pvol_ldev_id, svol_ldev_id),
+ 'action': 'split',
+ } + '/invoke'
+ with RemoteSession(remote_client) as session:
+ self._invoke(url, body=body, remote_auth=session, job_nowait=True)
+
+ @utils.synchronized_on_copy_group()
+ def resync_remote_copypair(
+ self, remote_client, copy_group_name, pvol_ldev_id, svol_ldev_id,
+ rep_type, copy_speed=None):
+ body = {"parameters": {"replicationType": rep_type}}
+ if copy_speed:
+ body["parameters"]["copyPace"] = copy_speed
+ url = '%(url)s/remote-mirror-copypairs/%(id)s/actions/%(action)s' % {
+ 'url': self.object_url,
+ 'id': self._remote_copypair_id(remote_client, copy_group_name,
+ pvol_ldev_id, svol_ldev_id),
+ 'action': 'resync',
+ } + '/invoke'
+ with RemoteSession(remote_client) as session:
+ self._invoke(url, body=body, remote_auth=session, job_nowait=True)
+
+ @utils.synchronized_on_copy_group()
+ def delete_remote_copypair(self, remote_client, copy_group_name,
+ pvol_ldev_id, svol_ldev_id):
+ url = '%(url)s/remote-mirror-copypairs/%(id)s' % {
+ 'url': self.object_url,
+ 'id': self._remote_copypair_id(
+ remote_client, copy_group_name, pvol_ldev_id, svol_ldev_id),
+ }
+ if self.storage_id > remote_client.storage_id:
+ client1, client2 = self, remote_client
+ else:
+ client1, client2 = remote_client, self
+ with ResourceGroupLock(client1):
+ with ResourceGroupLock(client2):
+ session = remote_client.get_my_session()
+ self._delete_object(
+ url, no_relogin=True, remote_auth=session)
+
+ def _remote_copygroup_id(self, remote_client, copy_group_name,
+ is_secondary=False):
+ storage_id = (remote_client.storage_id if remote_client
+ else _NOT_SPECIFIED)
+ return "%s,%s,%s,%s" % (
+ storage_id,
+ copy_group_name,
+ _get_device_group_name(remote_client, copy_group_name,
+ is_secondary),
+ _get_device_group_name(remote_client, copy_group_name,
+ is_secondary, is_remote=True))
+
+ def _remote_copypair_id(self, remote_client, copy_group_name,
+ pvol_ldev_id, svol_ldev_id, is_secondary=False):
+ return "%s,HBSD-LDEV-%d-%d" % (
+ self._remote_copygroup_id(remote_client, copy_group_name,
+ is_secondary),
+ pvol_ldev_id,
+ svol_ldev_id)
+
+ def assign_virtual_ldevid(
+ self, ldev_id,
+ virtual_ldev_id=_MIRROR_RESERVED_VIRTUAL_LDEV_ID):
+ url = '%(url)s/ldevs/%(id)s/actions/%(action)s/invoke' % {
+ 'url': self.object_url,
+ 'id': ldev_id,
+ 'action': 'assign-virtual-ldevid',
+ }
+ body = {"parameters": {"virtualLdevId": virtual_ldev_id}}
+ ignore_error = [('2E21', '9305'), ('2E30', '0088')]
+ self._invoke(url, body=body, ignore_error=ignore_error)
+
+ def unassign_virtual_ldevid(
+ self, ldev_id,
+ virtual_ldev_id=_MIRROR_RESERVED_VIRTUAL_LDEV_ID):
+ url = '%(url)s/ldevs/%(id)s/actions/%(action)s/invoke' % {
+ 'url': self.object_url,
+ 'id': ldev_id,
+ 'action': 'unassign-virtual-ldevid',
+ }
+ body = {"parameters": {"virtualLdevId": virtual_ldev_id}}
+ self._invoke(url, body=body)
+
+ def output_log(self, msg_enum, **kwargs):
+ if self.is_rep:
+ return utils.output_log(
+ msg_enum, storage_id=self.storage_id, **kwargs)
+ else:
+ return utils.output_log(msg_enum, **kwargs)
+
+
+class RemoteSession(object):
+
+ def __init__(self, remote_client):
+ self.remote_client = remote_client
+
+ def __enter__(self):
+ return self.remote_client.get_my_session()
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ pass
+
+
+class ResourceGroupLock(object):
+
+ def __init__(self, client):
+ self.client = client
+
+ def __enter__(self):
+ self.client.lock_resource_group()
+ return self
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ self.client.unlock_resource_group()
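
ResourceGroupLock wraps that acquire/release in a context manager, and add_remote_copypair and delete_remote_copypair always nest the two arrays' locks in a fixed order (the client with the larger storage_id first) so concurrent pair operations from different hosts cannot deadlock. A compact illustration of the ordering idea; the Client class here is a toy stand-in, not the driver's REST client:

class Client:
    """Toy stand-in for a storage REST client with a lockable resource group."""

    def __init__(self, storage_id):
        self.storage_id = storage_id

    def lock_resource_group(self):
        print('lock', self.storage_id)

    def unlock_resource_group(self):
        print('unlock', self.storage_id)


class ResourceGroupLock:
    def __init__(self, client):
        self.client = client

    def __enter__(self):
        self.client.lock_resource_group()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.client.unlock_resource_group()


def lock_both(local, remote):
    """Always nest the locks in the same order to avoid deadlocks."""
    first, second = ((local, remote) if local.storage_id > remote.storage_id
                     else (remote, local))
    return ResourceGroupLock(first), ResourceGroupLock(second)


outer, inner = lock_both(Client('800000012345'), Client('800000054321'))
with outer, inner:
    pass   # create or delete the remote copy pair here
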
diff --git a/cinder/volume/drivers/hitachi/hbsd_rest_fc.py b/cinder/volume/drivers/hitachi/hbsd_rest_fc.py
index 6006afa02..c4df4dee3 100644
--- a/cinder/volume/drivers/hitachi/hbsd_rest_fc.py
+++ b/cinder/volume/drivers/hitachi/hbsd_rest_fc.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2020, 2021, Hitachi, Ltd.
+# Copyright (C) 2020, 2023, Hitachi, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -57,6 +57,12 @@ class HBSDRESTFC(rest.HBSDREST):
"""Prepare for using the storage."""
target_ports = self.conf.hitachi_target_ports
compute_target_ports = self.conf.hitachi_compute_target_ports
+ if hasattr(
+ self.conf,
+ self.driver_info['param_prefix'] + '_rest_pair_target_ports'):
+ pair_target_ports = self.conf.hitachi_rest_pair_target_ports
+ else:
+ pair_target_ports = []
available_ports = []
available_compute_ports = []
@@ -64,13 +70,15 @@ class HBSDRESTFC(rest.HBSDREST):
# The port attributes must contain TAR.
params = {'portAttributes': 'TAR'}
port_list = self.client.get_ports(params=params)
- for port in set(target_ports + compute_target_ports):
+ for port in set(target_ports + compute_target_ports +
+ pair_target_ports):
if port not in [port_data['portId'] for port_data in port_list]:
- utils.output_log(MSG.INVALID_PORT, port=port,
- additional_info='portAttributes: not TAR')
+ self.output_log(MSG.INVALID_PORT, port=port,
+ additional_info='portAttributes: not TAR')
for port_data in port_list:
port = port_data['portId']
- if port not in set(target_ports + compute_target_ports):
+ if port not in set(target_ports + compute_target_ports +
+ pair_target_ports):
continue
secure_fc_port = True
can_port_schedule = True
@@ -89,7 +97,7 @@ class HBSDRESTFC(rest.HBSDREST):
port_data.get('portConnection') == 'PtoP')):
can_port_schedule = False
if not secure_fc_port or not can_port_schedule:
- utils.output_log(
+ self.output_log(
MSG.INVALID_PORT, port=port,
additional_info='portType: %s, lunSecuritySetting: %s, '
'fabricMode: %s, portConnection: %s' %
@@ -107,6 +115,8 @@ class HBSDRESTFC(rest.HBSDREST):
can_port_schedule):
available_compute_ports.append(port)
self.storage_info['wwns'][port] = wwn
+ if pair_target_ports and port in pair_target_ports:
+ self.storage_info['pair_ports'].append(port)
if target_ports:
for port in target_ports:
@@ -118,8 +128,14 @@ class HBSDRESTFC(rest.HBSDREST):
self.storage_info['compute_ports'].append(port)
self.check_ports_info()
- utils.output_log(MSG.SET_CONFIG_VALUE, object='port-wwn list',
- value=self.storage_info['wwns'])
+ if pair_target_ports and not self.storage_info['pair_ports']:
+ msg = self.output_log(
+ MSG.RESOURCE_NOT_FOUND, resource="Pair target ports")
+ self.raise_error(msg)
+ self.output_log(MSG.SET_CONFIG_VALUE, object='pair_target_ports',
+ value=self.storage_info['pair_ports'])
+ self.output_log(MSG.SET_CONFIG_VALUE, object='port-wwn list',
+ value=self.storage_info['wwns'])
def check_param(self):
"""Check parameter values and consistency among them."""
@@ -128,11 +144,7 @@ class HBSDRESTFC(rest.HBSDREST):
def create_target_to_storage(self, port, connector, hba_ids):
"""Create a host group on the specified port."""
- wwpns = self.get_hba_ids_from_connector(connector)
- target_name = '%(prefix)s-%(wwpns)s' % {
- 'prefix': self.driver_info['driver_prefix'],
- 'wwpns': min(wwpns),
- }
+ target_name = self.create_target_name(connector)
try:
body = {'portId': port,
'hostGroupName': target_name}
@@ -154,15 +166,15 @@ class HBSDRESTFC(rest.HBSDREST):
self.client.add_hba_wwn(port, gid, wwn, no_log=True)
registered_wwns.append(wwn)
except exception.VolumeDriverException as ex:
- utils.output_log(MSG.ADD_HBA_WWN_FAILED, port=port, gid=gid,
- wwn=wwn)
+ self.output_log(MSG.ADD_HBA_WWN_FAILED, port=port, gid=gid,
+ wwn=wwn)
if (self.get_port_scheduler_param() and
utils.safe_get_err_code(ex.kwargs.get('errobj'))
== rest_api.EXCEED_WWN_MAX):
raise ex
if not registered_wwns:
- msg = utils.output_log(MSG.NO_HBA_WWN_ADDED_TO_HOST_GRP, port=port,
- gid=gid)
+ msg = self.output_log(MSG.NO_HBA_WWN_ADDED_TO_HOST_GRP, port=port,
+ gid=gid)
self.raise_error(msg)
def set_target_mode(self, port, gid):
@@ -232,12 +244,7 @@ class HBSDRESTFC(rest.HBSDREST):
self, targets, connector, target_ports):
"""Find mapped ports, memorize them and return unmapped port count."""
wwpns = self.get_hba_ids_from_connector(connector)
- target_names = [
- '%(prefix)s-%(wwpns)s' % {
- 'prefix': self.driver_info['driver_prefix'],
- 'wwpns': min(wwpns),
- }
- ]
+ target_names = [self.create_target_name(connector)]
if 'ip' in connector:
target_names.append(
'%(prefix)s-%(ip)s' % {
@@ -274,10 +281,12 @@ class HBSDRESTFC(rest.HBSDREST):
return not_found_count
- def initialize_connection(self, volume, connector, is_snapshot=False):
+ def initialize_connection(
+ self, volume, connector, is_snapshot=False, lun=None,
+ is_mirror=False):
"""Initialize connection between the server and the volume."""
conn_info, map_info = super(HBSDRESTFC, self).initialize_connection(
- volume, connector, is_snapshot)
+ volume, connector, is_snapshot, lun)
if self.conf.hitachi_zoning_request:
if (self.get_port_scheduler_param() and
not self.is_controller(connector)):
@@ -288,10 +297,11 @@ class HBSDRESTFC(rest.HBSDREST):
self._lookup_service)
if init_targ_map:
conn_info['data']['initiator_target_map'] = init_targ_map
- fczm_utils.add_fc_zone(conn_info)
+ if not is_mirror:
+ fczm_utils.add_fc_zone(conn_info)
return conn_info
- def terminate_connection(self, volume, connector):
+ def terminate_connection(self, volume, connector, is_mirror=False):
"""Terminate connection between the server and the volume."""
conn_info = super(HBSDRESTFC, self).terminate_connection(
volume, connector)
@@ -302,7 +312,8 @@ class HBSDRESTFC(rest.HBSDREST):
self._lookup_service)
if init_targ_map:
conn_info['data']['initiator_target_map'] = init_targ_map
- fczm_utils.remove_fc_zone(conn_info)
+ if not is_mirror:
+ fczm_utils.remove_fc_zone(conn_info)
return conn_info
def _get_wwpns(self, port, hostgroup):
@@ -344,8 +355,8 @@ class HBSDRESTFC(rest.HBSDREST):
active_hba_ids = list(set(active_hba_ids))
if not active_hba_ids:
- msg = utils.output_log(MSG.NO_ACTIVE_WWN, wwn=', '.join(hba_ids),
- volume=vol_id)
+ msg = self.output_log(MSG.NO_ACTIVE_WWN, wwn=', '.join(hba_ids),
+ volume=vol_id)
self.raise_error(msg)
active_target_wwns = list(set(active_target_wwns))
@@ -356,7 +367,7 @@ class HBSDRESTFC(rest.HBSDREST):
port_wwns += ", "
port_wwns += ("port, WWN: " + port +
", " + self.storage_info['wwns'][port])
- msg = utils.output_log(
+ msg = self.output_log(
MSG.NO_PORT_WITH_ACTIVE_WWN, port_wwns=port_wwns,
volume=vol_id)
self.raise_error(msg)
@@ -380,17 +391,17 @@ class HBSDRESTFC(rest.HBSDREST):
== rest_api.MSGID_SPECIFIED_OBJECT_DOES_NOT_EXIST)
or (_MSG_EXCEED_HOST_GROUP_MAX
in utils.safe_get_message(ex.kwargs.get('errobj')))):
- utils.output_log(
+ self.output_log(
MSG.HOST_GROUP_NUMBER_IS_MAXIMUM, port=ports[index])
elif (utils.safe_get_err_code(ex.kwargs.get('errobj'))
== rest_api.EXCEED_WWN_MAX):
- utils.output_log(
+ self.output_log(
MSG.WWN_NUMBER_IS_MAXIMUM, port=ports[index],
wwn=", ". join(hba_ids))
else:
raise ex
- msg = utils.output_log(
+ msg = self.output_log(
MSG.HOST_GROUP_OR_WWN_IS_NOT_AVAILABLE, ports=', '.join(ports))
self.raise_error(msg)
@@ -400,7 +411,7 @@ class HBSDRESTFC(rest.HBSDREST):
active_ports = []
if not devmap:
- msg = utils.output_log(MSG.ZONE_MANAGER_IS_NOT_AVAILABLE)
+ msg = self.output_log(MSG.ZONE_MANAGER_IS_NOT_AVAILABLE)
self.raise_error(msg)
for fabric_name in devmap.keys():
available_ports = []
@@ -418,7 +429,7 @@ class HBSDRESTFC(rest.HBSDREST):
if port in available_ports and port in filter_ports:
active_ports.append(port)
elif port not in available_ports and port in filter_ports:
- utils.output_log(
+ self.output_log(
MSG.INVALID_PORT_BY_ZONE_MANAGER, port=port)
for wwpns in wwpn_groups:
try:
diff --git a/cinder/volume/drivers/hitachi/hbsd_rest_iscsi.py b/cinder/volume/drivers/hitachi/hbsd_rest_iscsi.py
index 2b3224fdf..33e5cb5ff 100644
--- a/cinder/volume/drivers/hitachi/hbsd_rest_iscsi.py
+++ b/cinder/volume/drivers/hitachi/hbsd_rest_iscsi.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2020, 2021, Hitachi, Ltd.
+# Copyright (C) 2020, 2023, Hitachi, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -46,20 +46,28 @@ class HBSDRESTISCSI(rest.HBSDREST):
"""Prepare for using the storage."""
target_ports = self.conf.hitachi_target_ports
compute_target_ports = self.conf.hitachi_compute_target_ports
+ if hasattr(
+ self.conf,
+ self.driver_info['param_prefix'] + '_rest_pair_target_ports'):
+ pair_target_ports = self.conf.hitachi_rest_pair_target_ports
+ else:
+ pair_target_ports = []
super(HBSDRESTISCSI, self).connect_storage()
# The port type must be ISCSI and the port attributes must contain TAR.
params = {'portType': 'ISCSI',
'portAttributes': 'TAR'}
port_list = self.client.get_ports(params=params)
- for port in set(target_ports + compute_target_ports):
+ for port in set(target_ports + compute_target_ports +
+ pair_target_ports):
if port not in [port_data['portId'] for port_data in port_list]:
- utils.output_log(
+ self.output_log(
MSG.INVALID_PORT, port=port, additional_info='(portType, '
'portAttributes): not (ISCSI, TAR)')
for port_data in port_list:
port = port_data['portId']
- if port not in set(target_ports + compute_target_ports):
+ if port not in set(target_ports + compute_target_ports +
+ pair_target_ports):
continue
has_addr = True
if not port_data['lunSecuritySetting']:
@@ -70,7 +78,7 @@ class HBSDRESTISCSI(rest.HBSDREST):
addr_info = (', ipv4Address: %s, tcpPort: %s' %
(ipv4_addr, tcp_port))
if not port_data['lunSecuritySetting'] or not has_addr:
- utils.output_log(
+ self.output_log(
MSG.INVALID_PORT, port=port,
additional_info='portType: %s, lunSecuritySetting: %s%s' %
(port_data['portType'], port_data['lunSecuritySetting'],
@@ -82,18 +90,24 @@ class HBSDRESTISCSI(rest.HBSDREST):
if (compute_target_ports and port in compute_target_ports and
has_addr):
self.storage_info['compute_ports'].append(port)
+ if pair_target_ports and port in pair_target_ports:
+ self.storage_info['pair_ports'].append(port)
self.check_ports_info()
- utils.output_log(MSG.SET_CONFIG_VALUE,
- object='port-<IP address:port> list',
- value=self.storage_info['portals'])
+ if pair_target_ports and not self.storage_info['pair_ports']:
+ msg = self.output_log(
+ MSG.RESOURCE_NOT_FOUND, resource="Pair target ports")
+ self.raise_error(msg)
+ self.output_log(MSG.SET_CONFIG_VALUE,
+ object='pair_target_ports',
+ value=self.storage_info['pair_ports'])
+ self.output_log(MSG.SET_CONFIG_VALUE,
+ object='port-<IP address:port> list',
+ value=self.storage_info['portals'])
def create_target_to_storage(self, port, connector, hba_ids):
"""Create an iSCSI target on the specified port."""
- target_name = '%(prefix)s-%(ip)s' % {
- 'prefix': self.driver_info['driver_prefix'],
- 'ip': connector['ip'],
- }
+ target_name = self.create_target_name(connector)
body = {'portId': port, 'hostGroupName': target_name}
if hba_ids:
body['iscsiName'] = '%(id)s%(suffix)s' % {
@@ -183,10 +197,7 @@ class HBSDRESTISCSI(rest.HBSDREST):
for port in target_ports:
targets['info'][port] = False
if 'ip' in connector:
- target_name = '%(prefix)s-%(ip)s' % {
- 'prefix': self.driver_info['driver_prefix'],
- 'ip': connector['ip'],
- }
+ target_name = self.create_target_name(connector)
if self._set_target_info_by_name(
targets, port, target_name, iqn):
continue
@@ -200,12 +211,19 @@ class HBSDRESTISCSI(rest.HBSDREST):
not_found_count += 1
return not_found_count
- def initialize_connection(self, volume, connector, is_snapshot=False):
+ def initialize_connection(
+ self, volume, connector, is_snapshot=False, lun=None,
+ is_mirror=False):
"""Initialize connection between the server and the volume."""
conn_info, map_info = super(HBSDRESTISCSI, self).initialize_connection(
- volume, connector, is_snapshot)
+ volume, connector, is_snapshot, lun)
return conn_info
+ def terminate_connection(self, volume, connector, is_mirror=False):
+ """Terminate connection between the server and the volume."""
+ return super(HBSDRESTISCSI, self).terminate_connection(
+ volume, connector)
+
def get_properties_iscsi(self, targets, multipath):
"""Return iSCSI-specific server-LDEV connection info."""
if not multipath:
@@ -219,8 +237,8 @@ class HBSDRESTISCSI(rest.HBSDREST):
target_info = self.client.get_host_grp(port, gid)
iqn = target_info.get('iscsiName') if target_info else None
if not iqn:
- msg = utils.output_log(MSG.RESOURCE_NOT_FOUND,
- resource='Target IQN')
+ msg = self.output_log(MSG.RESOURCE_NOT_FOUND,
+ resource='Target IQN')
self.raise_error(msg)
targets['iqns'][target] = iqn
LOG.debug(
diff --git a/cinder/volume/drivers/hitachi/hbsd_utils.py b/cinder/volume/drivers/hitachi/hbsd_utils.py
index 1868ee120..b5f2ee90a 100644
--- a/cinder/volume/drivers/hitachi/hbsd_utils.py
+++ b/cinder/volume/drivers/hitachi/hbsd_utils.py
@@ -15,17 +15,17 @@
"""Utility module for Hitachi HBSD Driver."""
import enum
+import functools
import logging as base_logging
-from oslo_config import cfg
from oslo_log import log as logging
-from oslo_utils import excutils
from oslo_utils import timeutils
from oslo_utils import units
from cinder import exception
+from cinder import utils as cinder_utils
-VERSION = '2.3.1'
+VERSION = '2.3.5'
CI_WIKI_NAME = 'Hitachi_VSP_CI'
PARAM_PREFIX = 'hitachi'
VENDOR_NAME = 'Hitachi'
@@ -38,9 +38,13 @@ HDT_VOL_ATTR = 'HDT'
NVOL_LDEV_TYPE = 'DP-VOL'
TARGET_IQN_SUFFIX = '.hbsd-target'
PAIR_ATTR = 'HTI'
+MIRROR_ATTR = 'GAD'
GIGABYTE_PER_BLOCK_SIZE = units.Gi / 512
+PRIMARY_STR = 'primary'
+SECONDARY_STR = 'secondary'
+
NORMAL_LDEV_TYPE = 'Normal'
FULL = 'Full copy'
@@ -187,6 +191,25 @@ class HBSDMsg(enum.Enum):
'%(volume_type)s)',
'suffix': WARNING_SUFFIX,
}
+ VOLUME_IS_BEING_REHYDRATED = {
+ 'msg_id': 333,
+ 'loglevel': base_logging.WARNING,
+ 'msg': 'Retyping the volume will be performed using migration '
+ 'because the specified volume is being rehydrated. '
+ 'This process may take a long time depending on the data '
+ 'size. (volume: %(volume_id)s, volume type: %(volume_type)s)',
+ 'suffix': WARNING_SUFFIX,
+ }
+ INCONSISTENCY_DEDUPLICATION_SYSTEM_VOLUME = {
+ 'msg_id': 334,
+ 'loglevel': base_logging.WARNING,
+ 'msg': 'Retyping the volume will be performed using migration '
+ 'because inconsistency was found in the deduplication '
+ 'system data volume. This process may take a long time '
+ 'depending on the data size. '
+ '(volume: %(volume_id)s, volume type: %(volume_type)s)',
+ 'suffix': WARNING_SUFFIX,
+ }
HOST_GROUP_NUMBER_IS_MAXIMUM = {
'msg_id': 335,
'loglevel': base_logging.WARNING,
@@ -202,6 +225,20 @@ class HBSDMsg(enum.Enum):
'(port: %(port)s, WWN: %(wwn)s)',
'suffix': WARNING_SUFFIX,
}
+ REPLICATION_VOLUME_OPERATION_FAILED = {
+ 'msg_id': 337,
+ 'loglevel': base_logging.WARNING,
+ 'msg': 'Failed to %(operation)s the %(type)s in a replication pair. '
+ '(volume: %(volume_id)s, reason: %(reason)s)',
+ 'suffix': WARNING_SUFFIX,
+ }
+ SITE_INITIALIZATION_FAILED = {
+ 'msg_id': 338,
+ 'loglevel': base_logging.WARNING,
+ 'msg': 'Failed to initialize the driver for the %(site)s storage '
+ 'system.',
+ 'suffix': WARNING_SUFFIX,
+ }
INVALID_PORT = {
'msg_id': 339,
'loglevel': base_logging.WARNING,
@@ -301,6 +338,19 @@ class HBSDMsg(enum.Enum):
'msg': 'Failed to add the logical device.',
'suffix': ERROR_SUFFIX,
}
+ PAIR_TARGET_FAILED = {
+ 'msg_id': 638,
+ 'loglevel': base_logging.ERROR,
+ 'msg': 'Failed to add the pair target.',
+ 'suffix': ERROR_SUFFIX,
+ }
+ MAP_PAIR_TARGET_FAILED = {
+ 'msg_id': 639,
+ 'loglevel': base_logging.ERROR,
+ 'msg': 'Failed to map a logical device to any pair targets. '
+ '(LDEV: %(ldev)s)',
+ 'suffix': ERROR_SUFFIX,
+ }
POOL_NOT_FOUND = {
'msg_id': 640,
'loglevel': base_logging.ERROR,
@@ -391,11 +441,42 @@ class HBSDMsg(enum.Enum):
'This driver does not support unmanaging snapshots.',
'suffix': ERROR_SUFFIX,
}
+ INVALID_EXTRA_SPEC_KEY = {
+ 'msg_id': 723,
+ 'loglevel': base_logging.ERROR,
+ 'msg': 'Failed to create a volume. '
+ 'An invalid value is specified for the extra spec key '
+ '"%(key)s" of the volume type. (value: %(value)s)',
+ 'suffix': ERROR_SUFFIX,
+ }
VOLUME_COPY_FAILED = {
'msg_id': 725,
'loglevel': base_logging.ERROR,
- 'msg': 'Failed to copy a volume. (copy method: %(copy_method)s, '
- 'P-VOL: %(pvol)s, S-VOL: %(svol)s)',
+ 'msg': 'Failed to copy a volume. (P-VOL: %(pvol)s, S-VOL: %(svol)s)',
+ 'suffix': ERROR_SUFFIX
+ }
+ CONSISTENCY_NOT_GUARANTEE = {
+ 'msg_id': 726,
+ 'loglevel': base_logging.ERROR,
+ 'msg': 'A volume or snapshot cannot be deleted. '
+ 'The consistency of logical device for '
+ 'a volume or snapshot cannot be guaranteed. (LDEV: %(ldev)s)',
+ 'suffix': ERROR_SUFFIX
+ }
+ FAILED_CHANGE_VOLUME_TYPE = {
+ 'msg_id': 727,
+ 'loglevel': base_logging.ERROR,
+ 'msg': 'Failed to change a volume type. '
+ 'An invalid value is specified for the extra spec key '
+ '"%(key)s" of the volume type after change. '
+ '(value: %(value)s)',
+ 'suffix': ERROR_SUFFIX
+ }
+ NOT_COMPLETED_CHANGE_VOLUME_TYPE = {
+ 'msg_id': 728,
+ 'loglevel': base_logging.ERROR,
+ 'msg': 'The volume type change could not be completed. '
+ '(LDEV: %(ldev)s)',
'suffix': ERROR_SUFFIX
}
REST_SERVER_CONNECT_FAILED = {
@@ -482,6 +563,70 @@ class HBSDMsg(enum.Enum):
'resource of host group or wwn was found. (ports: %(ports)s)',
'suffix': ERROR_SUFFIX,
}
+ SITE_NOT_INITIALIZED = {
+ 'msg_id': 751,
+ 'loglevel': base_logging.ERROR,
+ 'msg': 'The driver is not initialized for the %(site)s storage '
+ 'system.',
+ 'suffix': ERROR_SUFFIX,
+ }
+ CREATE_REPLICATION_VOLUME_FAILED = {
+ 'msg_id': 752,
+ 'loglevel': base_logging.ERROR,
+ 'msg': 'Failed to create the %(type)s for a %(rep_type)s pair. '
+ '(volume: %(volume_id)s, volume type: %(volume_type)s, '
+ 'size: %(size)s)',
+ 'suffix': ERROR_SUFFIX,
+ }
+ DEDUPLICATION_IS_ENABLED = {
+ 'msg_id': 753,
+ 'loglevel': base_logging.ERROR,
+ 'msg': 'Failed to create a volume in a %(rep_type)s environment '
+ 'because deduplication is enabled for the volume type. '
+ '(volume: %(volume_id)s, volume type: %(volume_type)s, '
+ 'size: %(size)s)',
+ 'suffix': ERROR_SUFFIX,
+ }
+ CREATE_REPLICATION_PAIR_FAILED = {
+ 'msg_id': 754,
+ 'loglevel': base_logging.ERROR,
+ 'msg': 'Failed to create a %(rep_type)s pair or '
+ 'to mirror data in a %(rep_type)s pair. '
+ '(P-VOL: %(pvol)s, S-VOL: %(svol)s, copy group: '
+ '%(copy_group)s, pair status: %(status)s)',
+ 'suffix': ERROR_SUFFIX,
+ }
+ SPLIT_REPLICATION_PAIR_FAILED = {
+ 'msg_id': 755,
+ 'loglevel': base_logging.ERROR,
+ 'msg': 'Failed to split a %(rep_type)s pair. '
+ '(P-VOL: %(pvol)s, S-VOL: %(svol)s, '
+ 'copy group: %(copy_group)s, pair status: %(status)s)',
+ 'suffix': ERROR_SUFFIX,
+ }
+ PAIR_CHANGE_TIMEOUT = {
+ 'msg_id': 756,
+ 'loglevel': base_logging.ERROR,
+ 'msg': 'A timeout occurred before the status of '
+ 'the %(rep_type)s pair changes. '
+ '(P-VOL: %(pvol)s, S-VOL: %(svol)s, copy group: '
+ '%(copy_group)s, current status: %(current_status)s, '
+ 'expected status: %(expected_status)s, timeout: %(timeout)s '
+ 'seconds)',
+ 'suffix': ERROR_SUFFIX,
+ }
+ EXTEND_REPLICATION_VOLUME_ERROR = {
+ 'msg_id': 758,
+ 'loglevel': base_logging.ERROR,
+ 'msg': 'Failed to extend a volume. The LDEVs for the volume are in '
+ 'a %(rep_type)s pair and the volume is attached. '
+ '(volume: %(volume_id)s, '
+ 'LDEV: %(ldev)s, source size: %(source_size)s, destination '
+ 'size: %(destination_size)s, P-VOL: %(pvol)s, S-VOL: %(svol)s, '
+ 'P-VOL[numOfPorts]: %(pvol_num_of_ports)s, '
+ 'S-VOL[numOfPorts]: %(svol_num_of_ports)s)',
+ 'suffix': ERROR_SUFFIX,
+ }
MIGRATE_VOLUME_FAILED = {
'msg_id': 760,
'loglevel': base_logging.ERROR,
@@ -490,6 +635,21 @@ class HBSDMsg(enum.Enum):
'(P-VOL, S-VOL, copy method, status): %(pair_info)s)',
'suffix': ERROR_SUFFIX,
}
+ REPLICATION_PAIR_ERROR = {
+ 'msg_id': 766,
+ 'loglevel': base_logging.ERROR,
+ 'msg': 'Failed to %(operation)s. The LDEV for the volume is in '
+ 'a remote replication pair. (volume: %(volume)s, '
+ '%(snapshot_info)sLDEV: %(ldev)s)',
+ 'suffix': ERROR_SUFFIX,
+ }
+ LDEV_NUMBER_NOT_FOUND = {
+ 'msg_id': 770,
+ 'loglevel': base_logging.ERROR,
+ 'msg': 'Failed to %(operation)s. The LDEV number is not found in the '
+ 'Cinder object. (%(obj)s: %(obj_id)s)',
+ 'suffix': ERROR_SUFFIX,
+ }
def __init__(self, error_info):
"""Initialize Enum attributes."""
@@ -498,48 +658,36 @@ class HBSDMsg(enum.Enum):
self.msg = error_info['msg']
self.suffix = error_info['suffix']
- def output_log(self, **kwargs):
+ def output_log(self, storage_id, **kwargs):
"""Output the message to the log file and return the message."""
msg = self.msg % kwargs
- LOG.log(self.level, "MSGID%(msg_id)04d-%(msg_suffix)s: %(msg)s",
+ if storage_id:
+ LOG.log(
+ self.level,
+ "%(storage_id)s MSGID%(msg_id)04d-%(msg_suffix)s: %(msg)s",
+ {'storage_id': storage_id[-6:], 'msg_id': self.msg_id,
+ 'msg_suffix': self.suffix, 'msg': msg})
+ else:
+ LOG.log(
+ self.level, "MSGID%(msg_id)04d-%(msg_suffix)s: %(msg)s",
{'msg_id': self.msg_id, 'msg_suffix': self.suffix, 'msg': msg})
return msg
-def output_log(msg_enum, **kwargs):
+def output_log(msg_enum, storage_id=None, **kwargs):
"""Output the specified message to the log file and return the message."""
- return msg_enum.output_log(**kwargs)
+ return msg_enum.output_log(storage_id, **kwargs)
LOG = logging.getLogger(__name__)
MSG = HBSDMsg
-def get_ldev(obj):
- """Get the LDEV number from the given object and return it as integer."""
- if not obj:
- return None
- ldev = obj.get('provider_location')
- if not ldev or not ldev.isdigit():
- return None
- return int(ldev)
-
-
def timed_out(start_time, timeout):
"""Check if the specified time has passed."""
return timeutils.is_older_than(start_time, timeout)
-def check_opt_value(conf, names):
- """Check if the parameter names and values are valid."""
- for name in names:
- try:
- getattr(conf, name)
- except (cfg.NoSuchOptError, cfg.ConfigFileValueError):
- with excutils.save_and_reraise_exception():
- output_log(MSG.INVALID_PARAMETER, param=name)
-
-
def build_initiator_target_map(connector, target_wwns, lookup_service):
"""Return a dictionary mapping server-wwns and lists of storage-wwns."""
init_targ_map = {}
@@ -614,3 +762,52 @@ def get_exception_msg(exc):
exc, exception.CinderException) else exc.args[0]
else:
return ""
+
+
+def synchronized_on_copy_group():
+ def wrap(func):
+ @functools.wraps(func)
+ def inner(self, remote_client, copy_group_name, *args, **kwargs):
+ sync_key = '%s-%s' % (copy_group_name,
+ self.storage_id[-6:])
+
+ @cinder_utils.synchronized(sync_key, external=True)
+ def _inner():
+ return func(self, remote_client, copy_group_name,
+ *args, **kwargs)
+ return _inner()
+ return inner
+ return wrap
+
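
synchronized_on_copy_group derives a lock name from the copy group plus the last six characters of the storage ID and wraps the call in cinder.utils.synchronized(..., external=True), so split/resync/delete requests for the same copy group are serialized across processes. A hedged sketch of the same decorator shape, written as a plain decorator and using an in-process lock instead of Cinder's external file lock:

import functools
import threading
from collections import defaultdict

_LOCKS = defaultdict(threading.Lock)


def synchronized_on_copy_group(func):
    """Serialize calls that target the same (copy group, storage) pair."""
    @functools.wraps(func)
    def inner(self, remote_client, copy_group_name, *args, **kwargs):
        key = '%s-%s' % (copy_group_name, self.storage_id[-6:])
        with _LOCKS[key]:            # the driver uses an external file lock here
            return func(self, remote_client, copy_group_name, *args, **kwargs)
    return inner
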
+
+DICT = '_dict'
+CONF = '_conf'
+
+
+class Config(object):
+
+ def __init__(self, conf):
+ super().__setattr__(CONF, conf)
+ super().__setattr__(DICT, dict())
+ self._opts = {}
+
+ def __getitem__(self, name):
+ return (super().__getattribute__(DICT)[name]
+ if name in super().__getattribute__(DICT)
+ else super().__getattribute__(CONF).safe_get(name))
+
+ def __getattr__(self, name):
+ return (super().__getattribute__(DICT)[name]
+ if name in super().__getattribute__(DICT)
+ else getattr(super().__getattribute__(CONF), name))
+
+ def __setitem__(self, key, value):
+ super().__getattribute__(DICT)[key] = value
+
+ def __setattr__(self, key, value):
+ self.__setitem__(key, value)
+
+ def safe_get(self, name):
+ return (super().__getattribute__(DICT)[name]
+ if name in super().__getattribute__(DICT)
+ else super().__getattribute__(CONF).safe_get(name))
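
The new Config wrapper lets the HPE XP (and similar) variants overwrite individual Hitachi options, for example mapping hpexp_pools onto hitachi_pools, without touching the underlying oslo.config object: writes land in a private dict and reads fall back to the wrapped conf. A small generic version of that overlay idea, not the driver's class:

class ConfigOverlay:
    """Read-through overlay: local overrides first, wrapped config second."""

    def __init__(self, conf):
        object.__setattr__(self, '_conf', conf)
        object.__setattr__(self, '_overrides', {})

    def __getattr__(self, name):
        overrides = object.__getattribute__(self, '_overrides')
        if name in overrides:
            return overrides[name]
        return getattr(object.__getattribute__(self, '_conf'), name)

    def __setattr__(self, name, value):
        object.__getattribute__(self, '_overrides')[name] = value

    def safe_get(self, name):
        overrides = object.__getattribute__(self, '_overrides')
        if name in overrides:
            return overrides[name]
        return getattr(object.__getattribute__(self, '_conf'), name, None)


class _Base:                       # stand-in for the real oslo.config object
    hitachi_pools = []
    hpexp_pools = ['pool-A']


conf = ConfigOverlay(_Base())
conf.hitachi_pools = conf.hpexp_pools   # vendor option mapped onto the base one
print(conf.hitachi_pools, conf.safe_get('missing_option'))
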
diff --git a/cinder/volume/drivers/hpe/hpe_3par_common.py b/cinder/volume/drivers/hpe/hpe_3par_common.py
index ba4d4ea5c..971fac3e8 100644
--- a/cinder/volume/drivers/hpe/hpe_3par_common.py
+++ b/cinder/volume/drivers/hpe/hpe_3par_common.py
@@ -3092,7 +3092,7 @@ class HPE3PARCommon(object):
def _get_updated_comment(self, vol_name, **values):
vol = self.client.getVolume(vol_name)
- comment = json.loads(vol['comment']) if vol['comment'] else {}
+ comment = json.loads(vol['comment']) if vol.get('comment') else {}
comment.update(values)
def _update_comment(self, vol_name, **values):
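
The 3PAR change swaps vol['comment'] for vol.get('comment'), so a volume record with no comment key no longer raises KeyError before the emptiness check. The same defensive pattern in isolation:

import json


def parsed_comment(vol):
    """Return the volume comment as a dict, tolerating absent or empty values."""
    raw = vol.get('comment')          # .get(): missing key -> None, not KeyError
    return json.loads(raw) if raw else {}


print(parsed_comment({'comment': '{"display_name": "db1"}'}))  # {'display_name': 'db1'}
print(parsed_comment({}))                                      # {}
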
diff --git a/cinder/volume/drivers/hpe/xp/hpe_xp_rest.py b/cinder/volume/drivers/hpe/xp/hpe_xp_rest.py
index 15b82caad..6f42435d0 100644
--- a/cinder/volume/drivers/hpe/xp/hpe_xp_rest.py
+++ b/cinder/volume/drivers/hpe/xp/hpe_xp_rest.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2022, Hewlett Packard Enterprise, Ltd.
+# Copyright (C) 2022, 2023, Hewlett Packard Enterprise, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -28,8 +28,9 @@ COMMON_VOLUME_OPTS = [
default=None,
help='Product number of the storage system.'),
cfg.ListOpt(
- 'hpexp_pool',
+ 'hpexp_pools',
default=[],
+ deprecated_name='hpexp_pool',
help='Pool number[s] or pool name[s] of the THP pool.'),
cfg.StrOpt(
'hpexp_snap_pool',
@@ -212,7 +213,7 @@ class HPEXPRESTFC(hbsd_rest_fc.HBSDRESTFC):
"""Update configuration"""
# COMMON_VOLUME_OPTS
self.conf.hitachi_storage_id = self.conf.hpexp_storage_id
- self.conf.hitachi_pool = self.conf.hpexp_pool
+ self.conf.hitachi_pools = self.conf.hpexp_pools
self.conf.hitachi_snap_pool = self.conf.hpexp_snap_pool
self.conf.hitachi_ldev_range = self.conf.hpexp_ldev_range
self.conf.hitachi_target_ports = self.conf.hpexp_target_ports
@@ -283,7 +284,7 @@ class HPEXPRESTISCSI(hbsd_rest_iscsi.HBSDRESTISCSI):
"""Update configuration"""
# COMMON_VOLUME_OPTS
self.conf.hitachi_storage_id = self.conf.hpexp_storage_id
- self.conf.hitachi_pool = self.conf.hpexp_pool
+ self.conf.hitachi_pools = self.conf.hpexp_pools
self.conf.hitachi_snap_pool = self.conf.hpexp_snap_pool
self.conf.hitachi_ldev_range = self.conf.hpexp_ldev_range
self.conf.hitachi_target_ports = self.conf.hpexp_target_ports
diff --git a/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py b/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py
index 4598e504e..ede96c72e 100644
--- a/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py
+++ b/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py
@@ -2921,6 +2921,18 @@ class StorwizeHelpers(object):
return
self.ssh.rmsnapshot(params)
+ def get_volume_name_from_metadata(self, volume):
+ """Get Volume name from metadata if metadata exists"""
+ if volume.metadata:
+ svc_volume_name = volume.metadata.get("Volume Name", None)
+ if svc_volume_name:
+ LOG.info('Volume %(cinder_id)s in cinder API is linked to '
+ 'volume_name %(svc_volume_name)s in SVC',
+ {'cinder_id': volume.name,
+ 'svc_volume_name': svc_volume_name})
+ volume.name_id = svc_volume_name.split("-", 1)[1]
+ return volume
+
def get_partnership_info(self, system_name):
partnership = self.ssh.lspartnership(system_name)
return partnership[0] if len(partnership) > 0 else None
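
get_volume_name_from_metadata reads a 'Volume Name' entry from the volume metadata and, when present, points volume.name_id at the part after the first dash so later vdisk operations use the name that actually exists on the SVC backend. A standalone sketch of the remapping, with a simple object standing in for the Cinder volume:

class FakeVolume:
    """Minimal stand-in for a Cinder volume object."""

    def __init__(self, name_id, metadata):
        self.name_id = name_id
        self.metadata = metadata

    @property
    def name(self):
        return 'volume-%s' % self.name_id


def apply_backend_name(volume):
    """Point name_id at the backend name recorded in metadata, if any."""
    backend_name = (volume.metadata or {}).get('Volume Name')
    if backend_name:
        # 'volume-<uuid>' -> keep only the part after the first dash.
        volume.name_id = backend_name.split('-', 1)[1]
    return volume


vol = apply_backend_name(FakeVolume('1111', {'Volume Name': 'volume-2222'}))
print(vol.name)   # volume-2222
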
@@ -6375,7 +6387,8 @@ class StorwizeSVCCommonDriver(san.SanDriver,
elif volume_utils.is_group_a_type(group, "volume_group_enabled"):
self._helpers.check_codelevel_for_volumegroup(
self._state['code_level'])
- model_update = self._delete_volumegroup(group)
+ model_update, volumes_model_update = self._delete_volumegroup(
+ group, volumes)
else:
for volume in volumes:
@@ -6837,8 +6850,6 @@ class StorwizeSVCCommonDriver(san.SanDriver,
easy_tier = pool_data['easy_tier'] in ['on', 'auto']
total_capacity_gb = float(pool_data['capacity']) / units.Gi
free_capacity_gb = float(pool_data['free_capacity']) / units.Gi
- allocated_capacity_gb = (float(pool_data['used_capacity']) /
- units.Gi)
provisioned_capacity_gb = float(
pool_data['virtual_capacity']) / units.Gi
@@ -6865,7 +6876,6 @@ class StorwizeSVCCommonDriver(san.SanDriver,
'pool_name': pool_data['name'],
'total_capacity_gb': total_capacity_gb,
'free_capacity_gb': free_capacity_gb,
- 'allocated_capacity_gb': allocated_capacity_gb,
'provisioned_capacity_gb': provisioned_capacity_gb,
'compression_support': compression_enabled,
'reserved_percentage':
@@ -6898,7 +6908,6 @@ class StorwizeSVCCommonDriver(san.SanDriver,
pool_stats = {'pool_name': pool,
'total_capacity_gb': 0,
'free_capacity_gb': 0,
- 'allocated_capacity_gb': 0,
'provisioned_capacity_gb': 0,
'thin_provisioning_support': True,
'thick_provisioning_support': False,
@@ -7102,9 +7111,41 @@ class StorwizeSVCCommonDriver(san.SanDriver,
{'vol': volume.name, 'exception': err})
return model_update, added_vols, removed_vols
- def _delete_volumegroup(self, group):
+ def _delete_volumegroup(self, group, volumes):
model_update = {'status': fields.GroupStatus.DELETED}
volumegroup_name = self._get_volumegroup_name(group)
+ volumes_model_update = []
+ force_unmap = True
+ if self._state['code_level'] < (7, 7, 0, 0):
+ force_unmap = False
+ for volume in volumes:
+ volume = self._helpers.get_volume_name_from_metadata(volume)
+ if self._active_backend_id:
+ msg = (_('Error: deleting non-replicated volume in '
+ 'failover mode is not allowed.'))
+ LOG.error(msg)
+ volume.name_id = None
+ raise exception.VolumeDriverException(message=msg)
+ else:
+ try:
+ self._helpers.delete_vdisk(
+ volume.name,
+ force_unmap=force_unmap,
+ force_delete=True)
+ volumes_model_update.append({'id': volume.id,
+ 'status': 'deleted'})
+ except exception.VolumeBackendAPIException as err:
+ model_update['status'] = (
+ fields.GroupStatus.ERROR_DELETING)
+ LOG.error("Failed to delete the volume %(vol)s of CG. "
+ "Exception: %(exception)s.",
+ {'vol': volume.name, 'exception': err})
+ volume.name_id = None
+ volumes_model_update.append(
+ {'id': volume.id,
+ 'status': fields.GroupStatus.ERROR_DELETING})
+ volume.name_id = None
+
try:
self._helpers.delete_volumegroup(volumegroup_name)
except exception.VolumeBackendAPIException as err:
@@ -7113,7 +7154,7 @@ class StorwizeSVCCommonDriver(san.SanDriver,
{'volumegroup': volumegroup_name, 'exception': err})
model_update = {'status': fields.GroupStatus.ERROR_DELETING}
- return model_update
+ return model_update, volumes_model_update
def _update_volumegroup(self, context, group, add_volumes,
remove_volumes):
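As a rough sketch of the two-tuple that _delete_volumegroup() now returns, with invented volume IDs and string values assumed to match fields.GroupStatus constants:

    # Hypothetical result of a partially failed volume group deletion
    model_update = {'status': 'error_deleting'}
    volumes_model_update = [
        {'id': '11111111-2222-3333-4444-555555555555', 'status': 'deleted'},
        {'id': '66666666-7777-8888-9999-000000000000',
         'status': 'error_deleting'},  # fields.GroupStatus.ERROR_DELETING
    ]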
diff --git a/cinder/volume/drivers/lvm.py b/cinder/volume/drivers/lvm.py
index 2e89cd289..b86ff4a0a 100644
--- a/cinder/volume/drivers/lvm.py
+++ b/cinder/volume/drivers/lvm.py
@@ -25,7 +25,6 @@ from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import importutils
from oslo_utils import units
-import six
from cinder.brick.local_dev import lvm
from cinder import exception
@@ -67,7 +66,11 @@ volume_opts = [
cfg.BoolOpt('lvm_suppress_fd_warnings',
default=False,
help='Suppress leaked file descriptor warnings in LVM '
- 'commands.')
+ 'commands.'),
+ cfg.BoolOpt('lvm_share_target',
+ default=False,
+ help='Whether to share the same target for all LUNs or not '
+                     '(currently only supported by nvmet).'),
]
CONF = cfg.CONF
@@ -108,7 +111,18 @@ class LVMVolumeDriver(driver.VolumeDriver):
target_driver,
configuration=self.configuration,
executor=self._execute)
- self.protocol = self.target_driver.protocol
+ self.protocol = (self.target_driver.storage_protocol or
+ self.target_driver.protocol)
+ if (self.configuration.lvm_share_target
+ and not self.target_driver.SHARED_TARGET_SUPPORT):
+ raise exception.InvalidConfigurationValue(
+ f"{target_driver} doesn't support shared targets")
+
+ if (self.configuration.target_secondary_ip_addresses
+ and not self.target_driver.SECONDARY_IP_SUPPORT):
+ raise exception.InvalidConfigurationValue(
+ f"{target_driver} doesn't support secondary addresses")
+
self._sparse_copy_volume = False
@classmethod
@@ -120,7 +134,7 @@ class LVMVolumeDriver(driver.VolumeDriver):
'target_ip_address', 'target_helper', 'target_protocol',
'volume_clear', 'volume_clear_size', 'reserved_percentage',
'max_over_subscription_ratio', 'volume_dd_blocksize',
- 'target_prefix', 'volumes_dir', 'iscsi_secondary_ip_addresses',
+ 'target_prefix', 'volumes_dir', 'target_secondary_ip_addresses',
'target_port',
'iscsi_write_cache', 'iscsi_target_flags', # TGT
'iet_conf', 'iscsi_iotype', # IET
@@ -285,8 +299,7 @@ class LVMVolumeDriver(driver.VolumeDriver):
backend_state='up'
))
data["pools"].append(single_pool)
- data["shared_targets"] = False
-
+ data["shared_targets"] = self.configuration.lvm_share_target
# Check availability of sparse volume copy.
data['sparse_copy_volume'] = self._sparse_copy_volume
@@ -363,7 +376,7 @@ class LVMVolumeDriver(driver.VolumeDriver):
except processutils.ProcessExecutionError as exc:
exception_message = (_("Failed to create thin pool, "
"error message was: %s")
- % six.text_type(exc.stderr))
+ % str(exc.stderr))
raise exception.VolumeBackendAPIException(
data=exception_message)
@@ -802,7 +815,7 @@ class LVMVolumeDriver(driver.VolumeDriver):
with excutils.save_and_reraise_exception():
LOG.error("Volume migration failed due to "
"exception: %(reason)s.",
- {'reason': six.text_type(e)}, resource=volume)
+ {'reason': str(e)}, resource=volume)
dest_vg_ref.delete(volume)
self._delete_volume(volume)
return (True, None)
@@ -858,11 +871,15 @@ class LVMVolumeDriver(driver.VolumeDriver):
# one attachment left for the host specified by the connector to
# remove, otherwise the ACL will be removed prematurely while other
# attachments on the same host are still accessing the volume.
+ def same_connector(attach):
+ return (attach.connector
+ and self.target_driver.are_same_connector(attach.connector,
+ connector))
+
attachments = volume.volume_attachment
- if volume.multiattach:
- if sum(1 for a in attachments if a.connector and
- a.connector['initiator'] == connector['initiator']) > 1:
- return True
+ if (volume.multiattach
+ and sum(1 for a in filter(same_connector, attachments)) > 1):
+ return True
self.target_driver.terminate_connection(volume, connector, **kwargs)
return len(attachments) > 1
diff --git a/cinder/volume/drivers/nec/v/nec_v_rest.py b/cinder/volume/drivers/nec/v/nec_v_rest.py
index ebe5942c7..27560dd3a 100644
--- a/cinder/volume/drivers/nec/v/nec_v_rest.py
+++ b/cinder/volume/drivers/nec/v/nec_v_rest.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2021 NEC corporation
+# Copyright (C) 2021, 2023, NEC corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -28,8 +28,9 @@ COMMON_VOLUME_OPTS = [
default=None,
help='Product number of the storage system.'),
cfg.ListOpt(
- 'nec_v_pool',
+ 'nec_v_pools',
default=[],
+ deprecated_name='nec_v_pool',
help='Pool number[s] or pool name[s] of the DP pool.'),
cfg.StrOpt(
'nec_v_snap_pool',
@@ -198,7 +199,7 @@ CONF.register_opts(FC_VOLUME_OPTS, group=configuration.SHARED_CONF_GROUP)
def update_conf(conf):
# COMMON_VOLUME_OPTS
conf.hitachi_storage_id = conf.nec_v_storage_id
- conf.hitachi_pool = conf.nec_v_pool
+ conf.hitachi_pools = conf.nec_v_pools
conf.hitachi_snap_pool = conf.nec_v_snap_pool
conf.hitachi_ldev_range = conf.nec_v_ldev_range
conf.hitachi_target_ports = conf.nec_v_target_ports
diff --git a/cinder/volume/drivers/netapp/dataontap/nfs_base.py b/cinder/volume/drivers/netapp/dataontap/nfs_base.py
index e0116efe1..d5fec6f0a 100644
--- a/cinder/volume/drivers/netapp/dataontap/nfs_base.py
+++ b/cinder/volume/drivers/netapp/dataontap/nfs_base.py
@@ -25,7 +25,6 @@ import copy
import math
import os
import re
-import threading
import time
from oslo_concurrency import processutils
@@ -116,6 +115,13 @@ class NetAppNfsDriver(driver.ManageableVD,
self._handle_ems_logging,
loopingcalls.ONE_HOUR)
+        # Add the task that periodically cleans up old, expired entries
+        # from the internal image cache.
+ self.loopingcalls.add_task(
+ self._clean_image_cache,
+ self.configuration.netapp_nfs_image_cache_cleanup_interval
+ )
+
def _delete_snapshots_marked_for_deletion(self):
snapshots = self.zapi_client.get_snapshots_marked_for_deletion()
for snapshot in snapshots:
@@ -547,49 +553,32 @@ class NetAppNfsDriver(driver.ManageableVD,
os.utime(src_path, None)
_do_clone()
- @utils.synchronized('clean_cache')
- def _spawn_clean_cache_job(self):
- """Spawns a clean task if not running."""
- if getattr(self, 'cleaning', None):
- LOG.debug('Image cache cleaning in progress. Returning... ')
- return
- else:
- # Set cleaning to True
- self.cleaning = True
- t = threading.Timer(0, self._clean_image_cache)
- t.start()
-
def _clean_image_cache(self):
"""Clean the image cache files in cache of space crunch."""
- try:
- LOG.debug('Image cache cleaning in progress.')
- thres_size_perc_start = (
- self.configuration.thres_avl_size_perc_start)
- thres_size_perc_stop = self.configuration.thres_avl_size_perc_stop
- for share in getattr(self, '_mounted_shares', []):
- try:
- total_size, total_avl = self._get_capacity_info(share)
- avl_percent = int((float(total_avl) / total_size) * 100)
- if avl_percent <= thres_size_perc_start:
- LOG.info('Cleaning cache for share %s.', share)
- eligible_files = self._find_old_cache_files(share)
- threshold_size = int(
- (thres_size_perc_stop * total_size) / 100)
- bytes_to_free = int(threshold_size - total_avl)
- LOG.debug('Files to be queued for deletion %s',
- eligible_files)
- self._delete_files_till_bytes_free(
- eligible_files, share, bytes_to_free)
- else:
- continue
- except Exception as e:
- LOG.warning('Exception during cache cleaning'
- ' %(share)s. Message - %(ex)s',
- {'share': share, 'ex': e})
+ LOG.debug('Image cache cleaning in progress.')
+ thres_size_perc_start = (
+ self.configuration.thres_avl_size_perc_start)
+ thres_size_perc_stop = self.configuration.thres_avl_size_perc_stop
+ for share in self._mounted_shares:
+ try:
+ total_size, total_avl = self._get_capacity_info(share)
+ avl_percent = int((float(total_avl) / total_size) * 100)
+ if avl_percent <= thres_size_perc_start:
+ LOG.info('Cleaning cache for share %s.', share)
+ eligible_files = self._find_old_cache_files(share)
+ threshold_size = int(
+ (thres_size_perc_stop * total_size) / 100)
+ bytes_to_free = int(threshold_size - total_avl)
+ LOG.debug('Files to be queued for deletion %s',
+ eligible_files)
+ self._delete_files_till_bytes_free(
+ eligible_files, share, bytes_to_free)
+ else:
continue
- finally:
- LOG.debug('Image cache cleaning done.')
- self.cleaning = False
+ except Exception as e:
+ LOG.warning('Exception during cache cleaning'
+ ' %(share)s. Message - %(ex)s',
+ {'share': share, 'ex': e})
def _shortlist_del_eligible_files(self, share, old_files):
"""Prepares list of eligible files to be deleted from cache."""
diff --git a/cinder/volume/drivers/netapp/dataontap/nfs_cmode.py b/cinder/volume/drivers/netapp/dataontap/nfs_cmode.py
index 1d53c1a2c..7b546cb93 100644
--- a/cinder/volume/drivers/netapp/dataontap/nfs_cmode.py
+++ b/cinder/volume/drivers/netapp/dataontap/nfs_cmode.py
@@ -337,7 +337,6 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver,
# Used for service state report
data['replication_enabled'] = self.replication_enabled
- self._spawn_clean_cache_job()
self._stats = data
def _get_pool_stats(self, filter_function=None, goodness_function=None):
diff --git a/cinder/volume/drivers/netapp/options.py b/cinder/volume/drivers/netapp/options.py
index 07dbdaf75..9a97df54c 100644
--- a/cinder/volume/drivers/netapp/options.py
+++ b/cinder/volume/drivers/netapp/options.py
@@ -122,6 +122,11 @@ netapp_cluster_opts = [
'provisioning of block storage volumes should occur.')), ]
netapp_img_cache_opts = [
+ cfg.IntOpt('netapp_nfs_image_cache_cleanup_interval',
+ default=600,
+ min=60,
+ help=('Sets time in seconds between NFS image cache '
+ 'cleanup tasks.')),
cfg.IntOpt('thres_avl_size_perc_start',
default=20,
help=('If the percentage of available space for an NFS share '
diff --git a/cinder/volume/drivers/pure.py b/cinder/volume/drivers/pure.py
index 8699be15e..bf38d88f9 100644
--- a/cinder/volume/drivers/pure.py
+++ b/cinder/volume/drivers/pure.py
@@ -2787,7 +2787,7 @@ class PureISCSIDriver(PureBaseVolumeDriver, san.SanISCSIDriver):
the underlying storage connectivity with the FlashArray.
"""
- VERSION = "16.0.iscsi"
+ VERSION = "17.0.iscsi"
def __init__(self, *args, **kwargs):
execute = kwargs.pop("execute", utils.execute)
@@ -3011,7 +3011,7 @@ class PureFCDriver(PureBaseVolumeDriver, driver.FibreChannelDriver):
supports the Cinder Fibre Channel Zone Manager.
"""
- VERSION = "16.0.fc"
+ VERSION = "17.0.fc"
def __init__(self, *args, **kwargs):
execute = kwargs.pop("execute", utils.execute)
@@ -3208,7 +3208,7 @@ class PureNVMEDriver(PureBaseVolumeDriver, driver.BaseVD):
FlashArray.
"""
- VERSION = "16.0.nvme"
+ VERSION = "17.0.nvme"
def __init__(self, *args, **kwargs):
execute = kwargs.pop("execute", utils.execute)
diff --git a/cinder/volume/drivers/rbd.py b/cinder/volume/drivers/rbd.py
index 07cc72174..6cc86c2c5 100644
--- a/cinder/volume/drivers/rbd.py
+++ b/cinder/volume/drivers/rbd.py
@@ -722,12 +722,15 @@ class RBDDriver(driver.CloneableImageVD, driver.MigrateVD,
total_capacity: float
free_capacity: float
+
+ # In Nautilus bytes_used was renamed to stored
+ bytes_used = pool_stats.get('stored', pool_stats['bytes_used'])
quota_outbuf = encodeutils.safe_decode(quota_outbuf)
bytes_quota = json.loads(quota_outbuf)['quota_max_bytes']
# With quota the total is the quota limit and free is quota - used
if bytes_quota:
total_capacity = bytes_quota
- free_capacity = max(min(total_capacity - pool_stats['bytes_used'],
+ free_capacity = max(min(total_capacity - bytes_used,
pool_stats['max_avail']),
0)
# Without quota free is pools max available and total is global size
@@ -737,7 +740,7 @@ class RBDDriver(driver.CloneableImageVD, driver.MigrateVD,
# If we want dynamic total capacity (default behavior)
if self.configuration.safe_get('report_dynamic_total_capacity'):
- total_capacity = free_capacity + pool_stats['bytes_used']
+ total_capacity = free_capacity + bytes_used
free_capacity = round((float(free_capacity) / units.Gi), 2)
total_capacity = round((float(total_capacity) / units.Gi), 2)
diff --git a/cinder/volume/drivers/remotefs.py b/cinder/volume/drivers/remotefs.py
index d026d40b6..11a2fb97d 100644
--- a/cinder/volume/drivers/remotefs.py
+++ b/cinder/volume/drivers/remotefs.py
@@ -887,8 +887,8 @@ class RemoteFSSnapDriverBase(RemoteFSDriver):
})
else:
backing_file_template = \
- "(%(basedir)s/[0-9a-f]+/)?%" \
- "(volname)s(.(tmp-snap-)?[0-9a-f-]+)?%(valid_ext)s$" % {
+ "(%(basedir)s/[0-9a-f]+/)?" \
+ "%(volname)s(.(tmp-snap-)?[0-9a-f-]+)?%(valid_ext)s$" % {
'basedir': basedir,
'volname': volume_name,
'valid_ext': valid_ext,
diff --git a/cinder/volume/drivers/synology/synology_common.py b/cinder/volume/drivers/synology/synology_common.py
index dc2d1b87b..a571be7dc 100644
--- a/cinder/volume/drivers/synology/synology_common.py
+++ b/cinder/volume/drivers/synology/synology_common.py
@@ -993,7 +993,7 @@ class SynoCommon(object):
def get_provider_location(self, iqn, trg_id):
portals = ['%(ip)s:%(port)d' % {'ip': self.get_ip(),
'port': self.target_port}]
- sec_ips = self.config.safe_get('iscsi_secondary_ip_addresses')
+ sec_ips = self.config.safe_get('target_secondary_ip_addresses')
for ip in sec_ips:
portals.append('%(ip)s:%(port)d' %
{'ip': ip,
@@ -1288,7 +1288,7 @@ class SynoCommon(object):
'access_mode': 'rw',
'discard': False
}
- ips = self.config.safe_get('iscsi_secondary_ip_addresses')
+ ips = self.config.safe_get('target_secondary_ip_addresses')
if ips:
target_portals = [iscsi_properties['target_portal']]
for ip in ips:
diff --git a/cinder/volume/drivers/synology/synology_iscsi.py b/cinder/volume/drivers/synology/synology_iscsi.py
index 6d9ee86c5..8b2044e6b 100644
--- a/cinder/volume/drivers/synology/synology_iscsi.py
+++ b/cinder/volume/drivers/synology/synology_iscsi.py
@@ -49,7 +49,7 @@ class SynoISCSIDriver(driver.ISCSIDriver):
additional_opts = cls._get_oslo_driver_opts(
'target_ip_address', 'target_protocol', 'target_port',
'driver_use_ssl', 'use_chap_auth', 'chap_username',
- 'chap_password', 'iscsi_secondary_ip_addresses', 'target_prefix',
+ 'chap_password', 'target_secondary_ip_addresses', 'target_prefix',
'reserved_percentage', 'max_over_subscription_ratio')
return common.cinder_opts + additional_opts
diff --git a/cinder/volume/drivers/windows/iscsi.py b/cinder/volume/drivers/windows/iscsi.py
index 616356127..9ca2b3608 100644
--- a/cinder/volume/drivers/windows/iscsi.py
+++ b/cinder/volume/drivers/windows/iscsi.py
@@ -93,7 +93,7 @@ class WindowsISCSIDriver(driver.ISCSIDriver):
iscsi_port = self.configuration.target_port
iscsi_ips = ([self.configuration.target_ip_address] +
- self.configuration.iscsi_secondary_ip_addresses)
+ self.configuration.target_secondary_ip_addresses)
requested_portals = {':'.join([iscsi_ip, str(iscsi_port)])
for iscsi_ip in iscsi_ips}
diff --git a/cinder/volume/drivers/yadro/tatlin_client.py b/cinder/volume/drivers/yadro/tatlin_client.py
index 2ec6742b6..c858e099a 100644
--- a/cinder/volume/drivers/yadro/tatlin_client.py
+++ b/cinder/volume/drivers/yadro/tatlin_client.py
@@ -366,7 +366,7 @@ class TatlinClientCommon:
result, status = self._access_api(tatlin_api.POOLS)
except TatlinAPIException as exp:
message = _('Unable to get pool id for %s due to %s' %
- pool_name, exp.message)
+ (pool_name, exp.message))
LOG.error(message)
raise exception.VolumeBackendAPIException(message=message)
diff --git a/cinder/volume/flows/api/create_volume.py b/cinder/volume/flows/api/create_volume.py
index c96841f7c..4dbf38a8e 100644
--- a/cinder/volume/flows/api/create_volume.py
+++ b/cinder/volume/flows/api/create_volume.py
@@ -444,8 +444,7 @@ class ExtractVolumeRequestTask(flow_utils.CinderTask):
cgsnapshot,
group,
group_snapshot,
- backup: Optional[dict],
- multiattach: bool = False) -> dict[str, Any]:
+ backup: Optional[dict]) -> dict[str, Any]:
utils.check_exclusive_options(snapshot=snapshot,
imageRef=image_id,
@@ -493,11 +492,7 @@ class ExtractVolumeRequestTask(flow_utils.CinderTask):
volume_type = objects.VolumeType.get_by_name_or_id(
context, volume_type_id)
extra_specs = volume_type.get('extra_specs', {})
- # NOTE(tommylikehu): Although the parameter `multiattach` from
- # create volume API is deprecated now, we still need to consider
- # it when multiattach is not enabled in volume type.
- multiattach = (extra_specs.get(
- 'multiattach', '') == '<is> True' or multiattach)
+ multiattach = (extra_specs.get('multiattach', '') == '<is> True')
if multiattach and encryption_key_id:
msg = _('Multiattach cannot be used with encrypted volumes.')
raise exception.InvalidVolume(reason=msg)
@@ -914,8 +909,7 @@ def get_flow(db_api, image_service_api, availability_zones, create_what,
availability_zones,
rebind={'size': 'raw_size',
'availability_zone': 'raw_availability_zone',
- 'volume_type': 'raw_volume_type',
- 'multiattach': 'raw_multiattach'}))
+ 'volume_type': 'raw_volume_type'}))
api_flow.add(QuotaReserveTask(),
EntryCreateTask(),
QuotaCommitTask())
diff --git a/cinder/volume/flows/manager/create_volume.py b/cinder/volume/flows/manager/create_volume.py
index 0ae3cb59d..905258bf4 100644
--- a/cinder/volume/flows/manager/create_volume.py
+++ b/cinder/volume/flows/manager/create_volume.py
@@ -146,7 +146,7 @@ class OnFailureRescheduleTask(flow_utils.CinderTask):
volume.id)
def _reschedule(self, context, cause, request_spec, filter_properties,
- volume):
+ volume) -> None:
"""Actions that happen during the rescheduling attempt occur here."""
create_volume = self.scheduler_rpcapi.create_volume
@@ -170,8 +170,8 @@ class OnFailureRescheduleTask(flow_utils.CinderTask):
# Stringify to avoid circular ref problem in json serialization
retry_info['exc'] = traceback.format_exception(*cause.exc_info)
- return create_volume(context, volume, request_spec=request_spec,
- filter_properties=filter_properties)
+ create_volume(context, volume, request_spec=request_spec,
+ filter_properties=filter_properties)
def _post_reschedule(self, volume):
"""Actions that happen after the rescheduling attempt occur here."""
diff --git a/cinder/volume/targets/driver.py b/cinder/volume/targets/driver.py
index 4ec2070f6..644664b45 100644
--- a/cinder/volume/targets/driver.py
+++ b/cinder/volume/targets/driver.py
@@ -31,6 +31,9 @@ class Target(object, metaclass=abc.ABCMeta):
well as force implementation of required methods.
"""
+ storage_protocol = None
+ SHARED_TARGET_SUPPORT = False
+ SECONDARY_IP_SUPPORT = True
def __init__(self, *args, **kwargs):
# TODO(stephenfin): Drop this in favour of using 'db' directly
@@ -68,3 +71,25 @@ class Target(object, metaclass=abc.ABCMeta):
def terminate_connection(self, volume, connector, **kwargs):
"""Disallow connection from connector."""
pass
+
+ @staticmethod
+ def are_same_connector(A, B):
+ """Whether 2 connectors belong to the same host or not.
+
+ This is used for multi attach volumes, to be able to know when there
+ are no more attachments on a given host.
+
+        This is the generic implementation, but specific targets may override
+        it. For example iSCSI would check the "initiator" key instead, and
+ NVMe-oF would check the "nqn" key.
+ """
+ a_host = A.get('host')
+ return a_host and (a_host == B.get('host'))
+
+ def extend_target(self, volume):
+ """Reinitializes a target after the volume has been extended.
+
+ Most drivers don't need to do anything, but in other cases this may
+ cause IO disruption.
+ """
+ pass
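A brief usage sketch of the new comparison hook; the connector dictionaries are hypothetical and only the keys relevant to the default host-based implementation are shown:

    # Hypothetical connectors as passed in by the attaching services
    conn_a = {'host': 'compute-1', 'initiator': 'iqn.1994-05.com.example:aaa'}
    conn_b = {'host': 'compute-1', 'initiator': 'iqn.1994-05.com.example:bbb'}
    conn_c = {'host': 'compute-2'}

    def are_same_connector(A, B):
        # Default Target behaviour shown above: compare on 'host'
        a_host = A.get('host')
        return a_host and (a_host == B.get('host'))

    are_same_connector(conn_a, conn_b)   # same host -> truthy
    are_same_connector(conn_a, conn_c)   # different host -> False

    # A multiattach-aware driver can then count attachments still left on
    # the connector's host before tearing the export down:
    attachments = [conn_a, conn_b]
    remaining = sum(1 for c in attachments if are_same_connector(c, conn_a))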
diff --git a/cinder/volume/targets/iscsi.py b/cinder/volume/targets/iscsi.py
index 1e89a06c3..1f4db6c7c 100644
--- a/cinder/volume/targets/iscsi.py
+++ b/cinder/volume/targets/iscsi.py
@@ -167,8 +167,8 @@ class ISCSITarget(driver.Target):
def _get_portals_config(self):
# Prepare portals configuration
- portals_ips = ([self.configuration.target_ip_address]
- + self.configuration.iscsi_secondary_ip_addresses or [])
+ portals_ips = ([self.configuration.target_ip_address] +
+ self.configuration.target_secondary_ip_addresses or [])
return {'portals_ips': portals_ips,
'portals_port': self.configuration.target_port}
@@ -201,7 +201,7 @@ class ISCSITarget(driver.Target):
data = {}
data['location'] = self._iscsi_location(
self.configuration.target_ip_address, tid, iscsi_name, lun,
- self.configuration.iscsi_secondary_ip_addresses)
+ self.configuration.target_secondary_ip_addresses)
LOG.debug('Set provider_location to: %s', data['location'])
data['auth'] = self._iscsi_authentication(
'CHAP', *chap_auth)
@@ -355,6 +355,11 @@ class ISCSITarget(driver.Target):
def _do_tgt_update(self, name, force=False):
pass
+ @staticmethod
+ def are_same_connector(A, B):
+ a_initiator = A.get('initiator')
+ return a_initiator and (a_initiator == B.get('initiator'))
+
class SanISCSITarget(ISCSITarget):
"""iSCSI target for san devices.
diff --git a/cinder/volume/targets/nvmeof.py b/cinder/volume/targets/nvmeof.py
index 0992de1c9..238edd00c 100644
--- a/cinder/volume/targets/nvmeof.py
+++ b/cinder/volume/targets/nvmeof.py
@@ -10,8 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import abc
-
from oslo_log import log as logging
from cinder.common import constants
@@ -42,11 +40,15 @@ class NVMeOF(driver.Target):
"""Reads NVMeOF configurations."""
super(NVMeOF, self).__init__(*args, **kwargs)
- self.target_ip = self.configuration.target_ip_address
+ self.target_ips = ([self.configuration.target_ip_address] +
+ self.configuration.target_secondary_ip_addresses)
self.target_port = self.configuration.target_port
self.nvmet_port_id = self.configuration.nvmet_port_id
self.nvmet_ns_id = self.configuration.nvmet_ns_id
self.nvmet_subsystem_name = self.configuration.target_prefix
+ # Compatibility with non lvm drivers
+ self.share_targets = getattr(self.configuration,
+ 'lvm_share_target', False)
target_protocol = self.configuration.target_protocol
if target_protocol in self.target_protocol_map:
self.nvme_transport_type = self.target_protocol_map[
@@ -56,12 +58,19 @@ class NVMeOF(driver.Target):
protocol=target_protocol
)
+ # Secondary ip addresses only work with new connection info
+ if (self.configuration.target_secondary_ip_addresses
+ and self.configuration.nvmeof_conn_info_version == 1):
+ raise exception.InvalidConfigurationValue(
+ 'Secondary addresses need to use NVMe-oF connection properties'
+ ' format version 2 or greater (nvmeof_conn_info_version).')
+
def initialize_connection(self, volume, connector):
"""Returns the connection info.
In NVMeOF driver, :driver_volume_type: is set to 'nvmeof',
:data: is the driver data that has the value of
- _get_connection_properties.
+ _get_connection_properties_from_vol.
Example return value:
@@ -81,40 +90,105 @@ class NVMeOF(driver.Target):
"""
return {
'driver_volume_type': self.protocol,
- 'data': self._get_connection_properties(volume)
+ 'data': self._get_connection_properties_from_vol(volume)
}
- def _get_connection_properties(self, volume):
+ def _get_connection_properties_from_vol(self, volume):
"""Gets NVMeOF connection configuration.
- :return: dictionary of the following keys:
- :target_portal: NVMe target IP address
- :target_port: NVMe target port
- :nqn: NQN of the NVMe target
- :transport_type: Network fabric being used for an
- NVMe-over-Fabrics network
- :ns_id: namespace id associated with the subsystem
- """
+ Returns the connection info based on the volume's provider_location and
+ the _get_nvme_uuid method for the volume.
+ For the specific data returned check the _get_connection_properties
+ method.
+
+ :return: dictionary with the connection properties using one of the 2
+ existing formats depending on the nvmeof_conn_info_version
+ configuration option.
+ """
location = volume['provider_location']
target_connection, nvme_transport_type, nqn, nvmet_ns_id = (
location.split(' '))
- target_portal, target_port = target_connection.split(':')
+ target_portals, target_port = target_connection.split(':')
+ target_portals = target_portals.split(',')
+
+ uuid = self._get_nvme_uuid(volume)
+ return self._get_connection_properties(nqn,
+ target_portals, target_port,
+ nvme_transport_type,
+ nvmet_ns_id, uuid)
+
+ def _get_connection_properties(self, nqn, portals, port, transport, ns_id,
+ uuid):
+ """Get connection properties dictionary.
+
+ For nvmeof_conn_info_version set to 1 (default) the old format will
+ be sent:
+ {
+ 'target_portal': NVMe target IP address
+ 'target_port': NVMe target port
+ 'nqn': NQN of the NVMe target
+ 'transport_type': Network fabric being used for an NVMe-oF network
+ One of: tcp, rdma
+ 'ns_id': namespace id associated with the subsystem
+ }
- return {
- 'target_portal': target_portal,
- 'target_port': target_port,
+
+ For nvmeof_conn_info_version set to 2 the new format will be sent:
+ {
+ 'target_nqn': NQN of the NVMe target
+      'vol_uuid': NVMe-oF UUID of the volume. May be different from the
+                  Cinder volume id and may be None if ns_id is provided.
+ 'portals': [(target_address, target_port, transport_type) ... ]
+ 'ns_id': namespace id associated with the subsystem, in case target
+ doesn't provide the volume_uuid.
+ }
+    Unlike the old format, the transport_type can be one of RoCEv2 and tcp.
+
+ :return: dictionary with the connection properties using one of the 2
+ existing formats depending on the nvmeof_conn_info_version
+ configuration option.
+ """
+ # NVMe-oF Connection Information Version 2
+ if self.configuration.nvmeof_conn_info_version == 2:
+ if transport == 'rdma':
+ transport = 'RoCEv2'
+
+ return {
+ 'target_nqn': nqn,
+ 'vol_uuid': uuid,
+ 'portals': [(portal, port, transport) for portal in portals],
+ 'ns_id': ns_id,
+ }
+
+ # NVMe-oF Connection Information Version 1
+ result = {
+ 'target_portal': portals[0],
+ 'target_port': port,
'nqn': nqn,
- 'transport_type': nvme_transport_type,
- 'ns_id': nvmet_ns_id
+ 'transport_type': transport,
+ 'ns_id': ns_id,
}
- def get_nvmeof_location(self, nqn, target_ip, target_port,
+ return result
+
+ def _get_nvme_uuid(self, volume):
+ """Return the NVMe uuid of a given volume.
+
+ Targets that want to support the nvmeof_conn_info_version=2 option need
+ to override this method and return the NVMe uuid of the given volume.
+ """
+ return None
+
+ def get_nvmeof_location(self, nqn, target_ips, target_port,
nvme_transport_type, nvmet_ns_id):
"""Serializes driver data into single line string."""
return "%(ip)s:%(port)s %(transport)s %(nqn)s %(ns_id)s" % (
- {'ip': target_ip,
+ {'ip': ','.join(target_ips),
'port': target_port,
'transport': nvme_transport_type,
'nqn': nqn,
@@ -123,13 +197,18 @@ class NVMeOF(driver.Target):
def terminate_connection(self, volume, connector, **kwargs):
pass
+ @staticmethod
+ def are_same_connector(A, B):
+ a_nqn = A.get('nqn')
+ return a_nqn and (a_nqn == B.get('nqn'))
+
def create_export(self, context, volume, volume_path):
"""Creates export data for a logical volume."""
return self.create_nvmeof_target(
volume['id'],
self.configuration.target_prefix,
- self.target_ip,
+ self.target_ips,
self.target_port,
self.nvme_transport_type,
self.nvmet_port_id,
@@ -150,18 +229,18 @@ class NVMeOF(driver.Target):
missing='initiator')
return True
- @abc.abstractmethod
def create_nvmeof_target(self,
volume_id,
subsystem_name,
- target_ip,
+ target_ips,
target_port,
transport_type,
nvmet_port_id,
ns_id,
volume_path):
+ """Targets that don't override create_export must implement this."""
pass
- @abc.abstractmethod
def delete_nvmeof_target(self, target_name):
+ """Targets that don't override remove_export must implement this."""
pass
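To make the two wire formats documented above concrete, here is a sketch of the dictionaries _get_connection_properties() returns for one hypothetical export (all values are invented):

    # Parsed from a provider_location such as
    # '10.0.0.1,10.0.0.2:4420 tcp nqn.nvme-subsystem-1-example 10'
    nqn = 'nqn.nvme-subsystem-1-example'
    portals = ['10.0.0.1', '10.0.0.2']
    port = '4420'
    transport = 'tcp'
    ns_id = '10'
    vol_uuid = 'f0e1d2c3-a4b5-6789-0abc-def012345678'

    # nvmeof_conn_info_version = 1 (default): single portal, old keys
    v1 = {
        'target_portal': portals[0],
        'target_port': port,
        'nqn': nqn,
        'transport_type': transport,
        'ns_id': ns_id,
    }

    # nvmeof_conn_info_version = 2: every portal, new keys
    v2 = {
        'target_nqn': nqn,
        'vol_uuid': vol_uuid,
        'portals': [(p, port, transport) for p in portals],
        'ns_id': ns_id,
    }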
diff --git a/cinder/volume/targets/nvmet.py b/cinder/volume/targets/nvmet.py
index f4e88769c..8ffcf99cd 100644
--- a/cinder/volume/targets/nvmet.py
+++ b/cinder/volume/targets/nvmet.py
@@ -10,6 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import os
+
from oslo_log import log as logging
from oslo_utils import uuidutils
@@ -31,129 +33,296 @@ class NVMETTargetDeleteError(exception.CinderException):
class NVMET(nvmeof.NVMeOF):
+ SHARED_TARGET_SUPPORT = True
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._nvmet_root = nvmet.Root()
+ # ####### Connection initiation methods ########
+
+ def initialize_connection(self, volume, connector):
+ """Create an export & map if shared."""
+        # Non-shared connections were the original implementation, where all the
+ # export & mapping was done on export and the connection info was
+ # stored in the volume, so let the original implementation handle it.
+ if not self.share_targets:
+ return super().initialize_connection(volume, connector)
+
+ # For the shared case the export only stores the path of the volume
+ volume_path = volume.provider_location
+ if not os.path.exists(volume_path):
+ raise exception.InvalidConfigurationValue(
+ 'Target driver configured with shared targets, but volume '
+ 'exported as non shared.')
+
+ nqn, ns_id = self._map_volume(volume, volume_path, connector)
+ uuid = self._get_nvme_uuid(volume)
+ return {
+ 'driver_volume_type': self.protocol,
+ 'data': self._get_connection_properties(nqn,
+ self.target_ips,
+ self.target_port,
+ self.nvme_transport_type,
+ ns_id, uuid),
+ }
+
+ def create_export(self, context, volume, volume_path):
+ """Create an export & map if not shared."""
+ # For shared targets everything gets done on initialize_connection
+ if self.share_targets:
+ location = volume_path
+ else:
+ nqn, ns_id = self._map_volume(volume, volume_path)
+ location = self.get_nvmeof_location(nqn,
+ self.target_ips,
+ self.target_port,
+ self.nvme_transport_type,
+ ns_id)
+
+ return {'location': location, 'auth': ''}
+
@utils.synchronized('nvmetcli', external=True)
- def create_nvmeof_target(self,
- volume_id,
- subsystem_name, # Ignoring this, using config
- target_ip,
- target_port,
- transport_type,
- nvmet_port_id,
- ns_id,
- volume_path):
+ def _map_volume(self, volume, volume_path, connector=None):
+ """Ensure a volume is exported and mapped in nvmet."""
# Create NVME subsystem for previously created LV
- nqn = self._get_target_nqn(volume_id)
+ nqn = self._get_target_nqn(volume.id, connector)
try:
- self._ensure_subsystem_exists(nqn, ns_id, volume_path)
- self._ensure_port_exports(nqn, target_ip, target_port,
- transport_type, nvmet_port_id)
+ uuid = self._get_nvme_uuid(volume)
+
+ ns_id = self._ensure_subsystem_exists(nqn, volume_path, uuid)
+
+ self._ensure_port_exports(nqn, self.target_ips, self.target_port,
+ self.nvme_transport_type,
+ self.nvmet_port_id)
except Exception:
LOG.error('Failed to add subsystem: %s', nqn)
raise NVMETTargetAddError(subsystem=nqn)
- LOG.info('Subsystem %s now exported on port %s', nqn, target_port)
- return {
- 'location': self.get_nvmeof_location(
- nqn,
- target_ip,
- target_port,
- transport_type,
- ns_id),
- 'auth': ''}
-
- def _ensure_subsystem_exists(self, nqn, nvmet_ns_id, volume_path):
+ LOG.info('Subsystem %s now exported on port %s', nqn, self.target_port)
+ return nqn, ns_id
+
+ def _ensure_subsystem_exists(self, nqn, volume_path, uuid):
+ """Ensure a subsystem and namespace exist in nvmet."""
# Assume if subsystem exists, it has the right configuration
try:
- nvmet.Subsystem(nqn)
+ subsystem = nvmet.Subsystem(nqn)
LOG.debug('Skip creating subsystem %s as it already exists.', nqn)
- return
+
+ ns_id = self._ensure_namespace_exists(subsystem, volume_path, uuid)
+ return ns_id
+
except nvmet.NotFound:
LOG.debug('Creating subsystem %s.', nqn)
+ ns_id = self.nvmet_ns_id
subsystem_section = {
"allowed_hosts": [],
"attr": {
"allow_any_host": "1"
},
- "namespaces": [
- {
- "device": {
- "nguid": str(uuidutils.generate_uuid()),
- "path": volume_path,
- },
- "enable": 1,
- "nsid": nvmet_ns_id
- }
- ],
+ "namespaces": [self._namespace_dict(uuid, volume_path, ns_id)],
"nqn": nqn}
nvmet.Subsystem.setup(subsystem_section) # privsep
LOG.debug('Added subsystem: %s', nqn)
+ return ns_id
- def _ensure_port_exports(self, nqn, addr, port, transport_type, port_id):
- # Assume if port exists, it has the right configuration
- try:
- port = nvmet.Port(port_id)
- LOG.debug('Skip creating port %s as it already exists.', port_id)
- except nvmet.NotFound:
- LOG.debug('Creating port %s.', port_id)
-
- # Port section
- port_section = {
- "addr": {
- "adrfam": "ipv4",
- "traddr": addr,
- "treq": "not specified",
- "trsvcid": port,
- "trtype": transport_type,
- },
- "portid": port_id,
- "referrals": [],
- "subsystems": [nqn]
- }
- nvmet.Port.setup(self._nvmet_root, port_section) # privsep
- LOG.debug('Added port: %s', port_id)
-
+ def _namespace_dict(self, uuid, volume_path, ns_id):
+ """Build the dict data for a new namespace in nvmet library format."""
+ if self.share_targets:
+ nguid = uuid
+ LOG.debug('Sharing subsystem, using nguid = uuid = %s', nguid)
else:
- if nqn in port.subsystems:
- LOG.debug('%s already exported on port %s', nqn, port_id)
+ nguid = str(uuidutils.generate_uuid())
+            LOG.debug('Not sharing subsystem, using random nguid = %s', nguid)
+ return {
+ "device": {
+ "nguid": nguid,
+ "uuid": uuid,
+ "path": volume_path,
+ },
+ "enable": 1,
+ "nsid": ns_id
+ }
+
+ def _ensure_namespace_exists(self, subsystem, volume_path, uuid):
+ """Ensure the namespace exists in nvmet."""
+ for ns in subsystem.namespaces:
+ if ns.get_attr('device', 'path') == volume_path:
+ return ns.nsid
+
+ ns_id = self._get_available_namespace_id(subsystem)
+ ns_data = self._namespace_dict(uuid, volume_path, ns_id)
+ nvmet.Namespace.setup(subsystem, ns_data)
+ return ns_id
+
+ def _get_available_namespace_id(self, subsystem):
+ """Get the next available ns_id.
+
+ Shared targets will have multiple namespaces under the same subsystem,
+ so we cannot use self.nvmet_ns_id for them all.
+
+ This method searches for an available namespace id in the provided
+ subsystem considering all ids below self.nvmet_ns_id as reserved.
+
+ We cannot let the nvmet library assign it automatically because it
+ starts assigning from 1.
+
+        For non-shared targets the method returns the configured nvmet_ns_id.
+ """
+ minimum = self.nvmet_ns_id
+
+ if not self.share_targets:
+ return minimum
+
+ used = [ns.nsid for ns in subsystem.namespaces if ns.nsid >= minimum]
+
+ if not used:
+ return minimum
+
+ higher = max(used)
+ # If there are no gaps return the next available id
+ if len(used) > higher - minimum:
+ if higher == nvmet.Namespace.MAX_NSID:
+ raise Exception('Reached max namespaces in subsystem')
+ return higher + 1
+
+ # Find an id in the gaps. Don't include higher, as we know it's used
+ available = set(range(minimum, higher)).difference(used)
+ return available.pop()
+
+ def _get_nvme_uuid(self, volume):
+ return volume.name_id
+
+ def _ensure_port_exports(self, nqn, addrs, port, transport_type, port_id):
+ for addr in addrs:
+ # Assume if port exists, it has the right configuration
+ try:
+ nvme_port = nvmet.Port(port_id)
+ LOG.debug('Skip creating port %s as it already exists.',
+ port_id)
+ except nvmet.NotFound:
+ LOG.debug('Creating port %s.', port_id)
+
+ # Port section
+ port_section = {
+ "addr": {
+ "adrfam": "ipv4",
+ "traddr": addr,
+ "treq": "not specified",
+ "trsvcid": port,
+ "trtype": transport_type,
+ },
+ "portid": port_id,
+ "referrals": [],
+ "subsystems": [nqn]
+ }
+ nvmet.Port.setup(self._nvmet_root, port_section) # privsep
+ LOG.debug('Added port: %s', port_id)
+
else:
- port.add_subsystem(nqn) # privsep
- LOG.debug('Exported %s on port %s', nqn, port_id)
+ if nqn in nvme_port.subsystems:
+ LOG.debug('%s already exported on port %s', nqn, port_id)
+ else:
+ nvme_port.add_subsystem(nqn) # privsep
+ LOG.debug('Exported %s on port %s', nqn, port_id)
+ port_id += 1
+
+ # ####### Connection termination methods ########
+
+ def terminate_connection(self, volume, connector, **kwargs):
+ """Remove the mapping for shared."""
+ # TODO: Add support for force and other parameters
+ if self.share_targets:
+ self._locked_unmap_volume(volume, connector)
+ LOG.info('Volume %s is no longer exported', volume.id)
+
+ def remove_export(self, context, volume):
+ """Remove the mapping for non shared."""
+ if not self.share_targets:
+ self._locked_unmap_volume(volume)
+ LOG.info('Volume %s is no longer exported', volume.id)
@utils.synchronized('nvmetcli', external=True)
- def delete_nvmeof_target(self, volume):
- subsystem_name = self._get_target_nqn(volume.id)
- LOG.debug('Removing subsystem: %s', subsystem_name)
+ def _locked_unmap_volume(self, volume, connector=None):
+ """Remove volume's ns from subsystem and subsystem if empty."""
+ if connector or not self.share_targets:
+ nqns = [self._get_target_nqn(volume.id, connector)]
+ else:
+ # We need to remove all existing maps (we are sharing)
+ LOG.debug('Removing EVERYTHING for volume %s', volume.id)
+ nqns = self._get_nqns_for_location(volume.provider_location)
- for port in self._nvmet_root.ports:
- if subsystem_name in port.subsystems:
- LOG.debug('Removing %s from port %s',
- subsystem_name, port.portid)
- port.remove_subsystem(subsystem_name)
+ exceptions = []
+ for nqn in nqns:
+ try:
+ self._unmap_volume(volume, nqn)
+ except Exception as exc:
+ exceptions.append(exc)
+ # TODO: Once we only support Python 3.11+ use ExceptionGroup to raise
+ # all the exceptions.
+ if exceptions:
+ raise exceptions[0]
+
+ def _unmap_volume(self, volume, nqn):
try:
- subsys = nvmet.Subsystem(subsystem_name)
- LOG.debug('Deleting %s', subsystem_name)
- subsys.delete() # privsep call
- LOG.info('Subsystem %s removed', subsystem_name)
+ subsystem = nvmet.Subsystem(nqn)
except nvmet.NotFound:
- LOG.info('Skipping remove_export. No NVMe subsystem for volume: '
- '%s', volume.id)
- except Exception:
- LOG.error('Failed to delete subsystem: %s', subsystem_name)
- raise NVMETTargetDeleteError(subsystem=subsystem_name)
- LOG.info('Volume %s is no longer exported', volume.id)
+ LOG.info('Skipping unmapping. No NVMe subsystem for volume: %s',
+ volume.id)
+ return
+
+ if self.share_targets:
+ volume_path = volume.provider_location
+ for ns in subsystem.namespaces:
+ if ns.get_attr('device', 'path') == volume_path:
+ LOG.debug('Deleting namespace %s', ns.nsid)
+ ns.delete() # privsep call
+ break
+
+ # If there are still namespaces we cannot remove the subsystem
+ if any(s for s in subsystem.namespaces):
+ return
+
+ for port in self._nvmet_root.ports:
+ if nqn in port.subsystems:
+ LOG.debug('Removing %s from port %s', nqn, port.portid)
+ port.remove_subsystem(nqn) # privsep call
+
+ LOG.debug('Deleting %s', nqn)
+ subsystem.delete() # privsep call
+ LOG.info('Subsystem %s removed', nqn)
+
+ # ####### General methods ########
+
+ def _get_target_nqn(self, volume_id, connector):
+ # For shared targets the subsystem is named after the host
+ if self.share_targets:
+ postfix = connector['host']
+ else:
+ postfix = volume_id
+ return f'nqn.{self.nvmet_subsystem_name}-{postfix}'
+
+ def _get_nqns_for_location(self, provider_location):
+        """Get all subsystem nqns for a given provider location.
- def _get_available_nvmf_subsystems(self):
- nvme_root = nvmet.Root()
- subsystems = nvme_root.dump()
- return subsystems
+        This also returns empty subsystems, since we cannot tell whether they
+        were created for the volume at this provider_location and then failed
+        partway through creation.
- def _get_target_nqn(self, volume_id):
- return "nqn.%s-%s" % (self.nvmet_subsystem_name, volume_id)
+ This method needs to be called within the nvmetcli locked section.
+ """
+ nqns = []
+ for subsys in self._nvmet_root.subsystems:
+            empty = True  # namespaces is an iterable, so we can't bool-check it
+ found = False
+ for ns in subsys.namespaces:
+ empty = False
+ if ns.get_attr('device', 'path') == provider_location:
+ found = True
+ break
+ if found or empty:
+ nqns.append(subsys.nqn)
+ return nqns
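The namespace id selection in _get_available_namespace_id() can be hard to follow from the diff alone; here is a standalone sketch of the same strategy using a plain list of already-used ids (the numbers are made up):

    minimum = 10            # the configured nvmet_ns_id; lower ids are reserved
    used = [10, 11, 13]     # nsids >= minimum already present in the subsystem

    higher = max(used)
    if len(used) > higher - minimum:
        # No gaps between minimum and higher: the next id up is free
        next_id = higher + 1
    else:
        # At least one gap exists below higher: reuse it (here it picks 12)
        next_id = set(range(minimum, higher)).difference(used).pop()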
diff --git a/cinder/volume/targets/spdknvmf.py b/cinder/volume/targets/spdknvmf.py
index a90b0b77d..e7b39fa89 100644
--- a/cinder/volume/targets/spdknvmf.py
+++ b/cinder/volume/targets/spdknvmf.py
@@ -51,6 +51,7 @@ LOG = logging.getLogger(__name__)
class SpdkNvmf(nvmeof.NVMeOF):
+ SECONDARY_IP_SUPPORT = False
def __init__(self, *args, **kwargs):
super(SpdkNvmf, self).__init__(*args, **kwargs)
@@ -131,7 +132,7 @@ class SpdkNvmf(nvmeof.NVMeOF):
def create_nvmeof_target(self,
volume_id,
subsystem_name,
- target_ip,
+ target_ips,
target_port,
transport_type,
nvmet_port_id,
@@ -158,7 +159,7 @@ class SpdkNvmf(nvmeof.NVMeOF):
listen_address = {
'trtype': transport_type,
- 'traddr': target_ip,
+ 'traddr': target_ips[0],
'trsvcid': str(target_port),
}
params = {
@@ -179,7 +180,7 @@ class SpdkNvmf(nvmeof.NVMeOF):
location = self.get_nvmeof_location(
nqn,
- target_ip,
+ target_ips,
target_port,
transport_type,
ns_id)
diff --git a/cinder/volume/volume_utils.py b/cinder/volume/volume_utils.py
index 611e12f7b..831b137b9 100644
--- a/cinder/volume/volume_utils.py
+++ b/cinder/volume/volume_utils.py
@@ -1634,3 +1634,8 @@ def log_unsupported_driver_warning(driver):
'version': driver.get_version()},
resource={'type': 'driver',
'id': driver.__class__.__name__})
+
+
+def is_all_zero(chunk: bytes) -> bool:
+ """Return true if the chunk of bytes is all zeroes."""
+ return chunk == bytes(len(chunk))
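A quick usage sketch of the new helper; bytes(n) produces n zero bytes, so a single equality comparison detects all-zero chunks:

    from cinder.volume import volume_utils

    volume_utils.is_all_zero(b'\x00' * 4096)       # True: 4 KiB of zeroes
    volume_utils.is_all_zero(b'\x00\x01\x00')      # False: one non-zero byte
    volume_utils.is_all_zero(b'')                  # True: empty chunk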
diff --git a/doc/source/admin/troubleshoot.rst b/doc/source/admin/troubleshoot.rst
index c597f3bf4..0446cb4ac 100644
--- a/doc/source/admin/troubleshoot.rst
+++ b/doc/source/admin/troubleshoot.rst
@@ -18,3 +18,4 @@ Storage installation.
ts-no-emulator-x86-64.rst
ts-non-existent-host.rst
ts-non-existent-vlun.rst
+ ts-db-cpu-spikes.rst
diff --git a/doc/source/admin/ts-db-cpu-spikes.rst b/doc/source/admin/ts-db-cpu-spikes.rst
new file mode 100644
index 000000000..ddafd73fb
--- /dev/null
+++ b/doc/source/admin/ts-db-cpu-spikes.rst
@@ -0,0 +1,37 @@
+=====================================
+Database CPU spikes during operations
+=====================================
+
+Query load upon the database can become a bottleneck that cascades across a
+deployment and ultimately degrades not only the Cinder service but also the
+whole OpenStack deployment.
+
+Depending on load, query patterns, periodic tasks, and similar factors,
+additional indexes may be needed to give the database the hints it needs to
+efficiently reduce the number of rows that must be examined in order to
+return a result set.
+
+Adding indexes
+--------------
+
+In older releases, before 2023.1 (Antelope), there were some tables that
+performed poorly in the presence of a large number of deleted resources
+(volumes, snapshots, backups, etc.), which resulted in high CPU loads on the DB
+servers not only when listing those resources, but also when doing some
+operations on them. This was resolved by adding appropriate indexes to them.
+
+The example below is specific to MariaDB/MySQL, but the syntax should be easy
+to adapt for operators using PostgreSQL. It shows the indexes that older
+releases can add to resolve these DB server CPU spikes in a way that does
+not conflict with the ones that Cinder introduced in 2023.1
+(Antelope).
+
+.. code-block:: sql
+
+ use cinder;
+ create index groups_deleted_project_id_idx on groups (deleted, project_id);
+ create index group_snapshots_deleted_project_id_idx on group_snapshots (deleted, project_id);
+ create index volumes_deleted_project_id_idx on volumes (deleted, project_id);
+ create index volumes_deleted_host_idx on volumes (deleted, host);
+ create index snapshots_deleted_project_id_idx on snapshots (deleted, project_id);
+ create index backups_deleted_project_id_idx on backups (deleted, project_id);
diff --git a/doc/source/cli/cinder-manage.rst b/doc/source/cli/cinder-manage.rst
index 8aa57df42..a0b39cd5c 100644
--- a/doc/source/cli/cinder-manage.rst
+++ b/doc/source/cli/cinder-manage.rst
@@ -59,7 +59,7 @@ allowing a finer control of when and what quotas are fixed.
**Checking if quotas and reservations are correct.**
-``cinder-manage quota check [-h] [--project-id PROJECT_ID] [--use-locks]``
+``cinder-manage quota check [-h] [--project-id PROJECT_ID]``
Accepted arguments are:
@@ -68,29 +68,19 @@ Accepted arguments are:
--project-id PROJECT_ID
The ID of the project where we want to sync the quotas
(defaults to all projects).
- --use-locks For precise results tables in the DB need to be
- locked.
This command checks quotas and reservations, for a specific project (passing
``--project-id``) or for all projects, to see if they are out of sync.
The check will also look for duplicated entries.
-By default it runs in the least accurate mode (where races have a higher
-chance of happening) to minimize the impact on running cinder services. This
-means that false errors are more likely to be reported due to race conditions
-when Cinder services are running.
-
-Accurate mode is also supported, but it will lock many tables (affecting all
-tenants) and is not recommended with services that are being used.
-
One way to use this action in combination with the sync action is to run the
check for all projects, take note of those that are out of sync, and the sync
them one by one at intervals to allow cinder to operate semi-normally.
**Fixing quotas and reservations**
-``cinder-manage quota sync [-h] [--project-id PROJECT_ID] [--no-locks]``
+``cinder-manage quota sync [-h] [--project-id PROJECT_ID]``
Accepted arguments are:
@@ -99,19 +89,14 @@ Accepted arguments are:
--project-id PROJECT_ID
The ID of the project where we want to sync the quotas
(defaults to all projects).
- --no-locks For less precise results, but also less intrusive.
This command refreshes existing quota usage and reservation count for a
specific project or for all projects.
The refresh will also remove duplicated entries.
-This operation is best executed when Cinder is not running, as it requires
-locking many tables (affecting all tenants) to make sure that then sync is
-accurate.
-
-If accuracy is not our top priority, or we know that a specific project is not
-in use, we can disable the locking.
+This operation is best executed when Cinder is not running, but it can
+also be run while Cinder services are running.
A different transaction is used for each project's quota sync, so an action
failure will only rollback the current project's changes.
diff --git a/doc/source/configuration/block-storage/drivers/dell-emc-unity-driver.rst b/doc/source/configuration/block-storage/drivers/dell-emc-unity-driver.rst
index e80a617b3..5761a3552 100644
--- a/doc/source/configuration/block-storage/drivers/dell-emc-unity-driver.rst
+++ b/doc/source/configuration/block-storage/drivers/dell-emc-unity-driver.rst
@@ -472,7 +472,7 @@ To enable volume replications, follow below steps:
The way could be different depending on the type of replications - sync or async.
Refer to `Unity Replication White Paper
-<https://www.emc.com/collateral/white-papers/h15088-dell-emc-unity-replication-technologies.pdf>`_
+<https://dl.dell.com/content/docu69886_dell-emc-unity-replication-technologies-a-detailed-review.pdf>`_
for more detail.
2. Add `replication_device` to storage backend settings in `cinder.conf`, then
@@ -529,7 +529,7 @@ To enable consistency group replications, follow below steps:
The way could be different depending on the type of replications - sync or async.
Refer to `Unity Replication White Paper
-<https://www.emc.com/collateral/white-papers/h15088-dell-emc-unity-replication-technologies.pdf>`_
+<https://dl.dell.com/content/docu69886_dell-emc-unity-replication-technologies-a-detailed-review.pdf>`_
for more detail.
2. Add `replication_device` to storage backend settings in `cinder.conf`, then
diff --git a/doc/source/configuration/block-storage/drivers/hitachi-vsp-driver.rst b/doc/source/configuration/block-storage/drivers/hitachi-vsp-driver.rst
index 78872ce8b..5ec0dde23 100644
--- a/doc/source/configuration/block-storage/drivers/hitachi-vsp-driver.rst
+++ b/doc/source/configuration/block-storage/drivers/hitachi-vsp-driver.rst
@@ -143,7 +143,7 @@ If you use Fibre Channel:
san_login = hitachiuser
san_password = password
hitachi_storage_id = 123456789012
- hitachi_pool = pool0
+ hitachi_pools = pool0
If you use iSCSI:
@@ -156,7 +156,7 @@ If you use iSCSI:
san_login = hitachiuser
san_password = password
hitachi_storage_id = 123456789012
- hitachi_pool = pool0
+ hitachi_pools = pool0
This table shows configuration options for Hitachi block storage driver.
@@ -182,5 +182,5 @@ Required options
- ``hitachi_storage_id``
Product number of the storage system.
-- ``hitachi_pool``
- Pool number or pool name of the DP pool.
+- ``hitachi_pools``
+ Pool number(s) or pool name(s) of the DP pool.
diff --git a/doc/source/configuration/block-storage/drivers/hpe-xp-driver.rst b/doc/source/configuration/block-storage/drivers/hpe-xp-driver.rst
index 34443f6b4..9aae63586 100644
--- a/doc/source/configuration/block-storage/drivers/hpe-xp-driver.rst
+++ b/doc/source/configuration/block-storage/drivers/hpe-xp-driver.rst
@@ -84,7 +84,7 @@ If you use Fibre Channel:
san_login = hpexpuser
san_password = password
hpexp_storage_id = 123456789012
- hpexp_pool = pool0
+ hpexp_pools = pool0
If you use iSCSI:
@@ -97,7 +97,7 @@ If you use iSCSI:
san_login = hpexpuser
san_password = password
hpexp_storage_id = 123456789012
- hpexp_pool = pool0
+ hpexp_pools = pool0
This table shows configuration options for HPE XP block storage driver.
@@ -121,6 +121,6 @@ Required options
- ``hpexp_storage_id``
Product number of the storage system.
-- ``hpexp_pool``
- Pool number or pool name of the THP pool.
+- ``hpexp_pools``
+ Pool number(s) or pool name(s) of the THP pool.
diff --git a/doc/source/configuration/block-storage/drivers/infinidat-volume-driver.rst b/doc/source/configuration/block-storage/drivers/infinidat-volume-driver.rst
index ee093a961..f6e99d25f 100644
--- a/doc/source/configuration/block-storage/drivers/infinidat-volume-driver.rst
+++ b/doc/source/configuration/block-storage/drivers/infinidat-volume-driver.rst
@@ -27,6 +27,7 @@ Supported operations
* List manageable volumes and snapshots.
* Attach a volume to multiple instances at once (multi-attach).
* Host and storage assisted volume migration.
+* Efficient non-disruptive volume backup.
External package installation
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -43,6 +44,11 @@ Setting up the storage array
Create a storage pool object on the InfiniBox array in advance.
The storage pool will contain volumes managed by OpenStack.
+Mixing OpenStack APIs and non-OpenStack methods is not supported
+when attaching the same hosts via the same protocol.
+For example, it is not possible to create boot-from-SAN volumes
+and OpenStack volumes for the same host with Fibre Channel.
+Instead, use a different protocol for one of the volumes.
Refer to the InfiniBox manuals for details on pool management.
Driver configuration
@@ -89,7 +95,7 @@ Configure the driver back-end section with the parameters below.
The driver requires an InfiniBox user with administrative privileges.
We recommend creating a dedicated OpenStack user account
- that holds an administrative user role.
+ that holds a pool admin user role.
Refer to the InfiniBox manuals for details on user account management.
Configure the user credentials by adding the following parameters:
@@ -183,6 +189,20 @@ Configure the driver back-end section with the parameters below.
Volume compression is available on InfiniBox 3.0 onward.
+After modifying the ``cinder.conf`` file, restart the ``cinder-volume``
+service.
+
+Create a new volume type for each distinct ``volume_backend_name`` value
+that you added in the ``cinder.conf`` file. The example below assumes that
+the same ``volume_backend_name=infinidat-pool-a`` option was specified in
+all of the entries, and specifies that the volume type ``infinidat`` can be
+used to allocate volumes from any of them. Example of creating a volume type:
+
+ .. code-block:: console
+
+ $ openstack volume type create infinidat
+ $ openstack volume type set --property volume_backend_name=infinidat-pool-a infinidat
+
Configuration example
~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/source/configuration/block-storage/drivers/linstor-driver.rst b/doc/source/configuration/block-storage/drivers/linstor-driver.rst
index c628c78a5..84453d801 100644
--- a/doc/source/configuration/block-storage/drivers/linstor-driver.rst
+++ b/doc/source/configuration/block-storage/drivers/linstor-driver.rst
@@ -4,6 +4,16 @@ LINSTOR driver
The LINSTOR driver allows Cinder to use DRBD/LINSTOR instances.
+External package installation
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The driver requires the ``python-linstor`` package for communication with the
+LINSTOR Controller. Install the package from PYPI using the following command:
+
+.. code-block:: console
+
+ $ python -m pip install python-linstor
+
Configuration
~~~~~~~~~~~~~
diff --git a/doc/source/configuration/block-storage/drivers/nec-storage-v-series-driver.rst b/doc/source/configuration/block-storage/drivers/nec-storage-v-series-driver.rst
index 72c514e3c..37623c010 100644
--- a/doc/source/configuration/block-storage/drivers/nec-storage-v-series-driver.rst
+++ b/doc/source/configuration/block-storage/drivers/nec-storage-v-series-driver.rst
@@ -96,7 +96,7 @@ If you use Fibre Channel:
san_login = userid
san_password = password
nec_v_storage_id = 123456789012
- nec_v_pool = pool0
+ nec_v_pools = pool0
If you use iSCSI:
@@ -110,7 +110,7 @@ If you use iSCSI:
san_login = userid
san_password = password
nec_v_storage_id = 123456789012
- nec_v_pool = pool0
+ nec_v_pools = pool0
This table shows configuration options for NEC V series storage driver.
@@ -134,6 +134,6 @@ Required options
- ``nec_v_storage_id``
Product number of the storage system.
-- ``nec_v_pool``
- Pool number or pool name of the DP pool.
+- ``nec_v_pools``
+ Pool number(s) or pool name(s) of the DP pool.
diff --git a/doc/source/configuration/block-storage/service-token.rst b/doc/source/configuration/block-storage/service-token.rst
index 108f4b0db..1c48552f2 100644
--- a/doc/source/configuration/block-storage/service-token.rst
+++ b/doc/source/configuration/block-storage/service-token.rst
@@ -53,15 +53,20 @@ following:
3. Also in that section, fill in the appropriate configuration for
your service user (``username``, ``project_name``, etc.)
-.. note::
- There is no configuration required for a service to *receive*
- service tokens. This is automatically handled by the keystone
- middleware used by each service (beginning with the Pike release).
+4. If Cinder is going to receive service tokens from other services,
+   it needs to have two options configured in the
+ ``[keystone_authtoken]`` section of the configuration file:
+
+ ``service_token_roles``
+ The value is a list of roles; the service user passing the service
+ token must have at least one of these roles or the token will be
+ rejected. The default value is ``service``.
- (The previous statement is true for the default configuration. It
- is possible for someone to change some settings so that service
- tokens will be ignored. See the :ref:`service-token-troubleshooting`
- section below.)
+ ``service_token_roles_required``
+ This is a boolean; the default value is ``False``. It governs whether
+ the keystone middleware used by the receiving service will pay any
+ attention to the ``service_token_roles`` setting. It should be set
+ to ``True``.
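+
+   A minimal sketch of how this could look in the ``[keystone_authtoken]``
+   section of the Cinder configuration file (other required
+   ``keystone_authtoken`` settings are omitted here):
+
+   .. code-block:: ini
+
+      [keystone_authtoken]
+      # ... existing auth settings (auth_url, username, password, etc.) ...
+      service_token_roles = service
+      service_token_roles_required = True
+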
.. _service-token-troubleshooting:
@@ -85,34 +90,14 @@ Identity Service (Keystone).
requires Keystone validation (for example, the Swift backend) and the
user token has expired.
-2. Each receiving service, by default, is set up to accept service tokens.
- There are two options to be aware of, however, that can affect whether or
- not a receiving service (for example, Glance) will actually accept service
- tokens. These appear in the ``[keystone_authtoken]`` section of the
- **receiving service** configuration file (for example,
- ``/etc/glance/glance-api.conf``).
-
- ``service_token_roles``
- The value is a list of roles; the service user passing the service
- token must have at least one of these roles or the token will be
- rejected. (But see the next option.) The default value is
- ``service``.
-
- ``service_token_roles_required``
- This is a boolean; the default value is ``false``. It governs whether
- the keystone middleware used by the receiving service will pay any
- attention to the ``service_token_roles`` setting. (Eventually the
- default is supposed to become True, but it's still False as of Stein.)
-
-3. There are several things to pay attention to in Keystone:
+2. There are several things to pay attention to in Keystone:
- * If you've decided to turn on ``service_token_roles_required`` for any of
- the receiving services, then you must make sure that any service user who
- will be contacting that receiving service (and for whom you want to
- enable "service token" usage) has one of the roles specified in the
- receiving services's ``service_token_roles`` setting. (This is a matter
- of creating and assigning roles using the Identity Service API, it's
- not a configuration file issue.)
+ * When ``service_token_roles_required`` is enabled you must make sure that
+ any service user who will be contacting that receiving service (and for
+ whom you want to enable "service token" usage) has one of the roles
+ specified in the receiving service's ``service_token_roles`` setting.
+ (This is a matter of creating and assigning roles using the Identity
+ Service API, it's not a configuration file issue.)
* Even with a service token, an expired user token cannot be used
indefinitely. There's a Keystone configuration setting that controls
@@ -136,4 +121,4 @@ To summarize, you need to be aware of:
* Each source service: must be configured to be able to create and send
service tokens (default is OFF)
* Each receiving service: has to be configured to accept service tokens
- (default is ON)
+ (default is ON) and require role verification (default is OFF)
diff --git a/doc/source/contributor/releasecycle.rst b/doc/source/contributor/releasecycle.rst
index bc25f6e55..e4c838801 100644
--- a/doc/source/contributor/releasecycle.rst
+++ b/doc/source/contributor/releasecycle.rst
@@ -84,6 +84,10 @@ Between PTG and Milestone-1
.. _zuul.d/project-templates.yaml: https://opendev.org/openstack/openstack-zuul-jobs/src/branch/master/zuul.d/project-templates.yaml
.. _openstack/openstack-zuul-jobs repo: https://opendev.org/openstack/openstack-zuul-jobs
+ * Check the ``setup.cfg`` file in each cinder deliverable to make
+ sure that the claimed supported Python versions line up with
+ the cycle's supported Python versions.
+
#. Focus on spec reviews to get them approved and updated early in
the cycle to allow enough time for implementation.
diff --git a/doc/source/drivers-all-about.rst b/doc/source/drivers-all-about.rst
index 06b92adbc..3bc762418 100644
--- a/doc/source/drivers-all-about.rst
+++ b/doc/source/drivers-all-about.rst
@@ -57,6 +57,33 @@ can impact the drivers in two ways:
CI system will detect this and alert the driver maintainer that there
is a problem.
+New Driver CI Requirements
+--------------------------
+
+When adding a new driver, the following requirements are made of the driver
+and its associated 3rd Party CI system:
+
+* CI_WIKI_NAME correct in driver properties
+
+* CI wiki page exists under https://wiki.openstack.org/wiki/ThirdPartySystems
+
+* Email ping to contact in wiki page receives a pong
+
+* Recheck trigger functioning correctly
+
+* CI is responding on the new driver patch
+
+* CI is responding on other cinder patches
+
+* CI is responding on os-brick patches
+
+* CI runs all cinder-tempest-plugin tests
+
+* CI result is accessible
+
+Failure to meet any one of these requirements will preclude a new driver from
+being accepted into the Cinder project.
+
Driver Compliance
-----------------
diff --git a/doc/source/install/cinder-storage-install-ubuntu.rst b/doc/source/install/cinder-storage-install-ubuntu.rst
index 2f2a3bba5..a4d497349 100644
--- a/doc/source/install/cinder-storage-install-ubuntu.rst
+++ b/doc/source/install/cinder-storage-install-ubuntu.rst
@@ -249,6 +249,20 @@ Install and configure components
.. end
+#. Create the ``/etc/tgt/conf.d/cinder.conf`` file
+ with the following data:
+
+ .. note::
+
+ Perform this step only when using tgt target.
+
+ .. code-block:: shell
+
+ include /var/lib/cinder/volumes/*
+
+ .. end
+
+
Finalize installation
---------------------
diff --git a/driver-requirements.txt b/driver-requirements.txt
index f501e9fcd..2f05bc94e 100644
--- a/driver-requirements.txt
+++ b/driver-requirements.txt
@@ -49,3 +49,6 @@ dfs_sdk>=1.2.25 # Apache-2.0
# DataCore SANsymphony
websocket-client>=1.3.2 # LGPLv2+
+
+# LINSTOR
+python-linstor>=1.7.0 # LGPLv3
diff --git a/releasenotes/notes/backup-sparse-f396b35bfe17332e.yaml b/releasenotes/notes/backup-sparse-f396b35bfe17332e.yaml
new file mode 100644
index 000000000..a9731cb62
--- /dev/null
+++ b/releasenotes/notes/backup-sparse-f396b35bfe17332e.yaml
@@ -0,0 +1,7 @@
+---
+fixes:
+ - |
+ `Bug #2007615 <https://bugs.launchpad.net/cinder/+bug/2007615>`_:
+ the restore operation of the Cinder backup service now restores into
+ sparse volumes, if possible, so operators no longer need more space
+ than was previously used when restoring a volume after a disaster.
diff --git a/releasenotes/notes/bug-1981420-dell-powermax-fix-for-force-flag-9320910dfbf998d2.yaml b/releasenotes/notes/bug-1981420-dell-powermax-fix-for-force-flag-9320910dfbf998d2.yaml
new file mode 100644
index 000000000..aa626ac5c
--- /dev/null
+++ b/releasenotes/notes/bug-1981420-dell-powermax-fix-for-force-flag-9320910dfbf998d2.yaml
@@ -0,0 +1,8 @@
+---
+fixes:
+ - |
+ `Dell PowerMax Driver Bug #1981420 <https://bugs.launchpad.net/cinder/+bug/1981420>`_:
+ Fixed an issue when creating a synchronous volume that was caused by
+ incorrect handling of the force flag. This is corrected by checking the
+ volume type extra specs for the value of the "force_vol_edit" parameter
+ along with the "force" parameter.
diff --git a/releasenotes/notes/bug-2008017-netapp-fix-native-threads-04d8f58f4c29b03d.yaml b/releasenotes/notes/bug-2008017-netapp-fix-native-threads-04d8f58f4c29b03d.yaml
new file mode 100644
index 000000000..c6b05987d
--- /dev/null
+++ b/releasenotes/notes/bug-2008017-netapp-fix-native-threads-04d8f58f4c29b03d.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - |
+ `Bug #2008017 <https://bugs.launchpad.net/cinder/+bug/2008017>`_: Fixed
+ the NetApp NFS driver to never spawn a native thread, avoiding thread
+ starvation and other related issues.
diff --git a/releasenotes/notes/bug-2008931-hpe-keyerror-on-migration-71d31e6c0a8ab0d9.yaml b/releasenotes/notes/bug-2008931-hpe-keyerror-on-migration-71d31e6c0a8ab0d9.yaml
new file mode 100644
index 000000000..1e5f09f86
--- /dev/null
+++ b/releasenotes/notes/bug-2008931-hpe-keyerror-on-migration-71d31e6c0a8ab0d9.yaml
@@ -0,0 +1,7 @@
+---
+fixes:
+ - |
+ HPE 3PAR driver `bug #2008931
+ <https://bugs.launchpad.net/cinder/+bug/2008931>`_: Fixed an
+ issue with the migrate volume operation when the `comment`
+ attribute is missing from the volume.
diff --git a/releasenotes/notes/db-resource-indexes-8010c9a881277503.yaml b/releasenotes/notes/db-resource-indexes-8010c9a881277503.yaml
new file mode 100644
index 000000000..4d062c258
--- /dev/null
+++ b/releasenotes/notes/db-resource-indexes-8010c9a881277503.yaml
@@ -0,0 +1,24 @@
+---
+upgrade:
+ - |
+ The ``cinder-manage db sync`` command for this version of cinder will add
+ additional database indexes. Depending on database size and complexity,
+ creating each of these indexes will take time to complete. On
+ MySQL or MariaDB, these indexes will only be created if an index does not
+ already exist with the same name:
+
+ * ``groups_deleted_project_id_idx``
+ * ``group_snapshots_deleted_project_id_idx``
+ * ``volumes_deleted_project_id_idx``
+ * ``volumes_deleted_host_idx``
+ * ``snapshots_deleted_project_id_idx``
+ * ``backups_deleted_project_id_idx``
+
+ An example of the SQL commands to generate these indexes can be found
+ in the `specific troubleshooting guide
+ <https://docs.openstack.org/cinder/latest/admin/ts-db-cpu-spikes.html>`_.
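+
+ For reference, a hedged sketch of what such index-creation SQL might look
+ like on MySQL/MariaDB (the exact column order is an assumption here;
+ consult the guide linked above for the authoritative statements):
+
+ .. code-block:: console
+
+    mysql> CREATE INDEX volumes_deleted_project_id_idx ON volumes (deleted, project_id);
+    mysql> CREATE INDEX volumes_deleted_host_idx ON volumes (deleted, host);
+    mysql> CREATE INDEX backups_deleted_project_id_idx ON backups (deleted, project_id);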
+fixes:
+ - |
+ `Bug #1952443 <https://bugs.launchpad.net/cinder/+bug/1952443>`_: Improve
+ performance for creating volume from image, listing volumes, snapshots,
+ backups, groups, and group_snapshots.
diff --git a/releasenotes/notes/dell-powerflex-bugfix-1998136-self-signed-certificates-62e3cb444ab7ff2b.yaml b/releasenotes/notes/dell-powerflex-bugfix-1998136-self-signed-certificates-62e3cb444ab7ff2b.yaml
new file mode 100644
index 000000000..550390a89
--- /dev/null
+++ b/releasenotes/notes/dell-powerflex-bugfix-1998136-self-signed-certificates-62e3cb444ab7ff2b.yaml
@@ -0,0 +1,10 @@
+---
+fixes:
+ - |
+ Dell PowerFlex driver `bug #1998136
+ <https://bugs.launchpad.net/cinder/+bug/1998136>`_:
+ When using self-signed certificates, the option
+ sent to os-brick via the connection_properties was
+ not correctly handled. This has now been fixed by
+ adding the 'verify_certificate' and 'certificate_path'
+ to the connection properties when initializing the connection.
diff --git a/releasenotes/notes/handle-external-events-in-extend-6ae53b822baf0004.yaml b/releasenotes/notes/handle-external-events-in-extend-6ae53b822baf0004.yaml
index d7f2c5489..13d74d74a 100644
--- a/releasenotes/notes/handle-external-events-in-extend-6ae53b822baf0004.yaml
+++ b/releasenotes/notes/handle-external-events-in-extend-6ae53b822baf0004.yaml
@@ -4,6 +4,6 @@ fixes:
`bug #2000724
<https://bugs.launchpad.net/cinder/+bug/2000724>`_:
Handled the case when glance is calling online extend
- and externals events were being sent to nova.
+ and external events were being sent to nova.
Now Cinder will only send external events when the volume,
to be extended, is attached to a nova instance.
diff --git a/releasenotes/notes/hitachi-vsp-add-gad-volume-514edf8ebeb2e983.yaml b/releasenotes/notes/hitachi-vsp-add-gad-volume-514edf8ebeb2e983.yaml
new file mode 100644
index 000000000..7cd672129
--- /dev/null
+++ b/releasenotes/notes/hitachi-vsp-add-gad-volume-514edf8ebeb2e983.yaml
@@ -0,0 +1,11 @@
+---
+features:
+ - |
+ Hitachi driver: Support Global-Active Device (GAD) volume.
+ GAD is a Hitachi storage function that uses volume replication
+ to provide a high-availability environment for hosts across storage
+ systems and sites. New properties are added to the configuration:
+ setting ``hbsd:topology`` to ``active_active_mirror_volumex``
+ specifies a GAD volume, and the ``hitachi_mirror_xxx`` parameters
+ specify the secondary storage for GAD volumes.
+
diff --git a/releasenotes/notes/hitachi-vsp-add-hostgroup-name-format-option-4c8e4a5ddd69b9bd.yaml b/releasenotes/notes/hitachi-vsp-add-hostgroup-name-format-option-4c8e4a5ddd69b9bd.yaml
new file mode 100644
index 000000000..e96aa8b28
--- /dev/null
+++ b/releasenotes/notes/hitachi-vsp-add-hostgroup-name-format-option-4c8e4a5ddd69b9bd.yaml
@@ -0,0 +1,59 @@
+---
+features:
+ - |
+ Hitachi driver: Add a config option ``hitachi_group_name_format`` for
+ hostgroup name format.
+
+ When using this option, users can specify the name format of
+ host groups or iSCSI targets.
+ Rules of the format:
+
+ * Usable characters are alphanumerics, ".", "@", "_", ":", "-",
+ "{" and "}". "{" and "}" can be used only in variables.
+ * The specified value must start with ``HBSD-``.
+ * You can use the following variables:
+
+ ``{wwn}``
+ `FC driver only.` This is replaced with the smallest
+ WWPN of the WWPNs of the connecting node.
+ ``{ip}``
+ `iSCSI driver only.` This is replaced with the IP address of
+ the connecting node.
+ ``{host}``
+ This is replaced with the host name of the connecting node.
+
+ * You can use each variable in the specified value no more than once.
+ * The specified value must include the following variables:
+
+ * FC driver: ``{wwn}``
+ * iSCSI driver: ``{ip}``
+
+ * The maximum length of a specified value is as follows:
+
+ * FC driver: 64
+ * iSCSI driver: 32
+
+ * In the length calculation, use the following values as the length of
+ each variable:
+
+ * ``{wwn}``: 16
+ * ``{ip}``: 15
+ * ``{host}``: 1
+
+ * If the specified value includes ``{host}``, the following rules apply:
+
+ * Characters in the host name that are not permitted for this parameter
+ are replaced with ``_``.
+ * If the length of the name after variable replacement exceeds
+ the maximum length of host group (iSCSI target) names, the
+ host name is truncated so that the length of the host groups or
+ iSCSI targets do not exceed the maximum length.
+
+ If you specify this parameter, it is recommended that you specify ``True``
+ for the ``hitachi_group_create`` parameter to collect necessary
+ information automatically.
+
+ Examples:
+
+ * FC driver: ``HBSD-{host}-{wwn}``
+ * iSCSI driver: ``HBSD-{host}-{ip}``
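+
+ As an illustration only (the backend section name is a placeholder), the FC
+ example above could be set in ``cinder.conf`` as:
+
+ .. code-block:: ini
+
+    [hitachi-vsp-fc]
+    hitachi_group_create = True
+    hitachi_group_name_format = HBSD-{host}-{wwn}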
diff --git a/releasenotes/notes/hitachi-vsp-add-multi-pool-4c4589b93399e641.yaml b/releasenotes/notes/hitachi-vsp-add-multi-pool-4c4589b93399e641.yaml
index 817e9a567..0223d7a5b 100644
--- a/releasenotes/notes/hitachi-vsp-add-multi-pool-4c4589b93399e641.yaml
+++ b/releasenotes/notes/hitachi-vsp-add-multi-pool-4c4589b93399e641.yaml
@@ -1,5 +1,12 @@
---
features:
-- |
- Supported multi-pool for Hitachi driver and OEM storage driver.
-
+ - |
+ Supported multi-pools for Hitachi driver and OEM storage drivers.
+upgrade:
+ - |
+ Hitachi driver and OEM storage drivers: Changed option names
+ ``hitachi_pool`` to ``hitachi_pools``,
+ ``hpexp_pool`` to ``hpexp_pools`` and
+ ``nec_v_pool`` to ``nec_v_pools``.
+ The options ``hitachi_pool``, ``hpexp_pool`` and ``nec_v_pool``
+ are deprecated. \ No newline at end of file
diff --git a/releasenotes/notes/hitachi-vsp-fix-keyerr-when-backend-down-a5a35b15dc8f1132.yaml b/releasenotes/notes/hitachi-vsp-fix-keyerr-when-backend-down-a5a35b15dc8f1132.yaml
new file mode 100644
index 000000000..f996280d9
--- /dev/null
+++ b/releasenotes/notes/hitachi-vsp-fix-keyerr-when-backend-down-a5a35b15dc8f1132.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - |
+ Hitachi, NEC V, HPE XP drivers `bug #2004140
+ <https://bugs.launchpad.net/cinder/+bug/2004140>`_: Fixed
+ ``KeyError`` when a backend is down. \ No newline at end of file
diff --git a/releasenotes/notes/hitachi-vsp-support-dedup-comp-4e27d95b34681f66.yaml b/releasenotes/notes/hitachi-vsp-support-dedup-comp-4e27d95b34681f66.yaml
new file mode 100644
index 000000000..da8392b62
--- /dev/null
+++ b/releasenotes/notes/hitachi-vsp-support-dedup-comp-4e27d95b34681f66.yaml
@@ -0,0 +1,7 @@
+---
+features:
+ - |
+ Hitachi driver: Support data deduplication and compression, using the
+ storage assist feature. To use it, enable deduplication and compression
+ on the DP-pool via the Configuration Manager REST API, and set the extra
+ spec ``hbsd:capacity_saving`` to ``deduplication_compression``.
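+
+ A hedged example of enabling it on an existing volume type (the type name
+ ``hitachi-dedup`` is a placeholder):
+
+ .. code-block:: console
+
+    $ openstack volume type set --property hbsd:capacity_saving=deduplication_compression hitachi-dedup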
diff --git a/releasenotes/notes/ibm-svf-delete-volume-flag-support-for-deletevolumegroup-4224db1ca798a3bf.yaml b/releasenotes/notes/ibm-svf-delete-volume-flag-support-for-deletevolumegroup-4224db1ca798a3bf.yaml
new file mode 100644
index 000000000..ca6eb86f8
--- /dev/null
+++ b/releasenotes/notes/ibm-svf-delete-volume-flag-support-for-deletevolumegroup-4224db1ca798a3bf.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ IBM Spectrum Virtualize Family driver: Added `--delete-volumes` flag
+ support for the delete volume group operation. With this flag, the volumes
+ in a volume group can optionally be deleted when the group is deleted.
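+
+ A hedged example using the cinder CLI (the group name is a placeholder):
+
+ .. code-block:: console
+
+    $ cinder --os-volume-api-version 3.13 group-delete --delete-volumes mygroup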
diff --git a/releasenotes/notes/lvm-nvmet-new-conn_props-25320e34d6ca6ac7.yaml b/releasenotes/notes/lvm-nvmet-new-conn_props-25320e34d6ca6ac7.yaml
new file mode 100644
index 000000000..e70049347
--- /dev/null
+++ b/releasenotes/notes/lvm-nvmet-new-conn_props-25320e34d6ca6ac7.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ LVM nvmet target: Added support for new nvmeof connection properties
+ format (version 2). Controlled with ``nvmeof_conn_info_version``
+ configuration option.
diff --git a/releasenotes/notes/nvmeof-premature-terminate-conn-63e3cc1fd1832874.yaml b/releasenotes/notes/nvmeof-premature-terminate-conn-63e3cc1fd1832874.yaml
new file mode 100644
index 000000000..0cb120fc1
--- /dev/null
+++ b/releasenotes/notes/nvmeof-premature-terminate-conn-63e3cc1fd1832874.yaml
@@ -0,0 +1,7 @@
+---
+fixes:
+ - |
+ nvmeof target `bug #1966513
+ <https://bugs.launchpad.net/cinder/+bug/1966513>`_: Fixed
+ LVM failing on terminate_connection if the connecting host doesn't have an
+ iSCSI initiator name set up, for example when LVM is using the nvmet target.
diff --git a/releasenotes/notes/nvmet-multipath-d35f55286f263e72.yaml b/releasenotes/notes/nvmet-multipath-d35f55286f263e72.yaml
new file mode 100644
index 000000000..7e4f0db5a
--- /dev/null
+++ b/releasenotes/notes/nvmet-multipath-d35f55286f263e72.yaml
@@ -0,0 +1,16 @@
+---
+features:
+ - |
+ nvmet target driver: Added support to serve volumes on multiple addresses
+ using the ``target_secondary_ip_addresses`` configuration option. This
+ allows os-brick to iterate through them in search of one connection that
+ works, and once os-brick supports NVMe-oF multipathing it will be
+ automatically supported.
+
+ This requires that the ``nvmeof_conn_info_version`` configuration option
+ also be set to ``2``.
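+
+ A minimal sketch of an LVM backend section in ``cinder.conf`` (the section
+ name and addresses are placeholders, and other required LVM options are
+ omitted):
+
+ .. code-block:: ini
+
+    [lvm-nvmet]
+    target_helper = nvmet
+    nvmeof_conn_info_version = 2
+    target_ip_address = 192.168.1.10
+    target_secondary_ip_addresses = 192.168.2.10,192.168.3.10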
+deprecations:
+ - |
+ Configuration option ``iscsi_secondary_ip_addresses`` is deprecated in
+ favor of ``target_secondary_ip_addresses`` to follow the same naming
+ convention as ``target_ip_address``.
diff --git a/releasenotes/notes/nvmet-shared-targets-20ed7279ef29f002.yaml b/releasenotes/notes/nvmet-shared-targets-20ed7279ef29f002.yaml
new file mode 100644
index 000000000..2357c4fdb
--- /dev/null
+++ b/releasenotes/notes/nvmet-shared-targets-20ed7279ef29f002.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ nvmet target driver: Added support for shared subsystems/targets using the
+ ``lvm_share_target`` configuration option. Defaults to non-shared, i.e.,
+ each volume has its own subsystem/target.
diff --git a/releasenotes/notes/rbd-total_capacity-60f10b45e3a8c8ea.yaml b/releasenotes/notes/rbd-total_capacity-60f10b45e3a8c8ea.yaml
new file mode 100644
index 000000000..1efa65097
--- /dev/null
+++ b/releasenotes/notes/rbd-total_capacity-60f10b45e3a8c8ea.yaml
@@ -0,0 +1,8 @@
+---
+fixes:
+ - |
+ RBD driver `bug #1960206
+ <https://bugs.launchpad.net/cinder/+bug/1960206>`_: Fixed
+ ``total_capacity`` reported by the driver to the scheduler on Ceph clusters
+ that have renamed the ``bytes_used`` field to ``stored``. (e.g., `Nautilus
+ <https://docs.ceph.com/en/nautilus/releases/nautilus/#upgrade-compatibility-notes>`_).
diff --git a/releasenotes/notes/remove-multiattach-request-param-4444e02533f919da.yaml b/releasenotes/notes/remove-multiattach-request-param-4444e02533f919da.yaml
new file mode 100644
index 000000000..0b11c4a48
--- /dev/null
+++ b/releasenotes/notes/remove-multiattach-request-param-4444e02533f919da.yaml
@@ -0,0 +1,20 @@
+---
+fixes:
+ - |
+ `Bug #2008259 <https://bugs.launchpad.net/cinder/+bug/2008259>`_:
+ Fixed the volume create functionality where non-admin users were
+ able to create multiattach volumes by providing the `multiattach`
+ parameter in the request body. Now we can only create multiattach
+ volumes using a multiattach volume type, which is also the
+ recommended way.
+other:
+ - |
+ Removed the ability to create multiattach volumes by specifying
+ `multiattach` parameter in the request body of a volume create
+ operation. This functionality is unsafe, can lead to data loss,
+ and has been deprecated since the Queens release.
+ The recommended method for creating a multiattach volume is to
+ use a volume type that supports multiattach. By default, volume
+ types can only be created by the operator. Users who have a need
+ for multiattach volumes should contact their operator if a suitable
+ volume type is not available.
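+
+ As an illustration, an operator could create such a volume type as follows
+ (the type name is a placeholder):
+
+ .. code-block:: console
+
+    $ openstack volume type create multiattach
+    $ openstack volume type set --property multiattach="<is> True" multiattach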
diff --git a/releasenotes/source/2023.1.rst b/releasenotes/source/2023.1.rst
new file mode 100644
index 000000000..d1238479b
--- /dev/null
+++ b/releasenotes/source/2023.1.rst
@@ -0,0 +1,6 @@
+===========================
+2023.1 Series Release Notes
+===========================
+
+.. release-notes::
+ :branch: stable/2023.1
diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst
index b26aeca4d..36c9fa080 100644
--- a/releasenotes/source/index.rst
+++ b/releasenotes/source/index.rst
@@ -6,6 +6,7 @@ Cinder Release Notes
:maxdepth: 1
unreleased
+ 2023.1
zed
yoga
xena
diff --git a/setup.cfg b/setup.cfg
index 7d3525a8d..d422d7244 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -18,6 +18,7 @@ classifiers =
Programming Language :: Python :: 3
Programming Language :: Python :: 3.8
Programming Language :: Python :: 3.9
+ Programming Language :: Python :: 3.10
project_urls:
Source=https://opendev.org/openstack/cinder
Tracker=https://bugs.launchpad.net/cinder
@@ -89,6 +90,7 @@ all =
storpool.spopenstack>=2.2.1 # Apache-2.0
dfs-sdk>=1.2.25 # Apache-2.0
rbd-iscsi-client>=0.1.8 # Apache-2.0
+ python-linstor>=1.7.0 # LGPLv3
datacore =
websocket-client>=1.3.2 # LGPLv2+
powermax =
@@ -118,6 +120,8 @@ datera =
dfs-sdk>=1.2.25 # Apache-2.0
rbd_iscsi =
rbd-iscsi-client>=0.1.8 # Apache-2.0
+linstor =
+ python-linstor>=1.7.0 # LGPLv3
[mypy]
@@ -132,3 +136,6 @@ show_error_codes = true
pretty = true
html_report = mypy-report
no_implicit_optional = true
+
+[options]
+packages = cinder
diff --git a/tox.ini b/tox.ini
index 7b318af82..25e262c09 100644
--- a/tox.ini
+++ b/tox.ini
@@ -109,7 +109,7 @@ allowlist_externals =
install_command = {[testenv:py3]install_command}
deps = -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
- pylint==2.13.4
+ pylint==2.17.0
commands =
{toxinidir}/tools/coding-checks.sh --pylint {posargs:all}