summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--.zuul.yaml30
-rw-r--r--bindep.txt1
-rw-r--r--cinder/api/v3/messages.py2
-rw-r--r--cinder/db/api.py132
-rw-r--r--cinder/db/sqlalchemy/api.py527
-rw-r--r--cinder/db/sqlalchemy/models.py2
-rw-r--r--cinder/tests/unit/volume/drivers/hpe/test_hpe3par.py199
-rw-r--r--cinder/tests/unit/volume/drivers/test_pure.py54
-rw-r--r--cinder/tests/unit/volume/drivers/test_rbd.py23
-rw-r--r--cinder/volume/drivers/dell_emc/powerstore/utils.py4
-rw-r--r--cinder/volume/drivers/hpe/hpe_3par_common.py63
-rw-r--r--cinder/volume/drivers/ibm/ibm_storage/ds8k_connection.py5
-rw-r--r--cinder/volume/drivers/pure.py37
-rw-r--r--cinder/volume/drivers/rbd.py6
-rw-r--r--doc/source/admin/volume-multiattach.rst3
-rw-r--r--doc/source/configuration/block-storage/drivers/dell-emc-powermax-driver.rst72
-rw-r--r--doc/source/configuration/block-storage/drivers/pure-storage-driver.rst10
-rw-r--r--doc/source/reference/support-matrix.ini2
-rw-r--r--playbooks/enable-fips.yaml3
-rw-r--r--releasenotes/notes/hpe-3par-code-changes-for-new-wsapi-25865a65a428ce46.yaml4
-rw-r--r--releasenotes/notes/hpe-3par-convert-to-base-vol-delete-snap-a460a4b1c419804a.yaml11
-rw-r--r--releasenotes/notes/pure_nvme_tcp-a00efa8966a74f77.yaml4
-rw-r--r--releasenotes/notes/rbd-update-features-bugfix-df97b50864ce9712.yaml6
-rw-r--r--releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po531
-rw-r--r--requirements.txt2
-rw-r--r--test-requirements.txt2
-rwxr-xr-xtools/test-setup.sh41
27 files changed, 1372 insertions, 404 deletions
diff --git a/.zuul.yaml b/.zuul.yaml
index a920ca039..22155827a 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -14,7 +14,7 @@
- cinder-mypy
- cinder-tox-bandit-baseline:
voting: false
- - openstack-tox-functional-py38:
+ - openstack-tox-functional-py39:
irrelevant-files: &functional-irrelevant-files
- ^.*\.rst$
- ^cinder/locale/.*$
@@ -58,6 +58,9 @@
irrelevant-files: *gate-irrelevant-files
- cinder-tempest-plugin-lvm-lio-barbican:
irrelevant-files: *gate-irrelevant-files
+ - cinder-tempest-plugin-lvm-lio-barbican-fips:
+ voting: false
+ irrelevant-files: *gate-irrelevant-files
- cinder-grenade-mn-sub-volbak:
irrelevant-files: *gate-irrelevant-files
- cinder-tempest-lvm-multibackend:
@@ -68,6 +71,9 @@
irrelevant-files: *gate-irrelevant-files
- devstack-plugin-nfs-tempest-full:
irrelevant-files: *gate-irrelevant-files
+ - devstack-plugin-nfs-tempest-full-fips:
+ voting: false
+ irrelevant-files: *gate-irrelevant-files
- tempest-slow-py3:
irrelevant-files: *gate-irrelevant-files
- tempest-integrated-storage:
@@ -82,6 +88,8 @@
irrelevant-files: *gate-irrelevant-files
- tempest-integrated-storage-ubuntu-focal:
irrelevant-files: *gate-irrelevant-files
+ - cinder-tox-py311:
+ voting: false
gate:
jobs:
- cinder-grenade-mn-sub-volbak:
@@ -179,6 +187,17 @@
volume_revert: True
- job:
+ # this depends on some ceph admin setup which is not yet complete
+ # TODO(alee) enable this test when ceph admin work is complete.
+ name: cinder-plugin-ceph-tempest-fips
+ parent: cinder-plugin-ceph-tempest
+ nodeset: devstack-single-node-centos-9-stream
+ pre-run: playbooks/enable-fips.yaml
+ vars:
+ configure_swap_size: 4096
+ nslookup_target: 'opendev.org'
+
+- job:
name: cinder-plugin-ceph-tempest-mn-aa
parent: devstack-plugin-ceph-multinode-tempest-py3
roles:
@@ -354,3 +373,12 @@
$TEMPEST_CONFIG:
volume:
build_timeout: 900
+
+- job:
+ # copied from cinderlib
+ name: cinder-tox-py311
+ parent: openstack-tox
+ nodeset: ubuntu-jammy
+ vars:
+ tox_envlist: py311
+ python_version: '3.11'
diff --git a/bindep.txt b/bindep.txt
index d32d02680..6311a1885 100644
--- a/bindep.txt
+++ b/bindep.txt
@@ -29,6 +29,7 @@ postgresql
postgresql-client [platform:dpkg]
postgresql-devel [platform:rpm]
postgresql-server [platform:rpm]
+python3-devel [platform:rpm test]
libpq-dev [platform:dpkg]
thin-provisioning-tools [platform:debian]
libxml2-dev [platform:dpkg test]
diff --git a/cinder/api/v3/messages.py b/cinder/api/v3/messages.py
index 45986d53c..b71f70ca5 100644
--- a/cinder/api/v3/messages.py
+++ b/cinder/api/v3/messages.py
@@ -70,7 +70,7 @@ class MessagesController(wsgi.Controller):
# Not found exception will be handled at the wsgi level
message = self.message_api.get(context, id)
context.authorize(policy.DELETE_POLICY, target_obj=message)
- self.message_api.delete(context, message)
+ self.message_api.delete(context, id)
return webob.Response(status_int=HTTPStatus.NO_CONTENT)
diff --git a/cinder/db/api.py b/cinder/db/api.py
index 03b0ab33b..a8ac3df65 100644
--- a/cinder/db/api.py
+++ b/cinder/db/api.py
@@ -25,15 +25,12 @@ interface. Currently, many of these objects are sqlalchemy objects that
implement a dictionary interface. However, a future goal is to have all of
these objects be simple dictionaries.
-
**Related Flags**
:connection: string specifying the sqlalchemy connection to use, like:
- `sqlite:///var/lib/cinder/cinder.sqlite`.
-
+ `sqlite:///var/lib/cinder/cinder.sqlite`.
:enable_new_services: when adding a new service to the database, is it in the
- pool of available hardware (Default: True)
-
+ pool of available hardware (Default: True)
"""
from oslo_config import cfg
@@ -97,18 +94,6 @@ def dispose_engine():
###################
-def resource_exists(context, model, resource_id):
- return IMPL.resource_exists(context, model, resource_id)
-
-
-def get_model_for_versioned_object(versioned_object):
- return IMPL.get_model_for_versioned_object(versioned_object)
-
-
-def get_by_id(context, model, id, *args, **kwargs):
- return IMPL.get_by_id(context, model, id, *args, **kwargs)
-
-
class Condition(object):
"""Class for normal condition values for conditional_update."""
def __init__(self, value, field=None):
@@ -160,6 +145,24 @@ class Case(object):
self.else_ = else_
+###################
+
+
+def resource_exists(context, model, resource_id):
+ return IMPL.resource_exists(context, model, resource_id)
+
+
+def get_model_for_versioned_object(versioned_object):
+ return IMPL.get_model_for_versioned_object(versioned_object)
+
+
+def get_by_id(context, model, id, *args, **kwargs):
+ return IMPL.get_by_id(context, model, id, *args, **kwargs)
+
+
+###################
+
+
def is_orm_value(obj):
"""Check if object is an ORM field."""
return IMPL.is_orm_value(obj)
@@ -170,7 +173,7 @@ def conditional_update(
model,
values,
expected_values,
- filters=(),
+ filters=None,
include_deleted='no',
project_only=False,
order=None,
@@ -363,20 +366,20 @@ def cluster_create(context, values):
return IMPL.cluster_create(context, values)
-def cluster_update(context, id, values):
+def cluster_update(context, cluster_id, values):
"""Set the given properties on an cluster and update it.
Raises ClusterNotFound if cluster does not exist.
"""
- return IMPL.cluster_update(context, id, values)
+ return IMPL.cluster_update(context, cluster_id, values)
-def cluster_destroy(context, id):
+def cluster_destroy(context, cluster_id):
"""Destroy the cluster or raise if it does not exist or has hosts.
:raise ClusterNotFound: If cluster doesn't exist.
"""
- return IMPL.cluster_destroy(context, id)
+ return IMPL.cluster_destroy(context, cluster_id)
###############
@@ -387,11 +390,25 @@ def volume_attach(context, values):
return IMPL.volume_attach(context, values)
-def volume_attached(context, volume_id, instance_id, host_name, mountpoint,
- attach_mode='rw', mark_attached=True):
+def volume_attached(
+ context,
+ attachment_id,
+ instance_uuid,
+ host_name,
+ mountpoint,
+ attach_mode='rw',
+ mark_attached=True,
+):
"""Ensure that a volume is set as attached."""
- return IMPL.volume_attached(context, volume_id, instance_id, host_name,
- mountpoint, attach_mode, mark_attached)
+ return IMPL.volume_attached(
+ context,
+ attachment_id,
+ instance_uuid,
+ host_name,
+ mountpoint,
+ attach_mode,
+ mark_attached,
+ )
def volume_create(context, values):
@@ -623,9 +640,9 @@ def snapshot_get_all_by_host(context, host, filters=None):
return IMPL.snapshot_get_all_by_host(context, host, filters)
-def snapshot_get_all_for_cgsnapshot(context, project_id):
+def snapshot_get_all_for_cgsnapshot(context, cgsnapshot_id):
"""Get all snapshots belonging to a cgsnapshot."""
- return IMPL.snapshot_get_all_for_cgsnapshot(context, project_id)
+ return IMPL.snapshot_get_all_for_cgsnapshot(context, cgsnapshot_id)
def snapshot_get_all_for_group_snapshot(context, group_snapshot_id):
@@ -833,9 +850,9 @@ def volume_type_qos_specs_get(context, type_id):
return IMPL.volume_type_qos_specs_get(context, type_id)
-def volume_type_destroy(context, id):
+def volume_type_destroy(context, type_id):
"""Delete a volume type."""
- return IMPL.volume_type_destroy(context, id)
+ return IMPL.volume_type_destroy(context, type_id)
def volume_get_all_active_by_window(context, begin, end=None, project_id=None):
@@ -951,9 +968,9 @@ def group_types_get_by_name_or_id(context, group_type_list):
return IMPL.group_types_get_by_name_or_id(context, group_type_list)
-def group_type_destroy(context, id):
+def group_type_destroy(context, type_id):
"""Delete a group type."""
- return IMPL.group_type_destroy(context, id)
+ return IMPL.group_type_destroy(context, type_id)
def group_type_access_get_all(context, type_id):
@@ -989,17 +1006,17 @@ def volume_type_extra_specs_delete(context, volume_type_id, key):
return IMPL.volume_type_extra_specs_delete(context, volume_type_id, key)
-def volume_type_extra_specs_update_or_create(context,
- volume_type_id,
- extra_specs):
+def volume_type_extra_specs_update_or_create(
+ context, volume_type_id, extra_specs,
+):
"""Create or update volume type extra specs.
This adds or modifies the key/value pairs specified in the extra specs dict
argument.
"""
- return IMPL.volume_type_extra_specs_update_or_create(context,
- volume_type_id,
- extra_specs)
+ return IMPL.volume_type_extra_specs_update_or_create(
+ context, volume_type_id, extra_specs,
+ )
###################
@@ -1039,14 +1056,13 @@ def volume_type_encryption_delete(context, volume_type_id):
return IMPL.volume_type_encryption_delete(context, volume_type_id)
-def volume_type_encryption_create(context, volume_type_id, encryption_specs):
+def volume_type_encryption_create(context, volume_type_id, values):
return IMPL.volume_type_encryption_create(context, volume_type_id,
- encryption_specs)
+ values)
-def volume_type_encryption_update(context, volume_type_id, encryption_specs):
- return IMPL.volume_type_encryption_update(context, volume_type_id,
- encryption_specs)
+def volume_type_encryption_update(context, volume_type_id, values):
+ return IMPL.volume_type_encryption_update(context, volume_type_id, values)
def volume_type_encryption_volume_get(context, volume_type_id):
@@ -1065,9 +1081,9 @@ def qos_specs_create(context, values):
return IMPL.qos_specs_create(context, values)
-def qos_specs_get(context, qos_specs_id):
+def qos_specs_get(context, qos_specs_id, inactive=False):
"""Get all specification for a given qos_specs."""
- return IMPL.qos_specs_get(context, qos_specs_id)
+ return IMPL.qos_specs_get(context, qos_specs_id, inactive)
def qos_specs_get_all(context, filters=None, marker=None, limit=None,
@@ -1078,9 +1094,9 @@ def qos_specs_get_all(context, filters=None, marker=None, limit=None,
sort_keys=sort_keys, sort_dirs=sort_dirs)
-def qos_specs_get_by_name(context, name):
+def qos_specs_get_by_name(context, name, inactive=False):
"""Get all specification for a given qos_specs."""
- return IMPL.qos_specs_get_by_name(context, name)
+ return IMPL.qos_specs_get_by_name(context, name, inactive)
def qos_specs_associations_get(context, qos_specs_id):
@@ -1113,13 +1129,13 @@ def qos_specs_item_delete(context, qos_specs_id, key):
return IMPL.qos_specs_item_delete(context, qos_specs_id, key)
-def qos_specs_update(context, qos_specs_id, specs):
+def qos_specs_update(context, qos_specs_id, values):
"""Update qos specs.
This adds or modifies the key/value pairs specified in the
specs dict argument for a given qos_specs.
"""
- return IMPL.qos_specs_update(context, qos_specs_id, specs)
+ return IMPL.qos_specs_update(context, qos_specs_id, values)
###################
@@ -1268,9 +1284,9 @@ def quota_class_update(context, class_name, resource, limit):
return IMPL.quota_class_update(context, class_name, resource, limit)
-def quota_class_update_resource(context, resource, new_resource):
+def quota_class_update_resource(context, old_res, new_res):
"""Update resource name in quota_class."""
- return IMPL.quota_class_update_resource(context, resource, new_resource)
+ return IMPL.quota_class_update_resource(context, old_res, new_res)
def quota_class_destroy(context, class_name, resource):
@@ -1580,9 +1596,19 @@ def group_get_all(context, filters=None, marker=None, limit=None,
sort_dirs=sort_dirs)
-def group_create(context, values, group_snapshot_id=None, group_id=None):
+def group_create(
+ context,
+ values,
+ group_snapshot_id=None,
+ source_group_id=None,
+):
"""Create a group from the values dictionary."""
- return IMPL.group_create(context, values, group_snapshot_id, group_id)
+ return IMPL.group_create(
+ context,
+ values,
+ group_snapshot_id,
+ source_group_id,
+ )
def group_get_all_by_project(context, project_id, filters=None,
diff --git a/cinder/db/sqlalchemy/api.py b/cinder/db/sqlalchemy/api.py
index c59dea44c..7a69ce48e 100644
--- a/cinder/db/sqlalchemy/api.py
+++ b/cinder/db/sqlalchemy/api.py
@@ -16,7 +16,23 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""Implementation of SQLAlchemy backend."""
+"""Defines interface for DB access.
+
+Functions in this module are imported into the cinder.db namespace. Call these
+functions from cinder.db namespace, not the cinder.db.api namespace.
+
+All functions in this module return objects that implement a dictionary-like
+interface. Currently, many of these objects are sqlalchemy objects that
+implement a dictionary interface. However, a future goal is to have all of
+these objects be simple dictionaries.
+
+**Related Flags**
+
+:connection: string specifying the sqlalchemy connection to use, like:
+ `sqlite:///var/lib/cinder/cinder.sqlite`.
+:enable_new_services: when adding a new service to the database, is it in the
+ pool of available hardware (Default: True)
+"""
import collections
from collections import abc
@@ -183,6 +199,9 @@ def require_context(f):
return wrapper
+###################
+
+
@require_context
@main_context_manager.reader
def resource_exists(context, model, resource_id):
@@ -318,131 +337,6 @@ def model_query(context, model, *args, **kwargs):
return query
-def _sync_volumes(
- context,
- project_id,
- volume_type_id=None,
- volume_type_name=None,
-):
- volumes, _ = _volume_data_get_for_project(
- context,
- project_id,
- volume_type_id=volume_type_id,
- )
- key = 'volumes'
- if volume_type_name:
- key += '_' + volume_type_name
- return {key: volumes}
-
-
-def _sync_snapshots(
- context,
- project_id,
- volume_type_id=None,
- volume_type_name=None,
-):
- snapshots, _ = _snapshot_data_get_for_project(
- context,
- project_id,
- volume_type_id=volume_type_id,
- )
- key = 'snapshots'
- if volume_type_name:
- key += '_' + volume_type_name
- return {key: snapshots}
-
-
-def _sync_backups(
- context,
- project_id,
- volume_type_id=None,
- volume_type_name=None,
-):
- backups, _ = _backup_data_get_for_project(
- context,
- project_id,
- volume_type_id=volume_type_id,
- )
- key = 'backups'
- return {key: backups}
-
-
-def _sync_gigabytes(
- context,
- project_id,
- volume_type_id=None,
- volume_type_name=None,
-):
- _, vol_gigs = _volume_data_get_for_project(
- context,
- project_id,
- volume_type_id=volume_type_id,
- )
-
- key = 'gigabytes'
- if volume_type_name:
- key += '_' + volume_type_name
-
- if CONF.no_snapshot_gb_quota:
- return {key: vol_gigs}
-
- _, snap_gigs = _snapshot_data_get_for_project(
- context,
- project_id,
- volume_type_id=volume_type_id,
- )
-
- return {key: vol_gigs + snap_gigs}
-
-
-def _sync_consistencygroups(
- context,
- project_id,
- volume_type_id=None,
- volume_type_name=None,
-):
- _, groups = _consistencygroup_data_get_for_project(context, project_id)
- key = 'consistencygroups'
- return {key: groups}
-
-
-def _sync_backup_gigabytes(
- context,
- project_id,
- volume_type_id=None,
- volume_type_name=None,
-):
- key = 'backup_gigabytes'
- _, backup_gigs = _backup_data_get_for_project(
- context,
- project_id,
- volume_type_id=volume_type_id,
- )
- return {key: backup_gigs}
-
-
-def _sync_groups(
- context,
- project_id,
- volume_type_id=None,
- volume_type_name=None,
-):
- _, groups = _group_data_get_for_project(context, project_id)
- key = 'groups'
- return {key: groups}
-
-
-QUOTA_SYNC_FUNCTIONS = {
- '_sync_volumes': _sync_volumes,
- '_sync_snapshots': _sync_snapshots,
- '_sync_gigabytes': _sync_gigabytes,
- '_sync_consistencygroups': _sync_consistencygroups,
- '_sync_backups': _sync_backups,
- '_sync_backup_gigabytes': _sync_backup_gigabytes,
- '_sync_groups': _sync_groups,
-}
-
-
###################
@@ -697,6 +591,134 @@ def conditional_update(
###################
+def _sync_volumes(
+ context,
+ project_id,
+ volume_type_id=None,
+ volume_type_name=None,
+):
+ volumes, _ = _volume_data_get_for_project(
+ context,
+ project_id,
+ volume_type_id=volume_type_id,
+ )
+ key = 'volumes'
+ if volume_type_name:
+ key += '_' + volume_type_name
+ return {key: volumes}
+
+
+def _sync_snapshots(
+ context,
+ project_id,
+ volume_type_id=None,
+ volume_type_name=None,
+):
+ snapshots, _ = _snapshot_data_get_for_project(
+ context,
+ project_id,
+ volume_type_id=volume_type_id,
+ )
+ key = 'snapshots'
+ if volume_type_name:
+ key += '_' + volume_type_name
+ return {key: snapshots}
+
+
+def _sync_backups(
+ context,
+ project_id,
+ volume_type_id=None,
+ volume_type_name=None,
+):
+ backups, _ = _backup_data_get_for_project(
+ context,
+ project_id,
+ volume_type_id=volume_type_id,
+ )
+ key = 'backups'
+ return {key: backups}
+
+
+def _sync_gigabytes(
+ context,
+ project_id,
+ volume_type_id=None,
+ volume_type_name=None,
+):
+ _, vol_gigs = _volume_data_get_for_project(
+ context,
+ project_id,
+ volume_type_id=volume_type_id,
+ )
+
+ key = 'gigabytes'
+ if volume_type_name:
+ key += '_' + volume_type_name
+
+ if CONF.no_snapshot_gb_quota:
+ return {key: vol_gigs}
+
+ _, snap_gigs = _snapshot_data_get_for_project(
+ context,
+ project_id,
+ volume_type_id=volume_type_id,
+ )
+
+ return {key: vol_gigs + snap_gigs}
+
+
+def _sync_consistencygroups(
+ context,
+ project_id,
+ volume_type_id=None,
+ volume_type_name=None,
+):
+ _, groups = _consistencygroup_data_get_for_project(context, project_id)
+ key = 'consistencygroups'
+ return {key: groups}
+
+
+def _sync_backup_gigabytes(
+ context,
+ project_id,
+ volume_type_id=None,
+ volume_type_name=None,
+):
+ key = 'backup_gigabytes'
+ _, backup_gigs = _backup_data_get_for_project(
+ context,
+ project_id,
+ volume_type_id=volume_type_id,
+ )
+ return {key: backup_gigs}
+
+
+def _sync_groups(
+ context,
+ project_id,
+ volume_type_id=None,
+ volume_type_name=None,
+):
+ _, groups = _group_data_get_for_project(context, project_id)
+ key = 'groups'
+ return {key: groups}
+
+
+QUOTA_SYNC_FUNCTIONS = {
+ '_sync_volumes': _sync_volumes,
+ '_sync_snapshots': _sync_snapshots,
+ '_sync_gigabytes': _sync_gigabytes,
+ '_sync_consistencygroups': _sync_consistencygroups,
+ '_sync_backups': _sync_backups,
+ '_sync_backup_gigabytes': _sync_backup_gigabytes,
+ '_sync_groups': _sync_groups,
+}
+
+
+###################
+
+
def _clean_filters(filters):
return {k: v for k, v in filters.items() if v is not None}
@@ -1128,22 +1150,22 @@ def cluster_create(context, values):
@require_admin_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@main_context_manager.writer
-def cluster_update(context, id, values):
+def cluster_update(context, cluster_id, values):
"""Set the given properties on an cluster and update it.
Raises ClusterNotFound if cluster does not exist.
"""
- query = _cluster_query(context, id=id)
+ query = _cluster_query(context, id=cluster_id)
result = query.update(values)
if not result:
- raise exception.ClusterNotFound(id=id)
+ raise exception.ClusterNotFound(id=cluster_id)
@require_admin_context
@main_context_manager.writer
-def cluster_destroy(context, id):
+def cluster_destroy(context, cluster_id):
"""Destroy the cluster or raise if it does not exist or has hosts."""
- query = _cluster_query(context, id=id)
+ query = _cluster_query(context, id=cluster_id)
query = query.filter(models.Cluster.num_hosts == 0)
# If the update doesn't succeed we don't know if it's because the
# cluster doesn't exist or because it has hosts.
@@ -1154,9 +1176,9 @@ def cluster_destroy(context, id):
if not result:
# This will fail if the cluster doesn't exist raising the right
# exception
- cluster_get(context, id=id)
+ cluster_get(context, id=cluster_id)
# If it doesn't fail, then the problem is that there are hosts
- raise exception.ClusterHasHosts(id=id)
+ raise exception.ClusterHasHosts(id=cluster_id)
###################
@@ -1863,6 +1885,8 @@ def quota_destroy_by_project(context, project_id):
quota_destroy_all_by_project(context, project_id, only_quotas=True)
+# TODO(stephenfin): No one is using this except 'quota_destroy_by_project'
+# above, so the only_quotas=False path could be removed.
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@main_context_manager.writer
@@ -1939,8 +1963,8 @@ def volume_attached(
instance_uuid,
host_name,
mountpoint,
- attach_mode,
- mark_attached,
+ attach_mode='rw',
+ mark_attached=True,
):
"""This method updates a volume attachment entry.
@@ -2521,9 +2545,10 @@ def volume_attachment_get_all_by_volume_id(context, volume_id):
return result
+# FIXME(jdg): Not using filters
@require_context
@main_context_manager.reader
-def volume_attachment_get_all_by_host(context, host):
+def volume_attachment_get_all_by_host(context, host, filters=None):
result = (
model_query(context, models.VolumeAttachment)
.filter_by(attached_host=host)
@@ -2544,9 +2569,14 @@ def volume_attachment_get(context, attachment_id):
return _attachment_get(context, attachment_id)
+# FIXME(jdg): Not using filters
@require_context
@main_context_manager.reader
-def volume_attachment_get_all_by_instance_uuid(context, instance_uuid):
+def volume_attachment_get_all_by_instance_uuid(
+ context,
+ instance_uuid,
+ filters=None,
+):
"""Fetch all attachment records associated with the specified instance."""
result = (
model_query(context, models.VolumeAttachment)
@@ -3576,7 +3606,12 @@ def volume_metadata_get(context, volume_id):
@require_volume_exists
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@main_context_manager.writer
-def volume_metadata_delete(context, volume_id, key, meta_type):
+def volume_metadata_delete(
+ context,
+ volume_id,
+ key,
+ meta_type=common.METADATA_TYPES.user,
+):
if meta_type == common.METADATA_TYPES.user:
query = _volume_user_metadata_get_query(context, volume_id).filter_by(
key=key
@@ -3614,7 +3649,13 @@ def volume_metadata_delete(context, volume_id, key, meta_type):
@handle_db_data_error
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@main_context_manager.writer
-def volume_metadata_update(context, volume_id, metadata, delete, meta_type):
+def volume_metadata_update(
+ context,
+ volume_id,
+ metadata,
+ delete,
+ meta_type=common.METADATA_TYPES.user,
+):
if meta_type == common.METADATA_TYPES.user:
return _volume_user_metadata_update(
context, volume_id, metadata, delete
@@ -4795,7 +4836,15 @@ def _group_type_get(context, id, inactive=False, expected_fields=None):
@require_context
@main_context_manager.reader
def volume_type_get(context, id, inactive=False, expected_fields=None):
- """Return a dict describing specific volume_type."""
+ """Get volume type by id.
+
+ :param context: context to query under
+ :param id: Volume type id to get.
+ :param inactive: Consider inactive volume types when searching
+ :param expected_fields: Return those additional fields.
+ Supported fields are: projects.
+ :returns: volume type
+ """
return _volume_type_get(
context, id, inactive=inactive, expected_fields=expected_fields
@@ -5035,15 +5084,17 @@ def volume_type_qos_specs_get(context, type_id):
@require_admin_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@main_context_manager.writer
-def volume_type_destroy(context, id):
+def volume_type_destroy(context, type_id):
utcnow = timeutils.utcnow()
vol_types = volume_type_get_all(context)
if len(vol_types) <= 1:
- raise exception.VolumeTypeDeletionError(volume_type_id=id)
- _volume_type_get(context, id)
+ raise exception.VolumeTypeDeletionError(volume_type_id=type_id)
+ _volume_type_get(context, type_id)
results = (
- model_query(context, models.Volume).filter_by(volume_type_id=id).all()
+ model_query(context, models.Volume)
+ .filter_by(volume_type_id=type_id)
+ .all()
)
group_count = (
@@ -5052,7 +5103,7 @@ def volume_type_destroy(context, id):
models.GroupVolumeTypeMapping,
read_deleted="no",
)
- .filter_by(volume_type_id=id)
+ .filter_by(volume_type_id=type_id)
.count()
)
cg_count = (
@@ -5060,14 +5111,14 @@ def volume_type_destroy(context, id):
context,
models.ConsistencyGroup,
)
- .filter(models.ConsistencyGroup.volume_type_id.contains(id))
+ .filter(models.ConsistencyGroup.volume_type_id.contains(type_id))
.count()
)
if results or group_count or cg_count:
- LOG.error('VolumeType %s deletion failed, VolumeType in use.', id)
- raise exception.VolumeTypeInUse(volume_type_id=id)
+ LOG.error('VolumeType %s deletion failed, VolumeType in use.', type_id)
+ raise exception.VolumeTypeInUse(volume_type_id=type_id)
- query = model_query(context, models.VolumeType).filter_by(id=id)
+ query = model_query(context, models.VolumeType).filter_by(id=type_id)
entity = query.column_descriptions[0]['entity']
updated_values = {
'deleted': True,
@@ -5079,7 +5130,7 @@ def volume_type_destroy(context, id):
query = model_query(
context,
models.VolumeTypeExtraSpecs,
- ).filter_by(volume_type_id=id)
+ ).filter_by(volume_type_id=type_id)
entity = query.column_descriptions[0]['entity']
query.update(
{
@@ -5090,7 +5141,7 @@ def volume_type_destroy(context, id):
)
query = model_query(context, models.Encryption).filter_by(
- volume_type_id=id
+ volume_type_id=type_id
)
entity = query.column_descriptions[0]['entity']
query.update(
@@ -5103,7 +5154,7 @@ def volume_type_destroy(context, id):
model_query(
context, models.VolumeTypeProjects, read_deleted="int_no"
- ).filter_by(volume_type_id=id).soft_delete(synchronize_session=False)
+ ).filter_by(volume_type_id=type_id).soft_delete(synchronize_session=False)
del updated_values['updated_at']
return updated_values
@@ -5111,16 +5162,18 @@ def volume_type_destroy(context, id):
@require_admin_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@main_context_manager.writer
-def group_type_destroy(context, id):
- _group_type_get(context, id)
+def group_type_destroy(context, type_id):
+ _group_type_get(context, type_id)
results = (
- model_query(context, models.Group).filter_by(group_type_id=id).all()
+ model_query(context, models.Group)
+ .filter_by(group_type_id=type_id)
+ .all()
)
if results:
- LOG.error('GroupType %s deletion failed, ' 'GroupType in use.', id)
- raise exception.GroupTypeInUse(group_type_id=id)
+ LOG.error('GroupType %s deletion failed, GroupType in use.', type_id)
+ raise exception.GroupTypeInUse(group_type_id=type_id)
- query = model_query(context, models.GroupType).filter_by(id=id)
+ query = model_query(context, models.GroupType).filter_by(id=type_id)
entity = query.column_descriptions[0]['entity']
query.update(
{
@@ -5131,7 +5184,7 @@ def group_type_destroy(context, id):
)
query = model_query(context, models.GroupTypeSpecs).filter_by(
- group_type_id=id
+ group_type_id=type_id
)
entity = query.column_descriptions[0]['entity']
query.update(
@@ -5441,9 +5494,13 @@ def _volume_type_extra_specs_get_item(context, volume_type_id, key):
@handle_db_data_error
@require_context
@main_context_manager.writer
-def volume_type_extra_specs_update_or_create(context, volume_type_id, specs):
+def volume_type_extra_specs_update_or_create(
+ context,
+ volume_type_id,
+ extra_specs,
+):
spec_ref = None
- for key, value in specs.items():
+ for key, value in extra_specs.items():
try:
spec_ref = _volume_type_extra_specs_get_item(
context,
@@ -5463,7 +5520,7 @@ def volume_type_extra_specs_update_or_create(context, volume_type_id, specs):
)
spec_ref.save(context.session)
- return specs
+ return extra_specs
####################
@@ -5524,9 +5581,9 @@ def _group_type_specs_get_item(context, group_type_id, key):
@handle_db_data_error
@require_context
@main_context_manager.writer
-def group_type_specs_update_or_create(context, group_type_id, specs):
+def group_type_specs_update_or_create(context, group_type_id, group_specs):
spec_ref = None
- for key, value in specs.items():
+ for key, value in group_specs.items():
try:
spec_ref = _group_type_specs_get_item(context, group_type_id, key)
except exception.GroupTypeSpecsNotFound:
@@ -5541,7 +5598,7 @@ def group_type_specs_update_or_create(context, group_type_id, specs):
)
spec_ref.save(context.session)
- return specs
+ return group_specs
####################
@@ -5915,18 +5972,18 @@ def _qos_specs_get_item(context, qos_specs_id, key):
@require_admin_context
@require_qos_specs_exists
@main_context_manager.writer
-def qos_specs_update(context, qos_specs_id, updates):
+def qos_specs_update(context, qos_specs_id, values):
"""Make updates to an existing qos specs.
Perform add, update or delete key/values to a qos specs.
"""
- specs = updates.get('specs', {})
+ specs = values.get('specs', {})
- if 'consumer' in updates:
+ if 'consumer' in values:
# Massage consumer to the right place for DB and copy specs
# before updating so we don't modify dict for caller
specs = specs.copy()
- specs['consumer'] = updates['consumer']
+ specs['consumer'] = values['consumer']
spec_ref = None
for key in specs.keys():
try:
@@ -8254,12 +8311,9 @@ def message_create(context, values):
@require_admin_context
@main_context_manager.writer
-def message_destroy(context, message):
+def message_destroy(context, message_id):
now = timeutils.utcnow()
- query = model_query(
- context,
- models.Message,
- ).filter_by(id=message.get('id'))
+ query = model_query(context, models.Message).filter_by(id=message_id)
entity = query.column_descriptions[0]['entity']
updated_values = {
'deleted': True,
@@ -8326,64 +8380,6 @@ def driver_initiator_data_get(context, initiator, namespace):
###############################
-PAGINATION_HELPERS = {
- models.Volume: (_volume_get_query, _process_volume_filters, _volume_get),
- models.Snapshot: (_snaps_get_query, _process_snaps_filters, _snapshot_get),
- models.Backup: (_backups_get_query, _process_backups_filters, _backup_get),
- models.QualityOfServiceSpecs: (
- _qos_specs_get_query,
- _process_qos_specs_filters,
- _qos_specs_get,
- ),
- models.VolumeType: (
- _volume_type_get_query,
- _process_volume_types_filters,
- _volume_type_get_db_object,
- ),
- models.ConsistencyGroup: (
- _consistencygroups_get_query,
- _process_consistencygroups_filters,
- _consistencygroup_get,
- ),
- models.Message: (
- _messages_get_query,
- _process_messages_filters,
- _message_get,
- ),
- models.GroupType: (
- _group_type_get_query,
- _process_group_types_filters,
- _group_type_get_db_object,
- ),
- models.Group: (_groups_get_query, _process_groups_filters, _group_get),
- models.GroupSnapshot: (
- _group_snapshot_get_query,
- _process_group_snapshot_filters,
- _group_snapshot_get,
- ),
- models.VolumeAttachment: (
- _attachment_get_query,
- _process_attachment_filters,
- _attachment_get,
- ),
- models.Transfer: (
- _transfer_get_query,
- _process_transfer_filters,
- _transfer_get,
- ),
-}
-
-
-CALCULATE_COUNT_HELPERS = {
- 'volume': (_volume_get_query, _process_volume_filters),
- 'snapshot': (_snaps_get_query, _process_snaps_filters),
- 'backup': (_backups_get_query, _process_backups_filters),
-}
-
-
-###############################
-
-
@require_context
@main_context_manager.writer
def image_volume_cache_create(
@@ -8550,9 +8546,14 @@ def worker_get(context, **filters):
@require_context
@main_context_manager.reader
-def worker_get_all(context, **filters):
+def worker_get_all(context, until=None, db_filters=None, **filters):
"""Get all workers that match given criteria."""
- query = _worker_query(context, **filters)
+ query = _worker_query(
+ context,
+ until=until,
+ db_filters=db_filters,
+ **filters,
+ )
return query.all() if query else []
@@ -8704,3 +8705,61 @@ def use_quota_online_data_migration(
# models.VolumeAdminMetadata.delete_values)
#
# return total, updated
+
+
+###############################
+
+
+PAGINATION_HELPERS = {
+ models.Volume: (_volume_get_query, _process_volume_filters, _volume_get),
+ models.Snapshot: (_snaps_get_query, _process_snaps_filters, _snapshot_get),
+ models.Backup: (_backups_get_query, _process_backups_filters, _backup_get),
+ models.QualityOfServiceSpecs: (
+ _qos_specs_get_query,
+ _process_qos_specs_filters,
+ _qos_specs_get,
+ ),
+ models.VolumeType: (
+ _volume_type_get_query,
+ _process_volume_types_filters,
+ _volume_type_get_db_object,
+ ),
+ models.ConsistencyGroup: (
+ _consistencygroups_get_query,
+ _process_consistencygroups_filters,
+ _consistencygroup_get,
+ ),
+ models.Message: (
+ _messages_get_query,
+ _process_messages_filters,
+ _message_get,
+ ),
+ models.GroupType: (
+ _group_type_get_query,
+ _process_group_types_filters,
+ _group_type_get_db_object,
+ ),
+ models.Group: (_groups_get_query, _process_groups_filters, _group_get),
+ models.GroupSnapshot: (
+ _group_snapshot_get_query,
+ _process_group_snapshot_filters,
+ _group_snapshot_get,
+ ),
+ models.VolumeAttachment: (
+ _attachment_get_query,
+ _process_attachment_filters,
+ _attachment_get,
+ ),
+ models.Transfer: (
+ _transfer_get_query,
+ _process_transfer_filters,
+ _transfer_get,
+ ),
+}
+
+
+CALCULATE_COUNT_HELPERS = {
+ 'volume': (_volume_get_query, _process_volume_filters),
+ 'snapshot': (_snaps_get_query, _process_snaps_filters),
+ 'backup': (_backups_get_query, _process_backups_filters),
+}
diff --git a/cinder/db/sqlalchemy/models.py b/cinder/db/sqlalchemy/models.py
index 5df343609..761d61ec9 100644
--- a/cinder/db/sqlalchemy/models.py
+++ b/cinder/db/sqlalchemy/models.py
@@ -314,7 +314,7 @@ class Volume(BASE, CinderBase):
__tablename__ = 'volumes'
__table_args__ = (
- sa.Index('volumes_service_uuid_idx', 'deleted', 'service_uuid'),
+ sa.Index('volumes_service_uuid_idx', 'service_uuid', 'deleted'),
# Speed up normal listings
sa.Index('volumes_deleted_project_id_idx', 'deleted', 'project_id'),
# Speed up service start, create volume from image when using direct
diff --git a/cinder/tests/unit/volume/drivers/hpe/test_hpe3par.py b/cinder/tests/unit/volume/drivers/hpe/test_hpe3par.py
index 1d1e440cb..018d93694 100644
--- a/cinder/tests/unit/volume/drivers/hpe/test_hpe3par.py
+++ b/cinder/tests/unit/volume/drivers/hpe/test_hpe3par.py
@@ -675,6 +675,11 @@ class HPE3PARBaseDriver(test.TestCase):
'minor': 5,
'revision': 0}
+ wsapi_version_2023 = {'major': 1,
+ 'build': 100000050,
+ 'minor': 10,
+ 'revision': 0}
+
# Use this to point to latest version of wsapi
wsapi_version_latest = wsapi_version_for_compression
@@ -892,28 +897,41 @@ class TestHPE3PARDriverBase(HPE3PARBaseDriver):
mock_client.assert_has_calls(expected)
self.assertEqual(self.STATUS_DONE, status)
- def test_create_volume(self):
+ # (i) wsapi version is old/default
+ # (ii) wsapi version is 2023, in which case snapCPG isn't required
+ @ddt.data({'wsapi_version': None},
+ {'wsapi_version': HPE3PARBaseDriver.wsapi_version_2023})
+ @ddt.unpack
+ def test_create_volume(self, wsapi_version):
# setup_mock_client drive with default configuration
# and return the mock HTTP 3PAR client
- mock_client = self.setup_driver()
+ mock_client = self.setup_driver(wsapi_version=wsapi_version)
+
with mock.patch.object(hpecommon.HPE3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
- self.driver.create_volume(self.volume)
+ if not wsapi_version:
+ # (i) old/default
+ self.driver.create_volume(self.volume)
+ else:
+ # (ii) wsapi 2023
+ common = self.driver._login()
+ common.create_volume(self.volume)
comment = Comment({
"display_name": "Foo Volume",
"type": "OpenStack",
"name": "volume-d03338a9-9115-48a3-8dfc-35cdfcdc15a7",
"volume_id": "d03338a9-9115-48a3-8dfc-35cdfcdc15a7"})
+ optional = {'comment': comment,
+ 'tpvv': True,
+ 'tdvv': False}
+ if not wsapi_version:
+ optional['snapCPG'] = HPE3PAR_CPG_SNAP
expected = [
mock.call.createVolume(
self.VOLUME_3PAR_NAME,
HPE3PAR_CPG,
- 2048, {
- 'comment': comment,
- 'tpvv': True,
- 'tdvv': False,
- 'snapCPG': HPE3PAR_CPG_SNAP})]
+ 2048, optional)]
mock_client.assert_has_calls(expected)
@@ -1255,6 +1273,89 @@ class TestHPE3PARDriverBase(HPE3PARBaseDriver):
mock_client.assert_has_calls(expected)
@mock.patch.object(volume_types, 'get_volume_type')
+ def test_create_volume_replicated_periodic_2023(self, _mock_volume_types):
+ # setup_mock_client drive with default configuration
+ # and return the mock HTTP 3PAR client
+ conf = self.setup_configuration()
+ self.replication_targets[0]['replication_mode'] = 'periodic'
+ conf.replication_device = self.replication_targets
+ mock_client = self.setup_driver(conf, None, self.wsapi_version_2023)
+ mock_client.getStorageSystemInfo.return_value = (
+ {'id': self.CLIENT_ID})
+ mock_client.getRemoteCopyGroup.side_effect = (
+ hpeexceptions.HTTPNotFound)
+ mock_client.getCPG.return_value = {'domain': None}
+ mock_replicated_client = self.setup_driver(conf, None,
+ self.wsapi_version_2023)
+ mock_replicated_client.getStorageSystemInfo.return_value = (
+ {'id': self.REPLICATION_CLIENT_ID})
+
+ _mock_volume_types.return_value = {
+ 'name': 'replicated',
+ 'extra_specs': {
+ 'replication_enabled': '<is> True',
+ 'replication:mode': 'periodic',
+ 'replication:sync_period': '900',
+ 'volume_type': self.volume_type_replicated}}
+
+ with mock.patch.object(
+ hpecommon.HPE3PARCommon,
+ '_create_client') as mock_create_client, \
+ mock.patch.object(
+ hpecommon.HPE3PARCommon,
+ '_create_replication_client') as mock_replication_client:
+ mock_create_client.return_value = mock_client
+ mock_replication_client.return_value = mock_replicated_client
+
+ common = self.driver._login()
+ return_model = common.create_volume(self.volume_replicated)
+ comment = Comment({
+ "volume_type_name": "replicated",
+ "display_name": "Foo Volume",
+ "name": "volume-d03338a9-9115-48a3-8dfc-35cdfcdc15a7",
+ "volume_type_id": "be9181f1-4040-46f2-8298-e7532f2bf9db",
+ "volume_id": "d03338a9-9115-48a3-8dfc-35cdfcdc15a7",
+ "qos": {},
+ "type": "OpenStack"})
+
+ backend_id = self.replication_targets[0]['backend_id']
+ expected = [
+ mock.call.createVolume(
+ self.VOLUME_3PAR_NAME,
+ HPE3PAR_CPG,
+ 2048, {
+ 'comment': comment,
+ 'tpvv': True,
+ 'tdvv': False}),
+ mock.call.getRemoteCopyGroup(self.RCG_3PAR_NAME),
+ mock.call.getCPG(HPE3PAR_CPG),
+ mock.call.createRemoteCopyGroup(
+ self.RCG_3PAR_NAME,
+ [{'userCPG': HPE3PAR_CPG_REMOTE,
+ 'targetName': backend_id,
+ 'mode': PERIODIC_MODE}],
+ {'localUserCPG': HPE3PAR_CPG}),
+ mock.call.addVolumeToRemoteCopyGroup(
+ self.RCG_3PAR_NAME,
+ self.VOLUME_3PAR_NAME,
+ [{'secVolumeName': self.VOLUME_3PAR_NAME,
+ 'targetName': backend_id}],
+ optional={'volumeAutoCreation': True}),
+ mock.call.modifyRemoteCopyGroup(
+ self.RCG_3PAR_NAME,
+ {'targets': [{'syncPeriod': SYNC_PERIOD,
+ 'targetName': backend_id}]}),
+ mock.call.startRemoteCopy(self.RCG_3PAR_NAME)]
+ mock_client.assert_has_calls(
+ self.get_id_login +
+ self.standard_logout +
+ self.standard_login +
+ expected)
+ self.assertEqual({'replication_status': 'enabled',
+ 'provider_location': self.CLIENT_ID},
+ return_model)
+
+ @mock.patch.object(volume_types, 'get_volume_type')
def test_create_volume_replicated_sync(self, _mock_volume_types):
# setup_mock_client drive with default configuration
# and return the mock HTTP 3PAR client
@@ -3398,6 +3499,7 @@ class TestHPE3PARDriverBase(HPE3PARBaseDriver):
}
mock_client = self.setup_driver(mock_conf=conf)
+ mock_client.getVolumeSnapshots.return_value = []
with mock.patch.object(hpecommon.HPE3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
@@ -3426,6 +3528,7 @@ class TestHPE3PARDriverBase(HPE3PARBaseDriver):
{
'comment': comment,
'readOnly': False}),
+ mock.call.getVolumeSnapshots(self.VOLUME_3PAR_NAME),
mock.call.copyVolume(
osv_matcher, omv_matcher, HPE3PAR_CPG, mock.ANY),
mock.call.getTask(mock.ANY),
@@ -3449,6 +3552,7 @@ class TestHPE3PARDriverBase(HPE3PARBaseDriver):
}
mock_client = self.setup_driver(mock_conf=conf)
+ mock_client.getVolumeSnapshots.return_value = []
_mock_volume_types.return_value = {
'name': 'gold',
'extra_specs': {
@@ -3490,6 +3594,7 @@ class TestHPE3PARDriverBase(HPE3PARBaseDriver):
'comment': comment,
'readOnly': False}),
mock.call.getCPG(HPE3PAR_CPG),
+ mock.call.getVolumeSnapshots(self.VOLUME_3PAR_NAME),
mock.call.copyVolume(
osv_matcher, omv_matcher, HPE3PAR_CPG, mock.ANY),
mock.call.getTask(mock.ANY),
@@ -3606,6 +3711,7 @@ class TestHPE3PARDriverBase(HPE3PARBaseDriver):
'getVolume.return_value': {}
}
mock_client = self.setup_driver(mock_conf=conf)
+ mock_client.getVolumeSnapshots.return_value = []
volume_type_hos = copy.deepcopy(self.volume_type_hos)
volume_type_hos['extra_specs']['convert_to_base'] = True
_mock_volume_types.return_value = volume_type_hos
@@ -3635,6 +3741,7 @@ class TestHPE3PARDriverBase(HPE3PARBaseDriver):
{
'comment': comment,
'readOnly': False}),
+ mock.call.getVolumeSnapshots(self.VOLUME_3PAR_NAME),
mock.call.copyVolume(
osv_matcher, omv_matcher, HPE3PAR_CPG, mock.ANY),
mock.call.getTask(mock.ANY),
@@ -3656,6 +3763,7 @@ class TestHPE3PARDriverBase(HPE3PARBaseDriver):
'getVolume.return_value': {}
}
mock_client = self.setup_driver(mock_conf=conf)
+ mock_client.getVolumeSnapshots.return_value = []
_mock_volume_types.return_value = self.volume_type_hos
with mock.patch.object(hpecommon.HPE3PARCommon,
'_create_client') as mock_create_client:
@@ -3684,6 +3792,7 @@ class TestHPE3PARDriverBase(HPE3PARBaseDriver):
{
'comment': comment,
'readOnly': False}),
+ mock.call.getVolumeSnapshots(self.VOLUME_3PAR_NAME),
mock.call.copyVolume(
osv_matcher, omv_matcher, HPE3PAR_CPG, mock.ANY),
mock.call.getTask(mock.ANY),
@@ -3706,6 +3815,7 @@ class TestHPE3PARDriverBase(HPE3PARBaseDriver):
'getVolume.return_value': {}
}
mock_client = self.setup_driver(mock_conf=conf)
+ mock_client.getVolumeSnapshots.return_value = []
volume_type_hos = copy.deepcopy(self.volume_type_hos)
volume_type_hos['extra_specs']['convert_to_base'] = True
_mock_volume_types.return_value = volume_type_hos
@@ -3736,6 +3846,7 @@ class TestHPE3PARDriverBase(HPE3PARBaseDriver):
{
'comment': comment,
'readOnly': False}),
+ mock.call.getVolumeSnapshots(self.VOLUME_3PAR_NAME),
mock.call.copyVolume(
osv_matcher, omv_matcher, HPE3PAR_CPG, mock.ANY),
mock.call.getTask(mock.ANY),
@@ -3834,6 +3945,7 @@ class TestHPE3PARDriverBase(HPE3PARBaseDriver):
}
mock_client = self.setup_driver(mock_conf=conf)
+ mock_client.getVolumeSnapshots.return_value = []
with mock.patch.object(hpecommon.HPE3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
@@ -3857,6 +3969,7 @@ class TestHPE3PARDriverBase(HPE3PARBaseDriver):
}
mock_client = self.setup_driver(mock_conf=conf)
+ mock_client.getVolumeSnapshots.return_value = []
with mock.patch.object(hpecommon.HPE3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
@@ -3868,6 +3981,18 @@ class TestHPE3PARDriverBase(HPE3PARBaseDriver):
self.volume,
str(new_size))
+ def test__convert_to_base_volume_failure(self):
+ mock_client = self.setup_driver()
+ mock_client.getVolumeSnapshots.return_value = (
+ ['oss-nwJVbXaEQMi0w.xPutFRQw'])
+ with mock.patch.object(hpecommon.HPE3PARCommon,
+ '_create_client') as mock_create_client:
+ mock_create_client.return_value = mock_client
+ common = self.driver._login()
+ self.assertRaises(exception.VolumeIsBusy,
+ common._convert_to_base_volume,
+ self.volume)
+
@mock.patch.object(volume_types, 'get_volume_type')
def test_extend_volume_replicated(self, _mock_volume_types):
# Managed vs. unmanaged and periodic vs. sync are not relevant when
@@ -4245,10 +4370,16 @@ class TestHPE3PARDriverBase(HPE3PARBaseDriver):
expected_retype_specs)
self.assertEqual(expected_obj, obj)
+ # (i) wsapi version is old/default
+ # (ii) wsapi version is 2023, in which case snapCPG isn't required
+ @ddt.data({'wsapi_version': None},
+ {'wsapi_version': HPE3PARBaseDriver.wsapi_version_2023})
+ @ddt.unpack
@mock.patch.object(volume_types, 'get_volume_type')
- def test_manage_existing_with_no_snap_cpg(self, _mock_volume_types):
+ def test_manage_existing_with_no_snap_cpg(self, _mock_volume_types,
+ wsapi_version):
_mock_volume_types.return_value = self.volume_type
- mock_client = self.setup_driver()
+ mock_client = self.setup_driver(wsapi_version=wsapi_version)
new_comment = Comment({
"display_name": "Foo Volume",
@@ -4280,15 +4411,20 @@ class TestHPE3PARDriverBase(HPE3PARBaseDriver):
obj = self.driver.manage_existing(volume, existing_ref)
+ optional = {'newName': osv_matcher,
+ 'comment': new_comment}
+
+ if not wsapi_version:
+ # (i) old/default
+ # manage_existing() should be setting
+ # blank snapCPG to the userCPG
+ optional['snapCPG'] = 'testUserCpg0'
+
expected_manage = [
mock.call.getVolume(existing_ref['source-name']),
mock.call.modifyVolume(
existing_ref['source-name'],
- {'newName': osv_matcher,
- 'comment': new_comment,
- # manage_existing() should be setting
- # blank snapCPG to the userCPG
- 'snapCPG': 'testUserCpg0'})
+ optional)
]
mock_client.assert_has_calls(self.standard_login + expected_manage)
@@ -6052,16 +6188,21 @@ class TestHPE3PARDriverBase(HPE3PARBaseDriver):
mock_client.assert_has_calls(expected)
+ # (i) wsapi version is old/default
+ # (ii) wsapi version is 2023, in which case snapCPG isn't required
+ @ddt.data({'wsapi_version': None},
+ {'wsapi_version': HPE3PARBaseDriver.wsapi_version_2023})
+ @ddt.unpack
@mock.patch('cinder.volume.drivers.hpe.hpe_3par_common.HPE3PARCommon.'
'get_volume_settings_from_type')
@mock.patch('cinder.volume.drivers.hpe.hpe_3par_common.HPE3PARCommon.'
'is_volume_group_snap_type')
@mock.patch('cinder.volume.volume_utils.is_group_a_cg_snapshot_type')
def test_create_group_from_src_group(self, cg_ss_enable, vol_ss_enable,
- typ_info):
+ typ_info, wsapi_version):
cg_ss_enable.return_value = True
vol_ss_enable.return_value = True
- mock_client = self.setup_driver()
+ mock_client = self.setup_driver(wsapi_version=wsapi_version)
task_id = 1
mock_client.copyVolume.return_value = {'taskid': task_id}
mock_client.getStorageSystemInfo.return_value = {'id': self.CLIENT_ID}
@@ -6092,6 +6233,10 @@ class TestHPE3PARDriverBase(HPE3PARBaseDriver):
source_grp = self.fake_group_object(
grp_id=self.SRC_CONSIS_GROUP_ID)
+ optional = {'online': True,
+ 'tpvv': mock.ANY, 'tdvv': mock.ANY}
+ if not wsapi_version:
+ optional['snapCPG'] = HPE3PAR_CPG
expected = [
mock.call.getCPG(HPE3PAR_CPG),
mock.call.createVolumeSet(
@@ -6107,17 +6252,25 @@ class TestHPE3PARDriverBase(HPE3PARBaseDriver):
mock.ANY,
self.VOLUME_NAME_3PAR,
HPE3PAR_CPG,
- {'snapCPG': HPE3PAR_CPG, 'online': True,
- 'tpvv': mock.ANY, 'tdvv': mock.ANY}),
+ optional),
mock.call.addVolumeToVolumeSet(
self.CONSIS_GROUP_NAME,
self.VOLUME_NAME_3PAR)]
# Create a consistency group from a source consistency group.
- self.driver.create_group_from_src(
- context.get_admin_context(), group,
- [volume], source_group=source_grp,
- source_vols=[source_volume])
+ if not wsapi_version:
+ # (i) old/default
+ self.driver.create_group_from_src(
+ context.get_admin_context(), group,
+ [volume], source_group=source_grp,
+ source_vols=[source_volume])
+ else:
+ # (ii) wsapi 2023
+ common = self.driver._login()
+ common.create_group_from_src(
+ context.get_admin_context(), group,
+ [volume], source_group=source_grp,
+ source_vols=[source_volume])
mock_client.assert_has_calls(expected)
diff --git a/cinder/tests/unit/volume/drivers/test_pure.py b/cinder/tests/unit/volume/drivers/test_pure.py
index ff329a478..de59abc13 100644
--- a/cinder/tests/unit/volume/drivers/test_pure.py
+++ b/cinder/tests/unit/volume/drivers/test_pure.py
@@ -194,14 +194,12 @@ FC_PORTS = [{"name": name,
"nqn": None,
"portal": None,
"wwn": wwn,
- "nqn": None,
} for name, wwn in zip(FC_PORT_NAMES, FC_WWNS)]
AC_FC_PORTS = [{"name": name,
"iqn": None,
"nqn": None,
"portal": None,
"wwn": wwn,
- "nqn": None,
} for name, wwn in zip(FC_PORT_NAMES, AC_FC_WWNS)]
NON_ISCSI_PORT = {
"name": "ct0.fc1",
@@ -4934,14 +4932,54 @@ class PureNVMEDriverTestCase(PureBaseSharedDriverTestCase):
self.driver.initialize_connection(vol, multipath_connector)
def test_get_target_nvme_ports(self):
- self.array.list_ports.return_value = NVME_PORTS
+ ports = [{'name': 'CT0.ETH4',
+ 'wwn': None,
+ 'iqn': None,
+ 'nqn': TARGET_NQN},
+ {'name': 'CT0.ETH5',
+ 'wwn': None,
+ 'iqn': TARGET_IQN,
+ 'nqn': None},
+ {'name': 'CT0.ETH20',
+ 'wwn': None,
+ 'iqn': None,
+ 'nqn': TARGET_NQN},
+ {'name': 'CT0.FC4',
+ 'wwn': TARGET_WWN,
+ 'iqn': None,
+ 'nqn': TARGET_NQN}]
+ interfaces = [
+ {'name': 'ct0.eth4', 'services': ['nvme-tcp']},
+ {'name': 'ct0.eth5', 'services': ['iscsi']},
+ {'name': 'ct0.eth20', 'services': ['nvme-roce']},
+ {'name': 'ct0.fc4', 'services': ['nvme-fc']}
+ ]
+ # Test for the nvme-tcp port
+ self.driver.configuration.pure_nvme_transport = "tcp"
+ self.array.get_network_interface.return_value = interfaces[0]
+ self.array.list_ports.return_value = [ports[0]]
ret = self.driver._get_target_nvme_ports(self.array)
- self.assertEqual(NVME_PORTS, ret)
-
- def test_get_target_nvme_ports_with_nvme_and_fc(self):
- self.array.list_ports.return_value = NVME_PORTS_WITH
+ self.assertEqual([ports[0]], ret)
+ # Test for failure if no NVMe ports
+ self.array.get_network_interface.return_value = interfaces[1]
+ self.array.list_ports.return_value = [ports[1]]
+ self.assertRaises(
+ pure.PureDriverException,
+ self.driver._get_target_nvme_ports,
+ self.array,
+ )
+ # Test for the nvme-roce port
+ self.driver.configuration.pure_nvme_transport = "roce"
+ self.array.get_network_interface.return_value = interfaces[2]
+ self.array.list_ports.return_value = [ports[2]]
+ ret = self.driver._get_target_nvme_ports(self.array)
+ self.assertEqual([ports[2]], ret)
+ # Test for empty list if only nvme-fc port
+ self.driver.configuration.pure_nvme_transport = "roce"
+ self.array.get_network_interface.return_value = interfaces[3]
+ self.array.list_ports.return_value = [ports[3]]
ret = self.driver._get_target_nvme_ports(self.array)
- self.assertEqual(NVME_PORTS, ret)
+ self.assertEqual([], ret)
def test_get_target_nvme_ports_with_no_ports(self):
# Should raise an exception if there are no ports
diff --git a/cinder/tests/unit/volume/drivers/test_rbd.py b/cinder/tests/unit/volume/drivers/test_rbd.py
index 57dae3af1..f1ffeb89e 100644
--- a/cinder/tests/unit/volume/drivers/test_rbd.py
+++ b/cinder/tests/unit/volume/drivers/test_rbd.py
@@ -3361,6 +3361,17 @@ class RBDTestCase(test.TestCase):
{'provider_location':
"{\"saved_features\":%s}" % image_features}, ret)
+ @common_mocks
+ def test_enable_multiattach_no_features(self):
+ image = self.mock_proxy.return_value.__enter__.return_value
+ image.features.return_value = 0
+
+ ret = self.driver._enable_multiattach(self.volume_a)
+
+ image.update_features.assert_not_called()
+
+ self.assertEqual({'provider_location': '{"saved_features":0}'}, ret)
+
@ddt.data(MULTIATTACH_FULL_FEATURES, MULTIATTACH_REDUCED_FEATURES)
@common_mocks
def test_disable_multiattach(self, features):
@@ -3374,6 +3385,18 @@ class RBDTestCase(test.TestCase):
self.assertEqual({'provider_location': None}, ret)
+ @common_mocks
+ def test_disable_multiattach_no_features(self):
+ image = self.mock_proxy.return_value.__enter__.return_value
+ self.volume_a.provider_location = '{"saved_features": 0}'
+ image.features.return_value = 0
+
+ ret = self.driver._disable_multiattach(self.volume_a)
+
+ image.update_features.assert_not_called()
+
+ self.assertEqual({'provider_location': None}, ret)
+
class ManagedRBDTestCase(test_driver.BaseDriverTestCase):
driver_name = "cinder.volume.drivers.rbd.RBDDriver"
diff --git a/cinder/volume/drivers/dell_emc/powerstore/utils.py b/cinder/volume/drivers/dell_emc/powerstore/utils.py
index 52c74a587..dd02fe93c 100644
--- a/cinder/volume/drivers/dell_emc/powerstore/utils.py
+++ b/cinder/volume/drivers/dell_emc/powerstore/utils.py
@@ -15,12 +15,12 @@
"""Utilities for Dell EMC PowerStore Cinder driver."""
-from distutils import version
import functools
import re
from oslo_log import log as logging
from oslo_utils import units
+from packaging import version
from cinder.common import constants
from cinder import exception
@@ -186,4 +186,4 @@ def is_group_a_cg_snapshot_type(func):
def version_gte(ver1, ver2):
- return version.LooseVersion(ver1) >= version.LooseVersion(ver2)
+ return version.parse(ver1) >= version.parse(ver2)
diff --git a/cinder/volume/drivers/hpe/hpe_3par_common.py b/cinder/volume/drivers/hpe/hpe_3par_common.py
index 971fac3e8..2d9534a16 100644
--- a/cinder/volume/drivers/hpe/hpe_3par_common.py
+++ b/cinder/volume/drivers/hpe/hpe_3par_common.py
@@ -81,6 +81,7 @@ FLASH_CACHE_API_VERSION = 30201200
COMPRESSION_API_VERSION = 30301215
SRSTATLD_API_VERSION = 30201200
REMOTE_COPY_API_VERSION = 30202290
+API_VERSION_2023 = 100000000
hpe3par_opts = [
cfg.StrOpt('hpe3par_api_url',
@@ -300,11 +301,14 @@ class HPE3PARCommon(object):
4.0.16 - In multi host env, fix multi-detach operation. Bug #1958122
4.0.17 - Added get_manageable_volumes and get_manageable_snapshots.
Bug #1819903
+ 4.0.18 - During conversion of volume to base volume,
+ error out if it has child snapshot(s). Bug #1994521
+ 4.0.19 - Update code to work with new WSAPI (of 2023). Bug #2015746
"""
- VERSION = "4.0.17"
+ VERSION = "4.0.19"
stats = {}
@@ -704,9 +708,12 @@ class HPE3PARCommon(object):
compression = self.get_compression_policy(
type_info['hpe3par_keys'])
- optional = {'online': True, 'snapCPG': snapcpg,
+ optional = {'online': True,
'tpvv': tpvv, 'tdvv': tdvv}
+ if self.API_VERSION < API_VERSION_2023:
+ optional['snapCPG'] = snapcpg
+
if compression is not None:
optional['compression'] = compression
@@ -1004,7 +1011,7 @@ class HPE3PARCommon(object):
'comment': json.dumps(new_comment)}
# Ensure that snapCPG is set
- if 'snapCPG' not in vol:
+ if 'snapCPG' not in vol and self.API_VERSION < API_VERSION_2023:
new_vals['snapCPG'] = vol['userCPG']
LOG.info("Virtual volume %(disp)s '%(new)s' snapCPG "
"is empty so it will be set to: %(cpg)s",
@@ -2393,9 +2400,14 @@ class HPE3PARCommon(object):
comments['qos'] = qos
extras = {'comment': json.dumps(comments),
- 'snapCPG': snap_cpg,
'tpvv': tpvv}
+ LOG.debug("self.API_VERSION: %(version)s",
+ {'version': self.API_VERSION})
+
+ if self.API_VERSION < API_VERSION_2023:
+ extras['snapCPG'] = snap_cpg
+
# Only set the dedup option if the backend supports it.
if self.API_VERSION >= DEDUP_API_VERSION:
extras['tdvv'] = tdvv
@@ -2466,7 +2478,7 @@ class HPE3PARCommon(object):
{'src': src_name, 'dest': dest_name})
optional = {'tpvv': tpvv, 'online': True}
- if snap_cpg is not None:
+ if snap_cpg is not None and self.API_VERSION < API_VERSION_2023:
optional['snapCPG'] = snap_cpg
if self.API_VERSION >= DEDUP_API_VERSION:
@@ -3139,6 +3151,21 @@ class HPE3PARCommon(object):
compression = self.get_compression_policy(
type_info['hpe3par_keys'])
+
+ # If the volume (osv-) has snapshots, they cannot be transferred
+ # to the new base volume (omv-) during conversion, i.e. they
+ # remain with the original volume (osv-). So error out for such
+ # a volume.
+ snap_list = self.client.getVolumeSnapshots(volume_name)
+ if snap_list:
+ snap_str = ",".join(snap_list)
+ msg = (_("Volume %(name)s has dependent snapshots: %(snap)s."
+ " Either flatten or remove the dependent snapshots:"
+ " %(snap)s for the conversion of volume %(name)s to"
+ " succeed." % {'name': volume_name,
+ 'snap': snap_str}))
+ raise exception.VolumeIsBusy(message=msg)
+
# Create a physical copy of the volume
task_id = self._copy_volume(volume_name, temp_vol_name,
cpg, cpg, type_info['tpvv'],
@@ -3162,16 +3189,18 @@ class HPE3PARCommon(object):
comment = self._get_3par_vol_comment(volume_name)
if comment:
self.client.modifyVolume(temp_vol_name, {'comment': comment})
- LOG.debug('Volume rename completed: convert_to_base_volume: '
- 'id=%s.', volume['id'])
+ LOG.debug('Assigned the comment: convert_to_base_volume: '
+ 'id=%s.', volume['id'])
- # Delete source volume after the copy is complete
+ # Delete source volume (osv-) after the copy is complete
self.client.deleteVolume(volume_name)
LOG.debug('Delete src volume completed: convert_to_base_volume: '
'id=%s.', volume['id'])
- # Rename the new volume to the original name
+ # Rename the new volume (omv-) to the original name (osv-)
self.client.modifyVolume(temp_vol_name, {'newName': volume_name})
+ LOG.debug('Volume rename completed: convert_to_base_volume: '
+ 'id=%s.', volume['id'])
LOG.info('Completed: convert_to_base_volume: '
'id=%s.', volume['id'])
@@ -4358,15 +4387,17 @@ class HPE3PARCommon(object):
local_cpg)
rcg_target = {'targetName': target['backend_id'],
'mode': replication_mode_num,
- 'snapCPG': cpg,
'userCPG': cpg}
+ if self.API_VERSION < API_VERSION_2023:
+ rcg_target['snapCPG'] = cpg
rcg_targets.append(rcg_target)
sync_target = {'targetName': target['backend_id'],
'syncPeriod': replication_sync_period}
sync_targets.append(sync_target)
- optional = {'localSnapCPG': vol_settings['snap_cpg'],
- 'localUserCPG': local_cpg}
+ optional = {'localUserCPG': local_cpg}
+ if self.API_VERSION < API_VERSION_2023:
+ optional['localSnapCPG'] = vol_settings['snap_cpg']
pool = volume_utils.extract_host(volume['host'], level='pool')
domain = self.get_domain(pool)
if domain:
@@ -4381,6 +4412,8 @@ class HPE3PARCommon(object):
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
+ LOG.debug("created rcg %(name)s", {'name': rcg_name})
+
# Add volume to remote copy group.
rcg_targets = []
for target in self._replication_targets:
@@ -5300,7 +5333,11 @@ class ModifyVolumeTask(flow_utils.CinderTask):
comment_dict = self._get_new_comment(
old_comment, new_vvs, new_qos, new_type_name, new_type_id)
- if new_snap_cpg != old_snap_cpg:
+ LOG.debug("API_VERSION: %(ver_1)s, API_VERSION_2023: %(ver_2)s",
+ {'ver_1': common.API_VERSION,
+ 'ver_2': API_VERSION_2023})
+ if (new_snap_cpg != old_snap_cpg and
+ common.API_VERSION < API_VERSION_2023):
# Modify the snap_cpg. This will fail with snapshots.
LOG.info("Modifying %(volume_name)s snap_cpg from "
"%(old_snap_cpg)s to %(new_snap_cpg)s.",
diff --git a/cinder/volume/drivers/ibm/ibm_storage/ds8k_connection.py b/cinder/volume/drivers/ibm/ibm_storage/ds8k_connection.py
index 36f283b40..dfed6d49f 100644
--- a/cinder/volume/drivers/ibm/ibm_storage/ds8k_connection.py
+++ b/cinder/volume/drivers/ibm/ibm_storage/ds8k_connection.py
@@ -94,10 +94,7 @@ class DS8KHTTPSConnection(connection.VerifiedHTTPSConnection):
# Calls self._set_hostport(), so self.host is
# self._tunnel_host below.
#
- # disable pylint because pylint doesn't support importing
- # from six.moves yet. see:
- # https://bitbucket.org/logilab/pylint/issue/550/
- self._tunnel() # pylint: disable=E1101
+ self._tunnel()
# Mark this connection as not reusable
self.auto_open = 0
diff --git a/cinder/volume/drivers/pure.py b/cinder/volume/drivers/pure.py
index bf38d88f9..fcaecda08 100644
--- a/cinder/volume/drivers/pure.py
+++ b/cinder/volume/drivers/pure.py
@@ -111,9 +111,8 @@ PURE_OPTS = [
"IPv4 and IPv6 subnets. This parameter supersedes "
"pure_nvme_cidr."),
cfg.StrOpt("pure_nvme_transport", default="roce",
- choices=['roce'],
- help="The NVMe transport layer to be used by the NVMe driver. "
- "This only supports RoCE at this time."),
+ choices=['roce', 'tcp'],
+ help="The NVMe transport layer to be used by the NVMe driver."),
cfg.BoolOpt("pure_eradicate_on_delete",
default=False,
help="When enabled, all Pure volumes, snapshots, and "
@@ -159,6 +158,7 @@ ERR_MSG_NOT_CONNECTED = "is not connected"
ERR_MSG_ALREADY_BELONGS = "already belongs to"
ERR_MSG_EXISTING_CONNECTIONS = "cannot be deleted due to existing connections"
ERR_MSG_ALREADY_IN_USE = "already in use"
+ERR_MSG_ARRAY_LIMIT = "limit reached"
EXTRA_SPECS_REPL_ENABLED = "replication_enabled"
EXTRA_SPECS_REPL_TYPE = "replication_type"
@@ -406,6 +406,13 @@ class PureBaseVolumeDriver(san.SanDriver):
"unsupported. Please upgrade your backend to "
"a supported version.")
raise PureDriverException(msg)
+ if version.parse(array_info["version"]) < version.parse(
+ '6.4.2'
+ ) and self._storage_protocol == constants.NVMEOF_TCP:
+ msg = _("FlashArray Purity version less than 6.4.2 "
+ "unsupported for NVMe-TCP. Please upgrade your "
+ "backend to a supported version.")
+ raise PureDriverException(msg)
self._array.array_name = array_info["array_name"]
self._array.array_id = array_info["id"]
@@ -2418,8 +2425,9 @@ class PureBaseVolumeDriver(san.SanDriver):
except purestorage.PureHTTPError as err:
with excutils.save_and_reraise_exception() as ctxt:
if err.code == 400 and (
- ERR_MSG_ALREADY_EXISTS
- in err.text):
+ ERR_MSG_ALREADY_EXISTS in err.text
+ or ERR_MSG_ARRAY_LIMIT in err.text
+ ):
ctxt.reraise = False
LOG.info("Skipping add array %(target_array)s to pod"
" %(pod_name)s since it's already added.",
@@ -3217,6 +3225,9 @@ class PureNVMEDriver(PureBaseVolumeDriver, driver.BaseVD):
if self.configuration.pure_nvme_transport == "roce":
self.transport_type = "rdma"
self._storage_protocol = constants.NVMEOF_ROCE
+ else:
+ self.transport_type = "tcp"
+ self._storage_protocol = constants.NVMEOF_TCP
def _get_nguid(self, pure_vol_name):
"""Return the NGUID based on the volume's serial number
@@ -3331,14 +3342,24 @@ class PureNVMEDriver(PureBaseVolumeDriver, driver.BaseVD):
return props
def _get_target_nvme_ports(self, array):
- """Return list of nvme-enabled port descriptions."""
+ """Return list of correct nvme-enabled port descriptions."""
ports = array.list_ports()
+ valid_nvme_ports = []
nvme_ports = [port for port in ports if port["nqn"]]
+ for port in range(0, len(nvme_ports)):
+ if "ETH" in nvme_ports[port]["name"]:
+ port_detail = array.get_network_interface(
+ interface=nvme_ports[port]["name"]
+ )
+ if port_detail["services"][0] == "nvme-" + \
+ self.configuration.pure_nvme_transport:
+ valid_nvme_ports.append(nvme_ports[port])
if not nvme_ports:
raise PureDriverException(
- reason=_("No nvme-enabled ports on target array.")
+ reason=_("No %(type)s enabled ports on target array.") %
+ {"type": self._storage_protocol}
)
- return nvme_ports
+ return valid_nvme_ports
@utils.retry(PureRetryableException, retries=HOST_CREATE_MAX_RETRIES)
def _connect(self, array, vol_name, connector):
diff --git a/cinder/volume/drivers/rbd.py b/cinder/volume/drivers/rbd.py
index 6cc86c2c5..e710fd356 100644
--- a/cinder/volume/drivers/rbd.py
+++ b/cinder/volume/drivers/rbd.py
@@ -968,7 +968,8 @@ class RBDDriver(driver.CloneableImageVD, driver.MigrateVD,
with RBDVolumeProxy(self, vol_name) as image:
image_features = image.features()
change_features = self.MULTIATTACH_EXCLUSIONS & image_features
- image.update_features(change_features, False)
+ if change_features != 0:
+ image.update_features(change_features, False)
return {'provider_location':
self._dumps({'saved_features': image_features})}
@@ -980,7 +981,8 @@ class RBDDriver(driver.CloneableImageVD, driver.MigrateVD,
provider_location = json.loads(volume.provider_location)
image_features = provider_location['saved_features']
change_features = self.MULTIATTACH_EXCLUSIONS & image_features
- image.update_features(change_features, True)
+ if change_features != 0:
+ image.update_features(change_features, True)
except IndexError:
msg = "Could not find saved image features."
raise RBDDriverException(reason=msg)
diff --git a/doc/source/admin/volume-multiattach.rst b/doc/source/admin/volume-multiattach.rst
index 577a9660e..25fcfa991 100644
--- a/doc/source/admin/volume-multiattach.rst
+++ b/doc/source/admin/volume-multiattach.rst
@@ -36,7 +36,8 @@ In order to be able to attach a volume to multiple server instances you need to
have the 'multiattach' flag set to 'True' in the volume details. Please ensure
you have the right role and policy settings before performing the operation.
-Currently you can create a multiattach volume in two ways.
+The only way to create a multiattach volume is by creating a multiattach volume
+type and using it to create the volume.
.. note::
diff --git a/doc/source/configuration/block-storage/drivers/dell-emc-powermax-driver.rst b/doc/source/configuration/block-storage/drivers/dell-emc-powermax-driver.rst
index f707ac5eb..d3c4c560c 100644
--- a/doc/source/configuration/block-storage/drivers/dell-emc-powermax-driver.rst
+++ b/doc/source/configuration/block-storage/drivers/dell-emc-powermax-driver.rst
@@ -27,22 +27,17 @@ in the backend to perform PowerMax and VMAX storage operations.
While ``PowerMax`` will be used throughout this document, it will be used
to collectively categorize the following supported arrays, PowerMax 2000,
- 8000, VMAX All Flash 250F, 450F, 850F and 950F and VMAX-Hybrid_.
+ 8000, 2500, 8500, VMAX All Flash 250F, 450F, 850F and 950F and VMAX-Hybrid_.
System requirements and licensing
=================================
The Dell PowerMax Cinder driver supports the VMAX-Hybrid_ series,
-VMAX All-Flash series and the PowerMax arrays.
-
-The array operating system software, Solutions Enabler 9.2.2 series, and
-Unisphere for PowerMax 9.2.2 series are required to run Dell PowerMax
-Cinder driver for the Wallaby release. Please refer to support-matrix-table_
-for the support matrix of previous OpenStack versions.
+VMAX All-Flash series and the PowerMax v3 and v4 arrays.
Download Solutions Enabler and Unisphere from the Dell's support web site
-(login is required). See the `Dell Solutions Enabler 9.2.2 Installation
+(login is required). See the `Dell Solutions Enabler Installation
and Configuration Guide` and `Dell Unisphere for PowerMax Installation
Guide` at the `Dell Support`_ site.
@@ -53,26 +48,54 @@ Guide` at the `Dell Support`_ site.
reach out your local PowerMax representative to see if these versions
are still valid.
+ Starting with Antelope, the PowerMax OS version is now aligned with the
+ Unisphere version scheme.
.. _support-matrix-table:
.. table:: PowerMax Management software and OS for OpenStack release
- +-----------+------------------------+-------------+
- | OpenStack | Unisphere for PowerMax | PowerMax OS |
- +===========+========================+=============+
- | Xena | 9.2.2 | 5978.711 |
- +-----------+------------------------+-------------+
- | Wallaby | 9.2.1 | 5978.711 |
- +-----------+------------------------+-------------+
- | Victoria | 9.2.x | 5978.669 |
- +-----------+------------------------+-------------+
- | Ussuri | 9.1.x | 5978.479 |
- +-----------+------------------------+-------------+
- | Train | 9.1.x | 5978.444 |
- +-----------+------------------------+-------------+
- | Stein | 9.0.x | 5978.221 |
- +-----------+------------------------+-------------+
+ +-----------+--------------+-------------+--------------------------------+
+ | OpenStack | Unisphere | PowerMax OS | Supported Arrays |
+ | release | for PowerMax | | |
+ +===========+==============+=============+================================+
+ | Antelope | 10.0.1 | 10.0.1 | PowerMax 2500,8500 |
+ | | | (6079.175) | |
+ | | +-------------+--------------------------------+
+ | | | 5978.711 | PowerMax 2000,8000 |
+ | | | | VMAX 250F, 450F, 850F, 950F |
+ +-----------+--------------+-------------+--------------------------------+
+ | Zed | 9.2.2 | 5978.711 | PowerMax 2000,8000 |
+ | | | | VMAX 250F, 450F, 850F, 950F |
+ +-----------+--------------+-------------+--------------------------------+
+ | Yoga | 9.2.2 | 5978.711 | PowerMax 2000,8000 |
+ | | | | VMAX 250F, 450F, 850F, 950F |
+ | | | | VMAX 100K, 200K, 400K (Hybrid) |
+ +-----------+--------------+-------------+--------------------------------+
+ | Xena | 9.2.2 | 5978.711 | PowerMax 2000,8000 |
+ | | | | VMAX 250F, 450F, 850F, 950F |
+ | | | | VMAX 100K, 200K, 400K (Hybrid) |
+ +-----------+--------------+-------------+--------------------------------+
+ | Wallaby | 9.2.1 | 5978.711 | PowerMax 2000,8000 |
+ | | | | VMAX 250F, 450F, 850F, 950F |
+ | | | | VMAX 100K, 200K, 400K (Hybrid) |
+ +-----------+--------------+-------------+--------------------------------+
+ | Victoria | 9.2.0 | 5978.669 | PowerMax 2000,8000 |
+ | | | | VMAX 250F, 450F, 850F, 950F |
+ | | | | VMAX 100K, 200K, 400K (Hybrid) |
+ +-----------+--------------+-------------+--------------------------------+
+ | Ussuri | 9.1.x | 5978.479 | PowerMax 2000,8000 |
+ | | | | VMAX 250F, 450F, 850F, 950F |
+ | | | | VMAX 100K, 200K, 400K (Hybrid) |
+ +-----------+--------------+-------------+--------------------------------+
+ | Train | 9.1.x | 5978.444 | PowerMax 2000,8000 |
+ | | | | VMAX 250F, 450F, 850F, 950F |
+ | | | | VMAX 100K, 200K, 400K (Hybrid) |
+ +-----------+--------------+-------------+--------------------------------+
+ | Stein | 9.0.x | 5978.221 | PowerMax 2000,8000 |
+ | | | | VMAX 250F, 450F, 850F, 950F |
+ | | | | VMAX 100K, 200K, 400K (Hybrid) |
+ +-----------+--------------+-------------+--------------------------------+
.. note::
@@ -398,6 +421,9 @@ PowerMax driver integration
PowerMax). See ``Dell Solutions Enabler 9.2.1 Installation and
Configuration Guide`` at `Dell Support`_.
+#. Pay attention to the number of Gatekeeper devices to have in your
+   environment. It may vary depending on simultaneous calls to Unisphere.
+
2. FC zoning with PowerMax
--------------------------
diff --git a/doc/source/configuration/block-storage/drivers/pure-storage-driver.rst b/doc/source/configuration/block-storage/drivers/pure-storage-driver.rst
index 1a4a46a5d..459747a69 100644
--- a/doc/source/configuration/block-storage/drivers/pure-storage-driver.rst
+++ b/doc/source/configuration/block-storage/drivers/pure-storage-driver.rst
@@ -8,12 +8,12 @@ operations.
Support for iSCSI storage protocol is available with the PureISCSIDriver
Volume Driver class, Fibre Channel with the PureFCDriver and
-NVMe-ROCE with the PureNVMEDriver.
+NVMe-ROCE or NVMe-TCP with the PureNVMEDriver.
-iSCSI and Fibre Channel drivers are compatible with Purity FlashArrays
-that support the REST API version 1.6 and higher (Purity 4.7.0 and newer).
-The NVMe driver is compatible with Purity FlashArrays
+iSCSI, Fibre Channel and NVMe-RoCE drivers are compatible with FlashArrays
that support the REST API version 1.16 and higher (Purity 5.2.0 and newer).
+The NVMe-TCP driver is compatible with FlashArrays
+that are running Purity 6.4.2 and higher.
Some features may require newer versions of Purity.
Limitations and known issues
@@ -161,7 +161,7 @@ Pure Storage FlashArray as back-end storage.
NVME connectivity.
If using the NVME driver, specify the ``pure_nvme_transport`` value.
- Currently only ``roce`` is supported.
+ Supported values are ``roce`` or ``tcp``.
IP_PURE_MGMT
The IP address of the Pure Storage array's management interface or a
diff --git a/doc/source/reference/support-matrix.ini b/doc/source/reference/support-matrix.ini
index 7b4b3c38d..dc4da1bcb 100644
--- a/doc/source/reference/support-matrix.ini
+++ b/doc/source/reference/support-matrix.ini
@@ -175,7 +175,7 @@ title=Open-E JovianDSS Storage Driver (iSCSI)
title=ProphetStor Flexvisor Driver (iSCSI, NFS)
[driver.pure]
-title=Pure Storage Driver (iSCSI, FC, NVMe-RoCE)
+title=Pure Storage Driver (iSCSI, FC, NVMe-RoCE, NVMe-TCP)
[driver.qnap]
title=QNAP Storage Driver (iSCSI)
diff --git a/playbooks/enable-fips.yaml b/playbooks/enable-fips.yaml
new file mode 100644
index 000000000..bc1dc04ea
--- /dev/null
+++ b/playbooks/enable-fips.yaml
@@ -0,0 +1,3 @@
+- hosts: all
+ roles:
+ - enable-fips
diff --git a/releasenotes/notes/hpe-3par-code-changes-for-new-wsapi-25865a65a428ce46.yaml b/releasenotes/notes/hpe-3par-code-changes-for-new-wsapi-25865a65a428ce46.yaml
new file mode 100644
index 000000000..82cbc97d9
--- /dev/null
+++ b/releasenotes/notes/hpe-3par-code-changes-for-new-wsapi-25865a65a428ce46.yaml
@@ -0,0 +1,4 @@
+fixes:
+ - |
+ HPE 3PAR driver `Bug #2015746 <https://bugs.launchpad.net/cinder/+bug/2015746>`_:
+    Fixed: minor code changes to work with the new WSAPI.
diff --git a/releasenotes/notes/hpe-3par-convert-to-base-vol-delete-snap-a460a4b1c419804a.yaml b/releasenotes/notes/hpe-3par-convert-to-base-vol-delete-snap-a460a4b1c419804a.yaml
new file mode 100644
index 000000000..e087e3353
--- /dev/null
+++ b/releasenotes/notes/hpe-3par-convert-to-base-vol-delete-snap-a460a4b1c419804a.yaml
@@ -0,0 +1,11 @@
+---
+fixes:
+ - |
+ HPE 3PAR driver `Bug #1994521 <https://bugs.launchpad.net/cinder/+bug/1994521>`_:
+ Fixed: While performing a delete snapshot (s1) operation, the volumes (v2)
+ dependent on the snapshot (s1) are converted to base volumes. This
+ operation fails if these dependent volumes (v2) have their own dependent
+ snapshots (s2). The errors during the failure were vague and not helpful.
+    With this release, we added conditions to fail this operation early and
+    also added a useful error message.
+
diff --git a/releasenotes/notes/pure_nvme_tcp-a00efa8966a74f77.yaml b/releasenotes/notes/pure_nvme_tcp-a00efa8966a74f77.yaml
new file mode 100644
index 000000000..f4685aebe
--- /dev/null
+++ b/releasenotes/notes/pure_nvme_tcp-a00efa8966a74f77.yaml
@@ -0,0 +1,4 @@
+---
+features:
+ - |
+    Pure Storage FlashArray driver: Added support for the NVMe-TCP transport layer.
diff --git a/releasenotes/notes/rbd-update-features-bugfix-df97b50864ce9712.yaml b/releasenotes/notes/rbd-update-features-bugfix-df97b50864ce9712.yaml
new file mode 100644
index 000000000..fa066bd17
--- /dev/null
+++ b/releasenotes/notes/rbd-update-features-bugfix-df97b50864ce9712.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - |
+ `Bug #1997980 <https://bugs.launchpad.net/cinder/+bug/1997980>`_: RBD:
+ Fixed failure to update rbd image features for multi-attach when
+ features = 0.
diff --git a/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po b/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po
index 31f190235..b69d7d2d9 100644
--- a/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po
+++ b/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po
@@ -9,11 +9,11 @@ msgid ""
msgstr ""
"Project-Id-Version: Cinder Release Notes\n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2023-02-13 05:20+0000\n"
+"POT-Creation-Date: 2023-05-04 07:54+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"PO-Revision-Date: 2023-02-10 12:09+0000\n"
+"PO-Revision-Date: 2023-05-08 10:03+0000\n"
"Last-Translator: Andi Chandler <andi@gowling.com>\n"
"Language-Team: English (United Kingdom)\n"
"Language: en_GB\n"
@@ -254,8 +254,8 @@ msgstr "15.5.0"
msgid "15.6.0"
msgstr "15.6.0"
-msgid "15.6.0-19"
-msgstr "15.6.0-19"
+msgid "15.6.0-21"
+msgstr "15.6.0-21"
msgid "16.0.0"
msgstr "16.0.0"
@@ -281,8 +281,8 @@ msgstr "16.4.1"
msgid "16.4.2"
msgstr "16.4.2"
-msgid "16.4.2-13"
-msgstr "16.4.2-13"
+msgid "16.4.2-15"
+msgstr "16.4.2-15"
msgid "17.0.0"
msgstr "17.0.0"
@@ -302,8 +302,8 @@ msgstr "17.3.0"
msgid "17.4.0"
msgstr "17.4.0"
-msgid "17.4.0-8"
-msgstr "17.4.0-8"
+msgid "17.4.0-9"
+msgstr "17.4.0-9"
msgid "18.0.0"
msgstr "18.0.0"
@@ -317,6 +317,9 @@ msgstr "18.2.0"
msgid "18.2.1"
msgstr "18.2.1"
+msgid "18.2.1-10"
+msgstr "18.2.1-10"
+
msgid "19.0.0"
msgstr "19.0.0"
@@ -329,8 +332,8 @@ msgstr "19.1.1"
msgid "19.2.0"
msgstr "19.2.0"
-msgid "19.2.0-2"
-msgstr "19.2.0-2"
+msgid "19.3.0"
+msgstr "19.3.0"
msgid ""
"2. The FlexGroup pool has a different view of aggregate capabilites, "
@@ -355,14 +358,26 @@ msgstr "20.0.1"
msgid "20.1.0"
msgstr "20.1.0"
+msgid "20.2.0"
+msgstr "20.2.0"
+
+msgid "2023.1 Series Release Notes"
+msgstr "2023.1 Series Release Notes"
+
msgid "21.0.0"
msgstr "21.0.0"
msgid "21.1.0"
msgstr "21.1.0"
-msgid "21.1.0-1"
-msgstr "21.1.0-1"
+msgid "21.2.0"
+msgstr "21.2.0"
+
+msgid "22.0.0"
+msgstr "22.0.0"
+
+msgid "22.0.0.0rc1-46"
+msgstr "22.0.0.0rc1-46"
msgid ""
"3. The ``utilization`` capability is not calculated to FlexGroup pools, it "
@@ -1088,9 +1103,27 @@ msgstr "Added Datera Multi-Tenancy Support."
msgid "Added Datera Template Support."
msgstr "Added Datera Template Support."
+msgid ""
+"Added Features like Trisync replication support for Pure driver, volume "
+"group snapshot support for IBM SVF driver, Unisphere 10 support for Dell EMC "
+"PowerMax driver, Host assisted migration and retype support for Hitachi VSP "
+"driver."
+msgstr ""
+"Added Features like Trisync replication support for Pure driver, volume "
+"group snapshot support for IBM SVF driver, Unisphere 10 support for Dell EMC "
+"PowerMax driver, Host assisted migration and retype support for Hitachi VSP "
+"driver."
+
msgid "Added HA support for NexentaEdge iSCSI driver"
msgstr "Added HA support for NexentaEdge iSCSI driver"
+msgid ""
+"Added HPE XP iSCSI and FC, Fungible NVMe-TCP, NetApp NVMe-TCP storage "
+"drivers."
+msgstr ""
+"Added HPE XP iSCSI and FC, Fungible NVMe-TCP, NetApp NVMe-TCP storage "
+"drivers."
+
msgid "Added ISCSI based driver for Veritas Access."
msgstr "Added iSCSI based driver for Veritas Access."
@@ -1715,6 +1748,9 @@ msgstr "Added multiple management IP support to Storwize SVC driver."
msgid "Added multiple pools support to Storwize SVC driver."
msgstr "Added multiple pools support to Storwize SVC driver."
+msgid "Added netapp copyoffload provider location."
+msgstr "Added NetApp copyoffload provider location."
+
msgid ""
"Added new APIs on microversion 3.32 to support dynamically changing log "
"levels in Cinder services without restart as well as retrieving current log "
@@ -2553,6 +2589,15 @@ msgstr ""
"volumes to be provisioned at 'thick' even if user had specified 'thin'."
msgid ""
+"An example of the SQL commands to generate these indexes can be found in the "
+"`specific troubleshooting guide <htts://docs.openstack.org/cinder/latest/"
+"admin/ts-db-cpu-spikes.html>`_."
+msgstr ""
+"An example of the SQL commands to generate these indexes can be found in the "
+"`specific troubleshooting guide <https://docs.openstack.org/cinder/latest/"
+"admin/ts-db-cpu-spikes.html>`_."
+
+msgid ""
"An incorrect lock in the remotefs code, which is used for the NFS driver, "
"and other similar drivers, resulted in concurrent clone volume operations "
"failing. create_cloned_volume now locks on the source volume id, meaning "
@@ -3086,6 +3131,15 @@ msgid "Configrable migration rate in VNX driver via metadata"
msgstr "Configurable migration rate in VNX driver via metadata"
msgid ""
+"Configuration option ``iscsi_secondary_ip_addresses`` is deprecated in favor "
+"of ``target_secondary_ip_addresses`` to follow the same naming convention of "
+"``target_ip_address``."
+msgstr ""
+"Configuration option ``iscsi_secondary_ip_addresses`` is deprecated in "
+"favour of ``target_secondary_ip_addresses`` to follow the same naming "
+"convention of ``target_ip_address``."
+
+msgid ""
"Configuration options for the DRBD driver that will be applied to DRBD "
"resources; the default values should be okay for most installations."
msgstr ""
@@ -3620,6 +3674,22 @@ msgstr ""
msgid "Dell EMC XtremIO driver has added multiattach support."
msgstr "Dell EMC XtremIO driver has added multiattach support."
+msgid ""
+"Dell PowerFlex driver `bug #1998136 <https://bugs.launchpad.net/cinder/"
+"+bug/1998136>`_: When using self signed certificates, the option sent to os-"
+"brick via the connection_properties was not correctly handled. It has now "
+"been fixed by adding the 'verify_certificate' and 'certificate_path' to the "
+"driver when initializing the connection."
+msgstr ""
+"Dell PowerFlex driver `bug #1998136 <https://bugs.launchpad.net/cinder/"
+"+bug/1998136>`_: When using self-signed certificates, the option sent to os-"
+"brick via the connection_properties was not correctly handled. It has now "
+"been fixed by adding the 'verify_certificate' and 'certificate_path' to the "
+"driver when initialising the connection."
+
+msgid "Dell PowerMax driver now supports Unisphere for PowerMax 10.0"
+msgstr "Dell PowerMax driver now supports Unisphere for PowerMax 10.0"
+
msgid "Dell PowerStore driver: Added NVMe-TCP support."
msgstr "Dell PowerStore driver: Added NVMe-TCP support."
@@ -3904,6 +3974,9 @@ msgstr ""
"be used in production until it has been completed and the appropriate "
"release note has been issued stating its readiness for production."
+msgid "Examples:"
+msgstr "Examples:"
+
msgid ""
"Existing vxFlex OS configuration options, whose usage was DEPRECATED in the "
"Stein release, will no longer be recognized in this release. Thus all driver "
@@ -3936,6 +4009,15 @@ msgstr ""
"Extra spec ``RESKEY:availability_zones`` will only be used for filtering "
"backends when creating and retyping volumes."
+msgid "FC driver: 64"
+msgstr "FC driver: 64"
+
+msgid "FC driver: ``HBSD-{host}-{wwn}``"
+msgstr "FC driver: ``HBSD-{host}-{wwn}``"
+
+msgid "FC driver: ``{wwn}``"
+msgstr "FC driver: ``{wwn}``"
+
msgid "FalconStor FSS"
msgstr "FalconStor FSS"
@@ -4745,6 +4827,32 @@ msgstr ""
"environment."
msgid ""
+"HPE 3PAR driver `Bug #1994521 <https://bugs.launchpad.net/cinder/"
+"+bug/1994521>`_: Fixed: While performing a delete snapshot (s1) operation, "
+"the volumes (v2) dependent on the snapshot (s1) are converted to base "
+"volumes. This operation fails if these dependent volumes (v2) have their own "
+"dependent snapshots (s2). The errors during the failure were vague and not "
+"helpful. With this release, we added conditions to fail this operation early "
+"and also added useful error message."
+msgstr ""
+"HPE 3PAR driver `Bug #1994521 <https://bugs.launchpad.net/cinder/"
+"+bug/1994521>`_: Fixed: While performing a delete snapshot (s1) operation, "
+"the volumes (v2) dependent on the snapshot (s1) are converted to base "
+"volumes. This operation fails if these dependent volumes (v2) have their own "
+"dependent snapshots (s2). The errors during the failure were vague and not "
+"helpful. With this release, we added conditions to fail this operation early "
+"and also added a useful error message."
+
+msgid ""
+"HPE 3PAR driver `bug #2008931 <https://bugs.launchpad.net/cinder/"
+"+bug/2008931>`_: Fixed issue when performing migrate volume operation when "
+"`comment` attribute is missing from the volume."
+msgstr ""
+"HPE 3PAR driver `bug #2008931 <https://bugs.launchpad.net/cinder/"
+"+bug/2008931>`_: Fixed issue when performing migrate volume operation when "
+"`comment` attribute is missing from the volume."
+
+msgid ""
"HPE 3PAR driver adds following functionalities Creating thin/dedup "
"compresssed volume. Retype for tpvv/tdvv volumes to be compressed. Migration "
"of compressed volumes. Create compressed volume from compressed volume/"
@@ -4877,6 +4985,13 @@ msgstr ""
"correctly."
msgid ""
+"Hitachi driver: Add a config option ``hitachi_group_name_format`` for "
+"hostgroup name format."
+msgstr ""
+"Hitachi driver: Add a config option ``hitachi_group_name_format`` for "
+"hostgroup name format."
+
+msgid ""
"Hitachi driver: Support AIX as host OS type. When running ``cinder "
"attachment-create`` command with the option ``--ostype aix``, ``AIX`` is set "
"as host OS type."
@@ -4885,6 +5000,23 @@ msgstr ""
"attachment-create`` command with the option ``--ostype aix``, ``AIX`` is set "
"as host OS type."
+msgid ""
+"Hitachi driver: Support Global-Active Device (GAD) volume. GAD is a one of "
+"Hitachi storage fucntion uses volume replication to provide a high-"
+"availability environment for hosts across storage systems and sites. New "
+"properties will be added in configuration. ``hbsd:topology`` sets to "
+"``active_active_mirror_volumex`` would specify a GAD volume. "
+"``hitachi_mirror_xxx`` parameters would specify a secondary storage for GAD "
+"volume."
+msgstr ""
+"Hitachi driver: Support Global-Active Device (GAD) volume. GAD is one of "
+"Hitachi's storage functions that uses volume replication to provide a high-"
+"availability environment for hosts across storage systems and sites. New "
+"properties will be added in the configuration. ``hbsd:topology`` sets to "
+"``active_active_mirror_volumex`` would specify a GAD volume. "
+"``hitachi_mirror_xxx`` parameters would specify a secondary storage for GAD "
+"volume."
+
msgid "Hitachi driver: Add Cinder generic volume groups."
msgstr "Hitachi driver: Add Cinder generic volume groups."
@@ -4924,6 +5056,40 @@ msgstr ""
"parameters are used to create LUN paths during volume attach operations for "
"each volume type."
+msgid ""
+"Hitachi driver: Additionally support following storages, Hitachi VSP E590, "
+"Hitachi VSP E790 and Hitachi VSP E1090."
+msgstr ""
+"Hitachi driver: Additionally supports the following storage, Hitachi VSP "
+"E590, Hitachi VSP E790 and Hitachi VSP E1090."
+
+msgid ""
+"Hitachi driver: Support data deduplication and compression, by storage "
+"assist. The feature can be worked, if user enable deduplication and "
+"compression for the DP-pool, by Configuration Manager REST API, and set the "
+"extra spec ``hbsd:capacity_saving`` to ``deduplication_compression``"
+msgstr ""
+"Hitachi driver: Support data deduplication and compression, by storage "
+"assist. The feature can be worked, if the user enables deduplication and "
+"compression for the DP-pool, by Configuration Manager REST API, and set the "
+"extra spec ``hbsd:capacity_saving`` to ``deduplication_compression``"
+
+msgid ""
+"Hitachi driver: Update retype to different pool and support storage assisted "
+"migration. Storage assisted migration feature is also used when retype a "
+"volume, which doesn't have any snapshots, to different pool."
+msgstr ""
+"Hitachi driver: Update retype to a different pool and support storage-"
+"assisted migration. Storage-assisted migration feature is also used when "
+"retyping a volume, which doesn't have any snapshots, to a different pool."
+
+msgid ""
+"Hitachi, NEC V, HPE XP drivers `bug #2004140 <https://bugs.launchpad.net/"
+"cinder/+bug/2004140>`_: Fixed ``KeyError`` when a backend is down."
+msgstr ""
+"Hitachi, NEC V, HPE XP drivers `bug #2004140 <https://bugs.launchpad.net/"
+"cinder/+bug/2004140>`_: Fixed ``KeyError`` when a backend is down."
+
msgid "Huawei Cinder Driver Support Dorado V6 Storage.(iSCSI, FC)"
msgstr "Huawei Cinder Driver Support Dorado V6 Storage.(iSCSI, FC)"
@@ -5185,6 +5351,22 @@ msgstr ""
"volume-type with different mirror pool"
msgid ""
+"IBM Spectrum Virtualize Family driver: Added `--delete-volumes` flag support "
+"for delete volumegroup operation. After adding support, the volumes can "
+"optionally be deleted when the volume group is deleted."
+msgstr ""
+"IBM Spectrum Virtualise Family driver: Added `--delete-volumes` flag support "
+"for delete volume group operation. After adding support, the volumes can "
+"optionally be deleted when the volume group is deleted."
+
+msgid ""
+"IBM Spectrum Virtualize Family driver: Added `storwize_volume_group` "
+"parameter in the cinder configuration to support volume group feature."
+msgstr ""
+"IBM Spectrum Virtualise Family driver: Added `storwize_volume_group` "
+"parameter in the Cinder configuration to support the volume group feature."
+
+msgid ""
"IBM Spectrum Virtualize Family driver: Added fucntionality that returns "
"throttle rate of maximum IOPS and bandwidth of all VDisks of a specified "
"storage pool."
@@ -5541,6 +5723,20 @@ msgstr ""
"volume-type) will result in a HTTP 500 response."
msgid ""
+"If the length of the name after variable replacement exceeds the maximum "
+"length of host group (iSCSI target) names, the host name is truncated so "
+"that the length of the host groups or iSCSI targets do not exceed the "
+"maximum length."
+msgstr ""
+"If the length of the name after variable replacement exceeds the maximum "
+"length of host group (iSCSI target) names, the hostname is truncated so that "
+"the length of the host groups or iSCSI targets does not exceed the maximum "
+"length."
+
+msgid "If the specified value includes ``{host}``, the following rules apply:"
+msgstr "If the specified value includes ``{host}``, the following rules apply:"
+
+msgid ""
"If using the NetApp ONTAP drivers (7mode/cmode), the configuration value for "
"\"max_over_subscription_ratio\" may need to be increased to avoid scheduling "
"problems where storage pools that previously were valid to schedule new "
@@ -5637,6 +5833,15 @@ msgstr ""
"migrations."
msgid ""
+"If you specify this parameter, it is recommended that you specify ``True`` "
+"for the ``hitachi_group_create`` parameter to collect necessary information "
+"automatically."
+msgstr ""
+"If you specify this parameter, it is recommended that you specify ``True`` "
+"for the ``hitachi_group_create`` parameter to collect necessary information "
+"automatically."
+
+msgid ""
"If your deployment uses ``storage_protocol`` to differentiate between "
"backends that use the same protocol but report it using different variants, "
"be aware that they will no longer be differentiated."
@@ -5803,6 +6008,13 @@ msgstr ""
"python-openstackclient/latest/cli/decoder.html>`_."
msgid ""
+"In the length calculation, use the following values as the length of each "
+"variable:"
+msgstr ""
+"In the length calculation, use the following values as the length of each "
+"variable:"
+
+msgid ""
"In this release, sending ``os-reset_status`` notifications to the following "
"*nonstandard* publisher_ids is DEPRECATED:"
msgstr ""
@@ -6147,6 +6359,15 @@ msgstr ""
"+bug/1964394>`_: Fixed annoying kernel log message when exporting a volume."
msgid ""
+"LVM nvmet target: Added support for new nvmeof connection properties format "
+"(version 2). Controlled with ``nvmeof_conn_info_version`` configuration "
+"option."
+msgstr ""
+"LVM nvmet target: Added support for new nvmeof connection properties format "
+"(version 2). Controlled with ``nvmeof_conn_info_version`` configuration "
+"option."
+
+msgid ""
"Lenovo driver: Return additional configuration options from "
"``get_driver_options`` call"
msgstr ""
@@ -7793,6 +8014,10 @@ msgstr ""
msgid "Pure Storage FlashArray driver has added multiatach support."
msgstr "Pure Storage FlashArray driver has added multiattach support."
+msgid "Pure Storage FlashArray driver: Added support NVMe-TCP transport layer."
+msgstr ""
+"Pure Storage FlashArray driver: Added support for the NVMe-TCP transport "
+"layer."
+
msgid ""
"Pure Storage FlashArray driver: Enabled support for Active/Active "
"replication for the FlashArray driver. This allows users to configure "
@@ -7893,6 +8118,17 @@ msgstr ""
"for FC-based hosts"
msgid ""
+"Pure Storage driver: Added replication capability to backend pool "
+"information. Response will be ```async```, ```sync``` or```trisync```. "
+"```sync``` implies support for ```async``` and ```trisync``` implies support "
+"for ```sync``` and ```async```."
+msgstr ""
+"Pure Storage driver: Added replication capability to backend pool "
+"information. Response will be ```async```, ```sync``` or ```trisync```. "
+"```sync``` implies support for ```async``` and ```trisync``` implies support "
+"for ```sync``` and ```async```."
+
+msgid ""
"Pure Storage driver: Added support for 3-site replication, aka trisync. "
"Requires two replication devices to be created, one async and one sync, plus "
"the addition of new parameters ``pure_trisync_enabled`` and "
@@ -8077,6 +8313,19 @@ msgstr ""
"image."
msgid ""
+"RBD driver `bug #1960206 <https://bugs.launchpad.net/cinder/+bug/1960206>`_: "
+"Fixed ``total_capacity`` reported by the driver to the scheduler on Ceph "
+"clusters that have renamed the ``bytes_used`` field to ``stored``. (e.g., "
+"`Nautilus <https://docs.ceph.com/en/nautilus/releases/nautilus/#upgrade-"
+"compatibility-notes>`_)."
+msgstr ""
+"RBD driver `bug #1960206 <https://bugs.launchpad.net/cinder/+bug/1960206>`_: "
+"Fixed ``total_capacity`` reported by the driver to the scheduler on Ceph "
+"clusters that have renamed the ``bytes_used`` field to ``stored``. (e.g., "
+"`Nautilus <https://docs.ceph.com/en/nautilus/releases/nautilus/#upgrade-"
+"compatibility-notes>`_)."
+
+msgid ""
"RBD driver can have bottlenecks if too many slow operations are happening at "
"the same time (for example many huge volume deletions), we can now use the "
"`backend_native_threads_pool_size` option in the RBD driver section to "
@@ -8291,6 +8540,25 @@ msgstr ""
"storwize_svc_iscsi.StorwizeSVCISCSIDriver"
msgid ""
+"Removed the ability to create multiattach volumes by specifying "
+"`multiattach` parameter in the request body of a volume create operation. "
+"This functionality is unsafe, can lead to data loss, and has been deprecated "
+"since the Queens release. The recommended method for creating a multiattach "
+"volume is to use a volume type that supports multiattach. By default, "
+"volume types can only be created by the operator. Users who have a need for "
+"multiattach volumes should contact their operator if a suitable volume type "
+"is not available."
+msgstr ""
+"Removed the ability to create multiattach volumes by specifying "
+"`multiattach` parameter in the request body of a volume create operation. "
+"This functionality is unsafe, can lead to data loss, and has been deprecated "
+"since the Queens release. The recommended method for creating a multiattach "
+"volume is to use a volume type that supports multiattach. By default, "
+"volume types can only be created by the operator. Users who have a need for "
+"multiattach volumes should contact their operator if a suitable volume type "
+"is not available."
+
+msgid ""
"Removed the ability to create volumes in a ScaleIO Storage Pool that has "
"zero-padding disabled. A new configuration option "
"``sio_allow_non_padded_volumes`` has been added to override this new "
@@ -8882,8 +9150,8 @@ msgstr ""
msgid "Supported ``project_id`` admin filters to limits API."
msgstr "Supported ``project_id`` admin filters to limits API."
-msgid "Supported multi-pool for Hitachi driver and OEM storage driver."
-msgstr "Supported multi-pool for Hitachi driver and OEM storage driver."
+msgid "Supported multi-pools for Hitachi driver and OEM storage drivers."
+msgstr "Supported multi-pools for Hitachi driver and OEM storage drivers."
msgid ""
"Swift backup driver: Added new configuration option "
@@ -10795,6 +11063,19 @@ msgstr ""
"successfully only when it returns exit status 0."
msgid ""
+"The ``cinder-manage db sync`` command for this verison of cinder will add "
+"additional database indexes. Depending on database size and complexity, "
+"this will take time to complete for every single index to be created. On "
+"MySQL or MariaDB, these indexes will only be created if an index does not "
+"already exist with the same name:"
+msgstr ""
+"The ``cinder-manage db sync`` command for this version of cinder will add "
+"additional database indexes. Depending on database size and complexity, "
+"this will take time to complete for every single index to be created. On "
+"MySQL or MariaDB, these indexes will only be created if an index does not "
+"already exist with the same name:"
+
+msgid ""
"The ``cinder.quota.NestedDbQuotaDriver`` quota driver for handling nested "
"projects is now deprecated. There is an OpenStack-wide effort to move to "
"\"unified limits\" that will require changes in how quotas are handled for "
@@ -11482,6 +11763,9 @@ msgstr ""
"in specific circumstances, an operator may need to take some actions outside "
"the normal upgrade process). See the \"Upgrade Notes\" for more information."
+msgid "The maximum length of a specified value is as follows:"
+msgstr "The maximum length of a specified value is as follows:"
+
msgid ""
"The multiattach capability has been enabled and verified as working with the "
"ScaleIO driver. It is the user's responsibility to add some type of "
@@ -11680,6 +11964,12 @@ msgstr ""
"The sample file is YAML (because unlike JSON, YAML allows comments). If you "
"prefer, you may use a JSON policy file."
+msgid "The specified value must include the following variables:"
+msgstr "The specified value must include the following variables:"
+
+msgid "The specified value must start with ``HBSD-``."
+msgstr "The specified value must start with ``HBSD-``."
+
msgid ""
"The storage protocol reporting via the REST API will be now the same for "
"them all, using the preferred naming, FC, NVMe-oF, iSCSI, NFS..."
@@ -11901,6 +12191,9 @@ msgstr ""
"rootwrap/privsep update that could break compatibility when trying to do "
"rolling upgrades of the volume service."
+msgid "This is replaced with the host name of the connecting node."
+msgstr "This is replaced with the host name of the connecting node."
+
msgid ""
"This migration requires the existence of a ``__DEFAULT__`` volume type. If "
"you have renamed (or renamed and deleted) the ``__DEFAULT__`` volume type in "
@@ -12100,6 +12393,13 @@ msgstr ""
"bugs.launchpad.net/cinder/+bug/1823200>`_ in the previous os-brick release."
msgid ""
+"This requires that ``nvmeof_conn_info_version`` configuration option is set "
+"to ``2`` as well."
+msgstr ""
+"This requires that ``nvmeof_conn_info_version`` configuration option is set "
+"to ``2`` as well."
+
+msgid ""
"This setting is *not recommended* by the Cinder project team, as it may "
"allow end users to put a group snapshot into an invalid status with "
"indeterminate consequences."
@@ -12320,6 +12620,13 @@ msgstr ""
"conversion is requested."
msgid ""
+"Usable characters are alphanumerics, \".\", \"@\", \"_\", \":\", \"-\", "
+"\"{\" and \"}\". \"{\" and \"}\" can be used only in variables."
+msgstr ""
+"Usable characters are alphanumerics, \".\", \"@\", \"_\", \":\", \"-\", "
+"\"{\" and \"}\". \"{\" and \"}\" can be used only in variables."
+
+msgid ""
"Use of JSON formatted policy files was deprecated by the ``oslo.policy`` "
"library during the Victoria development cycle. As a result, this deprecation "
"is being noted in the Wallaby cycle with an anticipated future removal of "
@@ -12753,6 +13060,15 @@ msgstr ""
"port."
msgid ""
+"Welcome to the 2023.1 (Antelope) release of the OpenStack Block Storage "
+"service (cinder). With this release, we added several drivers and driver "
+"features as follows:"
+msgstr ""
+"Welcome to the 2023.1 (Antelope) release of the OpenStack Block Storage "
+"service (Cinder). With this release, we added several drivers and driver "
+"features as follows:"
+
+msgid ""
"Welcome to the Ussuri release of the OpenStack Block Storage service "
"(cinder). The cinder team would like to bring the following points to your "
"attention. Details may be found below."
@@ -13020,6 +13336,13 @@ msgstr ""
"which will reduce the load on the Ceph cluster and the volume service."
msgid ""
+"When using this option, users can specify the name format of host groups or "
+"iSCSI targets. Rules of the format:"
+msgstr ""
+"When using this option, users can specify the name format of host groups or "
+"iSCSI targets. Rules of the format:"
+
+msgid ""
"While configuring NetApp cDOT back ends, new configuration options "
"('replication_device' and 'netapp_replication_aggregate_map') must be added "
"in order to use the host-level failover feature."
@@ -13119,6 +13442,12 @@ msgstr "Yadro Tatlin Unified: Added initial version of the iSCSI driver."
msgid "Yoga Series Release Notes"
msgstr "Yoga Series Release Notes"
+msgid "You can use each variable in the specified value no more than once."
+msgstr "You can use each variable in the specified value no more than once."
+
+msgid "You can use the following variables:"
+msgstr "You can use the following variables:"
+
msgid "You upgraded to Train from Stein"
msgstr "You upgraded to Train from Stein"
@@ -14324,6 +14653,15 @@ msgstr ""
"key used on the new volume."
msgid ""
+"`Bug #1952443 <https://bugs.launchpad.net/cinder/+bug/1952443>`_: Improve "
+"performance for creating volume from image, listing volumes, snapshots, "
+"backups, groups, and group_snapshots."
+msgstr ""
+"`Bug #1952443 <https://bugs.launchpad.net/cinder/+bug/1952443>`_: Improve "
+"performance for creating volume from an image, listing volumes, snapshots, "
+"backups, groups, and group_snapshots."
+
+msgid ""
"`Bug #1952805 <https://bugs.launchpad.net/cinder/+bug/1952805>`_: Fixed the "
"cinder-backup posix driver's behavior with multiple backup hosts. Previously "
"cinder-backup would frequently schedule incremental backups on the wrong "
@@ -14341,9 +14679,6 @@ msgstr ""
"`Bug #1953168 <https://bugs.launchpad.net/cinder/+bug/1953168>`_: Fixed "
"missing parameter in the capacity filter log message."
-msgid "`Bug #1953168 <https://bugs.launchpad.net/cinder/+bug/1965952>`_:"
-msgstr "`Bug #1953168 <https://bugs.launchpad.net/cinder/+bug/1965952>`_:"
-
msgid ""
"`Bug #1960019 <https://bugs.launchpad.net/cinder/+bug/1960019>`_: Fixed "
"value of the x-openstack-request-id header when Cinder is using noauth."
@@ -14383,6 +14718,9 @@ msgstr ""
"issue where importing a backup record for a backup_id that currently existed "
"had the unfortunate side effect of deleting the existing backup record."
+msgid "`Bug #1965952 <https://bugs.launchpad.net/cinder/+bug/1965952>`_:"
+msgstr "`Bug #1965952 <https://bugs.launchpad.net/cinder/+bug/1965952>`_:"
+
msgid ""
"`Bug #1966103 <https://bugs.launchpad.net/cinder/+bug/1966103>`_: Fixed "
"inconsistent behavior of ``storage_protocol`` among different backends that "
@@ -14500,6 +14838,46 @@ msgstr ""
"'streamOptimized' and 'monolithicSparse' subformats."
msgid ""
+"`Bug #1997980 <https://bugs.launchpad.net/cinder/+bug/1997980>`_: RBD: Fixed "
+"failure to update rbd image features for multi-attach when features = 0."
+msgstr ""
+"`Bug #1997980 <https://bugs.launchpad.net/cinder/+bug/1997980>`_: RBD: Fixed "
+"failure to update RBD image features for multi-attach when features = 0."
+
+msgid ""
+"`Bug #2007615 <https://bugs.launchpad.net/cinder/+bug/2007615>`_: the "
+"restore operation of the Cinder backup service now restores into sparse "
+"volumes, if possible. So, operators no longer need more space than used "
+"previously when they restore from a disaster."
+msgstr ""
+"`Bug #2007615 <https://bugs.launchpad.net/cinder/+bug/2007615>`_: the "
+"restore operation of the Cinder backup service now restores into sparse "
+"volumes, if possible. So, operators no longer need more space than used "
+"previously when they restore from a disaster."
+
+msgid ""
+"`Bug #2008017 <https://bugs.launchpad.net/cinder/+bug/2008017>`_: Fixed "
+"NetApp NFS driver to never spawn a native thread avoid thread starvation and "
+"other related issues."
+msgstr ""
+"`Bug #2008017 <https://bugs.launchpad.net/cinder/+bug/2008017>`_: Fixed "
+"NetApp NFS driver to never spawn a native thread to avoid thread starvation "
+"and other related issues."
+
+msgid ""
+"`Bug #2008259 <https://bugs.launchpad.net/cinder/+bug/2008259>`_: Fixed the "
+"volume create functionality where non-admin users were able to create "
+"multiattach volumes by providing the `multiattach` parameter in the request "
+"body. Now we can only create multiattach volumes using a multiattach volume "
+"type, which is also the recommended way."
+msgstr ""
+"`Bug #2008259 <https://bugs.launchpad.net/cinder/+bug/2008259>`_: Fixed the "
+"volume create functionality where non-admin users were able to create "
+"multiattach volumes by providing the `multiattach` parameter in the request "
+"body. Now we can only create multiattach volumes using a multiattach volume "
+"type, which is also the recommended way."
+
+msgid ""
"`Bug 1809249 <https://bugs.launchpad.net/cinder/+bug/1809249>`_ - 3PAR "
"driver adds the config option `hpe3par_target_nsp` that can be set to the "
"3PAR backend to use when multipath is not enabled and the Fibre Channel Zone "
@@ -14522,6 +14900,26 @@ msgstr ""
"cluster.mon_command()"
msgid ""
+"`Dell PowerMax Driver Bug #1981420 <https://bugs.launchpad.net/cinder/"
+"+bug/1981420>`_: Fixed issue faced while creating synchronous volume which "
+"was caused by incorrect handling of the force flag. This is corrected by "
+"checking volume type extra specs for the value of \"force_vol_edit\" "
+"parameter along with the \"force\" parameter."
+msgstr ""
+"`Dell PowerMax Driver Bug #1981420 <https://bugs.launchpad.net/cinder/"
+"+bug/1981420>`_: Fixed issue faced while creating synchronous volume which "
+"was caused by incorrect handling of the force flag. This is corrected by "
+"checking volume type extra specs for the value of \"force_vol_edit\" "
+"parameter along with the \"force\" parameter."
+
+msgid ""
+"`FC driver only.` This is replaced with the smallest WWPN of the WWPNs of "
+"the connecting node."
+msgstr ""
+"`FC driver only.` This is replaced with the smallest WWPN of the WWPNs of "
+"the connecting node."
+
+msgid ""
"`PowerMax Driver - Allowing for an empty group on a clone volume <https://"
"review.opendev.org/#/q/I8a39887a2eb1f0a21772525ca4b0d13ab07bd014>`_"
msgstr ""
@@ -14610,6 +15008,9 @@ msgstr ""
"``backup_driver_stats_polling_interval`` to be similar with volume drivers "
"configuration. Old option name support will be dropped in U release."
+msgid "``backups_deleted_project_id_idx``"
+msgstr "``backups_deleted_project_id_idx``"
+
msgid "``choice_client``"
msgstr "``choice_client``"
@@ -14664,6 +15065,12 @@ msgstr "``group:group_types_specs``"
msgid "``group:group_types_specs`` is replaced by:"
msgstr "``group:group_types_specs`` is replaced by:"
+msgid "``group_snapshots_deleted_project_id_idx``"
+msgstr "``group_snapshots_deleted_project_id_idx``"
+
+msgid "``groups_deleted_project_id_idx``"
+msgstr "``groups_deleted_project_id_idx``"
+
msgid ""
"``iscsi_ip_address``, ``iscsi_port``, ``target_helper``, "
"``iscsi_target_prefix`` and ``iscsi_protocol`` config options are deprecated "
@@ -14722,6 +15129,9 @@ msgstr "``scheduler_driver_init_wait_time``"
msgid "``snapshot_check_timeout`` to ``disco_snapshot_check_timeout``"
msgstr "``snapshot_check_timeout`` to ``disco_snapshot_check_timeout``"
+msgid "``snapshots_deleted_project_id_idx``"
+msgstr "``snapshots_deleted_project_id_idx``"
+
msgid ""
"``volume:reimage_reserved`` - users who satisfy this policy may re-image a "
"volume in status ``reserved``"
@@ -14799,11 +15209,35 @@ msgstr "``volume_extension:volume_type_encryption:update``"
msgid "``volume_name_prefix`` to ``disco_volume_name_prefix``"
msgstr "``volume_name_prefix`` to ``disco_volume_name_prefix``"
+msgid "``volumes_deleted_host_idx``"
+msgstr "``volumes_deleted_host_idx``"
+
+msgid "``volumes_deleted_project_id_idx``"
+msgstr "``volumes_deleted_project_id_idx``"
+
+msgid "``{host}``"
+msgstr "``{host}``"
+
+msgid "``{host}``: 1"
+msgstr "``{host}``: 1"
+
+msgid "``{ip}``"
+msgstr "``{ip}``"
+
+msgid "``{ip}``: 15"
+msgstr "``{ip}``: 15"
+
+msgid "``{wwn}``"
+msgstr "``{wwn}``"
+
+msgid "``{wwn}``: 16"
+msgstr "``{wwn}``: 16"
+
msgid ""
"`bug #2000724 <https://bugs.launchpad.net/cinder/+bug/2000724>`_: Handled "
-"the case when glance is calling online extend and externals events were "
-"being sent to nova. Now Cinder will only send external events when the "
-"volume, to be extended, is attached to a nova instance."
+"the case when glance is calling online extend and external events were being "
+"sent to nova. Now Cinder will only send external events when the volume, to "
+"be extended, is attached to a nova instance."
msgstr ""
"`bug #2000724 <https://bugs.launchpad.net/cinder/+bug/2000724>`_: Handled "
"the case when Glance is calling online extend and external events were being "
@@ -14811,14 +15245,25 @@ msgstr ""
"be extended, is attached to a Nova instance."
msgid ""
+"`iSCSI driver only.` This is replaced with the IP address of the connecting "
+"node."
+msgstr ""
+"`iSCSI driver only.` This is replaced with the IP address of the connecting "
+"node."
+
+msgid ""
"a [nova] section is added to configure the connection to the compute "
"service, which is needed to the InstanceLocalityFilter, for example."
msgstr ""
"a [nova] section is added to configure the connection to the compute "
"service, which is needed to the InstanceLocalityFilter, for example."
-msgid "add netapp copyoffload provider location"
-msgstr "add netapp copyoffload provider location"
+msgid ""
+"characters that are not permitted for this parameter, they are replaced with "
+"``_``."
+msgstr ""
+"characters that are not permitted for this parameter, they are replaced with "
+"``_``."
msgid ""
"cinder-backup service is now decoupled from cinder-volume, which allows more "
@@ -14884,6 +15329,15 @@ msgstr "http://cinderstats.ivehearditbothways.com/cireport.txt"
msgid "https://bugs.launchpad.net/os-brick/+bugs?field.tag=nvme"
msgstr "https://bugs.launchpad.net/os-brick/+bugs?field.tag=nvme"
+msgid "iSCSI driver: 32"
+msgstr "iSCSI driver: 32"
+
+msgid "iSCSI driver: ``HBSD-{host}-{ip}``"
+msgstr "iSCSI driver: ``HBSD-{host}-{ip}``"
+
+msgid "iSCSI driver: ``{ip}``"
+msgstr "iSCSI driver: ``{ip}``"
+
msgid ""
"if a ``snapshot_id`` is supplied in the request, the volume type is inferred "
"from the volume type associated with the snapshot"
@@ -14920,6 +15374,39 @@ msgid "nova-compute version - needs to be the latest for Pike."
msgstr "nova-compute version - needs to be the latest for Pike."
msgid ""
+"nvmeof target `bug #1966513 <https://bugs.launchpad.net/cinder/"
+"+bug/1966513>`_: Fixed LVM failing on terminate_connection if the connecting "
+"host doesn't have an iSCSI initiator name setup, for example if LVM is using "
+"the nvmet target."
+msgstr ""
+"nvmeof target `bug #1966513 <https://bugs.launchpad.net/cinder/"
+"+bug/1966513>`_: Fixed LVM failing on terminate_connection if the connecting "
+"host doesn't have an iSCSI initiator name setup, for example if LVM is using "
+"the nvmet target."
+
+msgid ""
+"nvmet target driver: Added support for shared subsystems/targets using the "
+"``lvm_share_target`` configuration option. Defaults to non shared, e.g., "
+"each volume has its own subsystem/target."
+msgstr ""
+"nvmet target driver: Added support for shared subsystems/targets using the "
+"``lvm_share_target`` configuration option. Defaults to non shared, e.g., "
+"each volume has its own subsystem/target."
+
+msgid ""
+"nvmet target driver: Added support to serve volumes on multiple addresses "
+"using the ``target_secondary_ip_addresses`` configuration option. This "
+"allows os-brick to iterate through them in search of one connection that "
+"works, and once os-brick supports NVMe-oF multipathing it will be "
+"automatically supported."
+msgstr ""
+"nvmet target driver: Added support to serve volumes on multiple addresses "
+"using the ``target_secondary_ip_addresses`` configuration option. This "
+"allows os-brick to iterate through them in search of one connection that "
+"works, and once os-brick supports NVMe-oF multipathing it will be "
+"automatically supported."
+
+msgid ""
"only iscsi and fibre channel volume types are supported on the nova side "
"currently."
msgstr ""
diff --git a/requirements.txt b/requirements.txt
index a70876429..d9d68f928 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -40,7 +40,7 @@ pyparsing>=2.4.7 # MIT
python-barbicanclient>=5.0.1 # Apache-2.0
python-glanceclient>=3.2.2 # Apache-2.0
python-keystoneclient>=4.1.1 # Apache-2.0
-python-novaclient>=17.2.1 # Apache-2.0
+python-novaclient>=18.2.0 # Apache-2.0
python-swiftclient>=3.10.1 # Apache-2.0
pytz>=2020.1 # MIT
requests>=2.25.1 # Apache-2.0
diff --git a/test-requirements.txt b/test-requirements.txt
index 0097afc52..64762684e 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -18,5 +18,5 @@ SQLAlchemy-Utils>=0.37.8 # BSD License
testtools>=2.4.0 # MIT
doc8>=0.8.1 # Apache-2.0
-mypy>=1.0 # MIT
+mypy>=1.2.0 # MIT
moto>=2.2.5 # Apache-2.0
diff --git a/tools/test-setup.sh b/tools/test-setup.sh
index 5b986ced3..fced9be5e 100755
--- a/tools/test-setup.sh
+++ b/tools/test-setup.sh
@@ -15,6 +15,47 @@ DB_ROOT_PW=${MYSQL_ROOT_PW:-insecure_slave}
DB_USER=openstack_citest
DB_PW=openstack_citest
+function is_rhel7 {
+ [ -f /usr/bin/yum ] && \
+ cat /etc/*release | grep -q -e "Red Hat" -e "CentOS" -e "CloudLinux" && \
+ cat /etc/*release | grep -q 'release 7'
+}
+
+function is_rhel8 {
+ [ -f /usr/bin/dnf ] && \
+ cat /etc/*release | grep -q -e "Red Hat" -e "CentOS" -e "CloudLinux" && \
+ cat /etc/*release | grep -q 'release 8'
+}
+
+function is_rhel9 {
+ [ -f /usr/bin/dnf ] && \
+ cat /etc/*release | grep -q -e "Red Hat" -e "CentOS" -e "CloudLinux" && \
+ cat /etc/*release | grep -q 'release 9'
+}
+
+function set_conf_line { # file regex value
+ sudo sh -c "grep -q -e '$2' $1 && \
+ sed -i 's|$2|$3|g' $1 || \
+ echo '$3' >> $1"
+}
+
+if is_rhel7 || is_rhel8 || is_rhel9; then
+ # mysql needs to be started on centos/rhel
+ sudo systemctl restart mariadb.service
+
+ # postgres setup for centos
+ sudo postgresql-setup --initdb
+ PG_CONF=/var/lib/pgsql/data/postgresql.conf
+ set_conf_line $PG_CONF '^password_encryption =.*' 'password_encryption = scram-sha-256'
+
+ PG_HBA=/var/lib/pgsql/data/pg_hba.conf
+ set_conf_line $PG_HBA '^local[ \t]*all[ \t]*all.*' 'local all all peer'
+ set_conf_line $PG_HBA '^host[ \t]*all[ \t]*all[ \t]*127.0.0.1\/32.*' 'host all all 127.0.0.1/32 scram-sha-256'
+ set_conf_line $PG_HBA '^host[ \t]*all[ \t]*all[ \t]*::1\/128.*' 'host all all ::1/128 scram-sha-256'
+
+ sudo systemctl restart postgresql.service
+fi
+
sudo -H mysqladmin -u root password $DB_ROOT_PW
# It's best practice to remove anonymous users from the database. If