-rw-r--r--  .zuul.yaml | 2
-rw-r--r--  doc/source/conf.py | 7
-rw-r--r--  glance_store/_drivers/cinder.py | 206
-rw-r--r--  glance_store/_drivers/s3.py | 2
-rw-r--r--  glance_store/_drivers/swift/connection_manager.py | 9
-rw-r--r--  glance_store/_drivers/vmware_datastore.py | 2
-rw-r--r--  glance_store/common/cinder_utils.py | 193
-rw-r--r--  glance_store/tests/functional/README.rst | 2
-rw-r--r--  glance_store/tests/unit/common/test_cinder_utils.py | 157
-rw-r--r--  glance_store/tests/unit/test_cinder_store.py | 121
-rw-r--r--  glance_store/tests/unit/test_connection_manager.py | 1
-rw-r--r--  glance_store/tests/unit/test_multistore_cinder.py | 131
-rw-r--r--  glance_store/tests/unit/test_multistore_s3.py | 64
-rw-r--r--  glance_store/tests/unit/test_opts.py | 2
-rw-r--r--  glance_store/tests/unit/test_s3_store.py | 55
-rw-r--r--  glance_store/tests/unit/test_swift_store.py | 25
-rw-r--r--  lower-constraints.txt | 87
-rw-r--r--  releasenotes/notes/cinder-nfs-block-qcow2-vol-4fed58b0afafc980.yaml | 6
-rw-r--r--  releasenotes/notes/support-cinder-user-domain-420c76053dd50534.yaml | 10
-rw-r--r--  setup.cfg | 2
-rw-r--r--  test-requirements.txt | 3
-rw-r--r--  tox.ini | 7
22 files changed, 850 insertions, 244 deletions
diff --git a/.zuul.yaml b/.zuul.yaml
index 2d45156..dca51df 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -156,7 +156,6 @@
templates:
- check-requirements
- lib-forward-testing-python3
- - openstack-lower-constraints-jobs
- openstack-python3-xena-jobs
- publish-openstack-docs-pti
- release-notes-jobs-python3
@@ -170,7 +169,6 @@
- ^releasenotes/.*$
- ^.*\.rst$
- ^(test-|)requirements.txt$
- - ^lower-constraints.txt$
- ^setup.cfg$
- ^tox.ini$
experimental:
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 582d001..e172918 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -95,4 +95,9 @@ latex_documents = [
# It would never happen in a real scenario as it is only imported
# from cinder store after the config are loaded but to handle doc
# failures, we mock it here.
-autodoc_mock_imports = ['glance_store.common.fs_mount']
\ No newline at end of file
+# The cinder_utils module imports external dependencies like
+# cinderclient and retrying which are not recognized by
+# autodoc, hence they are mocked here. These dependencies are installed
+# during an actual deployment and won't cause any issue during usage.
+autodoc_mock_imports = ['glance_store.common.fs_mount',
+ 'glance_store.common.cinder_utils']
diff --git a/glance_store/_drivers/cinder.py b/glance_store/_drivers/cinder.py
index 2db8e9b..d62feb3 100644
--- a/glance_store/_drivers/cinder.py
+++ b/glance_store/_drivers/cinder.py
@@ -25,11 +25,15 @@ import time
from keystoneauth1.access import service_catalog as keystone_sc
from keystoneauth1 import exceptions as keystone_exc
+from keystoneauth1 import identity as ksa_identity
+from keystoneauth1 import session as ksa_session
+from keystoneauth1 import token_endpoint as ksa_token_endpoint
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_utils import units
from glance_store import capabilities
+from glance_store.common import cinder_utils
from glance_store.common import utils
import glance_store.driver
from glance_store import exceptions
@@ -37,6 +41,7 @@ from glance_store.i18n import _, _LE, _LI, _LW
import glance_store.location
try:
+ from cinderclient import api_versions
from cinderclient import exceptions as cinder_exception
from cinderclient.v3 import client as cinderclient
from os_brick.initiator import connector
@@ -79,6 +84,8 @@ Related options:
* cinder_store_user_name
* cinder_store_project_name
* cinder_store_password
+ * cinder_store_project_domain_name
+ * cinder_store_user_domain_name
"""),
cfg.StrOpt('cinder_endpoint_template',
@@ -104,6 +111,8 @@ Related options:
* cinder_store_user_name
* cinder_store_project_name
* cinder_store_password
+ * cinder_store_project_domain_name
+ * cinder_store_user_domain_name
* cinder_catalog_info
"""),
@@ -215,6 +224,8 @@ Related options:
* cinder_store_user_name
* cinder_store_password
* cinder_store_project_name
+ * cinder_store_project_domain_name
+ * cinder_store_user_domain_name
"""),
cfg.StrOpt('cinder_store_user_name',
@@ -222,8 +233,9 @@ Related options:
help="""
User name to authenticate against cinder.
-This must be used with all the following related options. If any of these are
-not specified, the user of the current context is used.
+This must be used with all the following non-domain-related options.
+If any of these are not specified (except domain-related options),
+the user of the current context is used.
Possible values:
* A valid user name
@@ -232,14 +244,33 @@ Related options:
* cinder_store_auth_address
* cinder_store_password
* cinder_store_project_name
+ * cinder_store_project_domain_name
+ * cinder_store_user_domain_name
+
+"""),
+ cfg.StrOpt('cinder_store_user_domain_name',
+ default='Default',
+ help="""
+Domain of the user to authenticate against cinder.
+
+Possible values:
+ * A valid domain name for the user specified by ``cinder_store_user_name``
+
+Related options:
+ * cinder_store_auth_address
+ * cinder_store_password
+ * cinder_store_project_name
+ * cinder_store_project_domain_name
+ * cinder_store_user_name
"""),
cfg.StrOpt('cinder_store_password', secret=True,
help="""
Password for the user authenticating against cinder.
-This must be used with all the following related options. If any of these are
-not specified, the user of the current context is used.
+This must be used with all the following related options.
+If any of these are not specified (except domain-related options),
+the user of the current context is used.
Possible values:
* A valid password for the user specified by ``cinder_store_user_name``
@@ -248,6 +279,8 @@ Related options:
* cinder_store_auth_address
* cinder_store_user_name
* cinder_store_project_name
+ * cinder_store_project_domain_name
+ * cinder_store_user_domain_name
"""),
cfg.StrOpt('cinder_store_project_name',
@@ -258,8 +291,9 @@ Project name where the image volume is stored in cinder.
If this configuration option is not set, the project in current context is
used.
-This must be used with all the following related options. If any of these are
-not specified, the project of the current context is used.
+This must be used with all the following related options.
+If any of these are not specified (except domain-related options),
+the project of the current context is used.
Possible values:
* A valid project name
@@ -268,6 +302,25 @@ Related options:
* ``cinder_store_auth_address``
* ``cinder_store_user_name``
* ``cinder_store_password``
+ * ``cinder_store_project_domain_name``
+ * ``cinder_store_user_domain_name``
+
+"""),
+ cfg.StrOpt('cinder_store_project_domain_name',
+ default='Default',
+ help="""
+Domain of the project where the image volume is stored in cinder.
+
+Possible values:
+ * A valid domain name of the project specified by
+ ``cinder_store_project_name``
+
+Related options:
+ * ``cinder_store_auth_address``
+ * ``cinder_store_user_name``
+ * ``cinder_store_password``
+ * ``cinder_store_project_domain_name``
+ * ``cinder_store_user_domain_name``
"""),
cfg.StrOpt('rootwrap_config',
@@ -350,6 +403,34 @@ Possible values:
"""),
]
+CINDER_SESSION = None
+
+
+def _reset_cinder_session():
+ global CINDER_SESSION
+ CINDER_SESSION = None
+
+
+def get_cinder_session(conf):
+ global CINDER_SESSION
+ if not CINDER_SESSION:
+ auth = ksa_identity.V3Password(
+ password=conf.cinder_store_password,
+ username=conf.cinder_store_user_name,
+ user_domain_name=conf.cinder_store_user_domain_name,
+ project_name=conf.cinder_store_project_name,
+ project_domain_name=conf.cinder_store_project_domain_name,
+ auth_url=conf.cinder_store_auth_address
+ )
+ if conf.cinder_api_insecure:
+ verify = False
+ elif conf.cinder_ca_certificates_file:
+ verify = conf.cinder_ca_certificates_file
+ else:
+ verify = True
+ CINDER_SESSION = ksa_session.Session(auth=auth, verify=verify)
+ return CINDER_SESSION
+
class StoreLocation(glance_store.location.StoreLocation):
@@ -397,6 +478,7 @@ class Store(glance_store.driver.Store):
self.store_conf = getattr(self.conf, self.backend_group)
else:
self.store_conf = self.conf.glance_store
+ self.volume_api = cinder_utils.API()
def _set_url_prefix(self):
self._url_prefix = "cinder://"
@@ -466,7 +548,8 @@ class Store(glance_store.driver.Store):
for key in ['user_name', 'password',
'project_name', 'auth_address']])
- def get_cinderclient(self, context=None, legacy_update=False):
+ def get_cinderclient(self, context=None, legacy_update=False,
+ version='3.0'):
# NOTE: For legacy image update from single store to multiple
# stores we need to use admin context rather than user provided
# credentials
@@ -476,15 +559,18 @@ class Store(glance_store.driver.Store):
else:
user_overriden = self.is_user_overriden()
+ session = get_cinder_session(self.store_conf)
+
if user_overriden:
username = self.store_conf.cinder_store_user_name
- password = self.store_conf.cinder_store_password
- project = self.store_conf.cinder_store_project_name
url = self.store_conf.cinder_store_auth_address
+ # use auth that is already in the session
+ auth = None
else:
username = context.user_id
- password = context.auth_token
project = context.project_id
+ # noauth extracts user_id:project_id from auth_token
+ token = context.auth_token or '%s:%s' % (username, project)
if self.store_conf.cinder_endpoint_template:
template = self.store_conf.cinder_endpoint_template
@@ -504,23 +590,19 @@ class Store(glance_store.driver.Store):
reason = _("Failed to find Cinder from a service catalog.")
raise exceptions.BadStoreConfiguration(store_name="cinder",
reason=reason)
+ auth = ksa_token_endpoint.Token(endpoint=url, token=token)
+ api_version = api_versions.APIVersion(version)
c = cinderclient.Client(
- username, password, project, auth_url=url,
+ session=session, auth=auth,
region_name=self.store_conf.cinder_os_region_name,
- insecure=self.store_conf.cinder_api_insecure,
retries=self.store_conf.cinder_http_retries,
- cacert=self.store_conf.cinder_ca_certificates_file)
+ api_version=api_version)
LOG.debug(
'Cinderclient connection created for user %(user)s using URL: '
'%(url)s.', {'user': username, 'url': url})
- # noauth extracts user_id:project_id from auth_token
- if not user_overriden:
- c.client.auth_token = context.auth_token or '%s:%s' % (username,
- project)
- c.client.management_url = url
return c
@contextlib.contextmanager
@@ -612,52 +694,57 @@ class Store(glance_store.driver.Store):
use_multipath = self.store_conf.cinder_use_multipath
enforce_multipath = self.store_conf.cinder_enforce_multipath
mount_point_base = self.store_conf.cinder_mount_point_base
+ volume_id = volume.id
- properties = connector.get_connector_properties(
+ connector_prop = connector.get_connector_properties(
root_helper, host, use_multipath, enforce_multipath)
- try:
- volume.reserve(volume)
- except cinder_exception.ClientException as e:
- msg = (_('Failed to reserve volume %(volume_id)s: %(error)s')
- % {'volume_id': volume.id, 'error': e})
- LOG.error(msg)
- raise exceptions.BackendException(msg)
+ attachment = self.volume_api.attachment_create(client, volume_id,
+ mode=attach_mode)
+ attachment = self.volume_api.attachment_update(
+ client, attachment['id'], connector_prop,
+ mountpoint='glance_store')
+ self.volume_api.attachment_complete(client, attachment.id)
+ volume = volume.manager.get(volume_id)
+ connection_info = attachment.connection_info
try:
- connection_info = volume.initialize_connection(volume, properties)
conn = connector.InitiatorConnector.factory(
connection_info['driver_volume_type'], root_helper,
conn=connection_info, use_multipath=use_multipath)
if connection_info['driver_volume_type'] == 'nfs':
- if volume.encrypted:
- volume.unreserve(volume)
- volume.delete()
- msg = (_('Encrypted volume creation for cinder nfs is not '
- 'supported from glance_store. Failed to create '
- 'volume %(volume_id)s')
- % {'volume_id': volume.id})
+ # The format info of nfs volumes is only exposed via the
+ # attachment_get API and is not available in the connection info
+ # of the attachment object returned by attachment_update, so we
+ # need to do this extra call
+ vol_attachment = self.volume_api.attachment_get(
+ client, attachment.id)
+ if (volume.encrypted or
+ vol_attachment.connection_info['format'] == 'qcow2'):
+ issue_type = 'Encrypted' if volume.encrypted else 'qcow2'
+ msg = (_('%(issue_type)s volume creation for cinder nfs '
+ 'is not supported from glance_store. Failed to '
+ 'create volume %(volume_id)s')
+ % {'issue_type': issue_type,
+ 'volume_id': volume_id})
LOG.error(msg)
raise exceptions.BackendException(msg)
- @utils.synchronized(connection_info['data']['export'])
+ @utils.synchronized(connection_info['export'])
def connect_volume_nfs():
- data = connection_info['data']
- export = data['export']
- vol_name = data['name']
+ export = connection_info['export']
+ vol_name = connection_info['name']
mountpoint = self._get_mount_path(
export,
os.path.join(mount_point_base, 'nfs'))
- options = data['options']
+ options = connection_info['options']
self.mount.mount(
'nfs', export, vol_name, mountpoint, host,
root_helper, options)
return {'path': os.path.join(mountpoint, vol_name)}
device = connect_volume_nfs()
else:
- device = conn.connect_volume(connection_info['data'])
- volume.attach(None, 'glance_store', attach_mode, host_name=host)
- volume = self._wait_volume_status(volume, 'attaching', 'in-use')
+ device = conn.connect_volume(connection_info)
if (connection_info['driver_volume_type'] == 'rbd' and
not conn.do_local_attach):
yield device['path']
@@ -670,38 +757,23 @@ class Store(glance_store.driver.Store):
'%(volume_id)s.'), {'volume_id': volume.id})
raise
finally:
- if volume.status == 'in-use':
- volume.begin_detaching(volume)
- elif volume.status == 'attaching':
- volume.unreserve(volume)
-
if device:
try:
if connection_info['driver_volume_type'] == 'nfs':
- @utils.synchronized(connection_info['data']['export'])
+ @utils.synchronized(connection_info['export'])
def disconnect_volume_nfs():
path, vol_name = device['path'].rsplit('/', 1)
self.mount.umount(vol_name, path, host,
root_helper)
disconnect_volume_nfs()
else:
- conn.disconnect_volume(connection_info['data'], device)
+ conn.disconnect_volume(connection_info, device)
except Exception:
LOG.exception(_LE('Failed to disconnect volume '
'%(volume_id)s.'),
{'volume_id': volume.id})
- try:
- volume.terminate_connection(volume, properties)
- except Exception:
- LOG.exception(_LE('Failed to terminate connection of volume '
- '%(volume_id)s.'), {'volume_id': volume.id})
-
- try:
- client.volumes.detach(volume)
- except Exception:
- LOG.exception(_LE('Failed to detach volume %(volume_id)s.'),
- {'volume_id': volume.id})
+ self.volume_api.attachment_delete(client, attachment.id)
def _cinder_volume_data_iterator(self, client, volume, max_size, offset=0,
chunk_size=None, partial_length=None):
@@ -748,7 +820,7 @@ class Store(glance_store.driver.Store):
loc = location.store_location
self._check_context(context)
try:
- client = self.get_cinderclient(context)
+ client = self.get_cinderclient(context, version='3.54')
volume = client.volumes.get(loc.volume_id)
size = int(volume.metadata.get('image_size',
volume.size * units.Gi))
@@ -816,7 +888,7 @@ class Store(glance_store.driver.Store):
"""
self._check_context(context, require_tenant=True)
- client = self.get_cinderclient(context)
+ client = self.get_cinderclient(context, version='3.54')
os_hash_value = utils.get_hasher(hashing_algo, False)
checksum = utils.get_hasher('md5', False)
bytes_written = 0
@@ -838,9 +910,9 @@ class Store(glance_store.driver.Store):
"resize-before-write for each GB which "
"will be considerably slower than normal."))
try:
- volume = client.volumes.create(size_gb, name=name,
- metadata=metadata,
- volume_type=volume_type)
+ volume = self.volume_api.create(client, size_gb, name=name,
+ metadata=metadata,
+ volume_type=volume_type)
except cinder_exception.NotFound:
LOG.error(_LE("Invalid volume type %s configured. Please check "
"the `cinder_volume_type` configuration parameter."
@@ -949,9 +1021,9 @@ class Store(glance_store.driver.Store):
"""
loc = location.store_location
self._check_context(context)
+ client = self.get_cinderclient(context)
try:
- volume = self.get_cinderclient(context).volumes.get(loc.volume_id)
- volume.delete()
+ self.volume_api.delete(client, loc.volume_id)
except cinder_exception.NotFound:
raise exceptions.NotFound(image=loc.volume_id)
except cinder_exception.ClientException as e:
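
The cinder driver above now authenticates through a cached keystoneauth1 session instead of handing raw credentials to cinderclient. Below is a minimal sketch of how those pieces combine; the auth URL, endpoint, credentials and token are placeholders and not real configuration:

    from cinderclient import api_versions
    from cinderclient.v3 import client as cinderclient
    from keystoneauth1 import identity as ksa_identity
    from keystoneauth1 import session as ksa_session
    from keystoneauth1 import token_endpoint as ksa_token_endpoint

    # Internal-user case: a V3Password auth built from the cinder_store_*
    # options (including the new *_domain_name ones) lives on the session.
    session_auth = ksa_identity.V3Password(
        auth_url='http://keystone:5000/v3',    # cinder_store_auth_address
        username='glance',                     # cinder_store_user_name
        password='secret',                     # cinder_store_password
        project_name='service',                # cinder_store_project_name
        user_domain_name='Default',            # cinder_store_user_domain_name
        project_domain_name='Default')         # cinder_store_project_domain_name
    session = ksa_session.Session(auth=session_auth, verify=True)

    # End-user case: reuse the caller's token against the catalog endpoint,
    # overriding the session auth for this client only.
    request_auth = ksa_token_endpoint.Token(
        endpoint='http://cinder:8776/v3/fake_project', token='fake_token')

    client = cinderclient.Client(
        session=session, auth=request_auth,
        api_version=api_versions.APIVersion('3.54'))
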
diff --git a/glance_store/_drivers/s3.py b/glance_store/_drivers/s3.py
index 1c18531..6eeba86 100644
--- a/glance_store/_drivers/s3.py
+++ b/glance_store/_drivers/s3.py
@@ -361,7 +361,7 @@ class Store(glance_store.driver.Store):
EXAMPLE_URL = "s3://<ACCESS_KEY>:<SECRET_KEY>@<S3_URL>/<BUCKET>/<OBJ>"
READ_CHUNKSIZE = 64 * units.Ki
- WRITE_CHUNKSIZE = READ_CHUNKSIZE
+ WRITE_CHUNKSIZE = 5 * units.Mi
@staticmethod
def get_schemes():
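
The s3 driver's write buffer grows here from 64 KiB (READ_CHUNKSIZE) to 5 MiB. A minimal sketch of reading a stream at the new chunk size follows; the helper and the in-memory stream are illustrative only and are not the driver's internal code:

    import io

    from oslo_utils import units

    WRITE_CHUNKSIZE = 5 * units.Mi  # 5 MiB, per the change above

    def read_in_chunks(stream, chunk_size=WRITE_CHUNKSIZE):
        # Yield successive chunk_size pieces of a file-like object.
        while True:
            chunk = stream.read(chunk_size)
            if not chunk:
                break
            yield chunk

    # An 8 MiB image is bigger than WRITE_CHUNKSIZE but smaller than
    # s3_store_large_object_size, so it is still a single put_object,
    # as the updated tests below verify.
    body = b''.join(read_in_chunks(io.BytesIO(b'*' * (8 * units.Mi))))
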
diff --git a/glance_store/_drivers/swift/connection_manager.py b/glance_store/_drivers/swift/connection_manager.py
index 56fa295..0d9ab24 100644
--- a/glance_store/_drivers/swift/connection_manager.py
+++ b/glance_store/_drivers/swift/connection_manager.py
@@ -143,11 +143,16 @@ class SingleTenantConnectionManager(SwiftConnectionManager):
def _get_storage_url(self):
"""Get swift endpoint from keystone
- Return endpoint for swift from service catalog. The method works only
- Keystone v3. If you are using different version (1 or 2)
+ Return the endpoint for swift from the service catalog if not
+ overridden in the store configuration. The method works only with
+ Keystone v3. If you are using a different version (1 or 2)
it returns None.
:return: swift endpoint
"""
+
+ if self.store.conf_endpoint:
+ return self.store.conf_endpoint
+
if self.store.auth_version == '3':
try:
return self.client.session.get_endpoint(
diff --git a/glance_store/_drivers/vmware_datastore.py b/glance_store/_drivers/vmware_datastore.py
index 52ec77a..784f157 100644
--- a/glance_store/_drivers/vmware_datastore.py
+++ b/glance_store/_drivers/vmware_datastore.py
@@ -564,7 +564,7 @@ class Store(glance_store.Store):
"""Build ESX host session cookie header."""
if verify_session and not self.session.is_current_session_active():
self.reset_session()
- vim_cookies = self.session.vim.client.options.transport.cookiejar
+ vim_cookies = self.session.vim.client.cookiejar
if len(list(vim_cookies)) > 0:
cookie = list(vim_cookies)[0]
return cookie.name + '=' + cookie.value
diff --git a/glance_store/common/cinder_utils.py b/glance_store/common/cinder_utils.py
new file mode 100644
index 0000000..d66b783
--- /dev/null
+++ b/glance_store/common/cinder_utils.py
@@ -0,0 +1,193 @@
+# Copyright 2021 RedHat Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import logging
+
+from cinderclient.apiclient import exceptions as apiclient_exception
+from cinderclient import exceptions as cinder_exception
+from keystoneauth1 import exceptions as keystone_exc
+from oslo_utils import excutils
+import retrying
+
+from glance_store import exceptions
+from glance_store.i18n import _LE
+
+LOG = logging.getLogger(__name__)
+
+
+def handle_exceptions(method):
+ """Transforms the exception for the volume but keeps its traceback intact.
+ """
+ def wrapper(self, ctx, volume_id, *args, **kwargs):
+ try:
+ res = method(self, ctx, volume_id, *args, **kwargs)
+ except (keystone_exc.NotFound,
+ cinder_exception.NotFound,
+ cinder_exception.OverLimit) as e:
+ raise exceptions.BackendException(str(e))
+ return res
+ return wrapper
+
+
+def _retry_on_internal_server_error(e):
+ if isinstance(e, apiclient_exception.InternalServerError):
+ return True
+ return False
+
+
+class API(object):
+ """API for interacting with the cinder."""
+
+ @handle_exceptions
+ def create(self, client, size, name,
+ volume_type=None, metadata=None):
+
+ kwargs = dict(volume_type=volume_type,
+ metadata=metadata,
+ name=name)
+
+ volume = client.volumes.create(size, **kwargs)
+ return volume
+
+ def delete(self, client, volume_id):
+ client.volumes.delete(volume_id)
+
+ @handle_exceptions
+ def attachment_create(self, client, volume_id, connector=None,
+ mountpoint=None, mode=None):
+ """Create a volume attachment. This requires microversion >= 3.54.
+
+ The attachment_create call was introduced in microversion 3.27. We
+ need 3.54 as the minimum here because we need attachment_complete to
+ finish the attaching process, which was introduced in version 3.44,
+ and we also pass the attach mode, which was introduced in version 3.54.
+
+ :param client: cinderclient object
+ :param volume_id: UUID of the volume on which to create the attachment.
+ :param connector: host connector dict; if None, the attachment will
+ be 'reserved' but not yet attached.
+ :param mountpoint: Optional mount device name for the attachment,
+ e.g. "/dev/vdb". This is only used if a connector is provided.
+ :param mode: The mode in which the attachment is made i.e.
+ read only(ro) or read/write(rw)
+ :returns: a dict created from the
+ cinderclient.v3.attachments.VolumeAttachment object with a backward
+ compatible connection_info dict
+ """
+ if connector and mountpoint and 'mountpoint' not in connector:
+ connector['mountpoint'] = mountpoint
+
+ try:
+ attachment_ref = client.attachments.create(
+ volume_id, connector, mode=mode)
+ return attachment_ref
+ except cinder_exception.ClientException as ex:
+ with excutils.save_and_reraise_exception():
+ LOG.error(_LE('Create attachment failed for volume '
+ '%(volume_id)s. Error: %(msg)s Code: %(code)s'),
+ {'volume_id': volume_id,
+ 'msg': str(ex),
+ 'code': getattr(ex, 'code', None)})
+
+ @handle_exceptions
+ def attachment_get(self, client, attachment_id):
+ """Gets a volume attachment.
+
+ :param client: cinderclient object
+ :param attachment_id: UUID of the volume attachment to get.
+ :returns: a dict created from the
+ cinderclient.v3.attachments.VolumeAttachment object with a backward
+ compatible connection_info dict
+ """
+ try:
+ attachment_ref = client.attachments.show(
+ attachment_id)
+ return attachment_ref
+ except cinder_exception.ClientException as ex:
+ with excutils.save_and_reraise_exception():
+ LOG.error(_LE('Show attachment failed for attachment '
+ '%(id)s. Error: %(msg)s Code: %(code)s'),
+ {'id': attachment_id,
+ 'msg': str(ex),
+ 'code': getattr(ex, 'code', None)})
+
+ @handle_exceptions
+ def attachment_update(self, client, attachment_id, connector,
+ mountpoint=None):
+ """Updates the connector on the volume attachment. An attachment
+ without a connector is considered reserved but not fully attached.
+
+ :param client: cinderclient object
+ :param attachment_id: UUID of the volume attachment to update.
+ :param connector: host connector dict. This is required when updating
+ a volume attachment. To terminate a connection, the volume
+ attachment for that connection must be deleted.
+ :param mountpoint: Optional mount device name for the attachment,
+ e.g. "/dev/vdb". Theoretically this is optional per volume backend,
+ but in practice it's normally required so it's best to always
+ provide a value.
+ :returns: a dict created from the
+ cinderclient.v3.attachments.VolumeAttachment object with a backward
+ compatible connection_info dict
+ """
+ if mountpoint and 'mountpoint' not in connector:
+ connector['mountpoint'] = mountpoint
+
+ try:
+ attachment_ref = client.attachments.update(
+ attachment_id, connector)
+ return attachment_ref
+ except cinder_exception.ClientException as ex:
+ with excutils.save_and_reraise_exception():
+ LOG.error(_LE('Update attachment failed for attachment '
+ '%(id)s. Error: %(msg)s Code: %(code)s'),
+ {'id': attachment_id,
+ 'msg': str(ex),
+ 'code': getattr(ex, 'code', None)})
+
+ @handle_exceptions
+ def attachment_complete(self, client, attachment_id):
+ """Marks a volume attachment complete.
+
+ This call should be used to inform Cinder that a volume attachment is
+ fully connected on the host so Cinder can apply the necessary state
+ changes to the volume info in its database.
+
+ :param client: cinderclient object
+ :param attachment_id: UUID of the volume attachment to update.
+ """
+ try:
+ client.attachments.complete(attachment_id)
+ except cinder_exception.ClientException as ex:
+ with excutils.save_and_reraise_exception():
+ LOG.error(_LE('Complete attachment failed for attachment '
+ '%(id)s. Error: %(msg)s Code: %(code)s'),
+ {'id': attachment_id,
+ 'msg': str(ex),
+ 'code': getattr(ex, 'code', None)})
+
+ @handle_exceptions
+ @retrying.retry(stop_max_attempt_number=5,
+ retry_on_exception=_retry_on_internal_server_error)
+ def attachment_delete(self, client, attachment_id):
+ try:
+ client.attachments.delete(attachment_id)
+ except cinder_exception.ClientException as ex:
+ with excutils.save_and_reraise_exception():
+ LOG.error(_LE('Delete attachment failed for attachment '
+ '%(id)s. Error: %(msg)s Code: %(code)s'),
+ {'id': attachment_id,
+ 'msg': str(ex),
+ 'code': getattr(ex, 'code', None)})
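
A hedged usage sketch of the new cinder_utils.API helper, mirroring the attachment flow the cinder driver follows above. The cinderclient (`client`), the os-brick connector dict (`connector_prop`) and the volume id are assumed placeholders, not values the module provides:

    from glance_store.common import cinder_utils

    # `client` is a cinderclient built as in the driver above and
    # `connector_prop` comes from os-brick's get_connector_properties();
    # both are assumed to already exist in this sketch.
    volume_api = cinder_utils.API()
    volume_id = '11111111-2222-3333-4444-555555555555'  # placeholder

    # Reserve the volume (microversion >= 3.54), then attach it to this host.
    attachment = volume_api.attachment_create(client, volume_id, mode='rw')
    attachment = volume_api.attachment_update(
        client, attachment['id'], connector_prop, mountpoint='glance_store')
    volume_api.attachment_complete(client, attachment.id)
    try:
        pass  # read from / write to the attached device here
    finally:
        # One call replaces the old terminate_connection() + detach() pair;
        # it retries on InternalServerError via the retrying decorator.
        volume_api.attachment_delete(client, attachment.id)
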
diff --git a/glance_store/tests/functional/README.rst b/glance_store/tests/functional/README.rst
index 7e0bd09..55ab8ef 100644
--- a/glance_store/tests/functional/README.rst
+++ b/glance_store/tests/functional/README.rst
@@ -54,7 +54,7 @@ format::
username: admin
identity_api_version: '3'
region_name: RegionOne
- volume_api_version: '2'
+ volume_api_version: '3'
The clouds.yaml format allows for a set of credentials to be defined for each
named cloud. By default, the tests will use the credentials for the cloud
diff --git a/glance_store/tests/unit/common/test_cinder_utils.py b/glance_store/tests/unit/common/test_cinder_utils.py
new file mode 100644
index 0000000..2acfaac
--- /dev/null
+++ b/glance_store/tests/unit/common/test_cinder_utils.py
@@ -0,0 +1,157 @@
+# Copyright 2021 RedHat Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from unittest import mock
+import uuid
+
+from cinderclient.apiclient import exceptions as apiclient_exception
+from cinderclient import exceptions as cinder_exception
+from oslo_config import cfg
+from oslotest import base
+
+from glance_store.common import cinder_utils
+
+CONF = cfg.CONF
+
+
+class FakeObject(object):
+ def __init__(self, **kwargs):
+ for name, value in kwargs.items():
+ setattr(self, name, value)
+
+
+class CinderUtilsTestCase(base.BaseTestCase):
+
+ def setUp(self):
+ super(CinderUtilsTestCase, self).setUp()
+ CONF.register_opt(cfg.DictOpt('enabled_backends'))
+ CONF.set_override('enabled_backends', 'fake:cinder')
+ self.volume_api = cinder_utils.API()
+ self.fake_client = FakeObject(attachments=FakeObject(
+ create=mock.MagicMock(), delete=mock.MagicMock(),
+ complete=mock.MagicMock(), update=mock.MagicMock(),
+ show=mock.MagicMock()))
+ self.fake_vol_id = uuid.uuid4()
+ self.fake_attach_id = uuid.uuid4()
+ self.fake_connector = {
+ 'platform': 'x86_64', 'os_type': 'linux', 'ip': 'fake_ip',
+ 'host': 'fake_host', 'multipath': False,
+ 'initiator': 'fake_initiator', 'do_local_attach': False,
+ 'uuid': '3e1a7217-104e-41c1-b177-a37c491129a0',
+ 'system uuid': '98755544-c749-40ed-b30a-a1cb27b2a46d',
+ 'nqn': 'fake_nqn'}
+
+ def test_attachment_create(self):
+ self.volume_api.attachment_create(self.fake_client, self.fake_vol_id)
+ self.fake_client.attachments.create.assert_called_once_with(
+ self.fake_vol_id, None, mode=None)
+
+ def test_attachment_create_with_connector_and_mountpoint(self):
+ self.volume_api.attachment_create(
+ self.fake_client, self.fake_vol_id,
+ connector=self.fake_connector, mountpoint='fake_mountpoint')
+ self.fake_connector['mountpoint'] = 'fake_mountpoint'
+ self.fake_client.attachments.create.assert_called_once_with(
+ self.fake_vol_id, self.fake_connector, mode=None)
+
+ def test_attachment_create_client_exception(self):
+ self.fake_client.attachments.create.side_effect = (
+ cinder_exception.ClientException(code=1))
+ self.assertRaises(
+ cinder_exception.ClientException,
+ self.volume_api.attachment_create,
+ self.fake_client, self.fake_vol_id)
+
+ def test_attachment_get(self):
+ self.volume_api.attachment_get(self.fake_client, self.fake_attach_id)
+ self.fake_client.attachments.show.assert_called_once_with(
+ self.fake_attach_id)
+
+ def test_attachment_get_client_exception(self):
+ self.fake_client.attachments.show.side_effect = (
+ cinder_exception.ClientException(code=1))
+ self.assertRaises(
+ cinder_exception.ClientException,
+ self.volume_api.attachment_get,
+ self.fake_client, self.fake_attach_id)
+
+ def test_attachment_update(self):
+ self.volume_api.attachment_update(self.fake_client,
+ self.fake_attach_id,
+ self.fake_connector)
+ self.fake_client.attachments.update.assert_called_once_with(
+ self.fake_attach_id, self.fake_connector)
+
+ def test_attachment_update_with_connector_and_mountpoint(self):
+ self.volume_api.attachment_update(
+ self.fake_client, self.fake_attach_id, self.fake_connector,
+ mountpoint='fake_mountpoint')
+ self.fake_connector['mountpoint'] = 'fake_mountpoint'
+ self.fake_client.attachments.update.assert_called_once_with(
+ self.fake_attach_id, self.fake_connector)
+
+ def test_attachment_update_client_exception(self):
+ self.fake_client.attachments.update.side_effect = (
+ cinder_exception.ClientException(code=1))
+ self.assertRaises(
+ cinder_exception.ClientException,
+ self.volume_api.attachment_update,
+ self.fake_client, self.fake_attach_id, self.fake_connector)
+
+ def test_attachment_complete(self):
+ self.volume_api.attachment_complete(self.fake_client,
+ self.fake_attach_id)
+ self.fake_client.attachments.complete.assert_called_once_with(
+ self.fake_attach_id)
+
+ def test_attachment_complete_client_exception(self):
+ self.fake_client.attachments.complete.side_effect = (
+ cinder_exception.ClientException(code=1))
+ self.assertRaises(
+ cinder_exception.ClientException,
+ self.volume_api.attachment_complete,
+ self.fake_client, self.fake_attach_id)
+
+ def test_attachment_delete(self):
+ self.volume_api.attachment_delete(self.fake_client,
+ self.fake_attach_id)
+ self.fake_client.attachments.delete.assert_called_once_with(
+ self.fake_attach_id)
+
+ def test_attachment_delete_client_exception(self):
+ self.fake_client.attachments.delete.side_effect = (
+ cinder_exception.ClientException(code=1))
+ self.assertRaises(
+ cinder_exception.ClientException,
+ self.volume_api.attachment_delete,
+ self.fake_client, self.fake_attach_id)
+
+ def test_attachment_delete_retries(self):
+ # Make delete fail two times and succeed on the third attempt.
+ self.fake_client.attachments.delete.side_effect = [
+ apiclient_exception.InternalServerError(),
+ apiclient_exception.InternalServerError(),
+ lambda aid: 'foo']
+
+ # Make sure we get a clean result.
+ self.assertIsNone(self.volume_api.attachment_delete(
+ self.fake_client, self.fake_attach_id))
+
+ # Assert that we called delete three times due to the retry
+ # decorator.
+ self.fake_client.attachments.delete.assert_has_calls([
+ mock.call(self.fake_attach_id),
+ mock.call(self.fake_attach_id),
+ mock.call(self.fake_attach_id)])
diff --git a/glance_store/tests/unit/test_cinder_store.py b/glance_store/tests/unit/test_cinder_store.py
index 7ba655d..ad25486 100644
--- a/glance_store/tests/unit/test_cinder_store.py
+++ b/glance_store/tests/unit/test_cinder_store.py
@@ -26,12 +26,12 @@ import tempfile
import time
import uuid
-from cinderclient.v3 import client as cinderclient
from os_brick.initiator import connector
from oslo_concurrency import processutils
from oslo_utils.secretutils import md5
from oslo_utils import units
+from glance_store.common import cinder_utils
from glance_store import exceptions
from glance_store import location
from glance_store.tests import base
@@ -67,31 +67,30 @@ class TestCinderStore(base.StoreBaseTest,
auth_token='fake_token',
project_id='fake_project')
self.hash_algo = 'sha256'
+ cinder._reset_cinder_session()
def test_get_cinderclient(self):
cc = self.store.get_cinderclient(self.context)
- self.assertEqual('fake_token', cc.client.auth_token)
- self.assertEqual('http://foo/public_url', cc.client.management_url)
+ self.assertEqual('fake_token', cc.client.auth.token)
+ self.assertEqual('http://foo/public_url', cc.client.auth.endpoint)
- def test_get_cinderclient_with_user_overriden(self):
+ def _test_get_cinderclient_with_user_overriden(self):
self.config(cinder_store_user_name='test_user')
self.config(cinder_store_password='test_password')
self.config(cinder_store_project_name='test_project')
self.config(cinder_store_auth_address='test_address')
cc = self.store.get_cinderclient(self.context)
- self.assertIsNone(cc.client.auth_token)
- self.assertEqual('test_address', cc.client.management_url)
+ self.assertEqual('test_project', cc.client.session.auth.project_name)
+ self.assertEqual('Default', cc.client.session.auth.project_domain_name)
+ return cc
+
+ def test_get_cinderclient_with_user_overriden(self):
+ self._test_get_cinderclient_with_user_overriden()
def test_get_cinderclient_with_user_overriden_and_region(self):
self.config(cinder_os_region_name='test_region')
- fake_client = FakeObject(client=FakeObject(auth_token=None))
- with mock.patch.object(cinderclient, 'Client',
- return_value=fake_client) as mock_client:
- self.test_get_cinderclient_with_user_overriden()
- mock_client.assert_called_once_with(
- 'test_user', 'test_password', 'test_project',
- auth_url='test_address', cacert=None, insecure=False,
- region_name='test_region', retries=3)
+ cc = self._test_get_cinderclient_with_user_overriden()
+ self.assertEqual('test_region', cc.client.region_name)
def test_temporary_chown(self):
class fake_stat(object):
@@ -153,11 +152,20 @@ class TestCinderStore(base.StoreBaseTest,
def _test_open_cinder_volume(self, open_mode, attach_mode, error,
multipath_supported=False,
enforce_multipath=False,
- encrypted_nfs=False):
+ encrypted_nfs=False, qcow2_vol=False):
self.config(cinder_mount_point_base=None)
fake_volume = mock.MagicMock(id=str(uuid.uuid4()), status='available')
- fake_volumes = FakeObject(get=lambda id: fake_volume,
- detach=mock.Mock())
+ fake_volume.manager.get.return_value = fake_volume
+ fake_volumes = FakeObject(get=lambda id: fake_volume)
+ fake_attachment_id = str(uuid.uuid4())
+ fake_attachment_create = {'id': fake_attachment_id}
+ if encrypted_nfs or qcow2_vol:
+ fake_attachment_update = mock.MagicMock(
+ id=fake_attachment_id,
+ connection_info={'driver_volume_type': 'nfs'})
+ else:
+ fake_attachment_update = mock.MagicMock(id=fake_attachment_id)
+ fake_conn_info = mock.MagicMock(connector={})
fake_client = FakeObject(volumes=fake_volumes)
_, fake_dev_path = tempfile.mkstemp(dir=self.test_dir)
fake_devinfo = {'path': fake_dev_path}
@@ -176,8 +184,6 @@ class TestCinderStore(base.StoreBaseTest,
raise error
def fake_factory(protocol, root_helper, **kwargs):
- self.assertEqual(fake_volume.initialize_connection.return_value,
- kwargs['conn'])
return fake_connector
root_helper = "sudo glance-rootwrap /etc/glance/rootwrap.conf"
@@ -189,30 +195,44 @@ class TestCinderStore(base.StoreBaseTest,
mock.patch.object(cinder.Store, 'get_root_helper',
return_value=root_helper), \
mock.patch.object(connector.InitiatorConnector, 'factory',
- side_effect=fake_factory) as fake_conn_obj:
+ side_effect=fake_factory
+ ) as fake_conn_obj, \
+ mock.patch.object(cinder_utils.API, 'attachment_create',
+ return_value=fake_attachment_create
+ ) as attach_create, \
+ mock.patch.object(cinder_utils.API, 'attachment_update',
+ return_value=fake_attachment_update
+ ) as attach_update, \
+ mock.patch.object(cinder_utils.API,
+ 'attachment_delete') as attach_delete, \
+ mock.patch.object(cinder_utils.API,
+ 'attachment_get') as attach_get, \
+ mock.patch.object(cinder_utils.API,
+ 'attachment_complete') as attach_complete:
with mock.patch.object(connector,
- 'get_connector_properties') as mock_conn:
+ 'get_connector_properties',
+ return_value=fake_conn_info) as mock_conn:
if error:
self.assertRaises(error, do_open)
- elif encrypted_nfs:
- fake_volume.initialize_connection.return_value = {
- 'driver_volume_type': 'nfs'
- }
- fake_volume.encrypted = True
+ elif encrypted_nfs or qcow2_vol:
+ fake_volume.encrypted = False
+ if encrypted_nfs:
+ fake_volume.encrypted = True
+ elif qcow2_vol:
+ attach_get.return_value = mock.MagicMock(
+ connection_info={'format': 'qcow2'})
try:
with self.store._open_cinder_volume(
fake_client, fake_volume, open_mode):
pass
except exceptions.BackendException:
- self.assertEqual(1,
- fake_volume.unreserve.call_count)
- self.assertEqual(1,
- fake_volume.delete.call_count)
+ attach_delete.assert_called_once_with(
+ fake_client, fake_attachment_id)
else:
do_open()
- if not encrypted_nfs:
+ if not (encrypted_nfs or qcow2_vol):
mock_conn.assert_called_once_with(
root_helper, socket.gethostname(),
multipath_supported, enforce_multipath)
@@ -220,13 +240,36 @@ class TestCinderStore(base.StoreBaseTest,
mock.ANY)
fake_connector.disconnect_volume.assert_called_once_with(
mock.ANY, fake_devinfo)
- fake_volume.attach.assert_called_once_with(
- None, 'glance_store', attach_mode,
- host_name=socket.gethostname())
- fake_volumes.detach.assert_called_once_with(fake_volume)
fake_conn_obj.assert_called_once_with(
mock.ANY, root_helper, conn=mock.ANY,
use_multipath=multipath_supported)
+ attach_create.assert_called_once_with(
+ fake_client, fake_volume.id, mode=attach_mode)
+ attach_update.assert_called_once_with(
+ fake_client, fake_attachment_id,
+ fake_conn_info, mountpoint='glance_store')
+ attach_complete.assert_called_once_with(
+ fake_client, fake_attachment_id)
+ attach_delete.assert_called_once_with(
+ fake_client, fake_attachment_id)
+ else:
+ mock_conn.assert_called_once_with(
+ root_helper, socket.gethostname(),
+ multipath_supported, enforce_multipath)
+ fake_connector.connect_volume.assert_not_called()
+ fake_connector.disconnect_volume.assert_not_called()
+ fake_conn_obj.assert_called_once_with(
+ mock.ANY, root_helper, conn=mock.ANY,
+ use_multipath=multipath_supported)
+ attach_create.assert_called_once_with(
+ fake_client, fake_volume.id, mode=attach_mode)
+ attach_update.assert_called_once_with(
+ fake_client, fake_attachment_id,
+ fake_conn_info, mountpoint='glance_store')
+ attach_complete.assert_called_once_with(
+ fake_client, fake_attachment_id)
+ attach_delete.assert_called_once_with(
+ fake_client, fake_attachment_id)
def test_open_cinder_volume_rw(self):
self._test_open_cinder_volume('wb', 'rw', None)
@@ -252,6 +295,9 @@ class TestCinderStore(base.StoreBaseTest,
def test_open_cinder_volume_nfs_encrypted(self):
self._test_open_cinder_volume('rb', 'ro', None, encrypted_nfs=True)
+ def test_open_cinder_volume_nfs_qcow2_volume(self):
+ self._test_open_cinder_volume('rb', 'ro', None, qcow2_vol=True)
+
def test_cinder_configure_add(self):
self.assertRaises(exceptions.BadStoreConfiguration,
self.store._check_context, None)
@@ -402,8 +448,7 @@ class TestCinderStore(base.StoreBaseTest,
def test_cinder_delete(self):
fake_client = FakeObject(auth_token=None, management_url=None)
fake_volume_uuid = str(uuid.uuid4())
- fake_volume = FakeObject(delete=mock.Mock())
- fake_volumes = {fake_volume_uuid: fake_volume}
+ fake_volumes = FakeObject(delete=mock.Mock())
with mock.patch.object(cinder.Store, 'get_cinderclient') as mocked_cc:
mocked_cc.return_value = FakeObject(client=fake_client,
@@ -412,7 +457,7 @@ class TestCinderStore(base.StoreBaseTest,
uri = 'cinder://%s' % fake_volume_uuid
loc = location.get_location_from_uri(uri, conf=self.conf)
self.store.delete(loc, context=self.context)
- fake_volume.delete.assert_called_once_with()
+ fake_volumes.delete.assert_called_once_with(fake_volume_uuid)
def test_set_url_prefix(self):
self.assertEqual('cinder://', self.store._url_prefix)
diff --git a/glance_store/tests/unit/test_connection_manager.py b/glance_store/tests/unit/test_connection_manager.py
index 20be2fd..ac1ab91 100644
--- a/glance_store/tests/unit/test_connection_manager.py
+++ b/glance_store/tests/unit/test_connection_manager.py
@@ -46,6 +46,7 @@ class TestConnectionManager(base.StoreBaseTest):
auth_version='3')
store.backend_group = None
+ store.conf_endpoint = None
store.init_client.return_value = self.client
return store
diff --git a/glance_store/tests/unit/test_multistore_cinder.py b/glance_store/tests/unit/test_multistore_cinder.py
index 1c6ae26..ebe69ee 100644
--- a/glance_store/tests/unit/test_multistore_cinder.py
+++ b/glance_store/tests/unit/test_multistore_cinder.py
@@ -33,6 +33,7 @@ from oslo_utils.secretutils import md5
from oslo_utils import units
import glance_store as store
+from glance_store.common import cinder_utils
from glance_store import exceptions
from glance_store import location
from glance_store.tests import base
@@ -98,14 +99,15 @@ class TestMultiCinderStore(base.MultiStoreBaseTest,
user_id='admin_user',
auth_token='admin_token',
project_id='admin_project')
+ cinder._reset_cinder_session()
def test_location_url_prefix_is_set(self):
self.assertEqual("cinder://cinder1", self.store.url_prefix)
def test_get_cinderclient(self):
cc = self.store.get_cinderclient(self.context)
- self.assertEqual('fake_token', cc.client.auth_token)
- self.assertEqual('http://foo/public_url', cc.client.management_url)
+ self.assertEqual('fake_token', cc.client.auth.token)
+ self.assertEqual('http://foo/public_url', cc.client.auth.endpoint)
def test_get_cinderclient_with_user_overriden(self):
self.config(cinder_store_user_name='test_user', group="cinder1")
@@ -113,16 +115,14 @@ class TestMultiCinderStore(base.MultiStoreBaseTest,
self.config(cinder_store_project_name='test_project', group="cinder1")
self.config(cinder_store_auth_address='test_address', group="cinder1")
cc = self.store.get_cinderclient(self.context)
- self.assertIsNone(cc.client.auth_token)
- self.assertEqual('test_address', cc.client.management_url)
+ self.assertEqual('Default', cc.client.session.auth.project_domain_name)
+ self.assertEqual('test_project', cc.client.session.auth.project_name)
def test_get_cinderclient_legacy_update(self):
cc = self.store.get_cinderclient(self.fake_admin_context,
legacy_update=True)
- self.assertEqual('admin_token', cc.client.auth_token)
- self.assertEqual('admin_user', cc.client.user)
- self.assertEqual('admin_project', cc.client.projectid)
- self.assertEqual('http://foo/public_url', cc.client.management_url)
+ self.assertEqual('admin_token', cc.client.auth.token)
+ self.assertEqual('http://foo/public_url', cc.client.auth.endpoint)
def test_temporary_chown(self):
class fake_stat(object):
@@ -183,11 +183,21 @@ class TestMultiCinderStore(base.MultiStoreBaseTest,
def _test_open_cinder_volume(self, open_mode, attach_mode, error,
multipath_supported=False,
- enforce_multipath=False):
+ enforce_multipath=False,
+ encrypted_nfs=False, qcow2_vol=False):
self.config(cinder_mount_point_base=None, group='cinder1')
fake_volume = mock.MagicMock(id=str(uuid.uuid4()), status='available')
- fake_volumes = FakeObject(get=lambda id: fake_volume,
- detach=mock.Mock())
+ fake_volume.manager.get.return_value = fake_volume
+ fake_attachment_id = str(uuid.uuid4())
+ fake_attachment_create = {'id': fake_attachment_id}
+ if encrypted_nfs or qcow2_vol:
+ fake_attachment_update = mock.MagicMock(
+ id=fake_attachment_id,
+ connection_info={'driver_volume_type': 'nfs'})
+ else:
+ fake_attachment_update = mock.MagicMock(id=fake_attachment_id)
+ fake_conn_info = mock.MagicMock(connector={})
+ fake_volumes = FakeObject(get=lambda id: fake_volume)
fake_client = FakeObject(volumes=fake_volumes)
_, fake_dev_path = tempfile.mkstemp(dir=self.test_dir)
fake_devinfo = {'path': fake_dev_path}
@@ -206,8 +216,6 @@ class TestMultiCinderStore(base.MultiStoreBaseTest,
raise error
def fake_factory(protocol, root_helper, **kwargs):
- self.assertEqual(fake_volume.initialize_connection.return_value,
- kwargs['conn'])
return fake_connector
root_helper = "sudo glance-rootwrap /etc/glance/rootwrap.conf"
@@ -219,28 +227,82 @@ class TestMultiCinderStore(base.MultiStoreBaseTest,
mock.patch.object(cinder.Store, 'get_root_helper',
return_value=root_helper), \
mock.patch.object(connector.InitiatorConnector, 'factory',
- side_effect=fake_factory) as fake_conn_obj:
+ side_effect=fake_factory
+ ) as fake_conn_obj, \
+ mock.patch.object(cinder_utils.API,
+ 'attachment_create',
+ return_value=fake_attachment_create
+ ) as attach_create, \
+ mock.patch.object(cinder_utils.API,
+ 'attachment_update',
+ return_value=fake_attachment_update
+ ) as attach_update, \
+ mock.patch.object(cinder_utils.API,
+ 'attachment_delete') as attach_delete, \
+ mock.patch.object(cinder_utils.API,
+ 'attachment_get') as attach_get, \
+ mock.patch.object(cinder_utils.API,
+ 'attachment_complete') as attach_complete:
with mock.patch.object(connector,
- 'get_connector_properties') as mock_conn:
+ 'get_connector_properties',
+ return_value=fake_conn_info) as mock_conn:
if error:
self.assertRaises(error, do_open)
+ elif encrypted_nfs or qcow2_vol:
+ fake_volume.encrypted = False
+ if encrypted_nfs:
+ fake_volume.encrypted = True
+ elif qcow2_vol:
+ attach_get.return_value = mock.MagicMock(
+ connection_info={'format': 'qcow2'})
+ try:
+ with self.store._open_cinder_volume(
+ fake_client, fake_volume, open_mode):
+ pass
+ except exceptions.BackendException:
+ attach_delete.assert_called_once_with(
+ fake_client, fake_attachment_id)
else:
do_open()
-
- mock_conn.assert_called_once_with(
- root_helper, socket.gethostname(), multipath_supported,
- enforce_multipath)
- fake_connector.connect_volume.assert_called_once_with(mock.ANY)
- fake_connector.disconnect_volume.assert_called_once_with(
- mock.ANY, fake_devinfo)
- fake_volume.attach.assert_called_once_with(
- None, 'glance_store', attach_mode,
- host_name=socket.gethostname())
- fake_volumes.detach.assert_called_once_with(fake_volume)
- fake_conn_obj.assert_called_once_with(
- mock.ANY, root_helper, conn=mock.ANY,
- use_multipath=multipath_supported)
+ if not (encrypted_nfs or qcow2_vol):
+ mock_conn.assert_called_once_with(
+ root_helper, socket.gethostname(),
+ multipath_supported, enforce_multipath)
+ fake_connector.connect_volume.assert_called_once_with(
+ mock.ANY)
+ fake_connector.disconnect_volume.assert_called_once_with(
+ mock.ANY, fake_devinfo)
+ fake_conn_obj.assert_called_once_with(
+ mock.ANY, root_helper, conn=mock.ANY,
+ use_multipath=multipath_supported)
+ attach_create.assert_called_once_with(
+ fake_client, fake_volume.id, mode=attach_mode)
+ attach_update.assert_called_once_with(
+ fake_client, fake_attachment_id,
+ fake_conn_info, mountpoint='glance_store')
+ attach_complete.assert_called_once_with(
+ fake_client, fake_attachment_id)
+ attach_delete.assert_called_once_with(fake_client,
+ fake_attachment_id)
+ else:
+ mock_conn.assert_called_once_with(
+ root_helper, socket.gethostname(),
+ multipath_supported, enforce_multipath)
+ fake_connector.connect_volume.assert_not_called()
+ fake_connector.disconnect_volume.assert_not_called()
+ fake_conn_obj.assert_called_once_with(
+ mock.ANY, root_helper, conn=mock.ANY,
+ use_multipath=multipath_supported)
+ attach_create.assert_called_once_with(
+ fake_client, fake_volume.id, mode=attach_mode)
+ attach_update.assert_called_once_with(
+ fake_client, fake_attachment_id,
+ fake_conn_info, mountpoint='glance_store')
+ attach_complete.assert_called_once_with(
+ fake_client, fake_attachment_id)
+ attach_delete.assert_called_once_with(
+ fake_client, fake_attachment_id)
def test_open_cinder_volume_rw(self):
self._test_open_cinder_volume('wb', 'rw', None)
@@ -264,6 +326,12 @@ class TestMultiCinderStore(base.MultiStoreBaseTest,
multipath_supported=True,
enforce_multipath=True)
+ def test_open_cinder_volume_nfs_encrypted(self):
+ self._test_open_cinder_volume('rb', 'ro', None, encrypted_nfs=True)
+
+ def test_open_cinder_volume_nfs_qcow2_volume(self):
+ self._test_open_cinder_volume('rb', 'ro', None, qcow2_vol=True)
+
def test_cinder_check_context(self):
self.assertRaises(exceptions.BadStoreConfiguration,
self.store._check_context, None)
@@ -500,8 +568,7 @@ class TestMultiCinderStore(base.MultiStoreBaseTest,
def test_cinder_delete(self):
fake_client = FakeObject(auth_token=None, management_url=None)
fake_volume_uuid = str(uuid.uuid4())
- fake_volume = FakeObject(delete=mock.Mock())
- fake_volumes = {fake_volume_uuid: fake_volume}
+ fake_volumes = FakeObject(delete=mock.Mock())
with mock.patch.object(cinder.Store, 'get_cinderclient') as mocked_cc:
mocked_cc.return_value = FakeObject(client=fake_client,
@@ -512,7 +579,7 @@ class TestMultiCinderStore(base.MultiStoreBaseTest,
"cinder1",
conf=self.conf)
self.store.delete(loc, context=self.context)
- fake_volume.delete.assert_called_once_with()
+ fake_volumes.delete.assert_called_once_with(fake_volume_uuid)
def test_cinder_add_different_backend(self):
self.store = cinder.Store(self.conf, backend="cinder2")
diff --git a/glance_store/tests/unit/test_multistore_s3.py b/glance_store/tests/unit/test_multistore_s3.py
index d242b8f..2485e4a 100644
--- a/glance_store/tests/unit/test_multistore_s3.py
+++ b/glance_store/tests/unit/test_multistore_s3.py
@@ -44,7 +44,7 @@ S3_CONF = {
's3_store_secret_key': 'key',
's3_store_host': 'https://s3-region1.com',
's3_store_bucket': 'glance',
- 's3_store_large_object_size': 5, # over 5MB is large
+ 's3_store_large_object_size': 9, # over 9MB is large
's3_store_large_object_chunk_size': 6, # part size is 6MB
}
@@ -91,7 +91,9 @@ class TestMultiS3Store(base.MultiStoreBaseTest,
s3_store_secret_key='key',
s3_store_host='https://s3-region1.com',
s3_store_bucket='glance',
- s3_store_large_object_size=5,
+ s3_store_large_object_size=S3_CONF[
+ 's3_store_large_object_size'
+ ],
s3_store_large_object_chunk_size=6)
self.config(group='s3_region2',
@@ -99,7 +101,9 @@ class TestMultiS3Store(base.MultiStoreBaseTest,
s3_store_secret_key='key',
s3_store_host='http://s3-region2.com',
s3_store_bucket='glance',
- s3_store_large_object_size=5,
+ s3_store_large_object_size=S3_CONF[
+ 's3_store_large_object_size'
+ ],
s3_store_large_object_chunk_size=6)
# Ensure stores + locations cleared
location.SCHEME_TO_CLS_BACKEND_MAP = {}
@@ -204,6 +208,7 @@ class TestMultiS3Store(base.MultiStoreBaseTest,
def test_add_singlepart(self, mock_client):
"""Test that we can add an image via the s3 backend."""
expected_image_id = str(uuid.uuid4())
+ # 5KiB is smaller than WRITE_CHUNKSIZE
expected_s3_size = FIVE_KB
expected_s3_contents = b"*" * expected_s3_size
expected_checksum = md5(expected_s3_contents,
@@ -252,6 +257,59 @@ class TestMultiS3Store(base.MultiStoreBaseTest,
self.assertEqual(expected_multihash, multihash)
@mock.patch.object(boto3.session.Session, "client")
+ def test_add_singlepart_bigger_than_write_chunk(self, mock_client):
+ """Test that we can add an image via the s3 backend."""
+ expected_image_id = str(uuid.uuid4())
+ # 8 MiB is bigger than WRITE_CHUNKSIZE(=5MiB),
+ # but smaller than s3_store_large_object_size
+ expected_s3_size = 8 * units.Mi
+ expected_s3_contents = b"*" * expected_s3_size
+ expected_checksum = md5(expected_s3_contents,
+ usedforsecurity=False).hexdigest()
+ expected_multihash = hashlib.sha256(expected_s3_contents).hexdigest()
+ expected_location = format_s3_location(
+ S3_CONF['s3_store_access_key'],
+ S3_CONF['s3_store_secret_key'],
+ S3_CONF['s3_store_host'],
+ S3_CONF['s3_store_bucket'],
+ expected_image_id)
+ image_s3 = six.BytesIO(expected_s3_contents)
+
+ fake_s3_client = botocore.session.get_session().create_client('s3')
+
+ with stub.Stubber(fake_s3_client) as stubber:
+ stubber.add_response(method='head_bucket',
+ service_response={},
+ expected_params={
+ 'Bucket': S3_CONF['s3_store_bucket']
+ })
+ stubber.add_client_error(method='head_object',
+ service_error_code='404',
+ service_message='',
+ expected_params={
+ 'Bucket': S3_CONF['s3_store_bucket'],
+ 'Key': expected_image_id
+ })
+ stubber.add_response(method='put_object',
+ service_response={},
+ expected_params={
+ 'Bucket': S3_CONF['s3_store_bucket'],
+ 'Key': expected_image_id,
+ 'Body': botocore.stub.ANY
+ })
+
+ mock_client.return_value = fake_s3_client
+ loc, size, checksum, multihash, metadata = \
+ self.store.add(expected_image_id, image_s3, expected_s3_size,
+ self.hash_algo)
+ self.assertEqual("s3_region1", metadata["store"])
+
+ self.assertEqual(expected_location, loc)
+ self.assertEqual(expected_s3_size, size)
+ self.assertEqual(expected_checksum, checksum)
+ self.assertEqual(expected_multihash, multihash)
+
+ @mock.patch.object(boto3.session.Session, "client")
def test_add_different_backend(self, mock_client):
self.store = s3.Store(self.conf, backend="s3_region2")
self.store.configure()
diff --git a/glance_store/tests/unit/test_opts.py b/glance_store/tests/unit/test_opts.py
index 6f46d60..5928d4f 100644
--- a/glance_store/tests/unit/test_opts.py
+++ b/glance_store/tests/unit/test_opts.py
@@ -77,8 +77,10 @@ class OptsTestCase(base.StoreBaseTest):
'cinder_state_transition_timeout',
'cinder_store_auth_address',
'cinder_store_user_name',
+ 'cinder_store_user_domain_name',
'cinder_store_password',
'cinder_store_project_name',
+ 'cinder_store_project_domain_name',
'cinder_volume_type',
'cinder_use_multipath',
'cinder_enforce_multipath',
diff --git a/glance_store/tests/unit/test_s3_store.py b/glance_store/tests/unit/test_s3_store.py
index 2b95bfa..8a5f046 100644
--- a/glance_store/tests/unit/test_s3_store.py
+++ b/glance_store/tests/unit/test_s3_store.py
@@ -43,7 +43,7 @@ S3_CONF = {
's3_store_secret_key': 'key',
's3_store_host': 'localhost',
's3_store_bucket': 'glance',
- 's3_store_large_object_size': 5, # over 5MB is large
+ 's3_store_large_object_size': 9, # over 9MB is large
's3_store_large_object_chunk_size': 6, # part size is 6MB
}
@@ -157,6 +157,7 @@ class TestStore(base.StoreBaseTest,
def test_add_singlepart(self, mock_client):
"""Test that we can add an image via the s3 backend."""
expected_image_id = str(uuid.uuid4())
+ # 5KiB is smaller than WRITE_CHUNKSIZE
expected_s3_size = FIVE_KB
expected_s3_contents = b"*" * expected_s3_size
expected_checksum = md5(expected_s3_contents,
@@ -204,6 +205,58 @@ class TestStore(base.StoreBaseTest,
self.assertEqual(expected_multihash, multihash)
@mock.patch.object(boto3.session.Session, "client")
+ def test_add_singlepart_bigger_than_write_chunk(self, mock_client):
+ """Test that we can add a large image via the s3 backend."""
+ expected_image_id = str(uuid.uuid4())
+ # 8 MiB is bigger than WRITE_CHUNKSIZE(=5MiB),
+ # but smaller than s3_store_large_object_size
+ expected_s3_size = 8 * units.Mi
+ expected_s3_contents = b"*" * expected_s3_size
+ expected_checksum = md5(expected_s3_contents,
+ usedforsecurity=False).hexdigest()
+ expected_multihash = hashlib.sha256(expected_s3_contents).hexdigest()
+ expected_location = format_s3_location(
+ S3_CONF['s3_store_access_key'],
+ S3_CONF['s3_store_secret_key'],
+ S3_CONF['s3_store_host'],
+ S3_CONF['s3_store_bucket'],
+ expected_image_id)
+ image_s3 = six.BytesIO(expected_s3_contents)
+
+ fake_s3_client = botocore.session.get_session().create_client('s3')
+
+ with stub.Stubber(fake_s3_client) as stubber:
+ stubber.add_response(method='head_bucket',
+ service_response={},
+ expected_params={
+ 'Bucket': S3_CONF['s3_store_bucket']
+ })
+ stubber.add_client_error(method='head_object',
+ service_error_code='404',
+ service_message='',
+ expected_params={
+ 'Bucket': S3_CONF['s3_store_bucket'],
+ 'Key': expected_image_id
+ })
+ stubber.add_response(method='put_object',
+ service_response={},
+ expected_params={
+ 'Bucket': S3_CONF['s3_store_bucket'],
+ 'Key': expected_image_id,
+ 'Body': botocore.stub.ANY
+ })
+
+ mock_client.return_value = fake_s3_client
+ loc, size, checksum, multihash, _ = \
+ self.store.add(expected_image_id, image_s3, expected_s3_size,
+ self.hash_algo)
+
+ self.assertEqual(expected_location, loc)
+ self.assertEqual(expected_s3_size, size)
+ self.assertEqual(expected_checksum, checksum)
+ self.assertEqual(expected_multihash, multihash)
+
+ @mock.patch.object(boto3.session.Session, "client")
def test_add_with_verifier(self, mock_client):
"""Assert 'verifier.update' is called when verifier is provided"""
expected_image_id = str(uuid.uuid4())
diff --git a/glance_store/tests/unit/test_swift_store.py b/glance_store/tests/unit/test_swift_store.py
index 9018ccd..7364923 100644
--- a/glance_store/tests/unit/test_swift_store.py
+++ b/glance_store/tests/unit/test_swift_store.py
@@ -1561,6 +1561,31 @@ class TestSingleTenantStoreConnections(base.StoreBaseTest):
'endpoint_type': 'publicURL'},
connection.os_options)
+ @mock.patch("keystoneauth1.session.Session.get_endpoint")
+ @mock.patch("keystoneauth1.session.Session.get_auth_headers",
+ new=mock.Mock())
+ def _test_connection_manager_authv3_conf_endpoint(
+ self, mock_ep, expected_endpoint="https://from-catalog.com"):
+ self.config(swift_store_auth_version='3')
+ mock_ep.return_value = "https://from-catalog.com"
+ ctx = mock.MagicMock()
+ self.store.configure()
+ connection_manager = manager.SingleTenantConnectionManager(
+ store=self.store,
+ store_location=self.location,
+ context=ctx
+ )
+ conn = connection_manager._init_connection()
+ self.assertEqual(expected_endpoint, conn.preauthurl)
+
+ def test_connection_manager_authv3_without_conf_endpoint(self):
+ self._test_connection_manager_authv3_conf_endpoint()
+
+ def test_connection_manager_authv3_with_conf_endpoint(self):
+ self.config(swift_store_endpoint='http://localhost')
+ self._test_connection_manager_authv3_conf_endpoint(
+ expected_endpoint='http://localhost')
+
def test_connection_with_no_trailing_slash(self):
self.location.auth_or_store_url = 'example.com/v2'
connection = self.store.get_connection(self.location)
diff --git a/lower-constraints.txt b/lower-constraints.txt
deleted file mode 100644
index e1be785..0000000
--- a/lower-constraints.txt
+++ /dev/null
@@ -1,87 +0,0 @@
-alabaster==0.7.10
-appdirs==1.4.3
-attrs==17.4.0
-Babel==2.3.4
-boto3==1.9.199
-botocore==1.12.253
-certifi==2018.1.18
-chardet==3.0.4
-cliff==2.11.0
-cmd2==0.8.1
-coverage==4.0
-debtcollector==1.19.0
-doc8==0.6.0
-docutils==0.14
-dulwich==0.19.0
-enum-compat==0.0.2
-eventlet==0.18.2
-extras==1.0.0
-fasteners==0.14.1
-fixtures==3.0.0
-flake8==3.7.9
-future==0.16.0
-gitdb2==2.0.3
-GitPython==2.1.8
-greenlet==0.4.13
-httplib2==0.9.1
-idna==2.6
-imagesize==1.0.0
-iso8601==0.1.12
-Jinja2==2.10
-jmespath==0.7.1
-jsonschema==3.2.0
-keystoneauth1==3.4.0
-linecache2==1.0.0
-MarkupSafe==1.0
-mccabe==0.6.0
-mock==2.0.0
-monotonic==1.4
-mox3==0.25.0
-msgpack==0.5.6
-netaddr==0.7.19
-netifaces==0.10.6
-os-brick==2.6.0
-os-client-config==1.29.0
-os-win==4.0.1
-oslo.concurrency==3.26.0
-oslo.config==5.2.0
-oslo.context==2.22.0
-oslo.i18n==3.15.3
-oslo.log==3.36.0
-oslo.privsep==1.23.0
-oslo.rootwrap==5.8.0
-oslo.serialization==2.18.0
-oslo.service==1.41.1
-oslotest==3.2.0
-oslo.utils==4.7.0
-oslo.vmware==2.17.0
-Parsley==1.3
-pbr==3.1.1
-prettytable==0.7.2
-Pygments==2.2.0
-pyparsing==2.2.0
-pyperclip==1.6.0
-python-cinderclient==4.1.0
-python-keystoneclient==3.8.0
-python-mimeparse==1.6.0
-python-subunit==1.0.0
-python-swiftclient==3.2.0
-pytz==2018.3
-PyYAML==3.12
-requests==2.14.2
-requestsexceptions==1.4.0
-requests-mock==1.2.0
-restructuredtext-lint==1.1.3
-rfc3986==1.1.0
-six==1.11.0
-smmap2==2.0.3
-snowballstemmer==1.2.1
-stestr==2.0.0
-stevedore==1.20.0
-testscenarios==0.4
-testtools==2.2.0
-traceback2==1.4.0
-unittest2==1.1.0
-urllib3==1.22
-voluptuous==0.11.1
-wrapt==1.10.11
diff --git a/releasenotes/notes/cinder-nfs-block-qcow2-vol-4fed58b0afafc980.yaml b/releasenotes/notes/cinder-nfs-block-qcow2-vol-4fed58b0afafc980.yaml
new file mode 100644
index 0000000..b438285
--- /dev/null
+++ b/releasenotes/notes/cinder-nfs-block-qcow2-vol-4fed58b0afafc980.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - |
+ `Bug #1901138 <https://bugs.launchpad.net/glance-store/+bug/1901138>`_:
+ Blocked image creation when the glance store is cinder, the
+ cinder backend is NFS and the created volumes are in qcow2 format.
diff --git a/releasenotes/notes/support-cinder-user-domain-420c76053dd50534.yaml b/releasenotes/notes/support-cinder-user-domain-420c76053dd50534.yaml
new file mode 100644
index 0000000..7763366
--- /dev/null
+++ b/releasenotes/notes/support-cinder-user-domain-420c76053dd50534.yaml
@@ -0,0 +1,10 @@
+---
+features:
+ - |
+ For the Cinder store, if using an internal user to store images,
+ it is now possible to have the internal user and the internal project
+ in Keystone domains other than the ``Default`` one.
+ Two new config options ``cinder_store_user_domain_name`` and
+ ``cinder_store_project_domain_name`` have been added
+ (both default to ``Default``) and can now be used in the
+ configuration of the Cinder store.
diff --git a/setup.cfg b/setup.cfg
index a185c43..faf8942 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -63,7 +63,7 @@ console_scripts =
[extras]
# Dependencies for each of the optional stores
vmware =
- oslo.vmware>=2.17.0 # Apache-2.0
+ oslo.vmware>=3.6.0 # Apache-2.0
swift =
httplib2>=0.9.1 # MIT
python-swiftclient>=3.2.0 # Apache-2.0
diff --git a/test-requirements.txt b/test-requirements.txt
index 4a2a0fa..b623f81 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -14,6 +14,7 @@ coverage!=4.4,>=4.0 # Apache-2.0
fixtures>=3.0.0 # Apache-2.0/BSD
python-subunit>=1.0.0 # Apache-2.0/BSD
requests-mock>=1.2.0 # Apache-2.0
+retrying>=1.3.3
stestr>=2.0.0 # Apache-2.0
testscenarios>=0.4 # Apache-2.0/BSD
testtools>=2.2.0 # MIT
@@ -21,7 +22,7 @@ oslotest>=3.2.0 # Apache-2.0
# Dependencies for each of the optional stores
boto3>=1.9.199 # Apache-2.0
-oslo.vmware>=2.17.0 # Apache-2.0
+oslo.vmware>=3.6.0 # Apache-2.0
httplib2>=0.9.1 # MIT
python-swiftclient>=3.2.0 # Apache-2.0
python-cinderclient>=4.1.0 # Apache-2.0
diff --git a/tox.ini b/tox.ini
index 8d9c2df..6a3130b 100644
--- a/tox.ini
+++ b/tox.ini
@@ -56,7 +56,7 @@ commands =
stestr run --slowest --test-path=./glance_store/tests/functional/filesystem
[doc8]
-ignore-path = .venv,.git,.tox,*glance_store/locale*,*lib/python*,glance_store.egg*,doc/build,*requirements.txt,lower-constraints.txt
+ignore-path = .venv,.git,.tox,*glance_store/locale*,*lib/python*,glance_store.egg*,doc/build,*requirements.txt
[flake8]
# TODO(dmllr): Analyze or fix the warnings blacklisted below
@@ -68,8 +68,3 @@ ignore-path = .venv,.git,.tox,*glance_store/locale*,*lib/python*,glance_store.eg
ignore = H301,H404,H405,W503,W504
exclude = .venv,.git,.tox,dist,doc,etc,*glance_store/locale*,*lib/python*,*egg,build
-[testenv:lower-constraints]
-deps =
- -c{toxinidir}/lower-constraints.txt
- -r{toxinidir}/test-requirements.txt
- .[s3,vmware,swift,cinder]