summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--.zuul.yaml23
-rw-r--r--babel.cfg1
-rw-r--r--doc/requirements.txt6
-rw-r--r--doc/source/conf.py18
-rw-r--r--etc/glance/rootwrap.d/glance_cinder_store.filters4
-rw-r--r--glance_store/_drivers/cinder.py234
-rw-r--r--glance_store/_drivers/filesystem.py32
-rw-r--r--glance_store/_drivers/rbd.py96
-rw-r--r--glance_store/common/fs_mount.py366
-rw-r--r--glance_store/common/utils.py4
-rw-r--r--glance_store/exceptions.py5
-rw-r--r--glance_store/locale/en_GB/LC_MESSAGES/glance_store.po13
-rw-r--r--glance_store/multi_backend.py5
-rw-r--r--glance_store/tests/unit/common/test_fs_mount.py145
-rw-r--r--glance_store/tests/unit/test_cinder_store.py50
-rw-r--r--glance_store/tests/unit/test_filesystem_store.py87
-rw-r--r--glance_store/tests/unit/test_multistore_cinder.py84
-rw-r--r--glance_store/tests/unit/test_multistore_rbd.py6
-rw-r--r--glance_store/tests/unit/test_opts.py31
-rw-r--r--glance_store/tests/unit/test_rbd_store.py201
-rw-r--r--glance_store/tests/unit/test_test_utils.py38
-rw-r--r--glance_store/tests/utils.py42
-rw-r--r--lower-constraints.txt12
-rw-r--r--releasenotes/notes/block-creating-encrypted-nfs-volumes-d0ff370ab762042e.yaml6
-rw-r--r--releasenotes/notes/handle-sparse-image-a3ecfc4ae1c00d48.yaml15
-rw-r--r--releasenotes/notes/support-cinder-multiple-stores-6cc8489f8f4f8ff3.yaml10
-rw-r--r--releasenotes/notes/victoria-milestone-1-c1f9de5b90e8c326.yaml9
-rw-r--r--releasenotes/source/conf.py10
-rw-r--r--requirements.txt2
-rw-r--r--setup.cfg23
-rw-r--r--test-requirements.txt7
-rw-r--r--tools/install_venv.py2
-rw-r--r--tools/install_venv_common.py2
-rw-r--r--tox.ini2
34 files changed, 1410 insertions, 181 deletions
diff --git a/.zuul.yaml b/.zuul.yaml
index 62c7ce0..045b6c0 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -51,6 +51,7 @@
parent: tox
abstract: true
description: Abstract job for glance_store vs. cinder
+ nodeset: ubuntu-focal
required-projects:
- name: openstack/os-brick
- name: openstack/python-cinderclient
@@ -60,6 +61,7 @@
parent: glance_store-tox-cinder-tips-base
description: |
glance_store py36 unit tests vs. cinder masters
+ nodeset: ubuntu-bionic
vars:
tox_envlist: py36
@@ -68,6 +70,7 @@
parent: tox
abstract: true
description: Abstract job for glance_store vs. keystone
+ nodeset: ubuntu-focal
required-projects:
- name: openstack/keystoneauth
- name: openstack/python-keystoneclient
@@ -77,6 +80,7 @@
parent: glance_store-tox-keystone-tips-base
description: |
glance_store py36 unit tests vs. keystone masters
+ nodeset: ubuntu-bionic
vars:
tox_envlist: py36
@@ -85,6 +89,7 @@
parent: tox
abstract: true
description: Abstract job for glance_store vs. oslo
+ nodeset: ubuntu-focal
required-projects:
- name: openstack/oslo.concurrency
- name: openstack/oslo.config
@@ -101,6 +106,7 @@
parent: glance_store-tox-oslo-tips-base
description: |
glance_store py36 unit tests vs. oslo masters
+ nodeset: ubuntu-bionic
vars:
tox_envlist: py36
@@ -109,6 +115,7 @@
parent: tox
abstract: true
description: Abstract job for glance_store vs. swift
+ nodeset: ubuntu-focal
required-projects:
- name: openstack/python-swiftclient
@@ -117,9 +124,22 @@
parent: glance_store-tox-swift-tips-base
description: |
glance_store py36 unit tests vs. swift masters
+ nodeset: ubuntu-bionic
vars:
tox_envlist: py36
+- job:
+ name: glance_store-src-ceph-tempest
+ parent: devstack-plugin-ceph-tempest-py3
+ description: |
+ Runs tempest tests with the latest glance_store and the Ceph backend
+ Former names for this job were:
+ * legacy-tempest-dsvm-full-ceph-plugin-src-glance_store
+ required-projects:
+ - opendev.org/openstack/glance_store
+ vars:
+ tempest_test_regex: (^tempest\.(api|scenario)|(^cinder_tempest_plugin))
+
- project:
templates:
- check-requirements
@@ -130,7 +150,7 @@
- release-notes-jobs-python3
check:
jobs:
- - legacy-tempest-dsvm-full-ceph-plugin-src-glance_store:
+ - glance_store-src-ceph-tempest:
voting: false
irrelevant-files: &tempest-irrelevant-files
- ^doc/.*$
@@ -140,7 +160,6 @@
- ^lower-constraints.txt$
- ^setup.cfg$
- ^tox.ini$
- - ^\.zuul\.yaml$
experimental:
jobs:
- glance_store-dsvm-functional-filesystem
diff --git a/babel.cfg b/babel.cfg
deleted file mode 100644
index efceab8..0000000
--- a/babel.cfg
+++ /dev/null
@@ -1 +0,0 @@
-[python: **.py]
diff --git a/doc/requirements.txt b/doc/requirements.txt
index f0dc045..bd5c965 100644
--- a/doc/requirements.txt
+++ b/doc/requirements.txt
@@ -1,7 +1,7 @@
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
-sphinx!=1.6.6,!=1.6.7,>=1.6.2 # BSD
-openstackdocstheme>=1.18.1 # Apache-2.0
-reno>=2.5.0 # Apache-2.0
+sphinx>=2.0.0,!=2.1.0 # BSD
+openstackdocstheme>=2.2.1 # Apache-2.0
+reno>=3.1.0 # Apache-2.0
sphinxcontrib-apidoc>=0.2.0 # BSD
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 7c71c42..582d001 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -25,10 +25,10 @@ extensions = ['openstackdocstheme',
'sphinxcontrib.apidoc']
# openstackdocstheme options
-repository_name = 'openstack/glance_store'
-bug_project = 'glance-store'
-bug_tag = ''
-html_last_updated_fmt = '%Y-%m-%d %H:%M'
+openstackdocs_repo_name = 'openstack/glance_store'
+openstackdocs_auto_name = False
+openstackdocs_bug_project = 'glance-store'
+openstackdocs_bug_tag = ''
# sphinxcontrib.apidoc options
apidoc_module_dir = '../../glance_store'
@@ -63,7 +63,7 @@ add_function_parentheses = True
add_module_names = True
# The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
+pygments_style = 'native'
# -- Options for HTML output --------------------------------------------------
@@ -88,3 +88,11 @@ latex_documents = [
'%s Documentation' % project,
'OpenStack Foundation', 'manual'),
]
+
+# The autodoc module imports every module to check for import
+# errors. Since the fs_mount module is self initializing, it
+# requires configurations that aren't loaded till that time.
+# It would never happen in a real scenario as it is only imported
+# from cinder store after the config are loaded but to handle doc
+# failures, we mock it here.
+autodoc_mock_imports = ['glance_store.common.fs_mount'] \ No newline at end of file
diff --git a/etc/glance/rootwrap.d/glance_cinder_store.filters b/etc/glance/rootwrap.d/glance_cinder_store.filters
index 46c389b..9aaf8b4 100644
--- a/etc/glance/rootwrap.d/glance_cinder_store.filters
+++ b/etc/glance/rootwrap.d/glance_cinder_store.filters
@@ -10,3 +10,7 @@ disk_chown: RegExpFilter, chown, root, chown, \d+, /dev/(?!.*/\.\.).*
# This line ties the superuser privs with the config files, context name,
# and (implicitly) the actual python code invoked.
privsep-rootwrap: RegExpFilter, privsep-helper, root, privsep-helper, --config-file, /etc/(?!\.\.).*, --privsep_context, os_brick.privileged.default, --privsep_sock_path, /tmp/.*
+
+chown: CommandFilter, chown, root
+mount: CommandFilter, mount, root
+umount: CommandFilter, umount, root \ No newline at end of file
diff --git a/glance_store/_drivers/cinder.py b/glance_store/_drivers/cinder.py
index 2a83e95..2754017 100644
--- a/glance_store/_drivers/cinder.py
+++ b/glance_store/_drivers/cinder.py
@@ -15,6 +15,7 @@
import contextlib
import errno
import hashlib
+import importlib
import logging
import math
import os
@@ -304,6 +305,11 @@ Possible values:
Related options:
* None
+NOTE: You cannot use an encrypted volume_type associated with an NFS backend.
+An encrypted volume stored on an NFS backend will raise an exception whenever
+glance_store tries to write or access image data stored in that volume.
+Consult your Cinder administrator to determine an appropriate volume_type.
+
"""),
cfg.BoolOpt('cinder_enforce_multipath',
default=False,
@@ -354,13 +360,16 @@ class StoreLocation(glance_store.location.StoreLocation):
self.volume_id = self.specs.get('volume_id')
def get_uri(self):
+ if self.backend_group:
+ return "cinder://%s/%s" % (self.backend_group,
+ self.volume_id)
return "cinder://%s" % self.volume_id
def parse_uri(self, uri):
self.validate_schemas(uri, valid_schemas=('cinder://',))
self.scheme = 'cinder'
- self.volume_id = uri[9:]
+ self.volume_id = uri.split('/')[-1]
if not utils.is_uuid_like(self.volume_id):
reason = _("URI contains invalid volume ID")
@@ -380,54 +389,126 @@ class Store(glance_store.driver.Store):
def __init__(self, *args, **kargs):
super(Store, self).__init__(*args, **kargs)
+ # We are importing it here to let the config options load
+ # before we use them in the fs_mount file
+ self.mount = importlib.import_module('glance_store.common.fs_mount')
+ self._set_url_prefix()
if self.backend_group:
- self._set_url_prefix()
+ self.store_conf = getattr(self.conf, self.backend_group)
+ else:
+ self.store_conf = self.conf.glance_store
- def get_root_helper(self):
+ def _set_url_prefix(self):
+ self._url_prefix = "cinder://"
if self.backend_group:
- rootwrap = getattr(CONF, self.backend_group).rootwrap_config
- else:
- rootwrap = CONF.glance_store.rootwrap_config
+ self._url_prefix = "cinder://%s" % self.backend_group
+
+ def configure_add(self):
+ """
+ Configure the Store to use the stored configuration options
+ Any store that needs special configuration should implement
+ this method. If the store was not able to successfully configure
+ itself, it should raise `exceptions.BadStoreConfiguration`
+ :raises: `exceptions.BadStoreConfiguration` if multiple stores are
+ defined and particular store wasn't able to configure
+ successfully
+ :raises: `exceptions.BackendException` if single store is defined and
+ it wasn't able to configure successfully
+ """
+ if self.backend_group:
+ cinder_volume_type = self.store_conf.cinder_volume_type
+ if cinder_volume_type:
+ # NOTE: `cinder_volume_type` is configured, check
+ # configured volume_type is available in cinder or not
+ cinder_client = self.get_cinderclient()
+ try:
+ # We don't even need the volume type object, as long
+ # as this returns clean, we know the name is good.
+ cinder_client.volume_types.find(name=cinder_volume_type)
+ # No need to worry NoUniqueMatch as volume type name is
+ # unique
+ except cinder_exception.NotFound:
+ reason = _("Invalid `cinder_volume_type %s`"
+ % cinder_volume_type)
+ if len(self.conf.enabled_backends) > 1:
+ LOG.error(reason)
+ raise exceptions.BadStoreConfiguration(
+ store_name=self.backend_group, reason=reason)
+ else:
+ LOG.critical(reason)
+ raise exceptions.BackendException(reason)
+
+ def is_image_associated_with_store(self, context, volume_id):
+ """
+ Updates legacy images URL to respective stores.
+ This method checks the volume type of the volume associated with the
+ image against the configured stores. It returns true if the
+ cinder_volume_type configured in the store matches with the volume
+ type of the image-volume. When cinder_volume_type is not configured
+ then the it checks it against default_volume_type set in cinder.
+ If above both conditions doesn't meet, it returns false.
+ """
+ try:
+ cinder_client = self.get_cinderclient(context=context,
+ legacy_update=True)
+ cinder_volume_type = self.store_conf.cinder_volume_type
+ volume = cinder_client.volumes.get(volume_id)
+ if cinder_volume_type and volume.volume_type == cinder_volume_type:
+ return True
+ elif not cinder_volume_type:
+ default_type = cinder_client.volume_types.default()
+ if volume.volume_type == default_type.name:
+ return True
+ except Exception:
+ # Glance calls this method to update legacy images URL
+ # If an exception occours due to image/volume is non-existent or
+ # any other reason, we return False (i.e. the image location URL
+ # won't be updated) and it is glance's responsibility to handle
+ # the case when the image failed to update
+ pass
+
+ return False
+ def get_root_helper(self):
+ rootwrap = self.store_conf.rootwrap_config
return 'sudo glance-rootwrap %s' % rootwrap
def is_user_overriden(self):
- if self.backend_group:
- store_conf = getattr(self.conf, self.backend_group)
- else:
- store_conf = self.conf.glance_store
-
- return all([store_conf.get('cinder_store_' + key)
+ return all([self.store_conf.get('cinder_store_' + key)
for key in ['user_name', 'password',
'project_name', 'auth_address']])
- def get_cinderclient(self, context=None):
- if self.backend_group:
- glance_store = getattr(self.conf, self.backend_group)
+ def get_cinderclient(self, context=None, legacy_update=False):
+ # NOTE: For legacy image update from single store to multiple
+ # stores we need to use admin context rather than user provided
+ # credentials
+ if legacy_update:
+ user_overriden = False
+ context = context.elevated()
else:
- glance_store = self.conf.glance_store
+ user_overriden = self.is_user_overriden()
- user_overriden = self.is_user_overriden()
if user_overriden:
- username = glance_store.cinder_store_user_name
- password = glance_store.cinder_store_password
- project = glance_store.cinder_store_project_name
- url = glance_store.cinder_store_auth_address
+ username = self.store_conf.cinder_store_user_name
+ password = self.store_conf.cinder_store_password
+ project = self.store_conf.cinder_store_project_name
+ url = self.store_conf.cinder_store_auth_address
else:
username = context.user
password = context.auth_token
project = context.tenant
- if glance_store.cinder_endpoint_template:
- url = glance_store.cinder_endpoint_template % context.to_dict()
+ if self.store_conf.cinder_endpoint_template:
+ template = self.store_conf.cinder_endpoint_template
+ url = template % context.to_dict()
else:
- info = glance_store.cinder_catalog_info
+ info = self.store_conf.cinder_catalog_info
service_type, service_name, interface = info.split(':')
try:
catalog = keystone_sc.ServiceCatalogV2(
context.service_catalog)
url = catalog.url_for(
- region_name=glance_store.cinder_os_region_name,
+ region_name=self.store_conf.cinder_os_region_name,
service_type=service_type,
service_name=service_name,
interface=interface)
@@ -438,10 +519,10 @@ class Store(glance_store.driver.Store):
c = cinderclient.Client(
username, password, project, auth_url=url,
- region_name=glance_store.cinder_os_region_name,
- insecure=glance_store.cinder_api_insecure,
- retries=glance_store.cinder_http_retries,
- cacert=glance_store.cinder_ca_certificates_file)
+ region_name=self.store_conf.cinder_os_region_name,
+ insecure=self.store_conf.cinder_api_insecure,
+ retries=self.store_conf.cinder_http_retries,
+ cacert=self.store_conf.cinder_ca_certificates_file)
LOG.debug(
'Cinderclient connection created for user %(user)s using URL: '
@@ -476,9 +557,6 @@ class Store(glance_store.driver.Store):
def get_schemes(self):
return ('cinder',)
- def _set_url_prefix(self):
- self._url_prefix = "cinder://"
-
def _check_context(self, context, require_tenant=False):
user_overriden = self.is_user_overriden()
if user_overriden and not require_tenant:
@@ -494,12 +572,7 @@ class Store(glance_store.driver.Store):
def _wait_volume_status(self, volume, status_transition, status_expected):
max_recheck_wait = 15
- if self.backend_group:
- timeout = getattr(
- self.conf, self.backend_group).cinder_state_transition_timeout
- else:
- timeout = self.conf.glance_store.cinder_state_transition_timeout
-
+ timeout = self.store_conf.cinder_state_transition_timeout
volume = volume.manager.get(volume.id)
tries = 0
elapsed = 0
@@ -525,6 +598,22 @@ class Store(glance_store.driver.Store):
raise exceptions.BackendException(msg)
return volume
+ def get_hash_str(self, base_str):
+ """Returns string that represents SHA256 hash of base_str (in hex format).
+
+ If base_str is a Unicode string, encode it to UTF-8.
+ """
+ if isinstance(base_str, str):
+ base_str = base_str.encode('utf-8')
+ return hashlib.sha256(base_str).hexdigest()
+
+ def _get_mount_path(self, share, mount_point_base):
+ """Returns the mount path prefix using the mount point base and share.
+
+ :returns: The mount path prefix.
+ """
+ return os.path.join(mount_point_base, self.get_hash_str(share))
+
@contextlib.contextmanager
def _open_cinder_volume(self, client, volume, mode):
attach_mode = 'rw' if mode == 'wb' else 'ro'
@@ -532,17 +621,9 @@ class Store(glance_store.driver.Store):
root_helper = self.get_root_helper()
priv_context.init(root_helper=shlex.split(root_helper))
host = socket.gethostname()
- if self.backend_group:
- use_multipath = getattr(
- self.conf, self.backend_group).cinder_use_multipath
- enforce_multipath = getattr(
- self.conf, self.backend_group).cinder_enforce_multipath
- mount_point_base = getattr(
- self.conf, self.backend_group).cinder_mount_point_base
- else:
- use_multipath = self.conf.glance_store.cinder_use_multipath
- enforce_multipath = self.conf.glance_store.cinder_enforce_multipath
- mount_point_base = self.conf.glance_store.cinder_mount_point_base
+ use_multipath = self.store_conf.cinder_use_multipath
+ enforce_multipath = self.store_conf.cinder_enforce_multipath
+ mount_point_base = self.store_conf.cinder_mount_point_base
properties = connector.get_connector_properties(
root_helper, host, use_multipath, enforce_multipath)
@@ -557,13 +638,36 @@ class Store(glance_store.driver.Store):
try:
connection_info = volume.initialize_connection(volume, properties)
- if connection_info['driver_volume_type'] == 'nfs':
- connection_info['mount_point_base'] = os.path.join(
- mount_point_base, 'nfs')
conn = connector.InitiatorConnector.factory(
connection_info['driver_volume_type'], root_helper,
conn=connection_info)
- device = conn.connect_volume(connection_info['data'])
+ if connection_info['driver_volume_type'] == 'nfs':
+ if volume.encrypted:
+ volume.unreserve(volume)
+ volume.delete()
+ msg = (_('Encrypted volume creation for cinder nfs is not '
+ 'supported from glance_store. Failed to create '
+ 'volume %(volume_id)s')
+ % {'volume_id': volume.id})
+ LOG.error(msg)
+ raise exceptions.BackendException(msg)
+
+ @utils.synchronized(connection_info['data']['export'])
+ def connect_volume_nfs():
+ data = connection_info['data']
+ export = data['export']
+ vol_name = data['name']
+ mountpoint = self._get_mount_path(
+ export,
+ os.path.join(mount_point_base, 'nfs'))
+ options = data['options']
+ self.mount.mount(
+ 'nfs', export, vol_name, mountpoint, host,
+ root_helper, options)
+ return {'path': os.path.join(mountpoint, vol_name)}
+ device = connect_volume_nfs()
+ else:
+ device = conn.connect_volume(connection_info['data'])
volume.attach(None, 'glance_store', attach_mode, host_name=host)
volume = self._wait_volume_status(volume, 'attaching', 'in-use')
if (connection_info['driver_volume_type'] == 'rbd' and
@@ -571,8 +675,7 @@ class Store(glance_store.driver.Store):
yield device['path']
else:
with self.temporary_chown(
- device['path'], backend=self.backend_group
- ), open(device['path'], mode) as f:
+ device['path']), open(device['path'], mode) as f:
yield f
except Exception:
LOG.exception(_LE('Exception while accessing to cinder volume '
@@ -586,7 +689,15 @@ class Store(glance_store.driver.Store):
if device:
try:
- conn.disconnect_volume(connection_info['data'], device)
+ if connection_info['driver_volume_type'] == 'nfs':
+ @utils.synchronized(connection_info['data']['export'])
+ def disconnect_volume_nfs():
+ path, vol_name = device['path'].rsplit('/', 1)
+ self.mount.umount(vol_name, path, host,
+ root_helper)
+ disconnect_volume_nfs()
+ else:
+ conn.disconnect_volume(connection_info['data'], device)
except Exception:
LOG.exception(_LE('Failed to disconnect volume '
'%(volume_id)s.'),
@@ -730,11 +841,7 @@ class Store(glance_store.driver.Store):
'image_size': str(image_size),
'image_owner': owner}
- if self.backend_group:
- volume_type = getattr(self.conf,
- self.backend_group).cinder_volume_type
- else:
- volume_type = self.conf.glance_store.cinder_volume_type
+ volume_type = self.store_conf.cinder_volume_type
LOG.debug('Creating a new volume: image_size=%d size_gb=%d type=%s',
image_size, size_gb, volume_type or 'None')
@@ -818,10 +925,13 @@ class Store(glance_store.driver.Store):
'checksum_hex': checksum_hex})
image_metadata = {}
+ location_url = 'cinder://%s' % volume.id
if self.backend_group:
image_metadata['store'] = u"%s" % self.backend_group
+ location_url = 'cinder://%s/%s' % (self.backend_group,
+ volume.id)
- return ('cinder://%s' % volume.id,
+ return (location_url,
bytes_written,
checksum_hex,
hash_hex,
diff --git a/glance_store/_drivers/filesystem.py b/glance_store/_drivers/filesystem.py
index 3691fd9..ff633f1 100644
--- a/glance_store/_drivers/filesystem.py
+++ b/glance_store/_drivers/filesystem.py
@@ -167,7 +167,29 @@ Possible Values:
Related options:
* None
-""")]
+"""),
+ cfg.BoolOpt('filesystem_thin_provisioning',
+ default=False,
+ help="""
+Enable or not thin provisioning in this backend.
+
+This configuration option enable the feature of not really write null byte
+sequences on the filesystem, the holes who can appear will automatically
+be interpreted by the filesystem as null bytes, and do not really consume
+your storage.
+Enabling this feature will also speed up image upload and save network trafic
+in addition to save space in the backend, as null bytes sequences are not
+sent over the network.
+
+Possible Values:
+ * True
+ * False
+
+Related options:
+ * None
+
+"""),
+]
MULTI_FILESYSTEM_METADATA_SCHEMA = {
"type": "array",
@@ -410,6 +432,8 @@ class Store(glance_store.driver.Store):
fstore_perm = store_conf.filesystem_store_file_perm
meta_file = store_conf.filesystem_store_metadata_file
+ self.thin_provisioning = store_conf.\
+ filesystem_thin_provisioning
self.chunk_size = store_conf.filesystem_store_chunk_size
self.READ_CHUNKSIZE = self.chunk_size
self.WRITE_CHUNKSIZE = self.READ_CHUNKSIZE
@@ -727,7 +751,11 @@ class Store(glance_store.driver.Store):
checksum.update(buf)
if verifier:
verifier.update(buf)
- f.write(buf)
+ if self.thin_provisioning and not any(buf):
+ f.truncate(bytes_written)
+ f.seek(0, os.SEEK_END)
+ else:
+ f.write(buf)
except IOError as e:
if e.errno != errno.EACCES:
self._delete_partial(filepath, image_id)
diff --git a/glance_store/_drivers/rbd.py b/glance_store/_drivers/rbd.py
index 0e3e487..7c71075 100644
--- a/glance_store/_drivers/rbd.py
+++ b/glance_store/_drivers/rbd.py
@@ -1,4 +1,5 @@
# Copyright 2010-2011 Josh Durgin
+# Copyright 2020 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -15,8 +16,6 @@
"""Storage backend for RBD
(RADOS (Reliable Autonomic Distributed Object Store) Block Device)"""
-from __future__ import absolute_import
-from __future__ import with_statement
import contextlib
import hashlib
@@ -32,7 +31,7 @@ from glance_store import capabilities
from glance_store.common import utils
from glance_store import driver
from glance_store import exceptions
-from glance_store.i18n import _, _LE, _LI
+from glance_store.i18n import _, _LE, _LI, _LW
from glance_store import location
try:
@@ -152,6 +151,26 @@ Related options:
* None
"""),
+ cfg.BoolOpt('rbd_thin_provisioning',
+ default=False,
+ help="""
+Enable or not thin provisioning in this backend.
+
+This configuration option enable the feature of not really write null byte
+sequences on the RBD backend, the holes who can appear will automatically
+be interpreted by Ceph as null bytes, and do not really consume your storage.
+Enabling this feature will also speed up image upload and save network trafic
+in addition to save space in the backend, as null bytes sequences are not
+sent over the network.
+
+Possible Values:
+ * True
+ * False
+
+Related options:
+ * None
+
+"""),
]
@@ -271,10 +290,16 @@ class Store(driver.Store):
try:
client.connect(timeout=self.connect_timeout)
- except rados.Error:
- msg = _LE("Error connecting to ceph cluster.")
- LOG.exception(msg)
- raise exceptions.BackendException()
+ except (rados.Error, rados.ObjectNotFound) as e:
+ if self.backend_group and len(self.conf.enabled_backends) > 1:
+ reason = _("Error in store configuration: %s") % e
+ LOG.debug(reason)
+ raise exceptions.BadStoreConfiguration(
+ store_name=self.backend_group, reason=reason)
+ else:
+ msg = _LE("Error connecting to ceph cluster.")
+ LOG.exception(msg)
+ raise exceptions.BackendException()
try:
yield client
finally:
@@ -297,13 +322,19 @@ class Store(driver.Store):
self.backend_group).rbd_store_ceph_conf
connect_timeout = getattr(
self.conf, self.backend_group).rados_connect_timeout
+ thin_provisioning = getattr(self.conf,
+ self.backend_group).\
+ rbd_thin_provisioning
else:
chunk = self.conf.glance_store.rbd_store_chunk_size
pool = self.conf.glance_store.rbd_store_pool
user = self.conf.glance_store.rbd_store_user
conf_file = self.conf.glance_store.rbd_store_ceph_conf
connect_timeout = self.conf.glance_store.rados_connect_timeout
+ thin_provisioning = \
+ self.conf.glance_store.rbd_thin_provisioning
+ self.thin_provisioning = thin_provisioning
self.chunk_size = chunk * units.Mi
self.READ_CHUNKSIZE = self.chunk_size
self.WRITE_CHUNKSIZE = self.READ_CHUNKSIZE
@@ -321,6 +352,8 @@ class Store(driver.Store):
reason=reason)
if self.backend_group:
self._set_url_prefix()
+ self.size = 0
+ self.resize_amount = self.WRITE_CHUNKSIZE
def _set_url_prefix(self):
fsid = None
@@ -464,6 +497,18 @@ class Store(driver.Store):
# Such exception is not dangerous for us so it will be just logged
LOG.debug("Snapshot %s is unprotected already" % snap_name)
+ def _resize_on_write(self, image, image_size, bytes_written, chunk_length):
+ """Handle the rbd resize when needed."""
+ if image_size != 0 or self.size >= bytes_written + chunk_length:
+ return self.size
+ new_size = self.size + self.resize_amount
+ LOG.debug("resizing image to %s KiB" % (new_size / units.Ki))
+ image.resize(new_size)
+ # Note(jokke): We double how much we grow the image each time
+ # up to 8gigs to avoid resizing for each write on bigger images
+ self.resize_amount = min(self.resize_amount * 2, 8 * units.Gi)
+ return new_size
+
@driver.back_compat_add
@capabilities.check
def add(self, image_id, image_file, image_size, hashing_algo, context=None,
@@ -510,9 +555,9 @@ class Store(driver.Store):
LOG.debug('creating image %s with order %d and size %d',
image_name, order, image_size)
if image_size == 0:
- LOG.warning(_("since image size is zero we will be doing "
- "resize-before-write for each chunk which "
- "will be considerably slower than normal"))
+ LOG.warning(_LW("Since image size is zero we will be "
+ "doing resize-before-write which will be "
+ "slower than normal"))
try:
loc = self._create_image(fsid, conn, ioctx, image_name,
@@ -528,24 +573,27 @@ class Store(driver.Store):
chunks = utils.chunkreadable(image_file,
self.WRITE_CHUNKSIZE)
for chunk in chunks:
- # If the image size provided is zero we need to do
- # a resize for the amount we are writing. This will
- # be slower so setting a higher chunk size may
- # speed things up a bit.
- if image_size == 0:
- chunk_length = len(chunk)
- length = offset + chunk_length
- bytes_written += chunk_length
- LOG.debug(_("resizing image to %s KiB") %
- (length / units.Ki))
- image.resize(length)
- LOG.debug(_("writing chunk at offset %s") %
- (offset))
- offset += image.write(chunk, offset)
+ # NOTE(jokke): If we don't know image size we need
+ # to resize it on write. The resize amount will
+ # ramp up to 8 gigs.
+ chunk_length = len(chunk)
+ self.size = self._resize_on_write(image,
+ image_size,
+ bytes_written,
+ chunk_length)
+ bytes_written += chunk_length
+ if not (self.thin_provisioning and not any(chunk)):
+ image.write(chunk, offset)
+ offset += chunk_length
os_hash_value.update(chunk)
checksum.update(chunk)
if verifier:
verifier.update(chunk)
+
+ # Lets trim the image in case we overshoot with resize
+ if image_size == 0:
+ image.resize(bytes_written)
+
if loc.snapshot:
image.create_snap(loc.snapshot)
image.protect_snap(loc.snapshot)
diff --git a/glance_store/common/fs_mount.py b/glance_store/common/fs_mount.py
new file mode 100644
index 0000000..67e4a7f
--- /dev/null
+++ b/glance_store/common/fs_mount.py
@@ -0,0 +1,366 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import collections
+import contextlib
+import logging
+import os
+import socket
+import threading
+
+from oslo_concurrency import processutils
+from oslo_config import cfg
+
+from glance_store import exceptions
+from glance_store.i18n import _LE, _LW
+
+
+LOG = logging.getLogger(__name__)
+
+HOST = socket.gethostname()
+CONF = cfg.CONF
+
+
+class HostMountStateManagerMeta(type):
+ _instance = {}
+
+ def __call__(cls, *args, **kwargs):
+ if cls not in cls._instance:
+ cls._instance[cls] = super(
+ HostMountStateManagerMeta, cls).__call__(*args, **kwargs)
+ return cls._instance[cls]
+
+
+class _HostMountStateManager(metaclass=HostMountStateManagerMeta):
+ """A global manager of filesystem mounts.
+
+ _HostMountStateManager manages a _HostMountState object for the current
+ glance node. Primarily it creates one on object initialization and returns
+ it via get_state().
+
+ _HostMountStateManager manages concurrency itself. Independent callers do
+ not need to consider interactions between multiple _HostMountStateManager
+ calls when designing their own locking.
+
+ """
+ # Reset state of global _HostMountStateManager
+ state = None
+ use_count = 0
+
+ # Guards both state and use_count
+ cond = threading.Condition()
+
+ def __init__(self, host):
+ """Initialise a new _HostMountState
+
+ We will block before creating a new state until all operations
+ using a previous state have completed.
+
+ :param host: host
+ """
+ # Wait until all operations using a previous state are
+ # complete before initialising a new one. Note that self.state is
+ # already None, set either by initialisation or by host_down. This
+ # means the current state will not be returned to any new callers,
+ # and use_count will eventually reach zero.
+ # We do this to avoid a race between _HostMountState initialisation
+ # and an on-going mount/unmount operation
+ self.host = host
+ while self.use_count != 0:
+ self.cond.wait()
+
+ # Another thread might have initialised state while we were
+ # waiting
+ if self.state is None:
+ LOG.debug('Initialising _HostMountState')
+ self.state = _HostMountState()
+ backends = []
+ enabled_backends = CONF.enabled_backends
+ if enabled_backends:
+ for backend in enabled_backends:
+ if enabled_backends[backend] == 'cinder':
+ backends.append(backend)
+ else:
+ backends.append('glance_store')
+
+ for backend in backends:
+ mountpoint = getattr(CONF, backend).cinder_mount_point_base
+ # This is currently designed for cinder nfs backend only.
+ # Later can be modified to work with other *fs backends.
+ mountpoint = os.path.join(mountpoint, 'nfs')
+ # There will probably be the same rootwrap file for all stores,
+ # generalizing this will be done in a later refactoring
+ rootwrap = getattr(CONF, backend).rootwrap_config
+ rootwrap = ('sudo glance-rootwrap %s' % rootwrap)
+ dirs = []
+ # fetch the directories in the mountpoint path
+ if os.path.isdir(mountpoint):
+ dirs = os.listdir(mountpoint)
+ else:
+ continue
+ if not dirs:
+ return
+ for dir in dirs:
+ # for every directory in the mountpath, we
+ # unmount it (if mounted) and remove it
+ dir = os.path.join(mountpoint, dir)
+ with self.get_state() as mount_state:
+ if os.path.exists(dir) and not os.path.ismount(dir):
+ try:
+ os.rmdir(dir)
+ except Exception as ex:
+ LOG.debug(
+ "Couldn't remove directory "
+ "%(mountpoint)s: %(reason)s",
+ {'mountpoint': mountpoint,
+ 'reason': ex})
+ else:
+ mount_state.umount(None, dir, HOST, rootwrap)
+
+ @contextlib.contextmanager
+ def get_state(self):
+ """Return the current mount state.
+
+ _HostMountStateManager will not permit a new state object to be
+ created while any previous state object is still in use.
+
+ :rtype: _HostMountState
+ """
+
+ # We hold the instance lock here so that if a _HostMountState is
+ # currently initialising we'll wait for it to complete rather than
+ # fail.
+ with self.cond:
+ state = self.state
+ if state is None:
+ LOG.error('Host not initialized')
+ raise exceptions.HostNotInitialized(host=self.host)
+ self.use_count += 1
+ try:
+ LOG.debug('Got _HostMountState')
+ yield state
+ finally:
+ with self.cond:
+ self.use_count -= 1
+ self.cond.notify_all()
+
+
+class _HostMountState(object):
+ """A data structure recording all managed mountpoints and the
+ attachments in use for each one. _HostMountState ensures that the glance
+ node only attempts to mount a single mountpoint in use by multiple
+ attachments once, and that it is not unmounted until it is no longer in use
+ by any attachments.
+
+ Callers should not create a _HostMountState directly, but should obtain
+ it via:
+
+ with mount.get_manager().get_state() as state:
+ state.mount(...)
+
+ _HostMountState manages concurrency itself. Independent callers do not need
+ to consider interactions between multiple _HostMountState calls when
+ designing their own locking.
+ """
+
+ class _MountPoint(object):
+ """A single mountpoint, and the set of attachments in use on it."""
+ def __init__(self):
+ # A guard for operations on this mountpoint
+ # N.B. Care is required using this lock, as it will be deleted
+ # if the containing _MountPoint is deleted.
+ self.lock = threading.Lock()
+
+ # The set of attachments on this mountpoint.
+ self.attachments = set()
+
+ def add_attachment(self, vol_name, host):
+ self.attachments.add((vol_name, host))
+
+ def remove_attachment(self, vol_name, host):
+ self.attachments.remove((vol_name, host))
+
+ def in_use(self):
+ return len(self.attachments) > 0
+
+ def __init__(self):
+ """Initialise _HostMountState"""
+
+ self.mountpoints = collections.defaultdict(self._MountPoint)
+
+ @contextlib.contextmanager
+ def _get_locked(self, mountpoint):
+ """Get a locked mountpoint object
+
+ :param mountpoint: The path of the mountpoint whose object we should
+ return.
+ :rtype: _HostMountState._MountPoint
+ """
+ while True:
+ mount = self.mountpoints[mountpoint]
+ with mount.lock:
+ if self.mountpoints[mountpoint] is mount:
+ yield mount
+ break
+
+ def mount(self, fstype, export, vol_name, mountpoint, host,
+ rootwrap_helper, options):
+ """Ensure a mountpoint is available for an attachment, mounting it
+ if necessary.
+
+ If this is the first attachment on this mountpoint, we will mount it
+ with:
+
+ mount -t <fstype> <options> <export> <mountpoint>
+
+ :param fstype: The filesystem type to be passed to mount command.
+ :param export: The type-specific identifier of the filesystem to be
+ mounted. e.g. for nfs 'host.example.com:/mountpoint'.
+ :param vol_name: The name of the volume on the remote filesystem.
+ :param mountpoint: The directory where the filesystem will be
+ mounted on the local compute host.
+ :param host: The host the volume will be attached to.
+ :param options: An arbitrary list of additional arguments to be
+ passed to the mount command immediate before export
+ and mountpoint.
+ """
+
+ LOG.debug('_HostMountState.mount(fstype=%(fstype)s, '
+ 'export=%(export)s, vol_name=%(vol_name)s, %(mountpoint)s, '
+ 'options=%(options)s)',
+ {'fstype': fstype, 'export': export, 'vol_name': vol_name,
+ 'mountpoint': mountpoint, 'options': options})
+ with self._get_locked(mountpoint) as mount:
+ if not os.path.ismount(mountpoint):
+ LOG.debug('Mounting %(mountpoint)s',
+ {'mountpoint': mountpoint})
+
+ os.makedirs(mountpoint)
+
+ mount_cmd = ['mount', '-t', fstype]
+ if options is not None:
+ mount_cmd.extend(options)
+ mount_cmd.extend([export, mountpoint])
+
+ try:
+ processutils.execute(*mount_cmd, run_as_root=True,
+ root_helper=rootwrap_helper)
+ except Exception:
+ # Check to see if mountpoint is mounted despite the error
+ # eg it was already mounted
+ if os.path.ismount(mountpoint):
+ # We're not going to raise the exception because we're
+ # in the desired state anyway. However, this is still
+ # unusual so we'll log it.
+ LOG.exception(_LE('Error mounting %(fstype)s export '
+ '%(export)s on %(mountpoint)s. '
+                           'Continuing because mountpoint is '
+ 'mounted despite this.'),
+ {'fstype': fstype, 'export': export,
+ 'mountpoint': mountpoint})
+
+ else:
+ # If the mount failed there's no reason for us to keep
+ # a record of it. It will be created again if the
+ # caller retries.
+
+ # Delete while holding lock
+ del self.mountpoints[mountpoint]
+
+ raise
+
+ mount.add_attachment(vol_name, host)
+
+ LOG.debug('_HostMountState.mount() for %(mountpoint)s '
+ 'completed successfully',
+ {'mountpoint': mountpoint})
+
+ def umount(self, vol_name, mountpoint, host, rootwrap_helper):
+ """Mark an attachment as no longer in use, and unmount its mountpoint
+ if necessary.
+
+ :param vol_name: The name of the volume on the remote filesystem.
+        :param mountpoint: The directory where the filesystem is
+                           mounted on the local compute host.
+ :param host: The host the volume was attached to.
+ """
+ LOG.debug('_HostMountState.umount(vol_name=%(vol_name)s, '
+ 'mountpoint=%(mountpoint)s)',
+ {'vol_name': vol_name, 'mountpoint': mountpoint})
+ with self._get_locked(mountpoint) as mount:
+ try:
+ mount.remove_attachment(vol_name, host)
+ except KeyError:
+ LOG.warning(_LW("Request to remove attachment "
+ "(%(vol_name)s, %(host)s) from "
+ "%(mountpoint)s, but we don't think it's in "
+ "use."),
+ {'vol_name': vol_name, 'host': host,
+ 'mountpoint': mountpoint})
+
+ if not mount.in_use():
+ mounted = os.path.ismount(mountpoint)
+
+ if mounted:
+ mounted = self._real_umount(mountpoint, rootwrap_helper)
+
+ # Delete our record entirely if it's unmounted
+ if not mounted:
+ del self.mountpoints[mountpoint]
+
+ LOG.debug('_HostMountState.umount() for %(mountpoint)s '
+ 'completed successfully',
+ {'mountpoint': mountpoint})
+
+ def _real_umount(self, mountpoint, rootwrap_helper):
+ # Unmount and delete a mountpoint.
+ # Return mount state after umount (i.e. True means still mounted)
+ LOG.debug('Unmounting %(mountpoint)s', {'mountpoint': mountpoint})
+
+ try:
+ processutils.execute('umount', mountpoint, run_as_root=True,
+ attempts=3, delay_on_retry=True,
+ root_helper=rootwrap_helper)
+ except processutils.ProcessExecutionError as ex:
+ LOG.error(_LE("Couldn't unmount %(mountpoint)s: %(reason)s"),
+ {'mountpoint': mountpoint, 'reason': ex})
+
+ if not os.path.ismount(mountpoint):
+ try:
+ os.rmdir(mountpoint)
+ except Exception as ex:
+ LOG.error(_LE("Couldn't remove directory %(mountpoint)s: "
+ "%(reason)s"),
+ {'mountpoint': mountpoint,
+ 'reason': ex})
+ return False
+
+ return True
+
+
+__manager__ = _HostMountStateManager(HOST)
+
+
+def mount(fstype, export, vol_name, mountpoint, host, rootwrap_helper,
+ options=None):
+ """A convenience wrapper around _HostMountState.mount()"""
+
+ with __manager__.get_state() as mount_state:
+ mount_state.mount(fstype, export, vol_name, mountpoint, host,
+ rootwrap_helper, options)
+
+
+def umount(vol_name, mountpoint, host, rootwrap_helper):
+ """A convenience wrapper around _HostMountState.umount()"""
+
+ with __manager__.get_state() as mount_state:
+ mount_state.umount(vol_name, mountpoint, host, rootwrap_helper)
diff --git a/glance_store/common/utils.py b/glance_store/common/utils.py
index 49acccb..0ee40f7 100644
--- a/glance_store/common/utils.py
+++ b/glance_store/common/utils.py
@@ -21,6 +21,8 @@ System-level utilities and helper functions.
import logging
import uuid
+from oslo_concurrency import lockutils
+
try:
from eventlet import sleep
except ImportError:
@@ -31,6 +33,8 @@ from glance_store.i18n import _
LOG = logging.getLogger(__name__)
+synchronized = lockutils.synchronized_with_prefix('glance_store-')
+
def is_uuid_like(val):
"""Returns validation of a value as a UUID.
diff --git a/glance_store/exceptions.py b/glance_store/exceptions.py
index a20440f..99bc1b0 100644
--- a/glance_store/exceptions.py
+++ b/glance_store/exceptions.py
@@ -181,3 +181,8 @@ class HasSnapshot(GlanceStoreException):
class InUseByStore(GlanceStoreException):
message = _("The image cannot be deleted because it is in use through "
"the backend store outside of Glance.")
+
+
+class HostNotInitialized(GlanceStoreException):
+    message = _("The glance cinder store host %(host)s which will be used "
+                "to perform nfs mount/umount operations isn't initialized.")
diff --git a/glance_store/locale/en_GB/LC_MESSAGES/glance_store.po b/glance_store/locale/en_GB/LC_MESSAGES/glance_store.po
index 05c301d..6e185b1 100644
--- a/glance_store/locale/en_GB/LC_MESSAGES/glance_store.po
+++ b/glance_store/locale/en_GB/LC_MESSAGES/glance_store.po
@@ -2,15 +2,16 @@
# Andreas Jaeger <jaegerandi@gmail.com>, 2016. #zanata
# Andi Chandler <andi@gowling.com>, 2018. #zanata
# Andi Chandler <andi@gowling.com>, 2019. #zanata
+# Andi Chandler <andi@gowling.com>, 2020. #zanata
msgid ""
msgstr ""
"Project-Id-Version: glance_store VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2019-12-19 00:11+0000\n"
+"POT-Creation-Date: 2020-05-04 16:18+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"PO-Revision-Date: 2019-12-22 08:14+0000\n"
+"PO-Revision-Date: 2020-05-04 08:05+0000\n"
"Last-Translator: Andi Chandler <andi@gowling.com>\n"
"Language-Team: English (United Kingdom)\n"
"Language: en_GB\n"
@@ -199,6 +200,14 @@ msgstr "Store for scheme %s not found"
msgid "The Store URI was malformed: %(uri)s"
msgstr "The Store URI was malformed: %(uri)s"
+#, python-format
+msgid ""
+"The glance cinder store host %(host)s which will be used to perform nfs "
+"mount/umount operations isn't initialized."
+msgstr ""
+"The Glance Cinder store host %(host)s which will be used to perform NFS "
+"mount/umount operations isn't initialised."
+
msgid "The image cannot be deleted because it has snapshot(s)."
msgstr "The image cannot be deleted because it has snapshot(s)."
diff --git a/glance_store/multi_backend.py b/glance_store/multi_backend.py
index 4223e28..cfd80c0 100644
--- a/glance_store/multi_backend.py
+++ b/glance_store/multi_backend.py
@@ -143,7 +143,10 @@ def register_store_opts(conf, reserved_stores=None):
cfg.IntOpt('filesystem_store_chunk_size',
default=64 * units.Ki,
min=1,
- help=FS_CONF_CHUNKSIZE_HELP.format(key))]
+ help=FS_CONF_CHUNKSIZE_HELP.format(key)),
+ cfg.BoolOpt('filesystem_thin_provisioning',
+ default=False,
+ help="""Not used""")]
LOG.debug("Registering options for reserved store: {}".format(key))
conf.register_opts(fs_conf_template, group=key)
diff --git a/glance_store/tests/unit/common/test_fs_mount.py b/glance_store/tests/unit/common/test_fs_mount.py
new file mode 100644
index 0000000..7a83521
--- /dev/null
+++ b/glance_store/tests/unit/common/test_fs_mount.py
@@ -0,0 +1,145 @@
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sys
+from unittest import mock
+
+import fixtures
+from oslo_concurrency import processutils
+from oslo_config import cfg
+from oslotest import base
+
+from glance_store import exceptions
+
+CONF = cfg.CONF
+
+
+class HostMountManagerTestCase(base.BaseTestCase):
+
+ class FakeHostMountState:
+ def __init__(self):
+ self.mountpoints = {mock.sentinel.mountpoint}
+
+ def setUp(self):
+ super(HostMountManagerTestCase, self).setUp()
+ CONF.register_opt(cfg.DictOpt('enabled_backends'))
+ CONF.set_override('enabled_backends', 'fake:file')
+ # Since this is mocked in other tests, we unmock it here
+ if 'glance_store.common.fs_mount' in sys.modules:
+ sys.modules.pop('glance_store.common.fs_mount')
+ # Since the _HostMountStateManager class instantiates on its
+ # import, this import is done here to register the enabled_backends
+ # config option before it is used during initialization
+ from glance_store.common import fs_mount as mount # noqa
+ self.__manager__ = mount.__manager__
+
+ def get_state(self):
+ with self.__manager__.get_state() as state:
+ return state
+
+ def test_get_state_host_not_initialized(self):
+ self.__manager__.state = None
+ self.assertRaises(exceptions.HostNotInitialized,
+ self.get_state)
+
+ def test_get_state(self):
+ self.__manager__.state = self.FakeHostMountState()
+ state = self.get_state()
+ self.assertEqual({mock.sentinel.mountpoint}, state.mountpoints)
+
+
+class HostMountStateTestCase(base.BaseTestCase):
+
+ def setUp(self):
+ super(HostMountStateTestCase, self).setUp()
+ CONF.register_opt(cfg.DictOpt('enabled_backends'))
+ CONF.set_override('enabled_backends', 'fake:file')
+ # Since this is mocked in other tests, we unmock it here
+ if 'glance_store.common.fs_mount' in sys.modules:
+ sys.modules.pop('glance_store.common.fs_mount')
+ # Since the _HostMountStateManager class instantiates on its
+ # import, this import is done here to register the enabled_backends
+ # config option before it is used during initialization
+ from glance_store.common import fs_mount as mount # noqa
+ self.mounted = set()
+ self.m = mount._HostMountState()
+
+ def fake_execute(cmd, *args, **kwargs):
+ if cmd == 'mount':
+ path = args[-1]
+ if path in self.mounted:
+ raise processutils.ProcessExecutionError('Already mounted')
+ self.mounted.add(path)
+ elif cmd == 'umount':
+ path = args[-1]
+ if path not in self.mounted:
+ raise processutils.ProcessExecutionError('Not mounted')
+ self.mounted.remove(path)
+
+ def fake_ismount(path):
+ return path in self.mounted
+
+ mock_execute = mock.MagicMock(side_effect=fake_execute)
+
+ self.useFixture(fixtures.MonkeyPatch(
+ 'oslo_concurrency.processutils.execute',
+ mock_execute))
+ self.useFixture(fixtures.MonkeyPatch('os.path.ismount', fake_ismount))
+
+ @staticmethod
+ def _expected_sentinel_mount_calls(mountpoint=mock.sentinel.mountpoint):
+ return [mock.call('mount', '-t', mock.sentinel.fstype,
+ mock.sentinel.option1, mock.sentinel.option2,
+ mock.sentinel.export, mountpoint,
+ root_helper=mock.sentinel.rootwrap_helper,
+ run_as_root=True)]
+
+ @staticmethod
+ def _expected_sentinel_umount_calls(mountpoint=mock.sentinel.mountpoint):
+ return [mock.call('umount', mountpoint, attempts=3,
+ delay_on_retry=True,
+ root_helper=mock.sentinel.rootwrap_helper,
+ run_as_root=True)]
+
+ def _sentinel_mount(self):
+ self.m.mount(mock.sentinel.fstype, mock.sentinel.export,
+ mock.sentinel.vol, mock.sentinel.mountpoint,
+ mock.sentinel.host, mock.sentinel.rootwrap_helper,
+ [mock.sentinel.option1, mock.sentinel.option2])
+
+ def _sentinel_umount(self):
+ self.m.umount(mock.sentinel.vol, mock.sentinel.mountpoint,
+ mock.sentinel.host, mock.sentinel.rootwrap_helper)
+
+ @mock.patch('os.makedirs')
+ def test_mount(self, mock_makedirs):
+ self._sentinel_mount()
+ mock_makedirs.assert_called_once()
+ processutils.execute.assert_has_calls(
+ self._expected_sentinel_mount_calls())
+
+ def test_unmount_without_mount(self):
+ self._sentinel_umount()
+ processutils.execute.assert_not_called()
+
+ @mock.patch('os.rmdir')
+ @mock.patch('os.makedirs')
+ def test_umount_with_mount(self, mock_makedirs, mock_rmdir):
+ self._sentinel_mount()
+ self._sentinel_umount()
+ mock_makedirs.assert_called_once()
+ mock_rmdir.assert_called_once()
+ processutils.execute.assert_has_calls(
+ self._expected_sentinel_mount_calls() +
+ self._expected_sentinel_umount_calls())
diff --git a/glance_store/tests/unit/test_cinder_store.py b/glance_store/tests/unit/test_cinder_store.py
index 896bcfe..4c4d0d6 100644
--- a/glance_store/tests/unit/test_cinder_store.py
+++ b/glance_store/tests/unit/test_cinder_store.py
@@ -21,6 +21,7 @@ from unittest import mock
import six
import socket
+import sys
import tempfile
import time
import uuid
@@ -30,12 +31,14 @@ from os_brick.initiator import connector
from oslo_concurrency import processutils
from oslo_utils import units
-from glance_store._drivers import cinder
from glance_store import exceptions
from glance_store import location
from glance_store.tests import base
from glance_store.tests.unit import test_store_capabilities
+sys.modules['glance_store.common.fs_mount'] = mock.Mock()
+from glance_store._drivers import cinder # noqa
+
class FakeObject(object):
def __init__(self, **kwargs):
@@ -148,7 +151,8 @@ class TestCinderStore(base.StoreBaseTest,
def _test_open_cinder_volume(self, open_mode, attach_mode, error,
multipath_supported=False,
- enforce_multipath=False):
+ enforce_multipath=False,
+ encrypted_nfs=False):
self.config(cinder_mount_point_base=None)
fake_volume = mock.MagicMock(id=str(uuid.uuid4()), status='available')
fake_volumes = FakeObject(get=lambda id: fake_volume,
@@ -190,19 +194,35 @@ class TestCinderStore(base.StoreBaseTest,
'get_connector_properties') as mock_conn:
if error:
self.assertRaises(error, do_open)
+ elif encrypted_nfs:
+ fake_volume.initialize_connection.return_value = {
+ 'driver_volume_type': 'nfs'
+ }
+ fake_volume.encrypted = True
+ try:
+ with self.store._open_cinder_volume(
+ fake_client, fake_volume, open_mode):
+ pass
+ except exceptions.BackendException:
+ self.assertEqual(1,
+ fake_volume.unreserve.call_count)
+ self.assertEqual(1,
+ fake_volume.delete.call_count)
else:
do_open()
- mock_conn.assert_called_once_with(
- root_helper, socket.gethostname(), multipath_supported,
- enforce_multipath)
- fake_connector.connect_volume.assert_called_once_with(mock.ANY)
- fake_connector.disconnect_volume.assert_called_once_with(
- mock.ANY, fake_devinfo)
- fake_volume.attach.assert_called_once_with(
- None, 'glance_store', attach_mode,
- host_name=socket.gethostname())
- fake_volumes.detach.assert_called_once_with(fake_volume)
+ if not encrypted_nfs:
+ mock_conn.assert_called_once_with(
+ root_helper, socket.gethostname(),
+ multipath_supported, enforce_multipath)
+ fake_connector.connect_volume.assert_called_once_with(
+ mock.ANY)
+ fake_connector.disconnect_volume.assert_called_once_with(
+ mock.ANY, fake_devinfo)
+ fake_volume.attach.assert_called_once_with(
+ None, 'glance_store', attach_mode,
+ host_name=socket.gethostname())
+ fake_volumes.detach.assert_called_once_with(fake_volume)
def test_open_cinder_volume_rw(self):
self._test_open_cinder_volume('wb', 'rw', None)
@@ -225,6 +245,9 @@ class TestCinderStore(base.StoreBaseTest,
multipath_supported=True,
enforce_multipath=True)
+ def test_open_cinder_volume_nfs_encrypted(self):
+ self._test_open_cinder_volume('rb', 'ro', None, encrypted_nfs=True)
+
def test_cinder_configure_add(self):
self.assertRaises(exceptions.BadStoreConfiguration,
self.store._check_context, None)
@@ -385,3 +408,6 @@ class TestCinderStore(base.StoreBaseTest,
loc = location.get_location_from_uri(uri, conf=self.conf)
self.store.delete(loc, context=self.context)
fake_volume.delete.assert_called_once_with()
+
+ def test_set_url_prefix(self):
+ self.assertEqual('cinder://', self.store._url_prefix)
diff --git a/glance_store/tests/unit/test_filesystem_store.py b/glance_store/tests/unit/test_filesystem_store.py
index 6f2661e..bb98f41 100644
--- a/glance_store/tests/unit/test_filesystem_store.py
+++ b/glance_store/tests/unit/test_filesystem_store.py
@@ -143,8 +143,13 @@ class TestStore(base.StoreBaseTest,
self.store.get,
loc)
- def test_add(self):
+ def _do_test_add(self, enable_thin_provisoning):
"""Test that we can add an image via the filesystem backend."""
+ self.config(filesystem_store_chunk_size=units.Ki,
+ filesystem_thin_provisioning=enable_thin_provisoning,
+ group='glance_store')
+ self.store.configure()
+
filesystem.ChunkedFile.CHUNKSIZE = units.Ki
expected_image_id = str(uuid.uuid4())
expected_file_size = 5 * units.Ki # 5K
@@ -176,6 +181,86 @@ class TestStore(base.StoreBaseTest,
self.assertEqual(expected_file_contents, new_image_contents)
self.assertEqual(expected_file_size, new_image_file_size)
+ def test_thin_provisioning_is_disabled_by_default(self):
+ self.assertEqual(self.store.thin_provisioning, False)
+
+ def test_add_with_thick_provisioning(self):
+ self._do_test_add(enable_thin_provisoning=False)
+
+ def test_add_with_thin_provisioning(self):
+ self._do_test_add(enable_thin_provisoning=True)
+
+ def test_add_thick_provisioning_with_holes_in_file(self):
+ """
+ Tests that a file which contains null bytes chunks is fully
+ written with a thick provisioning configuration.
+ """
+ chunk_size = units.Ki # 1K
+ content = b"*" * chunk_size + b"\x00" * chunk_size + b"*" * chunk_size
+ self._do_test_thin_provisioning(content, 3 * chunk_size, 0, 3, False)
+
+ def test_add_thin_provisioning_with_holes_in_file(self):
+ """
+ Tests that a file which contains null bytes chunks is sparsified
+ with a thin provisioning configuration.
+ """
+ chunk_size = units.Ki # 1K
+ content = b"*" * chunk_size + b"\x00" * chunk_size + b"*" * chunk_size
+ self._do_test_thin_provisioning(content, 3 * chunk_size, 1, 2, True)
+
+ def test_add_thick_provisioning_without_holes_in_file(self):
+ """
+        Tests that a file which does not contain null bytes chunks is
+        fully written with a thick provisioning configuration.
+ """
+ chunk_size = units.Ki # 1K
+ content = b"*" * 3 * chunk_size
+ self._do_test_thin_provisioning(content, 3 * chunk_size, 0, 3, False)
+
+ def test_add_thin_provisioning_without_holes_in_file(self):
+ """
+        Tests that a file which does not contain null bytes chunks is
+        fully written with a thin provisioning configuration.
+ """
+ chunk_size = units.Ki # 1K
+ content = b"*" * 3 * chunk_size
+ self._do_test_thin_provisioning(content, 3 * chunk_size, 0, 3, True)
+
+ def test_add_thick_provisioning_with_partial_holes_in_file(self):
+ """
+ Tests that a file which contains null bytes not aligned with
+ chunk size is fully written with a thick provisioning configuration.
+ """
+ chunk_size = units.Ki # 1K
+ my_chunk = int(chunk_size * 1.5)
+ content = b"*" * my_chunk + b"\x00" * my_chunk + b"*" * my_chunk
+ self._do_test_thin_provisioning(content, 3 * my_chunk, 0, 5, False)
+
+ def test_add_thin_provisioning_with_partial_holes_in_file(self):
+ """
+ Tests that a file which contains null bytes not aligned with
+ chunk size is sparsified with a thin provisioning configuration.
+ """
+ chunk_size = units.Ki # 1K
+ my_chunk = int(chunk_size * 1.5)
+ content = b"*" * my_chunk + b"\x00" * my_chunk + b"*" * my_chunk
+ self._do_test_thin_provisioning(content, 3 * my_chunk, 1, 4, True)
+
+ def _do_test_thin_provisioning(self, content, size, truncate, write, thin):
+ self.config(filesystem_store_chunk_size=units.Ki,
+ filesystem_thin_provisioning=thin,
+ group='glance_store')
+ self.store.configure()
+
+ image_file = six.BytesIO(content)
+ image_id = str(uuid.uuid4())
+ with mock.patch.object(builtins, 'open') as popen:
+ self.store.add(image_id, image_file, size, self.hash_algo)
+ write_count = popen.return_value.__enter__().write.call_count
+ truncate_count = popen.return_value.__enter__().truncate.call_count
+ self.assertEqual(write_count, write)
+ self.assertEqual(truncate_count, truncate)
+
def test_add_with_verifier(self):
"""Test that 'verifier.update' is called when verifier is provided."""
verifier = mock.MagicMock(name='mock_verifier')
diff --git a/glance_store/tests/unit/test_multistore_cinder.py b/glance_store/tests/unit/test_multistore_cinder.py
index 7f1f514..484fd4c 100644
--- a/glance_store/tests/unit/test_multistore_cinder.py
+++ b/glance_store/tests/unit/test_multistore_cinder.py
@@ -21,6 +21,7 @@ from unittest import mock
import six
import socket
+import sys
import tempfile
import time
import uuid
@@ -32,12 +33,14 @@ from oslo_config import cfg
from oslo_utils import units
import glance_store as store
-from glance_store._drivers import cinder
from glance_store import exceptions
from glance_store import location
from glance_store.tests import base
from glance_store.tests.unit import test_store_capabilities as test_cap
+sys.modules['glance_store.common.fs_mount'] = mock.Mock()
+from glance_store._drivers import cinder # noqa
+
class FakeObject(object):
def __init__(self, **kwargs):
@@ -89,9 +92,15 @@ class TestMultiCinderStore(base.MultiStoreBaseTest,
user='fake_user',
auth_token='fake_token',
tenant='fake_tenant')
+ self.fake_admin_context = mock.MagicMock()
+ self.fake_admin_context.elevated.return_value = FakeObject(
+ service_catalog=fake_sc,
+ user='admin_user',
+ auth_token='admin_token',
+ tenant='admin_project')
def test_location_url_prefix_is_set(self):
- self.assertEqual("cinder://", self.store.url_prefix)
+ self.assertEqual("cinder://cinder1", self.store.url_prefix)
def test_get_cinderclient(self):
cc = self.store.get_cinderclient(self.context)
@@ -107,6 +116,14 @@ class TestMultiCinderStore(base.MultiStoreBaseTest,
self.assertIsNone(cc.client.auth_token)
self.assertEqual('test_address', cc.client.management_url)
+ def test_get_cinderclient_legacy_update(self):
+ cc = self.store.get_cinderclient(self.fake_admin_context,
+ legacy_update=True)
+ self.assertEqual('admin_token', cc.client.auth_token)
+ self.assertEqual('admin_user', cc.client.user)
+ self.assertEqual('admin_project', cc.client.projectid)
+ self.assertEqual('http://foo/public_url', cc.client.management_url)
+
def test_temporary_chown(self):
class fake_stat(object):
st_uid = 1
@@ -244,7 +261,7 @@ class TestMultiCinderStore(base.MultiStoreBaseTest,
multipath_supported=True,
enforce_multipath=True)
- def test_cinder_configure_add(self):
+ def test_cinder_check_context(self):
self.assertRaises(exceptions.BadStoreConfiguration,
self.store._check_context, None)
@@ -254,6 +271,57 @@ class TestMultiCinderStore(base.MultiStoreBaseTest,
self.store._check_context(FakeObject(service_catalog='fake'))
+ def test_cinder_configure_add(self):
+ with mock.patch.object(self.store, 'get_cinderclient') as mocked_cc:
+ def raise_(ex):
+ raise ex
+ mocked_cc.return_value = FakeObject(volume_types=FakeObject(
+ find=lambda name: 'some_type' if name == 'some_type'
+ else raise_(cinder.cinder_exception.NotFound(code=404))))
+ self.config(cinder_volume_type='some_type',
+ group=self.store.backend_group)
+ # If volume type exists, no exception is raised
+ self.store.configure_add()
+ # setting cinder_volume_type to non-existent value will raise
+ # BadStoreConfiguration exception
+ self.config(cinder_volume_type='some_random_type',
+ group=self.store.backend_group)
+
+ self.assertRaises(exceptions.BadStoreConfiguration,
+ self.store.configure_add)
+ # when only 1 store is configured, BackendException is raised
+ self.config(enabled_backends={'cinder1': 'cinder'})
+ self.assertRaises(exceptions.BackendException,
+ self.store.configure_add)
+
+ def test_is_image_associated_with_store(self):
+ with mock.patch.object(self.store, 'get_cinderclient') as mocked_cc:
+ mocked_cc.return_value = FakeObject(volumes=FakeObject(
+ get=lambda volume_id: FakeObject(volume_type='some_type')),
+ volume_types=FakeObject(
+ default=lambda: FakeObject(name='some_type')))
+ # When cinder_volume_type is set and is same as volume's type
+ self.config(cinder_volume_type='some_type',
+ group=self.store.backend_group)
+ fake_vol_id = str(uuid.uuid4())
+ type_match = self.store.is_image_associated_with_store(
+ self.context, fake_vol_id)
+ self.assertTrue(type_match)
+ # When cinder_volume_type is not set and volume's type is same as
+ # set default volume type
+ self.config(cinder_volume_type=None,
+ group=self.store.backend_group)
+ type_match = self.store.is_image_associated_with_store(
+ self.context, fake_vol_id)
+ self.assertTrue(type_match)
+ # When cinder_volume_type is not set and volume's type does not
+ # match with default volume type
+ mocked_cc.return_value.volume_types = FakeObject(
+ default=lambda: {'name': 'random_type'})
+ type_match = self.store.is_image_associated_with_store(
+ self.context, fake_vol_id)
+ self.assertFalse(type_match)
+
def test_cinder_get(self):
expected_size = 5 * units.Ki
expected_file_contents = b"*" * expected_size
@@ -276,7 +344,7 @@ class TestMultiCinderStore(base.MultiStoreBaseTest,
side_effect=fake_open):
mock_cc.return_value = FakeObject(client=fake_client,
volumes=fake_volumes)
- uri = "cinder://%s" % fake_volume_uuid
+ uri = "cinder://cinder1/%s" % fake_volume_uuid
loc = location.get_location_from_uri_and_backend(uri,
"cinder1",
conf=self.conf)
@@ -303,7 +371,7 @@ class TestMultiCinderStore(base.MultiStoreBaseTest,
mocked_cc.return_value = FakeObject(client=fake_client,
volumes=fake_volumes)
- uri = 'cinder://%s' % fake_volume_uuid
+ uri = 'cinder://cinder1/%s' % fake_volume_uuid
loc = location.get_location_from_uri_and_backend(uri,
"cinder1",
conf=self.conf)
@@ -322,7 +390,7 @@ class TestMultiCinderStore(base.MultiStoreBaseTest,
mocked_cc.return_value = FakeObject(client=fake_client,
volumes=fake_volumes)
- uri = 'cinder://%s' % fake_volume_uuid
+ uri = 'cinder://cinder1/%s' % fake_volume_uuid
loc = location.get_location_from_uri_and_backend(uri,
"cinder1",
conf=self.conf)
@@ -336,7 +404,7 @@ class TestMultiCinderStore(base.MultiStoreBaseTest,
expected_file_contents = b"*" * expected_size
image_file = six.BytesIO(expected_file_contents)
expected_checksum = hashlib.md5(expected_file_contents).hexdigest()
- expected_location = 'cinder://%s' % fake_volume.id
+ expected_location = 'cinder://%s/%s' % (backend, fake_volume.id)
fake_client = FakeObject(auth_token=None, management_url=None)
fake_volume.manager.get.return_value = fake_volume
fake_volumes = FakeObject(create=mock.Mock(return_value=fake_volume))
@@ -407,7 +475,7 @@ class TestMultiCinderStore(base.MultiStoreBaseTest,
mocked_cc.return_value = FakeObject(client=fake_client,
volumes=fake_volumes)
- uri = 'cinder://%s' % fake_volume_uuid
+ uri = 'cinder://cinder1/%s' % fake_volume_uuid
loc = location.get_location_from_uri_and_backend(uri,
"cinder1",
conf=self.conf)
diff --git a/glance_store/tests/unit/test_multistore_rbd.py b/glance_store/tests/unit/test_multistore_rbd.py
index 353ad9e..384fca0 100644
--- a/glance_store/tests/unit/test_multistore_rbd.py
+++ b/glance_store/tests/unit/test_multistore_rbd.py
@@ -36,6 +36,9 @@ class MockRados(object):
class Error(Exception):
pass
+ class ObjectNotFound(Exception):
+ pass
+
class ioctx(object):
def __init__(self, *args, **kwargs):
pass
@@ -443,11 +446,12 @@ class TestMultiStore(base.MultiStoreBaseTest,
@mock.patch.object(MockRados.Rados, 'connect', side_effect=MockRados.Error)
def test_rados_connect_error(self, _):
rbd_store.rados.Error = MockRados.Error
+ rbd_store.rados.ObjectNotFound = MockRados.ObjectNotFound
def test():
with self.store.get_connection('conffile', 'rados_id'):
pass
- self.assertRaises(exceptions.BackendException, test)
+ self.assertRaises(exceptions.BadStoreConfiguration, test)
def test_create_image_conf_features(self):
# Tests that we use non-0 features from ceph.conf and cast to int.
diff --git a/glance_store/tests/unit/test_opts.py b/glance_store/tests/unit/test_opts.py
index 5ec31a4..6f46d60 100644
--- a/glance_store/tests/unit/test_opts.py
+++ b/glance_store/tests/unit/test_opts.py
@@ -13,23 +13,15 @@
# License for the specific language governing permissions and limitations
# under the License.
-import pkg_resources
+import stevedore
from testtools import matchers
from glance_store import backend
from glance_store.tests import base
-def load_entry_point(entry_point, verify_requirements=False):
- """Load an entry-point without requiring dependencies."""
- resolve = getattr(entry_point, 'resolve', None)
- require = getattr(entry_point, 'require', None)
- if resolve is not None and require is not None:
- if verify_requirements:
- entry_point.require()
- return entry_point.resolve()
- else:
- return entry_point.load(require=verify_requirements)
+def on_load_failure_callback(*args, **kwargs):
+ raise
class OptsTestCase(base.StoreBaseTest):
@@ -53,11 +45,16 @@ class OptsTestCase(base.StoreBaseTest):
def _test_entry_point(self, namespace,
expected_opt_groups, expected_opt_names):
opt_list = None
- for ep in pkg_resources.iter_entry_points('oslo.config.opts'):
- if ep.name == namespace:
- list_fn = load_entry_point(ep)
- opt_list = list_fn()
- break
+ mgr = stevedore.NamedExtensionManager(
+ 'oslo.config.opts',
+ names=[namespace],
+ invoke_on_load=False,
+ on_load_failure_callback=on_load_failure_callback,
+ )
+ for ext in mgr:
+ list_fn = ext.plugin
+ opt_list = list_fn()
+ break
self.assertIsNotNone(opt_list)
@@ -92,12 +89,14 @@ class OptsTestCase(base.StoreBaseTest):
'filesystem_store_datadirs',
'filesystem_store_file_perm',
'filesystem_store_metadata_file',
+ 'filesystem_thin_provisioning',
'http_proxy_information',
'https_ca_certificates_file',
'rbd_store_ceph_conf',
'rbd_store_chunk_size',
'rbd_store_pool',
'rbd_store_user',
+ 'rbd_thin_provisioning',
'rados_connect_timeout',
'rootwrap_config',
's3_store_access_key',
diff --git a/glance_store/tests/unit/test_rbd_store.py b/glance_store/tests/unit/test_rbd_store.py
index 8d30855..c6a5f27 100644
--- a/glance_store/tests/unit/test_rbd_store.py
+++ b/glance_store/tests/unit/test_rbd_store.py
@@ -24,6 +24,7 @@ from glance_store import exceptions
from glance_store import location as g_location
from glance_store.tests import base
from glance_store.tests.unit import test_store_capabilities
+from glance_store.tests import utils as test_utils
class TestException(Exception):
@@ -35,6 +36,9 @@ class MockRados(object):
class Error(Exception):
pass
+ class ObjectNotFound(Exception):
+ pass
+
class ioctx(object):
def __init__(self, *args, **kwargs):
pass
@@ -122,7 +126,7 @@ class MockRBD(object):
raise NotImplementedError()
def resize(self, *args, **kwargs):
- raise NotImplementedError()
+ pass
def discard(self, offset, length):
raise NotImplementedError()
@@ -165,6 +169,123 @@ class MockRBD(object):
RBD_FEATURE_LAYERING = 1
+class TestReSize(base.StoreBaseTest,
+ test_store_capabilities.TestStoreCapabilitiesChecking):
+
+ def setUp(self):
+ """Establish a clean test environment."""
+ super(TestReSize, self).setUp()
+
+ rbd_store.rados = MockRados
+ rbd_store.rbd = MockRBD
+
+ self.store = rbd_store.Store(self.conf)
+ self.store.configure()
+ self.store_specs = {'pool': 'fake_pool',
+ 'image': 'fake_image',
+ 'snapshot': 'fake_snapshot'}
+ self.location = rbd_store.StoreLocation(self.store_specs,
+ self.conf)
+ self.hash_algo = 'sha256'
+
+ def test_add_w_image_size_zero_less_resizes(self):
+ """Assert that correct size is returned even though 0 was provided."""
+ data_len = 57 * units.Mi
+ data_iter = test_utils.FakeData(data_len)
+ with mock.patch.object(rbd_store.rbd.Image, 'resize') as resize:
+ with mock.patch.object(rbd_store.rbd.Image, 'write') as write:
+ ret = self.store.add(
+ 'fake_image_id', data_iter, 0, self.hash_algo)
+
+ # We expect to trim at the end so +1
+ expected = 1
+ expected_calls = []
+ data_len_temp = data_len
+ resize_amount = self.store.WRITE_CHUNKSIZE
+ while data_len_temp > 0:
+ expected_calls.append(resize_amount + (data_len -
+ data_len_temp))
+ data_len_temp -= resize_amount
+ resize_amount *= 2
+ expected += 1
+ self.assertEqual(expected, resize.call_count)
+ resize.assert_has_calls([mock.call(call) for call in
+ expected_calls])
+ expected = ([self.store.WRITE_CHUNKSIZE for i in range(int(
+ data_len / self.store.WRITE_CHUNKSIZE))] +
+ [(data_len % self.store.WRITE_CHUNKSIZE)])
+ actual = ([len(args[0]) for args, kwargs in
+ write.call_args_list])
+ self.assertEqual(expected, actual)
+ self.assertEqual(data_len,
+ resize.call_args_list[-1][0][0])
+ self.assertEqual(data_len, ret[1])
+
+ def test_resize_on_write_ceiling(self):
+ image = mock.MagicMock()
+
+ # image, size, written, chunk
+
+ # Non-zero image size means no resize
+ ret = self.store._resize_on_write(image, 32, 16, 16)
+ self.assertEqual(0, ret)
+ image.resize.assert_not_called()
+
+ # Current size is smaller than we need
+ self.store.size = 8
+ ret = self.store._resize_on_write(image, 0, 16, 16)
+ self.assertEqual(8 + self.store.WRITE_CHUNKSIZE, ret)
+ self.assertEqual(self.store.WRITE_CHUNKSIZE * 2,
+ self.store.resize_amount)
+ image.resize.assert_called_once_with(ret)
+
+ # More reads under the limit do not require a resize
+ image.resize.reset_mock()
+ self.store.size = ret
+ ret = self.store._resize_on_write(image, 0, 64, 16)
+ self.assertEqual(8 + self.store.WRITE_CHUNKSIZE, ret)
+ image.resize.assert_not_called()
+
+ # Read past the limit triggers another resize
+ ret = self.store._resize_on_write(image, 0, ret + 1, 16)
+ self.assertEqual(8 + self.store.WRITE_CHUNKSIZE * 3, ret)
+ image.resize.assert_called_once_with(ret)
+ self.assertEqual(self.store.WRITE_CHUNKSIZE * 4,
+ self.store.resize_amount)
+
+ # Check that we do not resize past the 8G ceiling.
+
+ # Start with resize_amount at 4G, 1G read so far
+ image.resize.reset_mock()
+ self.store.resize_amount = 4 * units.Gi
+ self.store.size = 1 * units.Gi
+
+ # First resize happens and we get the 4G,
+ # resize_amount goes to limit of 8G
+ ret = self.store._resize_on_write(image, 0, 4097 * units.Mi, 16)
+ self.assertEqual(5 * units.Gi, ret)
+ self.assertEqual(8 * units.Gi, self.store.resize_amount)
+ self.store.size = ret
+
+ # Second resize happens and we get to 13G,
+ # resize amount stays at limit of 8G
+ ret = self.store._resize_on_write(image, 0, 6144 * units.Mi, 16)
+ self.assertEqual((5 + 8) * units.Gi, ret)
+ self.assertEqual(8 * units.Gi, self.store.resize_amount)
+ self.store.size = ret
+
+ # Third resize happens and we get to 21G,
+ # resize amount stays at limit of 8G
+ ret = self.store._resize_on_write(image, 0, 14336 * units.Mi, 16)
+ self.assertEqual((5 + 8 + 8) * units.Gi, ret)
+ self.assertEqual(8 * units.Gi, self.store.resize_amount)
+
+ image.resize.assert_has_calls([
+ mock.call(5 * units.Gi),
+ mock.call(13 * units.Gi),
+ mock.call(21 * units.Gi)])
+
+
class TestStore(base.StoreBaseTest,
test_store_capabilities.TestStoreCapabilitiesChecking):
@@ -190,6 +311,9 @@ class TestStore(base.StoreBaseTest,
self.data_iter = six.BytesIO(b'*' * self.data_len)
self.hash_algo = 'sha256'
+ def test_thin_provisioning_is_disabled_by_default(self):
+ self.assertEqual(self.store.thin_provisioning, False)
+
def test_add_w_image_size_zero(self):
"""Assert that correct size is returned even though 0 was provided."""
self.store.chunk_size = units.Ki
@@ -302,6 +426,80 @@ class TestStore(base.StoreBaseTest,
self.assertEqual(expected_checksum, checksum)
self.assertEqual(expected_multihash, multihash)
+ def test_add_thick_provisioning_with_holes_in_file(self):
+ """
+ Tests that a file which contains null bytes chunks is fully
+ written to rbd backend in a thick provisioning configuration.
+ """
+ chunk_size = units.Mi
+ content = b"*" * chunk_size + b"\x00" * chunk_size + b"*" * chunk_size
+ self._do_test_thin_provisioning(content, 3 * chunk_size, 3, False)
+
+ def test_add_thin_provisioning_with_holes_in_file(self):
+ """
+ Tests that a file which contains null bytes chunks is sparsified
+ in rbd backend with a thin provisioning configuration.
+ """
+ chunk_size = units.Mi
+ content = b"*" * chunk_size + b"\x00" * chunk_size + b"*" * chunk_size
+ self._do_test_thin_provisioning(content, 3 * chunk_size, 2, True)
+
+ def test_add_thick_provisioning_without_holes_in_file(self):
+ """
+ Tests that a file which does not contain null byte chunks is fully
+ written to rbd backend in a thick provisioning configuration.
+ """
+ chunk_size = units.Mi
+ content = b"*" * 3 * chunk_size
+ self._do_test_thin_provisioning(content, 3 * chunk_size, 3, False)
+
+ def test_add_thin_provisioning_without_holes_in_file(self):
+ """
+ Tests that a file which does not contain null byte chunks is fully
+ written to rbd backend in a thin provisioning configuration.
+ """
+ chunk_size = units.Mi
+ content = b"*" * 3 * chunk_size
+ self._do_test_thin_provisioning(content, 3 * chunk_size, 3, True)
+
+ def test_add_thick_provisioning_with_partial_holes_in_file(self):
+ """
+ Tests that a file which contains null bytes not aligned with
+ chunk size is fully written with a thick provisioning configuration.
+ """
+ chunk_size = units.Mi
+ my_chunk = int(chunk_size * 1.5)
+ content = b"*" * my_chunk + b"\x00" * my_chunk + b"*" * my_chunk
+ self._do_test_thin_provisioning(content, 3 * my_chunk, 5, False)
+
+ def test_add_thin_provisioning_with_partial_holes_in_file(self):
+ """
+ Tests that a file which contains null bytes not aligned with
+ chunk size is sparsified with a thin provisioning configuration.
+ """
+ chunk_size = units.Mi
+ my_chunk = int(chunk_size * 1.5)
+ content = b"*" * my_chunk + b"\x00" * my_chunk + b"*" * my_chunk
+ self._do_test_thin_provisioning(content, 3 * my_chunk, 4, True)
+
+ def _do_test_thin_provisioning(self, content, size, write, thin):
+ self.config(rbd_store_chunk_size=1,
+ rbd_thin_provisioning=thin)
+ self.store.configure()
+
+ image_id = 'fake_image_id'
+ image_file = six.BytesIO(content)
+ expected_checksum = hashlib.md5(content).hexdigest()
+ expected_multihash = hashlib.sha256(content).hexdigest()
+
+ with mock.patch.object(rbd_store.rbd.Image, 'write') as mock_write:
+ loc, size, checksum, multihash, _ = self.store.add(
+ image_id, image_file, size, self.hash_algo)
+ self.assertEqual(mock_write.call_count, write)
+
+ self.assertEqual(expected_checksum, checksum)
+ self.assertEqual(expected_multihash, multihash)
+
def test_delete(self):
def _fake_remove(*args, **kwargs):
self.called_commands_actual.append('remove')
@@ -451,6 +649,7 @@ class TestStore(base.StoreBaseTest,
@mock.patch.object(MockRados.Rados, 'connect', side_effect=MockRados.Error)
def test_rados_connect_error(self, _):
rbd_store.rados.Error = MockRados.Error
+ rbd_store.rados.ObjectNotFound = MockRados.ObjectNotFound
def test():
with self.store.get_connection('conffile', 'rados_id'):
diff --git a/glance_store/tests/unit/test_test_utils.py b/glance_store/tests/unit/test_test_utils.py
new file mode 100644
index 0000000..81495a2
--- /dev/null
+++ b/glance_store/tests/unit/test_test_utils.py
@@ -0,0 +1,38 @@
+# Copyright 2020 Red Hat, Inc
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from glance_store.tests import base
+from glance_store.tests import utils as test_utils
+
+
+class TestFakeData(base.StoreBaseTest):
+ def test_via_read(self):
+ fd = test_utils.FakeData(1024)
+ data = []
+ for i in range(0, 1025, 256):
+ chunk = fd.read(256)
+ data.append(chunk)
+ if not chunk:
+ break
+
+ self.assertEqual(5, len(data))
+ # Make sure we got a zero-length final read
+ self.assertEqual(b'', data[-1])
+ # Make sure we only got 1024 bytes
+ self.assertEqual(1024, len(b''.join(data)))
+
+ def test_via_iter(self):
+ data = b''.join(list(test_utils.FakeData(1024)))
+ self.assertEqual(1024, len(data))
diff --git a/glance_store/tests/utils.py b/glance_store/tests/utils.py
index 2f3a90f..5180bdd 100644
--- a/glance_store/tests/utils.py
+++ b/glance_store/tests/utils.py
@@ -16,6 +16,7 @@
import six
from six.moves import urllib
+from oslo_utils import units
import requests
@@ -73,3 +74,44 @@ def fake_response(status_code=200, headers=None, content=None, **kwargs):
r.headers = headers or {}
r.raw = FakeHTTPResponse(status_code, headers, content, kwargs)
return r
+
+
+class FakeData(object):
+ """Generate a bunch of data without storing it in memory.
+
+ This acts like a read-only file object which generates fake data
+ in chunks when read() is called or it is used as a generator. It
+ can generate an arbitrary amount of data without storing it in
+ memory.
+
+ :param length: The number of bytes to generate
+ :param chunk_size: The chunk size to return in iteration mode, or when
+ read() is called unbounded
+
+ """
+ def __init__(self, length, chunk_size=64 * units.Ki):
+ self._max = length
+ self._chunk_size = chunk_size
+ self._len = 0
+
+ def read(self, length=None):
+ if length is None:
+ length = self._chunk_size
+
+ length = min(length, self._max - self._len)
+
+ self._len += length
+ if length == 0:
+ return b''
+ else:
+ return b'0' * length
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ r = self.read()
+ if len(r) == 0:
+ raise StopIteration()
+ else:
+ return r
diff --git a/lower-constraints.txt b/lower-constraints.txt
index 7e1720c..67923e6 100644
--- a/lower-constraints.txt
+++ b/lower-constraints.txt
@@ -1,6 +1,5 @@
alabaster==0.7.10
appdirs==1.4.3
-Babel==2.5.3
boto3==1.9.199
certifi==2018.1.18
chardet==3.0.4
@@ -25,7 +24,7 @@ idna==2.6
imagesize==1.0.0
iso8601==0.1.12
Jinja2==2.10
-jsonschema==2.6.0
+jsonschema==3.2.0
keystoneauth1==3.4.0
linecache2==1.0.0
MarkupSafe==1.0
@@ -36,8 +35,7 @@ mox3==0.25.0
msgpack==0.5.6
netaddr==0.7.19
netifaces==0.10.6
-openstackdocstheme==1.18.1
-os-brick==2.2.0
+os-brick==2.6.0
os-client-config==1.29.0
oslo.concurrency==3.26.0
oslo.config==5.2.0
@@ -48,7 +46,6 @@ oslo.serialization==2.18.0
oslotest==3.2.0
oslo.utils==3.33.0
oslo.vmware==2.17.0
-os-testr==1.0.0
packaging==17.1
Parsley==1.3
pbr==3.1.1
@@ -56,14 +53,13 @@ prettytable==0.7.2
Pygments==2.2.0
pyparsing==2.2.0
pyperclip==1.6.0
-python-cinderclient==3.3.0
+python-cinderclient==4.1.0
python-keystoneclient==3.8.0
python-mimeparse==1.6.0
python-subunit==1.0.0
python-swiftclient==3.2.0
pytz==2018.3
PyYAML==3.12
-reno==2.5.0
requests==2.14.2
requestsexceptions==1.4.0
requests-mock==1.2.0
@@ -72,8 +68,6 @@ rfc3986==1.1.0
six==1.10.0
smmap2==2.0.3
snowballstemmer==1.2.1
-Sphinx==1.6.2
-sphinxcontrib-websupport==1.0.1
stestr==2.0.0
stevedore==1.20.0
testscenarios==0.4
diff --git a/releasenotes/notes/block-creating-encrypted-nfs-volumes-d0ff370ab762042e.yaml b/releasenotes/notes/block-creating-encrypted-nfs-volumes-d0ff370ab762042e.yaml
new file mode 100644
index 0000000..8459d9b
--- /dev/null
+++ b/releasenotes/notes/block-creating-encrypted-nfs-volumes-d0ff370ab762042e.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - |
+ `Bug #1884482 <https://bugs.launchpad.net/cinder/+bug/1884482>`_:
+ Blocked creation of images on encrypted nfs volumes when glance store
+ is cinder.
diff --git a/releasenotes/notes/handle-sparse-image-a3ecfc4ae1c00d48.yaml b/releasenotes/notes/handle-sparse-image-a3ecfc4ae1c00d48.yaml
new file mode 100644
index 0000000..6122051
--- /dev/null
+++ b/releasenotes/notes/handle-sparse-image-a3ecfc4ae1c00d48.yaml
@@ -0,0 +1,15 @@
+---
+features:
+ - |
+ Add new configuration option ``rbd_thin_provisioning`` and
+ ``filesystem_thin_provisioning`` to rbd and filesystem
+ store to enable or not sparse upload, default are False.
+
+ A sparse file means that we do not actually write null byte sequences
+ but only the data itself at a given offset; the "holes" which can
+ appear will automatically be interpreted by the storage backend as
+ null bytes, and do not really consume your storage.
+
+ Enabling this feature will also speed up image upload and save
+ network traffic, in addition to saving space in the backend, as null
+ byte sequences are not sent over the network.
diff --git a/releasenotes/notes/support-cinder-multiple-stores-6cc8489f8f4f8ff3.yaml b/releasenotes/notes/support-cinder-multiple-stores-6cc8489f8f4f8ff3.yaml
new file mode 100644
index 0000000..24d2330
--- /dev/null
+++ b/releasenotes/notes/support-cinder-multiple-stores-6cc8489f8f4f8ff3.yaml
@@ -0,0 +1,10 @@
+---
+features:
+ - |
+ Added support for cinder multiple stores. Operators can now configure
+ multiple cinder stores by configuring a unique cinder_volume_type for
+ each cinder store.
+upgrade:
+ - |
+ Legacy images will be moved to specific stores as per their current
+ volume's type and the location URL will be updated respectively.
diff --git a/releasenotes/notes/victoria-milestone-1-c1f9de5b90e8c326.yaml b/releasenotes/notes/victoria-milestone-1-c1f9de5b90e8c326.yaml
new file mode 100644
index 0000000..5993571
--- /dev/null
+++ b/releasenotes/notes/victoria-milestone-1-c1f9de5b90e8c326.yaml
@@ -0,0 +1,9 @@
+---
+fixes:
+ - |
+ * Bug 1875281_: API returns 503 if one of the store is mis-configured
+ * Bug 1870289_: Add lock per share for cinder nfs mount/umount
+
+ .. _1875281: https://bugs.launchpad.net/glance-store/+bug/1875281
+ .. _1870289: https://bugs.launchpad.net/glance-store/+bug/1870289
+
diff --git a/releasenotes/source/conf.py b/releasenotes/source/conf.py
index b91665c..b64031d 100644
--- a/releasenotes/source/conf.py
+++ b/releasenotes/source/conf.py
@@ -38,10 +38,10 @@ extensions = [
]
# openstackdocstheme options
-repository_name = 'openstack/glance_store'
-bug_project = 'glance-store'
-bug_tag = ''
-html_last_updated_fmt = '%Y-%m-%d %H:%M'
+openstackdocs_repo_name = 'openstack/glance_store'
+openstackdocs_auto_name = False
+openstackdocs_bug_project = 'glance-store'
+openstackdocs_bug_tag = ''
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
@@ -93,7 +93,7 @@ exclude_patterns = []
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
+pygments_style = 'native'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
diff --git a/requirements.txt b/requirements.txt
index e217936..e537c3a 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -10,7 +10,7 @@ stevedore>=1.20.0 # Apache-2.0
eventlet!=0.18.3,!=0.20.1,>=0.18.2 # MIT
six>=1.10.0 # MIT
-jsonschema>=2.6.0 # MIT
+jsonschema>=3.2.0 # MIT
keystoneauth1>=3.4.0 # Apache-2.0
python-keystoneclient>=3.8.0 # Apache-2.0
requests>=2.14.2 # Apache-2.0
diff --git a/setup.cfg b/setup.cfg
index d8c1777..a185c43 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -25,6 +25,11 @@ classifier =
[files]
packages =
glance_store
+data_files =
+ etc/glance =
+ etc/glance/rootwrap.conf
+ etc/glance/rootwrap.d =
+ etc/glance/rootwrap.d/glance_cinder_store.filters
[entry_points]
glance_store.drivers =
@@ -63,23 +68,9 @@ swift =
httplib2>=0.9.1 # MIT
python-swiftclient>=3.2.0 # Apache-2.0
cinder =
- python-cinderclient>=3.3.0 # Apache-2.0
- os-brick>=2.2.0 # Apache-2.0
+ python-cinderclient>=4.1.0 # Apache-2.0
+ os-brick>=2.6.0 # Apache-2.0
oslo.rootwrap>=5.8.0 # Apache-2.0
oslo.privsep>=1.23.0 # Apache-2.0
s3 =
boto3>=1.9.199 # Apache-2.0
-
-[compile_catalog]
-directory = glance_store/locale
-domain = glance_store
-
-[update_catalog]
-domain = glance_store
-output_dir = glance_store/locale
-input_file = glance_store/locale/glance_store.pot
-
-[extract_messages]
-keywords = _ gettext ngettext l_ lazy_gettext
-mapping_file = babel.cfg
-output_file = glance_store/locale/glance_store.pot
diff --git a/test-requirements.txt b/test-requirements.txt
index 60de638..4a2a0fa 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -2,7 +2,7 @@
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
# Metrics and style
-hacking>=3.0,<3.1.0 # Apache-2.0
+hacking>=3.0.1,<3.1.0 # Apache-2.0
# Documentation style
doc8>=0.6.0 # Apache-2.0
@@ -18,14 +18,13 @@ stestr>=2.0.0 # Apache-2.0
testscenarios>=0.4 # Apache-2.0/BSD
testtools>=2.2.0 # MIT
oslotest>=3.2.0 # Apache-2.0
-os-testr>=1.0.0 # Apache-2.0
# Dependencies for each of the optional stores
boto3>=1.9.199 # Apache-2.0
oslo.vmware>=2.17.0 # Apache-2.0
httplib2>=0.9.1 # MIT
python-swiftclient>=3.2.0 # Apache-2.0
-python-cinderclient>=3.3.0 # Apache-2.0
-os-brick>=2.2.0 # Apache-2.0
+python-cinderclient>=4.1.0 # Apache-2.0
+os-brick>=2.6.0 # Apache-2.0
oslo.rootwrap>=5.8.0 # Apache-2.0
oslo.privsep>=1.23.0 # Apache-2.0
diff --git a/tools/install_venv.py b/tools/install_venv.py
index e238215..acc0e6f 100644
--- a/tools/install_venv.py
+++ b/tools/install_venv.py
@@ -21,8 +21,6 @@
Installation script for glance_store's development virtualenv
"""
-from __future__ import print_function
-
import os
import sys
diff --git a/tools/install_venv_common.py b/tools/install_venv_common.py
index 46822e3..d73649c 100644
--- a/tools/install_venv_common.py
+++ b/tools/install_venv_common.py
@@ -22,8 +22,6 @@ environment, it should be kept strictly compatible with Python 2.6.
Synced in from openstack-common
"""
-from __future__ import print_function
-
import optparse
import os
import subprocess
diff --git a/tox.ini b/tox.ini
index 8c05b46..8d9c2df 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,6 +1,6 @@
[tox]
minversion = 3.1.1
-envlist = py37,py36,pep8
+envlist = py38,py36,pep8
skipsdist = True
ignore_basepython_conflict = True