-rw-r--r--  cinder/api/contrib/admin_actions.py | 24
-rw-r--r--  cinder/backup/manager.py | 2
-rw-r--r--  cinder/common/config.py | 2
-rw-r--r--  cinder/policies/group_snapshot_actions.py | 2
-rw-r--r--  cinder/tests/unit/api/v3/test_volumes.py | 17
-rw-r--r--  cinder/tests/unit/volume/drivers/test_nfs.py | 18
-rw-r--r--  cinder/tests/unit/volume/drivers/test_rbd.py | 64
-rw-r--r--  cinder/tests/unit/volume/test_volume.py | 45
-rw-r--r--  cinder/volume/api.py | 9
-rw-r--r--  cinder/volume/drivers/pure.py | 3
-rw-r--r--  cinder/volume/drivers/rbd.py | 39
-rw-r--r--  cinder/volume/drivers/remotefs.py | 10
-rw-r--r--  cinder/volume/flows/manager/create_volume.py | 2
-rw-r--r--  lower-constraints.txt | 28
-rw-r--r--  releasenotes/notes/bug-1888951-backup-from-nfs-snapshot-2e06235eb318b852.yaml | 6
-rw-r--r--  releasenotes/notes/bug-1901241-361b1b361bfa5152.yaml | 8
-rw-r--r--  releasenotes/notes/bug-1904440-clone-rekey-fd57a2b5f6224e0f.yaml | 8
-rw-r--r--  releasenotes/notes/bug-1908315-020fea3e244d49bb.yaml | 38
-rw-r--r--  releasenotes/notes/fix-list-volume-filtering-3f2bf93ab9b98974.yaml | 5
-rw-r--r--  releasenotes/notes/increase_glance_num_retries-66b455a0729c4535.yaml | 9
-rw-r--r--  releasenotes/notes/reset-status-notification-update-4a80a8b5feb821ef.yaml | 13
-rw-r--r--  requirements.txt | 10
-rw-r--r--  test-requirements.txt | 12
-rw-r--r--  tox.ini | 4
24 files changed, 327 insertions(+), 51 deletions(-)
diff --git a/cinder/api/contrib/admin_actions.py b/cinder/api/contrib/admin_actions.py
index 02fef3d67..ba0da2d3a 100644
--- a/cinder/api/contrib/admin_actions.py
+++ b/cinder/api/contrib/admin_actions.py
@@ -31,6 +31,7 @@ from cinder.i18n import _
from cinder import objects
from cinder import rpc
from cinder import volume
+from cinder.volume import volume_utils
LOG = logging.getLogger(__name__)
@@ -63,6 +64,9 @@ class AdminController(wsgi.Controller):
def validate_update(self, req, body):
raise NotImplementedError()
+ def _notify_reset_status(self, context, id, message):
+ raise NotImplementedError()
+
def authorize(self, context, action_name, target_obj=None):
context.authorize(
'volume_extension:%(resource)s_admin_actions:%(action)s' %
@@ -97,10 +101,13 @@ class AdminController(wsgi.Controller):
LOG.debug(msg, {'resource': self.resource_name, 'id': id,
'update': update})
+ # Calling the notifier here with the legacy 'volumeStatusUpdate'
+ # publisher_id is deprecated; it will be replaced by _notify_reset_status().
notifier_info = dict(id=id, update=update)
notifier = rpc.get_notifier('volumeStatusUpdate')
notifier.info(context, self.collection + '.reset_status.start',
notifier_info)
+ self._notify_reset_status(context, id, 'reset_status.start')
# Not found exception will be handled at the wsgi level
self._update(context, id, update)
@@ -110,6 +117,7 @@ class AdminController(wsgi.Controller):
notifier.info(context, self.collection + '.reset_status.end',
notifier_info)
+ self._notify_reset_status(context, id, 'reset_status.end')
@wsgi.response(http_client.ACCEPTED)
@wsgi.action('os-force_delete')
@@ -127,6 +135,11 @@ class VolumeAdminController(AdminController):
collection = 'volumes'
+ def _notify_reset_status(self, context, id, message):
+ volume = objects.Volume.get_by_id(context, id)
+ volume_utils.notify_about_volume_usage(context, volume,
+ message)
+
def _update(self, *args, **kwargs):
context = args[0]
volume_id = args[1]
@@ -242,6 +255,11 @@ class SnapshotAdminController(AdminController):
collection = 'snapshots'
+ def _notify_reset_status(self, context, id, message):
+ snapshot = objects.Snapshot.get_by_id(context, id)
+ volume_utils.notify_about_snapshot_usage(context, snapshot,
+ message)
+
@validation.schema(admin_actions.reset_status_snapshot)
def validate_update(self, req, body):
status = body['os-reset_status']['status']
@@ -269,6 +287,11 @@ class BackupAdminController(AdminController):
collection = 'backups'
+ def _notify_reset_status(self, context, id, message):
+ backup = objects.Backup.get_by_id(context, id)
+ volume_utils.notify_about_backup_usage(context, backup,
+ message)
+
def _get(self, *args, **kwargs):
return self.backup_api.get(*args, **kwargs)
@@ -291,6 +314,7 @@ class BackupAdminController(AdminController):
notifier = rpc.get_notifier('backupStatusUpdate')
notifier.info(context, self.collection + '.reset_status.start',
notifier_info)
+ self._notify_reset_status(context, id, 'reset_status.start')
# Not found exception will be handled at the wsgi level
self.backup_api.reset_status(context=context, backup_id=id,
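
The admin_actions.py change above follows a template-method pattern: the base AdminController emits the start/end notifications through a _notify_reset_status() hook, and each resource controller overrides it with its resource-specific usage notifier. A distilled sketch of that shape, simplified from the diff (not the full WSGI controller):

    class AdminController(object):
        def _notify_reset_status(self, context, id, message):
            # Each subclass supplies the resource-specific notifier.
            raise NotImplementedError()

        def _reset_status(self, context, id, update):
            self._notify_reset_status(context, id, 'reset_status.start')
            self._update(context, id, update)  # not-found handled at the wsgi level
            self._notify_reset_status(context, id, 'reset_status.end')

    class VolumeAdminController(AdminController):
        def _notify_reset_status(self, context, id, message):
            volume = objects.Volume.get_by_id(context, id)
            volume_utils.notify_about_volume_usage(context, volume, message)
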
diff --git a/cinder/backup/manager.py b/cinder/backup/manager.py
index a44cc19d8..6c6c44799 100644
--- a/cinder/backup/manager.py
+++ b/cinder/backup/manager.py
@@ -990,6 +990,8 @@ class BackupManager(manager.ThreadPoolManager):
notifier = rpc.get_notifier('backupStatusUpdate')
notifier.info(context, "backups.reset_status.end",
notifier_info)
+ volume_utils.notify_about_backup_usage(context, backup,
+ 'reset_status.end')
def check_support_to_force_delete(self, context):
"""Check if the backup driver supports force delete operation.
diff --git a/cinder/common/config.py b/cinder/common/config.py
index 19418a50a..e4304fa91 100644
--- a/cinder/common/config.py
+++ b/cinder/common/config.py
@@ -174,7 +174,7 @@ image_opts = [
'is not specified it defaults to http.'),
cfg.IntOpt('glance_num_retries',
min=0,
- default=0,
+ default=3,
help='Number of retries when downloading an image from glance'),
cfg.BoolOpt('glance_api_insecure',
default=False,
diff --git a/cinder/policies/group_snapshot_actions.py b/cinder/policies/group_snapshot_actions.py
index 6a766d602..e74e0b173 100644
--- a/cinder/policies/group_snapshot_actions.py
+++ b/cinder/policies/group_snapshot_actions.py
@@ -24,7 +24,7 @@ RESET_STATUS = 'group:reset_group_snapshot_status'
group_snapshot_actions_policies = [
policy.DocumentedRuleDefault(
name=RESET_STATUS,
- check_str=base.RULE_ADMIN_OR_OWNER,
+ check_str=base.RULE_ADMIN_API,
description="Reset status of group snapshot.",
operations=[
{
diff --git a/cinder/tests/unit/api/v3/test_volumes.py b/cinder/tests/unit/api/v3/test_volumes.py
index 46ddae924..fa4d415b3 100644
--- a/cinder/tests/unit/api/v3/test_volumes.py
+++ b/cinder/tests/unit/api/v3/test_volumes.py
@@ -281,6 +281,23 @@ class VolumeApiTest(test.TestCase):
else:
self.assertNotIn('count', res_dict)
+ def test_list_volume_with_multiple_filters(self):
+ metadata = {'key_X': 'value_X'}
+ self._create_multiple_volumes_with_different_project()
+ test_utils.create_volume(self.ctxt, metadata=metadata)
+
+ self.mock_object(ViewBuilder, '_get_volume_type',
+ v2_fakes.fake_volume_type_name_get)
+ # Request with 'all_tenants' and 'metadata'
+ req = fakes.HTTPRequest.blank(
+ "/v3/volumes/detail?all_tenants=1"
+ "&metadata=%7B%27key_X%27%3A+%27value_X%27%7D")
+ ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID, False)
+ req.environ['cinder.context'] = ctxt
+ res_dict = self.controller._get_volumes(req, is_detail=True)
+ self.assertEqual(1, len(res_dict['volumes']))
+ self.assertEqual(metadata, res_dict['volumes'][0]['metadata'])
+
def test_volume_index_filter_by_group_id_in_unsupport_version(self):
self._create_volume_with_group()
req = fakes.HTTPRequest.blank(("/v3/volumes?group_id=%s") %
diff --git a/cinder/tests/unit/volume/drivers/test_nfs.py b/cinder/tests/unit/volume/drivers/test_nfs.py
index b4ac4bfd8..1b2acb83d 100644
--- a/cinder/tests/unit/volume/drivers/test_nfs.py
+++ b/cinder/tests/unit/volume/drivers/test_nfs.py
@@ -1224,12 +1224,13 @@ class NfsDriverTestCase(test.TestCase):
run_as_root=True)
mock_permission.assert_called_once_with(dest_vol_path)
- @ddt.data([NFS_CONFIG1, QEMU_IMG_INFO_OUT3],
- [NFS_CONFIG2, QEMU_IMG_INFO_OUT4],
- [NFS_CONFIG3, QEMU_IMG_INFO_OUT3],
- [NFS_CONFIG4, QEMU_IMG_INFO_OUT4])
+ @ddt.data([NFS_CONFIG1, QEMU_IMG_INFO_OUT3, 'available'],
+ [NFS_CONFIG2, QEMU_IMG_INFO_OUT4, 'backing-up'],
+ [NFS_CONFIG3, QEMU_IMG_INFO_OUT3, 'available'],
+ [NFS_CONFIG4, QEMU_IMG_INFO_OUT4, 'backing-up'])
@ddt.unpack
- def test_create_volume_from_snapshot(self, nfs_conf, qemu_img_info):
+ def test_create_volume_from_snapshot(self, nfs_conf, qemu_img_info,
+ snap_status):
self._set_driver(extra_confs=nfs_conf)
drv = self._driver
@@ -1246,7 +1247,7 @@ class NfsDriverTestCase(test.TestCase):
# Fake snapshot based in the previous created volume
snap_file = src_volume.name + '.' + fake_snap.id
fake_snap.volume = src_volume
- fake_snap.status = 'available'
+ fake_snap.status = snap_status
fake_snap.size = 10
# New fake volume where the snap will be copied
@@ -1289,7 +1290,9 @@ class NfsDriverTestCase(test.TestCase):
mock_ensure.assert_called_once()
mock_find_share.assert_called_once_with(new_volume)
- def test_create_volume_from_snapshot_status_not_available(self):
+ @ddt.data('error', 'creating', 'deleting', 'deleted', 'updating',
+ 'error_deleting', 'unmanaging', 'restoring')
+ def test_create_volume_from_snapshot_invalid_status(self, snap_status):
"""Expect an error when the snapshot's status is not 'available'."""
self._set_driver()
drv = self._driver
@@ -1298,6 +1301,7 @@ class NfsDriverTestCase(test.TestCase):
fake_snap = fake_snapshot.fake_snapshot_obj(self.context)
fake_snap.volume = src_volume
+ fake_snap.status = snap_status
new_volume = self._simple_volume()
new_volume['size'] = fake_snap['volume_size']
diff --git a/cinder/tests/unit/volume/drivers/test_rbd.py b/cinder/tests/unit/volume/drivers/test_rbd.py
index e1ba497b2..d20c95a7e 100644
--- a/cinder/tests/unit/volume/drivers/test_rbd.py
+++ b/cinder/tests/unit/volume/drivers/test_rbd.py
@@ -887,6 +887,47 @@ class RBDTestCase(test.TestCase):
@common_mocks
@mock.patch('cinder.objects.Volume.get_by_id')
+ @mock.patch.object(driver.RBDDriver, '_resize', mock.Mock())
+ def test_log_create_vol_from_snap_w_v2_clone_api(self, volume_get_by_id):
+ volume_get_by_id.return_value = self.volume_a
+
+ self.mock_proxy().__enter__().volume.op_features.return_value = 1
+ self.mock_rbd.RBD_OPERATION_FEATURE_CLONE_PARENT = 1
+
+ snapshot = self.snapshot
+ self.cfg.rbd_flatten_volume_from_snapshot = False
+
+ with mock.patch.object(driver, 'LOG') as \
+ mock_log:
+
+ self.driver.create_volume_from_snapshot(self.volume_a, snapshot)
+
+ mock_log.info.assert_called_once()
+ self.assertTrue(self.driver._clone_v2_api_checked)
+
+ @common_mocks
+ @mock.patch('cinder.objects.Volume.get_by_id')
+ @mock.patch.object(driver.RBDDriver, '_resize', mock.Mock())
+ def test_log_create_vol_from_snap_without_v2_clone_api(self,
+ volume_get_by_id):
+ volume_get_by_id.return_value = self.volume_a
+
+ self.mock_proxy().__enter__().volume.op_features.return_value = 0
+ self.mock_rbd.RBD_OPERATION_FEATURE_CLONE_PARENT = 1
+
+ snapshot = self.snapshot
+ self.cfg.rbd_flatten_volume_from_snapshot = False
+
+ with mock.patch.object(driver, 'LOG') as \
+ mock_log:
+
+ self.driver.create_volume_from_snapshot(self.volume_a, snapshot)
+
+ mock_log.warning.assert_called_once()
+ self.assertTrue(self.driver._clone_v2_api_checked)
+
+ @common_mocks
+ @mock.patch('cinder.objects.Volume.get_by_id')
def test_delete_snapshot(self, volume_get_by_id):
volume_get_by_id.return_value = self.volume_a
proxy = self.mock_proxy.return_value
@@ -1308,17 +1349,29 @@ class RBDTestCase(test.TestCase):
self.driver._is_cloneable(location, {'disk_format': f}))
self.assertTrue(mock_get_fsid.called)
- def _copy_image(self):
+ def _copy_image(self, volume_busy=False):
with mock.patch.object(tempfile, 'NamedTemporaryFile'):
with mock.patch.object(os.path, 'exists') as mock_exists:
mock_exists.return_value = True
with mock.patch.object(image_utils, 'fetch_to_raw'):
- with mock.patch.object(self.driver, 'delete_volume'):
+ with mock.patch.object(self.driver, 'delete_volume') \
+ as mock_dv:
with mock.patch.object(self.driver, '_resize'):
mock_image_service = mock.MagicMock()
args = [None, self.volume_a,
mock_image_service, None]
- self.driver.copy_image_to_volume(*args)
+ if volume_busy:
+ mock_dv.side_effect = (
+ exception.VolumeIsBusy("doh"))
+ self.assertRaises(
+ exception.VolumeIsBusy,
+ self.driver.copy_image_to_volume,
+ *args)
+ self.assertEqual(
+ self.cfg.rados_connection_retries,
+ mock_dv.call_count)
+ else:
+ self.driver.copy_image_to_volume(*args)
@mock.patch('cinder.volume.drivers.rbd.fileutils.delete_if_exists')
@mock.patch('cinder.volume.volume_utils.check_encryption_provider',
@@ -1368,6 +1421,11 @@ class RBDTestCase(test.TestCase):
self.cfg.image_conversion_dir = '/var/run/cinder/tmp'
self._copy_image_encrypted()
+ @common_mocks
+ def test_copy_image_busy_volume(self):
+ self.cfg.image_conversion_dir = '/var/run/cinder/tmp'
+ self._copy_image(volume_busy=True)
+
@ddt.data(True, False)
@common_mocks
@mock.patch('cinder.volume.drivers.rbd.RBDDriver._get_usage_info')
diff --git a/cinder/tests/unit/volume/test_volume.py b/cinder/tests/unit/volume/test_volume.py
index e1c741935..3f032cf36 100644
--- a/cinder/tests/unit/volume/test_volume.py
+++ b/cinder/tests/unit/volume/test_volume.py
@@ -20,6 +20,7 @@ import ddt
import time
import uuid
+import castellan
from castellan.common import exception as castellan_exception
from castellan import key_manager
import enum
@@ -89,6 +90,16 @@ def create_snapshot(volume_id, size=1, metadata=None, ctxt=None,
return snap
+class KeyObject(object):
+ def get_encoded(self):
+ return "asdf".encode('utf-8')
+
+
+class KeyObject2(object):
+ def get_encoded(self):
+ return "qwert".encode('utf-8')
+
+
@ddt.ddt
class VolumeTestCase(base.BaseVolumeTestCase):
@@ -1713,6 +1724,40 @@ class VolumeTestCase(base.BaseVolumeTestCase):
mock_at.assert_called()
mock_det.assert_called()
+ @mock.patch('cinder.db.sqlalchemy.api.volume_encryption_metadata_get')
+ def test_setup_encryption_keys(self, mock_enc_metadata_get):
+ key_mgr = fake_keymgr.fake_api()
+ self.mock_object(castellan.key_manager, 'API', return_value=key_mgr)
+ key_id = key_mgr.store(self.context, KeyObject())
+ key2_id = key_mgr.store(self.context, KeyObject2())
+
+ params = {'status': 'creating',
+ 'size': 1,
+ 'host': CONF.host,
+ 'encryption_key_id': key_id}
+ vol = tests_utils.create_volume(self.context, **params)
+
+ self.volume.create_volume(self.context, vol)
+ db.volume_update(self.context,
+ vol['id'],
+ {'encryption_key_id': key_id})
+
+ mock_enc_metadata_get.return_value = {'cipher': 'aes-xts-plain64',
+ 'key_size': 256,
+ 'provider': 'luks'}
+ ctxt = context.get_admin_context()
+
+ enc_info = {'encryption_key_id': key_id}
+ with mock.patch('cinder.volume.volume_utils.create_encryption_key',
+ return_value=key2_id):
+ r = cinder.volume.flows.manager.create_volume.\
+ CreateVolumeFromSpecTask._setup_encryption_keys(ctxt,
+ vol,
+ enc_info)
+ (source_pass, new_pass, new_key_id) = r
+ self.assertNotEqual(source_pass, new_pass)
+ self.assertEqual(new_key_id, key2_id)
+
@mock.patch.object(key_manager, 'API', fake_keymgr.fake_api)
def test_create_volume_from_snapshot_with_encryption(self):
"""Test volume can be created from a snapshot of an encrypted volume"""
diff --git a/cinder/volume/api.py b/cinder/volume/api.py
index d2aa98be7..5dcfbc4d7 100644
--- a/cinder/volume/api.py
+++ b/cinder/volume/api.py
@@ -2020,8 +2020,8 @@ class API(base.Base):
# To translate any true/false equivalent to True/False
# which is only acceptable format in database queries.
-
- for key, val in filters.items():
+ temp_dict = filters.copy()
+ for key, val in temp_dict.items():
try:
if key in booleans:
filters[key] = self._check_boolean_filter_value(
@@ -2034,6 +2034,11 @@ class API(base.Base):
# the filter becomes different from the user input.
continue
else:
+ # This is required because ast.literal_eval() raises an exception
+ # when passed a non-string. Eg: ast.literal_eval(5) raises
+ # ValueError: malformed node or string: 5
+ if not isinstance(val, str):
+ val = str(val)
filters[key] = ast.literal_eval(val)
except (ValueError, SyntaxError):
LOG.debug('Could not evaluate value %s, assuming string', val)
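
The str() conversion above matters because ast.literal_eval() accepts only strings (or AST nodes); handing it an already-parsed number raises the ValueError the comment mentions. A quick self-contained illustration:

    import ast

    print(ast.literal_eval("5"))      # OK: parses the string -> int 5
    print(ast.literal_eval("True"))   # OK: -> bool True
    try:
        ast.literal_eval(5)           # not a string: raises ValueError
    except ValueError as e:
        print(e)                      # e.g. "malformed node or string: 5"
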
diff --git a/cinder/volume/drivers/pure.py b/cinder/volume/drivers/pure.py
index f09efe9e6..fdce05e9f 100644
--- a/cinder/volume/drivers/pure.py
+++ b/cinder/volume/drivers/pure.py
@@ -62,8 +62,9 @@ PURE_OPTS = [
"this calculated value will override the "
"max_over_subscription_ratio config option."),
cfg.StrOpt("pure_host_personality",
+ default=None,
choices=['aix', 'esxi', 'hitachi-vsp', 'hpux',
- 'oracle-vm-server', 'solaris', 'vms'],
+ 'oracle-vm-server', 'solaris', 'vms', None],
help="Determines how the Purity system tunes the protocol used "
"between the array and the initiator."),
# These are used as default settings. In future these can be overridden
diff --git a/cinder/volume/drivers/rbd.py b/cinder/volume/drivers/rbd.py
index f06162d68..2fa2e011d 100644
--- a/cinder/volume/drivers/rbd.py
+++ b/cinder/volume/drivers/rbd.py
@@ -82,7 +82,9 @@ RBD_OPTS = [
default=5,
help='Maximum number of nested volume clones that are '
'taken before a flatten occurs. Set to 0 to disable '
- 'cloning.'),
+ 'cloning. Note: lowering this value will not affect '
+ 'existing volumes whose clone depth exceeds the new '
+ 'value.'),
cfg.IntOpt('rbd_store_chunk_size', default=4,
help='Volumes will be chunked into objects of this size '
'(in megabytes).'),
@@ -252,6 +254,7 @@ class RBDDriver(driver.CloneableImageVD, driver.MigrateVD,
self._is_replication_enabled = False
self._replication_targets = []
self._target_names = []
+ self._clone_v2_api_checked = False
if self.rbd is not None:
self.RBD_FEATURE_LAYERING = self.rbd.RBD_FEATURE_LAYERING
@@ -271,6 +274,22 @@ class RBDDriver(driver.CloneableImageVD, driver.MigrateVD,
def get_driver_options():
return RBD_OPTS
+ def _show_msg_check_clone_v2_api(self, volume_name):
+ if not self._clone_v2_api_checked:
+ self._clone_v2_api_checked = True
+ with RBDVolumeProxy(self, volume_name) as volume:
+ try:
+ if (volume.volume.op_features() &
+ self.rbd.RBD_OPERATION_FEATURE_CLONE_PARENT):
+ LOG.info('Using v2 Clone API')
+ return
+ except AttributeError:
+ pass
+ LOG.warning('Not using v2 clone API, please upgrade to'
+ ' mimic+ and set the OSD minimum client'
+ ' compat version to mimic for better'
+ ' performance and fewer deletion issues')
+
def _get_target_config(self, target_id):
"""Get a replication target from known replication targets."""
for target in self._replication_targets:
@@ -636,12 +655,6 @@ class RBDDriver(driver.CloneableImageVD, driver.MigrateVD,
if not parent:
return depth
- # If clone depth was reached, flatten should have occurred so if it has
- # been exceeded then something has gone wrong.
- if depth > self.configuration.rbd_max_clone_depth:
- raise Exception(_("clone depth exceeds limit of %s") %
- (self.configuration.rbd_max_clone_depth))
-
return self._get_clone_depth(client, parent, depth + 1)
def _extend_if_required(self, volume, src_vref):
@@ -711,7 +724,7 @@ class RBDDriver(driver.CloneableImageVD, driver.MigrateVD,
depth = self._get_clone_depth(client, src_name)
# If dest volume is a clone and rbd_max_clone_depth reached,
# flatten the dest after cloning. Zero rbd_max_clone_depth means
- # infinite is allowed.
+ # volumes are always flattened.
if depth >= self.configuration.rbd_max_clone_depth:
LOG.info("maximum clone depth (%d) has been reached - "
"flattening dest volume",
@@ -1012,6 +1025,8 @@ class RBDDriver(driver.CloneableImageVD, driver.MigrateVD,
self._flatten(self.configuration.rbd_pool, volume.name)
if int(volume.size):
self._resize(volume)
+
+ self._show_msg_check_clone_v2_api(snapshot.volume_name)
return volume_update
def _delete_backup_snaps(self, rbd_image):
@@ -1575,7 +1590,13 @@ class RBDDriver(driver.CloneableImageVD, driver.MigrateVD,
if encrypted:
self._encrypt_image(context, volume, tmp_dir, tmp.name)
- self.delete_volume(volume)
+ @utils.retry(exception.VolumeIsBusy,
+ self.configuration.rados_connection_interval,
+ self.configuration.rados_connection_retries)
+ def _delete_volume(volume):
+ self.delete_volume(volume)
+
+ _delete_volume(volume)
chunk_size = self.configuration.rbd_store_chunk_size * units.Mi
order = int(math.log(chunk_size, 2))
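
The copy_image_to_volume change wraps delete_volume() in cinder's utils.retry decorator so a temporarily busy RBD volume is retried before VolumeIsBusy reaches the caller. Below is a minimal standalone sketch of those retry semantics (simplified; cinder's actual utils.retry also supports exponential backoff):

    import functools
    import time

    def retry(exc_type, interval=1, retries=3):
        """Retry the wrapped call up to `retries` times on exc_type."""
        def decorator(func):
            @functools.wraps(func)
            def wrapper(*args, **kwargs):
                for attempt in range(1, retries + 1):
                    try:
                        return func(*args, **kwargs)
                    except exc_type:
                        if attempt == retries:
                            raise  # attempts exhausted: propagate to caller
                        time.sleep(interval)
            return wrapper
        return decorator

This matches the new unit test above, which expects delete_volume to be called rados_connection_retries times before the exception propagates.
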
diff --git a/cinder/volume/drivers/remotefs.py b/cinder/volume/drivers/remotefs.py
index 1e5dde3e8..e27a44bca 100644
--- a/cinder/volume/drivers/remotefs.py
+++ b/cinder/volume/drivers/remotefs.py
@@ -1258,11 +1258,11 @@ class RemoteFSSnapDriverBase(RemoteFSDriver):
LOG.debug('Creating volume %(vol)s from snapshot %(snap)s',
{'vol': volume.id, 'snap': snapshot.id})
- if snapshot.status != 'available':
- msg = _('Snapshot status must be "available" to clone. '
- 'But is: %(status)s') % {'status': snapshot.status}
-
- raise exception.InvalidSnapshot(msg)
+ status = snapshot.status
+ acceptable_states = ['available', 'backing-up']
+ self._validate_state(status, acceptable_states,
+ obj_description='snapshot',
+ invalid_exc=exception.InvalidSnapshot)
self._ensure_shares_mounted()
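
The remotefs.py change replaces a hard-coded 'available' check with the driver's generic _validate_state() helper so that snapshots in 'backing-up' (as happens when backing up from a snapshot, per bug #1888951) can also be cloned. A hedged sketch of what such a helper does, inferred from how it is called here rather than from the actual implementation:

    def _validate_state(self, current_state, acceptable_states,
                        obj_description='volume',
                        invalid_exc=exception.InvalidVolume):
        # Raise the supplied exception if the object is in an unusable state.
        if current_state not in acceptable_states:
            msg = _('%(desc)s state must be one of %(ok)s, but is: '
                    '%(state)s') % {'desc': obj_description,
                                    'ok': acceptable_states,
                                    'state': current_state}
            raise invalid_exc(msg)
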
diff --git a/cinder/volume/flows/manager/create_volume.py b/cinder/volume/flows/manager/create_volume.py
index 27cd40bd6..721c6c4e5 100644
--- a/cinder/volume/flows/manager/create_volume.py
+++ b/cinder/volume/flows/manager/create_volume.py
@@ -498,7 +498,7 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask):
new_key_id = volume_utils.create_encryption_key(context,
keymgr,
volume.volume_type_id)
- new_key = keymgr.get(context, encryption['encryption_key_id'])
+ new_key = keymgr.get(context, new_key_id)
new_pass = binascii.hexlify(new_key.get_encoded()).decode('utf-8')
return (source_pass, new_pass, new_key_id)
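
This one-line change is the clone-rekey fix described in the bug-1904440 release note below: the new passphrase must be derived from the key that was just created, not from the source volume's key. Distilled before/after for the relevant step:

    # Before (bug): fetched the *source* volume's key, so the passphrase
    # stamped on the clone did not match its new encryption_key_id.
    new_key = keymgr.get(context, encryption['encryption_key_id'])

    # After (fix): fetch the newly created key and derive the passphrase.
    new_key = keymgr.get(context, new_key_id)
    new_pass = binascii.hexlify(new_key.get_encoded()).decode('utf-8')
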
diff --git a/lower-constraints.txt b/lower-constraints.txt
index 024b069d9..84e3de4d5 100644
--- a/lower-constraints.txt
+++ b/lower-constraints.txt
@@ -4,6 +4,7 @@ amqp==2.2.2
appdirs==1.4.3
asn1crypto==0.24.0
automaton==1.14.0
+Babel==2.7.0
bandit==1.6.0
bcrypt==3.1.4
cachetools==2.0.1
@@ -15,14 +16,14 @@ cliff==2.11.0
cmd2==0.8.1
contextlib2==0.5.5
coverage==4.0
-cryptography==2.1
+cryptography==2.1.4
cursive==0.2.1
ddt==1.2.1
debtcollector==1.19.0
decorator==3.4.0
defusedxml==0.5.0
doc8==0.6.0
-docutils==0.14
+docutils==0.11
dogpile.cache==0.6.5
dulwich==0.19.0
enum-compat==0.0.2
@@ -31,14 +32,13 @@ eventlet==0.22.0
extras==1.0.0
fasteners==0.14.1
fixtures==3.0.0
-flake8==2.5.5
+flake8==2.6.0
future==0.16.0
futurist==1.6.0
gitdb2==2.0.3
GitPython==2.1.8
google-api-python-client==1.4.2
greenlet==0.4.10
-hacking==1.1.0
httplib2==0.9.1
idna==2.6
imagesize==1.0.0
@@ -48,6 +48,7 @@ Jinja2==2.10
jsonpatch==1.21
jsonpointer==2.0
jsonschema==2.6.0
+hacking==1.1.0
keystoneauth1==3.7.0
keystonemiddleware==4.21.0
kombu==4.1.0
@@ -58,6 +59,7 @@ MarkupSafe==1.0
mccabe==0.2.1
mock==2.0.0
monotonic==1.4
+mox3==0.28.0
msgpack==0.5.6
netaddr==0.7.19
netifaces==0.10.6
@@ -67,11 +69,12 @@ openstackdocstheme==1.20.0
os-api-ref==1.4.0
os-brick==2.10.5
os-client-config==1.29.0
-os-win==3.0.0
+os-service-types==1.6.0
+os-win==4.1.0
oslo.cache==1.29.0
oslo.concurrency==3.26.0
oslo.config==5.2.0
-oslo.context==2.19.2
+oslo.context==2.22.0
oslo.db==4.27.0
oslo.i18n==3.15.3
oslo.log==3.36.0
@@ -82,7 +85,7 @@ oslo.privsep==1.32.0
oslo.reports==1.18.0
oslo.rootwrap==5.8.0
oslo.serialization==2.18.0
-oslo.service==1.24.0
+oslo.service==1.31.0
oslo.utils==3.34.0
oslo.versionedobjects==1.31.2
oslo.vmware==2.17.0
@@ -101,7 +104,7 @@ psycopg2==2.7
pyasn1-modules==0.2.1
pyasn1==0.4.2
pycadf==2.7.0
-pycodestyle==2.5.0
+pycodestyle==2.0.0
pycparser==2.18
pyflakes==0.8.1
Pygments==2.2.0
@@ -118,11 +121,11 @@ python-glanceclient==2.15.0
python-keystoneclient==3.15.0
python-mimeparse==1.6.0
python-novaclient==9.1.0
-python-subunit==1.2.0
+python-subunit==1.3.0
python-swiftclient==3.2.0
-pytz==2013.6
+pytz==2015.7
pyudev==0.21.0
-PyYAML==3.12
+PyYAML==3.13
reno==2.5.0
repoze.lru==0.7
requests==2.14.2
@@ -132,6 +135,7 @@ rfc3986==1.1.0
Routes==2.3.1
rsa==3.4.2
rtslib-fb==2.1.65
+simplejson==3.0.0
six==1.10.0
smmap2==2.0.3
snowballstemmer==1.2.1
@@ -140,7 +144,7 @@ sphinx-feature-classification==0.1.0
sphinxcontrib-websupport==1.0.1
sqlalchemy-migrate==0.11.0
SQLAlchemy==1.0.10
-SQLAlchemy-Utils==0.36.1
+SQLAlchemy-Utils==0.33.11
sqlparse==0.2.4
statsd==3.2.2
stestr==2.2.0
diff --git a/releasenotes/notes/bug-1888951-backup-from-nfs-snapshot-2e06235eb318b852.yaml b/releasenotes/notes/bug-1888951-backup-from-nfs-snapshot-2e06235eb318b852.yaml
new file mode 100644
index 000000000..3f63402b5
--- /dev/null
+++ b/releasenotes/notes/bug-1888951-backup-from-nfs-snapshot-2e06235eb318b852.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - |
+ `Bug #1888951 <https://bugs.launchpad.net/cinder/+bug/1888951>`_:
+ Fixed an issue when creating a backup from a snapshot with the NFS
+ volume driver.
diff --git a/releasenotes/notes/bug-1901241-361b1b361bfa5152.yaml b/releasenotes/notes/bug-1901241-361b1b361bfa5152.yaml
new file mode 100644
index 000000000..7609cfe9d
--- /dev/null
+++ b/releasenotes/notes/bug-1901241-361b1b361bfa5152.yaml
@@ -0,0 +1,8 @@
+---
+fixes:
+ - |
+ RBD driver `bug #1901241
+ <https://bugs.launchpad.net/cinder/+bug/1901241>`_:
+ Fixed an issue where decreasing the ``rbd_max_clone_depth`` configuration
+ option would prevent volumes that had already exceeded that depth from
+ being cloned.
diff --git a/releasenotes/notes/bug-1904440-clone-rekey-fd57a2b5f6224e0f.yaml b/releasenotes/notes/bug-1904440-clone-rekey-fd57a2b5f6224e0f.yaml
new file mode 100644
index 000000000..b24017874
--- /dev/null
+++ b/releasenotes/notes/bug-1904440-clone-rekey-fd57a2b5f6224e0f.yaml
@@ -0,0 +1,8 @@
+---
+fixes:
+ - |
+ `Bug #1904440 <https://bugs.launchpad.net/cinder/+bug/1904440>`_:
+ When an iSCSI/FC encrypted volume was cloned, the rekey operation would
+ stamp the wrong encryption key on the newly cloned volume. This resulted
+ in a volume that could not be attached. It does not present a security
+ problem.
diff --git a/releasenotes/notes/bug-1908315-020fea3e244d49bb.yaml b/releasenotes/notes/bug-1908315-020fea3e244d49bb.yaml
new file mode 100644
index 000000000..f5a227641
--- /dev/null
+++ b/releasenotes/notes/bug-1908315-020fea3e244d49bb.yaml
@@ -0,0 +1,38 @@
+---
+upgrade:
+ - |
+ This release contains a fix for `Bug #1908315
+ <https://bugs.launchpad.net/cinder/+bug/1908315>`_, which changes the
+ default value of the policy governing the Block Storage API action
+ `Reset group snapshot status
+ <https://docs.openstack.org/api-ref/block-storage/v3/#reset-group-snapshot-status>`_
+ to make the action administrator-only. This policy was inadvertently
+ changed to be admin-or-owner during the Queens development cycle.
+
+ The policy is named ``group:reset_group_snapshot_status``.
+
+ * If you have a custom value for this policy in your cinder policy
+ configuration file, this change to the default value will not affect
+ you.
+ * If you have been aware of this regression and prefer the current
+ (incorrect) behavior, you may restore it by adding the following line
+ to your cinder policy configuration file::
+
+ "group:reset_group_snapshot_status": "rule:admin_or_owner"
+
+ This setting is *not recommended* by the Cinder project team, as it
+ may allow end users to put a group snapshot into an invalid status with
+ indeterminate consequences.
+
+ For more information about the cinder policy configuration file, see the
+ `policy.yaml
+ <https://docs.openstack.org/cinder/latest/configuration/block-storage/samples/policy.yaml.html>`_
+ section of the Cinder Configuration Guide.
+fixes:
+ - |
+ `Bug #1908315 <https://bugs.launchpad.net/cinder/+bug/1908315>`_: Corrected
+ the default checkstring for the ``group:reset_group_snapshot_status``
+ policy to make it admin-only. This policy governs the Block Storage API
+ action `Reset group snapshot status
+ <https://docs.openstack.org/api-ref/block-storage/v3/#reset-group-snapshot-status>`_,
+ which by default is supposed to be an administrator-only action.
diff --git a/releasenotes/notes/fix-list-volume-filtering-3f2bf93ab9b98974.yaml b/releasenotes/notes/fix-list-volume-filtering-3f2bf93ab9b98974.yaml
new file mode 100644
index 000000000..02dd00a82
--- /dev/null
+++ b/releasenotes/notes/fix-list-volume-filtering-3f2bf93ab9b98974.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+ - |
+ `Bug #1883490 <https://bugs.launchpad.net/cinder/+bug/1883490>`_:
+ Fixed an incorrect response when listing volumes with filters.
\ No newline at end of file
diff --git a/releasenotes/notes/increase_glance_num_retries-66b455a0729c4535.yaml b/releasenotes/notes/increase_glance_num_retries-66b455a0729c4535.yaml
new file mode 100644
index 000000000..14fdadb60
--- /dev/null
+++ b/releasenotes/notes/increase_glance_num_retries-66b455a0729c4535.yaml
@@ -0,0 +1,9 @@
+---
+upgrade:
+ - |
+ The default value of the configuration option, ``glance_num_retries``,
+ has been changed to 3 in this release. Its former value was 0.
+ The option controls how many times to retry a Glance API call
+ in response to an HTTP connection failure, timeout, or ServiceUnavailable
+ status. With this change, Cinder is more resilient to transient failures
+ and can complete the request when a retry succeeds.
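
Operators who prefer the old behavior can pin the option explicitly. Assuming the option lives in the [DEFAULT] section of cinder.conf (where cinder's image options are registered), a minimal override would be:

    [DEFAULT]
    # Restore the pre-change behavior of never retrying Glance calls.
    glance_num_retries = 0
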
diff --git a/releasenotes/notes/reset-status-notification-update-4a80a8b5feb821ef.yaml b/releasenotes/notes/reset-status-notification-update-4a80a8b5feb821ef.yaml
new file mode 100644
index 000000000..418dc84ca
--- /dev/null
+++ b/releasenotes/notes/reset-status-notification-update-4a80a8b5feb821ef.yaml
@@ -0,0 +1,13 @@
+---
+fixes:
+ - |
+ `Bug #1863806 <https://bugs.launchpad.net/cinder/+bug/1863806>`_:
+ ``os-reset_status`` notifications for volumes, snapshots, and
+ backups were being sent to nonstandard publisher_ids relative to
+ other cinder notifications for volumes, snapshots, and backups.
+ Now they are also sent to the following *standard* publisher_ids,
+ where most people would expect to find them:
+
+ * 'volume' for volume status resets
+ * 'snapshot' for snapshot status resets
+ * 'backup' for backup status resets
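
For notification consumers, the practical effect is that a listener keyed on the standard publisher_ids now also sees the reset-status events. A hedged sketch of such a listener using oslo.messaging (the topic, executor choice, and host-suffix handling are assumptions for illustration):

    import oslo_messaging
    from oslo_config import cfg

    class ResetStatusEndpoint(object):
        # Invoked for INFO-priority notifications.
        def info(self, ctxt, publisher_id, event_type, payload, metadata):
            # publisher_id may carry a host suffix, e.g. 'volume.<hostname>'.
            if (publisher_id.split('.')[0] in ('volume', 'snapshot', 'backup')
                    and 'reset_status' in event_type):
                print(publisher_id, event_type)

    transport = oslo_messaging.get_notification_transport(cfg.CONF)
    targets = [oslo_messaging.Target(topic='notifications')]
    listener = oslo_messaging.get_notification_listener(
        transport, targets, [ResetStatusEndpoint()], executor='threading')
    listener.start()
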
diff --git a/requirements.txt b/requirements.txt
index 3bd2ee40e..c90a8bf73 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -18,7 +18,7 @@ lxml!=3.7.0,>=3.4.1 # BSD
oauth2client!=4.0.0,>=1.5.0 # Apache-2.0
oslo.config>=5.2.0 # Apache-2.0
oslo.concurrency>=3.26.0 # Apache-2.0
-oslo.context>=2.19.2 # Apache-2.0
+oslo.context>=2.22.0 # Apache-2.0
oslo.db>=4.27.0 # Apache-2.0
oslo.log>=3.36.0 # Apache-2.0
oslo.messaging>=6.4.0 # Apache-2.0
@@ -28,7 +28,7 @@ oslo.privsep>=1.32.0 # Apache-2.0
oslo.reports>=1.18.0 # Apache-2.0
oslo.rootwrap>=5.8.0 # Apache-2.0
oslo.serialization!=2.19.1,>=2.18.0 # Apache-2.0
-oslo.service!=1.28.1,>=1.24.0 # Apache-2.0
+oslo.service>=1.31.0 # Apache-2.0
oslo.upgradecheck>=0.1.0 # Apache-2.0
oslo.utils>=3.34.0 # Apache-2.0
oslo.versionedobjects>=1.31.2 # Apache-2.0
@@ -44,7 +44,7 @@ python-glanceclient>=2.15.0 # Apache-2.0
python-keystoneclient>=3.15.0 # Apache-2.0
python-novaclient>=9.1.0 # Apache-2.0
python-swiftclient>=3.2.0 # Apache-2.0
-pytz>=2013.6 # MIT
+pytz>=2015.7 # MIT
requests>=2.14.2,!=2.20.0 # Apache-2.0
retrying!=1.3.0,>=1.2.3 # Apache-2.0
Routes>=2.3.1 # MIT
@@ -59,9 +59,9 @@ WebOb>=1.7.1 # MIT
oslo.i18n>=3.15.3 # Apache-2.0
oslo.vmware>=2.17.0 # Apache-2.0
os-brick>=2.10.5 # Apache-2.0
-os-win>=3.0.0 # Apache-2.0
+os-win>=4.1.0 # Apache-2.0
tooz>=1.58.0 # Apache-2.0
google-api-python-client>=1.4.2 # Apache-2.0
castellan>=0.16.0 # Apache-2.0
-cryptography>=2.1 # BSD/Apache-2.0
+cryptography>=2.1.4 # BSD/Apache-2.0
cursive>=0.2.1 # Apache-2.0
diff --git a/test-requirements.txt b/test-requirements.txt
index a8514b563..32874d927 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -11,10 +11,10 @@ fixtures>=3.0.0 # Apache-2.0/BSD
mock>=2.0.0 # BSD
os-api-ref>=1.4.0 # Apache-2.0
oslotest>=3.2.0 # Apache-2.0
-pycodestyle==2.5.0 # MIT License
+pycodestyle>=2.0.0,<2.6.0 # MIT License
PyMySQL>=0.7.6 # MIT License
psycopg2>=2.7 # LGPL/ZPL
-SQLAlchemy-Utils>=0.36.1 # BSD License
+SQLAlchemy-Utils>=0.33.11 # BSD License
testtools>=2.2.0 # MIT
testresources>=2.0.0 # Apache-2.0/BSD
testscenarios>=0.4 # Apache-2.0/BSD
@@ -24,3 +24,11 @@ tempest>=17.1.0 # Apache-2.0
# so we need to pin it here to a known working version
bandit==1.6.0 # Apache-2.0
doc8>=0.6.0 # Apache-2.0
+#
+# These are here to enable the resolver to work faster.
+# They are not directly used by cinder. Without these
+# dependency resolution was taking >6 hours.
+mox3>=0.28.0
+os-service-types>=1.6.0
+msgpack>=0.5.6
+Babel>=2.7.0
diff --git a/tox.ini b/tox.ini
index 33c40d14d..084febc46 100644
--- a/tox.ini
+++ b/tox.ini
@@ -73,7 +73,7 @@ setenv =
OS_TEST_PATH = ./cinder/tests/compliance
[testenv:pep8]
-basepython = python3
+basepython = python3.6
commands =
flake8 {posargs} .
doc8
@@ -81,7 +81,7 @@ commands =
{toxinidir}/tools/check_exec.py {toxinidir}/cinder {toxinidir}/doc/source/ {toxinidir}/releasenotes/notes
[testenv:fast8]
-basepython = python3
+basepython = python3.6
# Use same environment directory as pep8 env to save space and install time
envdir = {toxworkdir}/pep8
commands =