Diffstat (limited to 'nova')
-rw-r--r--  nova/api/openstack/compute/plugins/v3/server_groups.py | 65
-rw-r--r--  nova/api/openstack/compute/schemas/v3/server_groups.py | 51
-rw-r--r--  nova/cells/manager.py | 8
-rw-r--r--  nova/cells/messaging.py | 9
-rw-r--r--  nova/cells/rpcapi.py | 19
-rw-r--r--  nova/compute/api.py | 5
-rw-r--r--  nova/compute/manager.py | 13
-rw-r--r--  nova/compute/rpcapi.py | 25
-rw-r--r--  nova/conductor/api.py | 16
-rw-r--r--  nova/conductor/manager.py | 12
-rw-r--r--  nova/conductor/rpcapi.py | 25
-rw-r--r--  nova/openstack/common/_i18n.py | 49
-rw-r--r--  nova/openstack/common/eventlet_backdoor.py | 1
-rw-r--r--  nova/openstack/common/log.py | 6
-rw-r--r--  nova/openstack/common/loopingcall.py | 8
-rw-r--r--  nova/openstack/common/policy.py | 102
-rw-r--r--  nova/openstack/common/report/generators/threading.py | 23
-rw-r--r--  nova/openstack/common/report/guru_meditation_report.py | 20
-rw-r--r--  nova/openstack/common/report/models/base.py | 41
-rw-r--r--  nova/openstack/common/report/models/conf.py | 2
-rw-r--r--  nova/openstack/common/report/models/with_default_views.py | 4
-rw-r--r--  nova/openstack/common/service.py | 6
-rw-r--r--  nova/openstack/common/uuidutils.py | 2
-rw-r--r--  nova/openstack/common/versionutils.py | 2
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_extended_availability_zone.py | 3
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_server_group_quotas.py | 9
-rw-r--r--  nova/tests/unit/api/openstack/compute/contrib/test_server_groups.py | 69
-rw-r--r--  nova/tests/unit/cells/test_cells_manager.py | 14
-rw-r--r--  nova/tests/unit/cells/test_cells_messaging.py | 14
-rw-r--r--  nova/tests/unit/cells/test_cells_rpcapi.py | 8
-rw-r--r--  nova/tests/unit/compute/test_compute_api.py | 14
-rw-r--r--  nova/tests/unit/compute/test_rpcapi.py | 11
-rw-r--r--  nova/tests/unit/conductor/test_conductor.py | 34
-rw-r--r--  nova/tests/unit/objects/test_service.py | 24
-rw-r--r--  nova/tests/unit/objects/test_tag.py | 8
-rw-r--r--  nova/tests/unit/objects/test_virtual_interface.py | 4
-rw-r--r--  nova/tests/unit/virt/hyperv/test_imagecache.py | 119
-rw-r--r--  nova/tests/unit/virt/hyperv/test_livemigrationops.py | 1
-rw-r--r--  nova/tests/unit/virt/ironic/test_client_wrapper.py | 29
-rw-r--r--  nova/tests/unit/virt/ironic/test_driver.py | 5
-rw-r--r--  nova/tests/unit/virt/libvirt/test_driver.py | 12
-rw-r--r--  nova/tests/unit/virt/libvirt/test_imagecache.py | 17
-rw-r--r--  nova/virt/hyperv/imagecache.py | 6
-rw-r--r--  nova/virt/hyperv/livemigrationops.py | 2
-rw-r--r--  nova/virt/ironic/client_wrapper.py | 48
-rw-r--r--  nova/virt/ironic/driver.py | 96
-rw-r--r--  nova/virt/libvirt/driver.py | 160
-rw-r--r--  nova/virt/libvirt/imagecache.py | 23
48 files changed, 818 insertions(+), 426 deletions(-)
diff --git a/nova/api/openstack/compute/plugins/v3/server_groups.py b/nova/api/openstack/compute/plugins/v3/server_groups.py
index 370e3e70a3..50b0c49cef 100644
--- a/nova/api/openstack/compute/plugins/v3/server_groups.py
+++ b/nova/api/openstack/compute/plugins/v3/server_groups.py
@@ -19,20 +19,20 @@ import webob
from webob import exc
from nova.api.openstack import common
+from nova.api.openstack.compute.schemas.v3 import server_groups as schema
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
+from nova.api import validation
import nova.exception
from nova.i18n import _
from nova.i18n import _LE
from nova import objects
from nova.openstack.common import log as logging
-from nova import utils
LOG = logging.getLogger(__name__)
ALIAS = "os-server-groups"
-SUPPORTED_POLICIES = ['anti-affinity', 'affinity']
authorize = extensions.extension_authorizer('compute', 'v3:' + ALIAS)
@@ -68,61 +68,6 @@ class ServerGroupController(wsgi.Controller):
server_group['members'] = members
return server_group
- def _validate_policies(self, policies):
- """Validate the policies.
-
- Validates that there are no contradicting policies, for example
- 'anti-affinity' and 'affinity' in the same group.
- Validates that the defined policies are supported.
- :param policies: the given policies of the server_group
- """
- if ('anti-affinity' in policies and
- 'affinity' in policies):
- msg = _("Conflicting policies configured!")
- raise nova.exception.InvalidInput(reason=msg)
- not_supported = [policy for policy in policies
- if policy not in SUPPORTED_POLICIES]
- if not_supported:
- msg = _("Invalid policies: %s") % ', '.join(not_supported)
- raise nova.exception.InvalidInput(reason=msg)
-
- # Note(wingwj): It doesn't make sense to store duplicate policies.
- if sorted(set(policies)) != sorted(policies):
- msg = _("Duplicate policies configured!")
- raise nova.exception.InvalidInput(reason=msg)
-
- def _validate_input_body(self, body, entity_name):
- if not self.is_valid_body(body, entity_name):
- msg = _("the body is invalid.")
- raise nova.exception.InvalidInput(reason=msg)
-
- subbody = dict(body[entity_name])
-
- expected_fields = ['name', 'policies']
- for field in expected_fields:
- value = subbody.pop(field, None)
- if not value:
- msg = _("'%s' is either missing or empty.") % field
- raise nova.exception.InvalidInput(reason=msg)
- if field == 'name':
- utils.check_string_length(value, field,
- min_length=1, max_length=255)
- if not common.VALID_NAME_REGEX.search(value):
- msg = _("Invalid format for name: '%s'") % value
- raise nova.exception.InvalidInput(reason=msg)
- elif field == 'policies':
- if isinstance(value, list):
- [utils.check_string_length(v, field,
- min_length=1, max_length=255) for v in value]
- self._validate_policies(value)
- else:
- msg = _("'%s' is not a list") % value
- raise nova.exception.InvalidInput(reason=msg)
-
- if subbody:
- msg = _("unsupported fields: %s") % subbody.keys()
- raise nova.exception.InvalidInput(reason=msg)
-
@extensions.expected_errors(404)
def show(self, req, id):
"""Return data about the given server group."""
@@ -181,15 +126,11 @@ class ServerGroupController(wsgi.Controller):
return {'server_groups': result}
@extensions.expected_errors((400, 403))
+ @validation.schema(schema.create)
def create(self, req, body):
"""Creates a new server group."""
context = _authorize_context(req)
- try:
- self._validate_input_body(body, 'server_group')
- except nova.exception.InvalidInput as e:
- raise exc.HTTPBadRequest(explanation=e.format_message())
-
quotas = objects.Quotas()
try:
quotas.reserve(context, project_id=context.project_id,
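Note: the hunk above replaces roughly sixty lines of hand-rolled request checking with a single declarative schema. As an illustrative sketch of the mechanism (simplified names, not Nova's actual implementation), validation.schema can be thought of as a decorator that validates the request body against the JSON schema before the handler runs and turns any violation into a ValidationError, which the API layer renders as HTTP 400:

    # Simplified sketch of a schema-validation decorator in the style of
    # nova.api.validation.schema; ValidationError here stands in for
    # nova.exception.ValidationError.
    import functools

    import jsonschema


    class ValidationError(Exception):
        pass


    def schema(request_body_schema):
        validator = jsonschema.Draft4Validator(request_body_schema)

        def decorator(func):
            @functools.wraps(func)
            def wrapper(self, req, body=None, **kwargs):
                try:
                    validator.validate(body)
                except jsonschema.ValidationError as ex:
                    raise ValidationError(ex.message)
                return func(self, req, body=body, **kwargs)
            return wrapper
        return decorator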
diff --git a/nova/api/openstack/compute/schemas/v3/server_groups.py b/nova/api/openstack/compute/schemas/v3/server_groups.py
new file mode 100644
index 0000000000..f270ff7f1d
--- /dev/null
+++ b/nova/api/openstack/compute/schemas/v3/server_groups.py
@@ -0,0 +1,51 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.api.validation import parameter_types
+
+# NOTE(russellb) There is one other policy, 'legacy', but we don't allow that
+# being set via the API. It's only used when a group gets automatically
+# created to support the legacy behavior of the 'group' scheduler hint.
+SUPPORTED_POLICIES = ['anti-affinity', 'affinity']
+
+
+create = {
+ 'type': 'object',
+ 'properties': {
+ 'server_group': {
+ 'type': 'object',
+ 'properties': {
+ 'name': parameter_types.name,
+ 'policies': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'string', 'enum': SUPPORTED_POLICIES,
+ 'not': {'allOf': [
+ # NOTE: Clients cannot specify both affinity and
+ # anti-affinity in a single request.
+ {'enum': 'affinity'},
+ {'enum': 'anti-affinity'}
+ ]}
+ },
+ 'uniqueItems': True,
+ 'minItems': 1
+ }
+ },
+ 'required': ['name', 'policies'],
+ 'additionalProperties': False,
+ }
+ },
+ 'required': ['server_group'],
+ 'additionalProperties': False,
+}
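Note: the schema is a plain dict, so it can be exercised directly with the jsonschema library that Nova's validation layer builds on. One caveat: strict JSON Schema requires 'enum' values to be arrays, so the single-string {'enum': 'affinity'} form in the 'not'/'allOf' guard does not satisfy the metaschema; the sketch below therefore instantiates Draft4Validator directly (skipping the metaschema check that jsonschema.validate() performs) and demonstrates only the uncontroversial behaviour:

    # Exercising the schema above; assumes python-jsonschema is installed.
    import jsonschema

    from nova.api.openstack.compute.schemas.v3 import server_groups

    validator = jsonschema.Draft4Validator(server_groups.create)

    ok = {'server_group': {'name': 'test', 'policies': ['anti-affinity']}}
    validator.validate(ok)  # no exception

    dup = {'server_group': {'name': 'test',
                            'policies': ['affinity', 'affinity']}}
    try:
        validator.validate(dup)
    except jsonschema.ValidationError as ex:
        print(ex.message)  # rejected: 'uniqueItems' forbids duplicates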
diff --git a/nova/cells/manager.py b/nova/cells/manager.py
index 39f68941e1..2280301762 100644
--- a/nova/cells/manager.py
+++ b/nova/cells/manager.py
@@ -74,7 +74,7 @@ class CellsManager(manager.Manager):
Scheduling requests get passed to the scheduler class.
"""
- target = oslo_messaging.Target(version='1.32')
+ target = oslo_messaging.Target(version='1.33')
def __init__(self, *args, **kwargs):
LOG.warning(_LW('The cells feature of Nova is considered experimental '
@@ -523,10 +523,12 @@ class CellsManager(manager.Manager):
self.msg_runner.soft_delete_instance(ctxt, instance)
def resize_instance(self, ctxt, instance, flavor,
- extra_instance_updates):
+ extra_instance_updates,
+ clean_shutdown=True):
"""Resize an instance in its cell."""
self.msg_runner.resize_instance(ctxt, instance,
- flavor, extra_instance_updates)
+ flavor, extra_instance_updates,
+ clean_shutdown=clean_shutdown)
def live_migrate_instance(self, ctxt, instance, block_migration,
disk_over_commit, host_name):
diff --git a/nova/cells/messaging.py b/nova/cells/messaging.py
index 0f414d4b37..7918530023 100644
--- a/nova/cells/messaging.py
+++ b/nova/cells/messaging.py
@@ -894,10 +894,11 @@ class _TargetedMessageMethods(_BaseMessageMethods):
self._call_compute_api_with_obj(message.ctxt, instance, 'unpause')
def resize_instance(self, message, instance, flavor,
- extra_instance_updates):
+ extra_instance_updates, clean_shutdown=True):
"""Resize an instance via compute_api.resize()."""
self._call_compute_api_with_obj(message.ctxt, instance, 'resize',
flavor_id=flavor['flavorid'],
+ clean_shutdown=clean_shutdown,
**extra_instance_updates)
def live_migrate_instance(self, message, instance, block_migration,
@@ -1765,12 +1766,14 @@ class MessageRunner(object):
self._instance_action(ctxt, instance, 'unpause_instance')
def resize_instance(self, ctxt, instance, flavor,
- extra_instance_updates):
+ extra_instance_updates,
+ clean_shutdown=True):
"""Resize an instance in its cell."""
extra_kwargs = dict(flavor=flavor,
extra_instance_updates=extra_instance_updates)
self._instance_action(ctxt, instance, 'resize_instance',
- extra_kwargs=extra_kwargs)
+ extra_kwargs=extra_kwargs,
+ clean_shutdown=clean_shutdown)
def live_migrate_instance(self, ctxt, instance, block_migration,
disk_over_commit, host_name):
diff --git a/nova/cells/rpcapi.py b/nova/cells/rpcapi.py
index 69a811d083..a0a500670b 100644
--- a/nova/cells/rpcapi.py
+++ b/nova/cells/rpcapi.py
@@ -105,6 +105,7 @@ class CellsAPI(object):
* 1.30 - Make build_instances() use flavor object
* 1.31 - Add clean_shutdown to stop, resize, rescue, and shelve
* 1.32 - Send objects for instances in build_instances()
+ * 1.33 - Add clean_shutdown to resize_instance()
'''
VERSION_ALIASES = {
@@ -551,14 +552,22 @@ class CellsAPI(object):
cctxt.cast(ctxt, 'soft_delete_instance', instance=instance)
def resize_instance(self, ctxt, instance, extra_instance_updates,
- scheduler_hint, flavor, reservations):
+ scheduler_hint, flavor, reservations,
+ clean_shutdown=True):
if not CONF.cells.enable:
return
flavor_p = jsonutils.to_primitive(flavor)
- cctxt = self.client.prepare(version='1.20')
- cctxt.cast(ctxt, 'resize_instance',
- instance=instance, flavor=flavor_p,
- extra_instance_updates=extra_instance_updates)
+ version = '1.33'
+ msg_args = {'instance': instance,
+ 'flavor': flavor_p,
+ 'extra_instance_updates': extra_instance_updates,
+ 'clean_shutdown': clean_shutdown}
+ if not self.client.can_send_version(version):
+ del msg_args['clean_shutdown']
+ version = '1.20'
+
+ cctxt = self.client.prepare(version=version)
+ cctxt.cast(ctxt, 'resize_instance', **msg_args)
def live_migrate_instance(self, ctxt, instance, host_name,
block_migration, disk_over_commit):
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 2ef01ae717..3713b0f9b6 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -2537,7 +2537,7 @@ class API(base.Base):
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED])
- def resize(self, context, instance, flavor_id=None,
+ def resize(self, context, instance, flavor_id=None, clean_shutdown=True,
**extra_instance_updates):
"""Resize (ie, migrate) a running instance.
@@ -2638,7 +2638,8 @@ class API(base.Base):
self.compute_task_api.resize_instance(context, instance,
extra_instance_updates, scheduler_hint=scheduler_hint,
flavor=new_instance_type,
- reservations=quotas.reservations or [])
+ reservations=quotas.reservations or [],
+ clean_shutdown=clean_shutdown)
@wrap_check_policy
@check_instance_lock
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 140504ce86..9546b34338 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -589,7 +589,7 @@ class ComputeVirtAPI(virtapi.VirtAPI):
class ComputeManager(manager.Manager):
"""Manages the running instances from creation to destruction."""
- target = messaging.Target(version='3.37')
+ target = messaging.Target(version='3.38')
# How long to wait in seconds before re-issuing a shutdown
# signal to a instance during power off. The overall
@@ -3649,7 +3649,8 @@ class ComputeManager(manager.Manager):
quotas.commit()
def _prep_resize(self, context, image, instance, instance_type,
- quotas, request_spec, filter_properties, node):
+ quotas, request_spec, filter_properties, node,
+ clean_shutdown=True):
if not filter_properties:
filter_properties = {}
@@ -3682,14 +3683,16 @@ class ComputeManager(manager.Manager):
LOG.audit(_('Migrating'), context=context, instance=instance)
self.compute_rpcapi.resize_instance(
context, instance, claim.migration, image,
- instance_type, quotas.reservations)
+ instance_type, quotas.reservations,
+ clean_shutdown)
@wrap_exception()
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
def prep_resize(self, context, image, instance, instance_type,
- reservations, request_spec, filter_properties, node):
+ reservations, request_spec, filter_properties, node,
+ clean_shutdown=True):
"""Initiates the process of moving a running instance to another host.
Possibly changes the RAM and disk size in the process.
@@ -3713,7 +3716,7 @@ class ComputeManager(manager.Manager):
self._prep_resize(context, image, instance,
instance_type, quotas,
request_spec, filter_properties,
- node)
+ node, clean_shutdown)
# NOTE(dgenin): This is thrown in LibvirtDriver when the
# instance to be migrated is backed by LVM.
# Remove when LVM migration is implemented.
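Note: all of this plumbing carries one boolean from the API layer down to the virt driver. A hypothetical condensation of what clean_shutdown ultimately selects (the helper name is invented, and Nova derives the timeout and retry interval from configuration rather than literals):

    # Hypothetical condensation of the manager-side behaviour behind
    # clean_shutdown: graceful in-guest shutdown with a timeout versus
    # an immediate hard power-off.
    def power_off_for_resize(driver, instance, clean_shutdown=True,
                             timeout=60, retry_interval=10):
        if clean_shutdown:
            # give the guest OS up to 'timeout' seconds to shut down,
            # re-checking every 'retry_interval' seconds
            driver.power_off(instance, timeout, retry_interval)
        else:
            # forced stop: no in-guest cleanup is attempted
            driver.power_off(instance)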
diff --git a/nova/compute/rpcapi.py b/nova/compute/rpcapi.py
index 9e4218dfa6..b2fdb6d386 100644
--- a/nova/compute/rpcapi.py
+++ b/nova/compute/rpcapi.py
@@ -281,6 +281,7 @@ class ComputeAPI(object):
* 3.36 - Make build_and_run_instance() send a Flavor object
* 3.37 - Add clean_shutdown to stop, resize, rescue, shelve, and
shelve_offload
+ * 3.38 - Add clean_shutdown to prep_resize
'''
VERSION_ALIASES = {
@@ -590,18 +591,24 @@ class ComputeAPI(object):
def prep_resize(self, ctxt, image, instance, instance_type, host,
reservations=None, request_spec=None,
- filter_properties=None, node=None):
- version = '3.0'
+ filter_properties=None, node=None,
+ clean_shutdown=True):
instance_type_p = jsonutils.to_primitive(instance_type)
image_p = jsonutils.to_primitive(image)
+ msg_args = {'instance': instance,
+ 'instance_type': instance_type_p,
+ 'image': image_p,
+ 'reservations': reservations,
+ 'request_spec': request_spec,
+ 'filter_properties': filter_properties,
+ 'node': node,
+ 'clean_shutdown': clean_shutdown}
+ version = '3.38'
+ if not self.client.can_send_version(version):
+ del msg_args['clean_shutdown']
+ version = '3.0'
cctxt = self.client.prepare(server=host, version=version)
- cctxt.cast(ctxt, 'prep_resize',
- instance=instance,
- instance_type=instance_type_p,
- image=image_p, reservations=reservations,
- request_spec=request_spec,
- filter_properties=filter_properties,
- node=node)
+ cctxt.cast(ctxt, 'prep_resize', **msg_args)
def reboot_instance(self, ctxt, instance, block_device_info,
reboot_type):
diff --git a/nova/conductor/api.py b/nova/conductor/api.py
index fc9ebbdfaf..5d5f30ba82 100644
--- a/nova/conductor/api.py
+++ b/nova/conductor/api.py
@@ -225,13 +225,15 @@ class LocalComputeTaskAPI(object):
manager.ComputeTaskManager())
def resize_instance(self, context, instance, extra_instance_updates,
- scheduler_hint, flavor, reservations):
+ scheduler_hint, flavor, reservations,
+ clean_shutdown=True):
# NOTE(comstud): 'extra_instance_updates' is not used here but is
# needed for compatibility with the cells_rpcapi version of this
# method.
self._manager.migrate_server(
- context, instance, scheduler_hint, False, False, flavor,
- None, None, reservations)
+ context, instance, scheduler_hint, live=False, rebuild=False,
+ flavor=flavor, block_migration=None, disk_over_commit=None,
+ reservations=reservations, clean_shutdown=clean_shutdown)
def live_migrate_instance(self, context, instance, host_name,
block_migration, disk_over_commit):
@@ -336,13 +338,15 @@ class ComputeTaskAPI(object):
self.conductor_compute_rpcapi = rpcapi.ComputeTaskAPI()
def resize_instance(self, context, instance, extra_instance_updates,
- scheduler_hint, flavor, reservations):
+ scheduler_hint, flavor, reservations,
+ clean_shutdown=True):
# NOTE(comstud): 'extra_instance_updates' is not used here but is
# needed for compatibility with the cells_rpcapi version of this
# method.
self.conductor_compute_rpcapi.migrate_server(
- context, instance, scheduler_hint, False, False, flavor,
- None, None, reservations)
+ context, instance, scheduler_hint, live=False, rebuild=False,
+ flavor=flavor, block_migration=None, disk_over_commit=None,
+ reservations=reservations, clean_shutdown=clean_shutdown)
def live_migrate_instance(self, context, instance, host_name,
block_migration, disk_over_commit):
diff --git a/nova/conductor/manager.py b/nova/conductor/manager.py
index 9b622a343d..58a6311af4 100644
--- a/nova/conductor/manager.py
+++ b/nova/conductor/manager.py
@@ -460,7 +460,7 @@ class ComputeTaskManager(base.Base):
may involve coordinating activities on multiple compute nodes.
"""
- target = messaging.Target(namespace='compute_task', version='1.10')
+ target = messaging.Target(namespace='compute_task', version='1.11')
def __init__(self):
super(ComputeTaskManager, self).__init__()
@@ -481,7 +481,8 @@ class ComputeTaskManager(base.Base):
exception.MigrationPreCheckError,
exception.LiveMigrationWithOldNovaNotSafe)
def migrate_server(self, context, instance, scheduler_hint, live, rebuild,
- flavor, block_migration, disk_over_commit, reservations=None):
+ flavor, block_migration, disk_over_commit, reservations=None,
+ clean_shutdown=True):
if instance and not isinstance(instance, nova_object.NovaObject):
# NOTE(danms): Until v2 of the RPC API, we need to tolerate
# old-world instance objects here
@@ -504,12 +505,12 @@ class ComputeTaskManager(base.Base):
instance_uuid):
self._cold_migrate(context, instance, flavor,
scheduler_hint['filter_properties'],
- reservations)
+ reservations, clean_shutdown)
else:
raise NotImplementedError()
def _cold_migrate(self, context, instance, flavor, filter_properties,
- reservations):
+ reservations, clean_shutdown):
image_ref = instance.image_ref
image = compute_utils.get_image_metadata(
context, self.image_api, image_ref, instance)
@@ -555,7 +556,8 @@ class ComputeTaskManager(base.Base):
context, image, instance,
flavor, host,
reservations, request_spec=request_spec,
- filter_properties=filter_properties, node=node)
+ filter_properties=filter_properties, node=node,
+ clean_shutdown=clean_shutdown)
except Exception as ex:
with excutils.save_and_reraise_exception():
updates = {'vm_state': instance['vm_state'],
diff --git a/nova/conductor/rpcapi.py b/nova/conductor/rpcapi.py
index 60621742d0..6c21b74cec 100644
--- a/nova/conductor/rpcapi.py
+++ b/nova/conductor/rpcapi.py
@@ -391,6 +391,7 @@ class ComputeTaskAPI(object):
1.8 - Add rebuild_instance
1.9 - Converted requested_networks to NetworkRequestList object
1.10 - Made migrate_server() and build_instances() send flavor objects
+ 1.11 - Added clean_shutdown to migrate_server()
"""
@@ -404,22 +405,26 @@ class ComputeTaskAPI(object):
def migrate_server(self, context, instance, scheduler_hint, live, rebuild,
flavor, block_migration, disk_over_commit,
- reservations=None):
- version = '1.10'
+ reservations=None, clean_shutdown=True):
+ kw = {'instance': instance, 'scheduler_hint': scheduler_hint,
+ 'live': live, 'rebuild': rebuild, 'flavor': flavor,
+ 'block_migration': block_migration,
+ 'disk_over_commit': disk_over_commit,
+ 'reservations': reservations,
+ 'clean_shutdown': clean_shutdown}
+ version = '1.11'
+ if not self.client.can_send_version(version):
+ del kw['clean_shutdown']
+ version = '1.10'
if not self.client.can_send_version(version):
- flavor = objects_base.obj_to_primitive(flavor)
+ kw['flavor'] = objects_base.obj_to_primitive(flavor)
version = '1.6'
if not self.client.can_send_version(version):
- instance = jsonutils.to_primitive(
+ kw['instance'] = jsonutils.to_primitive(
objects_base.obj_to_primitive(instance))
version = '1.4'
cctxt = self.client.prepare(version=version)
- return cctxt.call(context, 'migrate_server',
- instance=instance, scheduler_hint=scheduler_hint,
- live=live, rebuild=rebuild, flavor=flavor,
- block_migration=block_migration,
- disk_over_commit=disk_over_commit,
- reservations=reservations)
+ return cctxt.call(context, 'migrate_server', **kw)
def build_instances(self, context, instances, image, filter_properties,
admin_password, injected_files, requested_networks,
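Note: the ladder of can_send_version() checks above is the standard rolling-upgrade idiom: start at the newest RPC version and, for each older manager, strip or down-convert the arguments that version cannot accept. A self-contained sketch of the idiom, where FakeClient merely mimics the can_send_version()/prepare() surface of an oslo.messaging RPC client:

    # Cascading version negotiation in miniature.
    class FakeClient(object):
        def __init__(self, max_version):
            self.max_version = max_version

        def can_send_version(self, version):
            def as_tuple(v):
                return tuple(int(p) for p in v.split('.'))
            return as_tuple(version) <= as_tuple(self.max_version)

        def prepare(self, version):
            print('pinned to version %s' % version)
            return self


    def negotiate_migrate_server(client, kw):
        version = '1.11'
        if not client.can_send_version(version):
            # pre-1.11 managers reject the unknown kwarg; dropping it
            # falls back to the old always-clean shutdown behaviour
            del kw['clean_shutdown']
            version = '1.10'
        client.prepare(version=version)
        return kw


    print(negotiate_migrate_server(FakeClient('1.11'),
                                   {'flavor': 'm1.small',
                                    'clean_shutdown': False}))
    print(negotiate_migrate_server(FakeClient('1.10'),
                                   {'flavor': 'm1.small',
                                    'clean_shutdown': False}))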
diff --git a/nova/openstack/common/_i18n.py b/nova/openstack/common/_i18n.py
index c556573f42..65ce056499 100644
--- a/nova/openstack/common/_i18n.py
+++ b/nova/openstack/common/_i18n.py
@@ -16,25 +16,30 @@ See http://docs.openstack.org/developer/oslo.i18n/usage.html
"""
-import oslo.i18n
-
-
-# NOTE(dhellmann): This reference to o-s-l-o will be replaced by the
-# application name when this module is synced into the separate
-# repository. It is OK to have more than one translation function
-# using the same domain, since there will still only be one message
-# catalog.
-_translators = oslo.i18n.TranslatorFactory(domain='nova')
-
-# The primary translation function using the well-known name "_"
-_ = _translators.primary
-
-# Translators for log levels.
-#
-# The abbreviated names are meant to reflect the usual use of a short
-# name like '_'. The "L" is for "log" and the other letter comes from
-# the level.
-_LI = _translators.log_info
-_LW = _translators.log_warning
-_LE = _translators.log_error
-_LC = _translators.log_critical
+try:
+ import oslo.i18n
+
+ # NOTE(dhellmann): This reference to o-s-l-o will be replaced by the
+ # application name when this module is synced into the separate
+ # repository. It is OK to have more than one translation function
+ # using the same domain, since there will still only be one message
+ # catalog.
+ _translators = oslo.i18n.TranslatorFactory(domain='nova')
+
+ # The primary translation function using the well-known name "_"
+ _ = _translators.primary
+
+ # Translators for log levels.
+ #
+ # The abbreviated names are meant to reflect the usual use of a short
+ # name like '_'. The "L" is for "log" and the other letter comes from
+ # the level.
+ _LI = _translators.log_info
+ _LW = _translators.log_warning
+ _LE = _translators.log_error
+ _LC = _translators.log_critical
+except ImportError:
+ # NOTE(dims): Support for cases where a project wants to use
+ # code from oslo-incubator, but is not ready to be internationalized
+ # (like tempest)
+ _ = _LI = _LW = _LE = _LC = lambda x: x
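Note: the ImportError fallback keeps the module importable in projects that consume oslo-incubator code without depending on oslo.i18n: every translation marker degrades to an identity function, so call sites need no changes. A minimal demonstration of the same pattern:

    # Call sites use _() identically whether or not the translation
    # library is available.
    try:
        import oslo.i18n
        _ = oslo.i18n.TranslatorFactory(domain='demo').primary
    except ImportError:
        _ = lambda msg: msg

    print(_('resize failed'))  # translated if a catalog exists, else verbatim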
diff --git a/nova/openstack/common/eventlet_backdoor.py b/nova/openstack/common/eventlet_backdoor.py
index ef1f1ee351..764b8c03bc 100644
--- a/nova/openstack/common/eventlet_backdoor.py
+++ b/nova/openstack/common/eventlet_backdoor.py
@@ -25,7 +25,6 @@ import socket
import sys
import traceback
-import eventlet
import eventlet.backdoor
import greenlet
from oslo.config import cfg
diff --git a/nova/openstack/common/log.py b/nova/openstack/common/log.py
index e6330c85cd..922c992245 100644
--- a/nova/openstack/common/log.py
+++ b/nova/openstack/common/log.py
@@ -552,9 +552,11 @@ def _setup_logging_from_conf(project, version):
# TODO(bogdando) use the format provided by RFCSysLogHandler
# after existing syslog format deprecation in J
if CONF.use_syslog_rfc_format:
- syslog = RFCSysLogHandler(facility=facility)
+ syslog = RFCSysLogHandler(address='/dev/log',
+ facility=facility)
else:
- syslog = logging.handlers.SysLogHandler(facility=facility)
+ syslog = logging.handlers.SysLogHandler(address='/dev/log',
+ facility=facility)
log_root.addHandler(syslog)
except socket.error:
log_root.error('Unable to add syslog handler. Verify that syslog '
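Note: the explicit address matters because SysLogHandler's default target is UDP ('localhost', 514), which most Linux hosts do not listen on; '/dev/log' is the local UNIX socket the syslog daemon actually reads. A minimal sketch of the corrected wiring:

    # Logging to the local syslog socket; with the handler's default
    # address (UDP localhost:514) these records would typically be
    # dropped on a stock Linux host.
    import logging
    import logging.handlers

    handler = logging.handlers.SysLogHandler(
        address='/dev/log',
        facility=logging.handlers.SysLogHandler.LOG_USER)
    log = logging.getLogger('demo')
    log.addHandler(handler)
    log.warning('hello from /dev/log')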
diff --git a/nova/openstack/common/loopingcall.py b/nova/openstack/common/loopingcall.py
index f2086b2e0e..ba9f3c7702 100644
--- a/nova/openstack/common/loopingcall.py
+++ b/nova/openstack/common/loopingcall.py
@@ -84,9 +84,9 @@ class FixedIntervalLoopingCall(LoopingCallBase):
break
delay = end - start - interval
if delay > 0:
- LOG.warn(_LW('task %(func_name)s run outlasted '
+ LOG.warn(_LW('task %(func_name)r run outlasted '
'interval by %(delay).2f sec'),
- {'func_name': repr(self.f), 'delay': delay})
+ {'func_name': self.f, 'delay': delay})
greenthread.sleep(-delay if delay < 0 else 0)
except LoopingCallDone as e:
self.stop()
@@ -127,9 +127,9 @@ class DynamicLoopingCall(LoopingCallBase):
if periodic_interval_max is not None:
idle = min(idle, periodic_interval_max)
- LOG.debug('Dynamic looping call %(func_name)s sleeping '
+ LOG.debug('Dynamic looping call %(func_name)r sleeping '
'for %(idle).02f seconds',
- {'func_name': repr(self.f), 'idle': idle})
+ {'func_name': self.f, 'idle': idle})
greenthread.sleep(idle)
except LoopingCallDone as e:
self.stop()
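Note: both hunks replace an eager repr(self.f) with the %r conversion and the raw callable, so the repr is computed by the logging layer only when the record is actually emitted:

    # Deferred formatting: repr(task) is only evaluated if DEBUG records
    # are being handled.
    import logging

    LOG = logging.getLogger(__name__)
    task = lambda: None

    LOG.debug('Dynamic looping call %(func_name)r sleeping for '
              '%(idle).02f seconds', {'func_name': task, 'idle': 1.5})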
diff --git a/nova/openstack/common/policy.py b/nova/openstack/common/policy.py
index fe45e5a13f..17e4e7f3de 100644
--- a/nova/openstack/common/policy.py
+++ b/nova/openstack/common/policy.py
@@ -1,3 +1,5 @@
+# -*- coding: utf-8 -*-
+#
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
@@ -22,22 +24,43 @@ string written in the new policy language.
In the list-of-lists representation, each check inside the innermost
list is combined as with an "and" conjunction--for that check to pass,
all the specified checks must pass. These innermost lists are then
-combined as with an "or" conjunction. This is the original way of
-expressing policies, but there now exists a new way: the policy
-language.
-
-In the policy language, each check is specified the same way as in the
-list-of-lists representation: a simple "a:b" pair that is matched to
-the correct code to perform that check. However, conjunction
-operators are available, allowing for more expressiveness in crafting
-policies.
-
-As an example, take the following rule, expressed in the list-of-lists
-representation::
+combined as with an "or" conjunction. As an example, take the following
+rule, expressed in the list-of-lists representation::
[["role:admin"], ["project_id:%(project_id)s", "role:projectadmin"]]
-In the policy language, this becomes::
+This is the original way of expressing policies, but there now exists a
+new way: the policy language.
+
+In the policy language, each check is specified the same way as in the
+list-of-lists representation: a simple "a:b" pair that is matched to
+the correct class to perform that check::
+
+    +===========================================================================+
+    |            TYPE                |                SYNTAX                    |
+    +===========================================================================+
+    |User's Role                     |              role:admin                  |
+    +---------------------------------------------------------------------------+
+    |Rules already defined on policy |          rule:admin_required             |
+    +---------------------------------------------------------------------------+
+    |Against URL's¹                  |         http://my-url.org/check          |
+    +---------------------------------------------------------------------------+
+    |User attributes²                |    project_id:%(target.project.id)s      |
+    +---------------------------------------------------------------------------+
+    |Strings                         |        <variable>:'xpto2035abc'          |
+    |                                |         'myproject':<variable>           |
+    +---------------------------------------------------------------------------+
+    |                                |          project_id:xpto2035abc          |
+    |Literals                        |               domain_id:20               |
+    |                                |           True:%(user.enabled)s          |
+    +===========================================================================+
+
+¹URL checking must return 'True' to be valid
+²User attributes (obtained through the token): user_id, domain_id or project_id
+
+Conjunction operators are available, allowing for more expressiveness
+in crafting policies. So, in the policy language, the previous check in
+list-of-lists becomes::
role:admin or (project_id:%(project_id)s and role:projectadmin)
@@ -46,26 +69,16 @@ policy rule::
project_id:%(project_id)s and not role:dunce
-It is possible to perform policy checks on the following user
-attributes (obtained through the token): user_id, domain_id or
-project_id::
-
- domain_id:<some_value>
-
Attributes sent along with API calls can be used by the policy engine
(on the right side of the expression), by using the following syntax::
- <some_value>:user.id
+ <some_value>:%(user.id)s
Contextual attributes of objects identified by their IDs are loaded
from the database. They are also available to the policy engine and
can be checked through the `target` keyword::
- <some_value>:target.role.name
-
-All these attributes (related to users, API calls, and context) can be
-checked against each other or against constants, be it literals (True,
-<a_number>) or strings.
+ <some_value>:%(target.role.name)s
Finally, two special policy checks should be mentioned; the policy
check "@" will always accept an access, and the policy check "!" will
@@ -88,7 +101,7 @@ import six.moves.urllib.parse as urlparse
import six.moves.urllib.request as urlrequest
from nova.openstack.common import fileutils
-from nova.openstack.common._i18n import _, _LE, _LW
+from nova.openstack.common._i18n import _, _LE, _LI
from nova.openstack.common import log as logging
@@ -103,7 +116,11 @@ policy_opts = [
cfg.MultiStrOpt('policy_dirs',
default=['policy.d'],
help=_('Directories where policy configuration files are '
- 'stored.')),
+ 'stored. They can be relative to any directory '
+ 'in the search path defined by the config_dir '
+ 'option, or absolute paths. The file defined by '
+ 'policy_file must exist for these directories to '
+ 'be searched.')),
]
CONF = cfg.CONF
@@ -195,16 +212,19 @@ class Enforcer(object):
:param default_rule: Default rule to use, CONF.default_rule will
be used if none is specified.
:param use_conf: Whether to load rules from cache or config file.
+ :param overwrite: Whether to overwrite existing rules when
+ reloading rules from the config file.
"""
def __init__(self, policy_file=None, rules=None,
- default_rule=None, use_conf=True):
+ default_rule=None, use_conf=True, overwrite=True):
self.default_rule = default_rule or CONF.policy_default_rule
self.rules = Rules(rules, self.default_rule)
self.policy_path = None
self.policy_file = policy_file or CONF.policy_file
self.use_conf = use_conf
+ self.overwrite = overwrite
def set_rules(self, rules, overwrite=True, use_conf=False):
"""Create a new Rules object based on the provided dict of rules.
@@ -236,7 +256,7 @@ class Enforcer(object):
Policy file is cached and will be reloaded if modified.
- :param force_reload: Whether to overwrite current rules.
+ :param force_reload: Whether to reload rules from config file.
"""
if force_reload:
@@ -246,18 +266,20 @@ class Enforcer(object):
if not self.policy_path:
self.policy_path = self._get_policy_path(self.policy_file)
- self._load_policy_file(self.policy_path, force_reload)
+ self._load_policy_file(self.policy_path, force_reload,
+ overwrite=self.overwrite)
for path in CONF.policy_dirs:
try:
path = self._get_policy_path(path)
except cfg.ConfigFilesNotFoundError:
- LOG.warn(_LW("Can not find policy directory: %s"), path)
+ LOG.info(_LI("Can not find policy directory: %s"), path)
continue
self._walk_through_policy_directory(path,
self._load_policy_file,
force_reload, False)
- def _walk_through_policy_directory(self, path, func, *args):
+ @staticmethod
+ def _walk_through_policy_directory(path, func, *args):
# We do not iterate over sub-directories.
policy_files = next(os.walk(path))[2]
policy_files.sort()
@@ -267,9 +289,9 @@ class Enforcer(object):
def _load_policy_file(self, path, force_reload, overwrite=True):
reloaded, data = fileutils.read_cached_file(
path, force_reload=force_reload)
- if reloaded or not self.rules:
+ if reloaded or not self.rules or not overwrite:
rules = Rules.load_json(data, self.default_rule)
- self.set_rules(rules, overwrite)
+ self.set_rules(rules, overwrite=overwrite, use_conf=True)
LOG.debug("Rules successfully reloaded")
def _get_policy_path(self, path):
@@ -889,7 +911,17 @@ class HttpCheck(Check):
"""
url = ('http:' + self.match) % target
- data = {'target': jsonutils.dumps(target),
+
+ # Convert instances of object() in target temporarily to
+ # empty dict to avoid circular reference detection
+ # errors in jsonutils.dumps().
+ temp_target = copy.deepcopy(target)
+ for key in target.keys():
+ element = target.get(key)
+ if type(element) is object:
+ temp_target[key] = {}
+
+ data = {'target': jsonutils.dumps(temp_target),
'credentials': jsonutils.dumps(creds)}
post_data = urlparse.urlencode(data)
f = urlrequest.urlopen(url, post_data)
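Note: for reference, this is roughly what the policy language documented above looks like in use; the rule name and credential values are illustrative, and use_conf=False avoids the need for a policy file on disk:

    # Illustrative use of the policy language via the Enforcer.
    from nova.openstack.common import policy

    enforcer = policy.Enforcer(use_conf=False)
    enforcer.set_rules(policy.Rules.load_json(
        '{"compute:resize": "role:admin or project_id:%(project_id)s"}'))

    target = {'project_id': 'p1'}
    creds = {'roles': ['member'], 'project_id': 'p1'}
    print(enforcer.enforce('compute:resize', target, creds))  # True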
diff --git a/nova/openstack/common/report/generators/threading.py b/nova/openstack/common/report/generators/threading.py
index 26bbef818e..a5b6fe02c4 100644
--- a/nova/openstack/common/report/generators/threading.py
+++ b/nova/openstack/common/report/generators/threading.py
@@ -19,7 +19,10 @@ generators for generating the models in
:mod:`openstack.common.report.models.threading`.
"""
+from __future__ import absolute_import
+
import sys
+import threading
import greenlet
@@ -35,14 +38,26 @@ class ThreadReportGenerator(object):
This generator returns a collection of
:class:`openstack.common.report.models.threading.ThreadModel`
objects by introspecting the current python state using
- :func:`sys._current_frames()` .
+ :func:`sys._current_frames()` . Its constructor may optionally
+ be passed a frame object. This frame object will be interpreted
+ as the actual stack trace for the current thread, and, come generation
+ time, will be used to replace the stack trace of the thread in which
+ this code is running.
"""
+ def __init__(self, curr_thread_traceback=None):
+ self.traceback = curr_thread_traceback
+
def __call__(self):
- threadModels = [
- tm.ThreadModel(thread_id, stack)
+ threadModels = dict(
+ (thread_id, tm.ThreadModel(thread_id, stack))
for thread_id, stack in sys._current_frames().items()
- ]
+ )
+
+ if self.traceback is not None:
+ curr_thread_id = threading.current_thread().ident
+ threadModels[curr_thread_id] = tm.ThreadModel(curr_thread_id,
+ self.traceback)
return mwdv.ModelWithDefaultViews(threadModels,
text_view=text_views.MultiView())
diff --git a/nova/openstack/common/report/guru_meditation_report.py b/nova/openstack/common/report/guru_meditation_report.py
index 6377fe5b65..8310d3c847 100644
--- a/nova/openstack/common/report/guru_meditation_report.py
+++ b/nova/openstack/common/report/guru_meditation_report.py
@@ -80,8 +80,9 @@ class GuruMeditation(object):
timestamp_fmt = "%Y%m%d%H%M%S"
- def __init__(self, version_obj, *args, **kwargs):
+ def __init__(self, version_obj, sig_handler_tb=None, *args, **kwargs):
self.version_obj = version_obj
+ self.traceback = sig_handler_tb
super(GuruMeditation, self).__init__(*args, **kwargs)
self.start_section_index = len(self.sections)
@@ -123,11 +124,11 @@ class GuruMeditation(object):
if signum:
signal.signal(signum,
- lambda *args: cls.handle_signal(
- version, service_name, log_dir, *args))
+ lambda sn, tb: cls.handle_signal(
+ version, service_name, log_dir, tb))
@classmethod
- def handle_signal(cls, version, service_name, log_dir, *args):
+ def handle_signal(cls, version, service_name, log_dir, traceback):
"""The Signal Handler
This method (indirectly) handles receiving a registered signal and
@@ -142,10 +143,11 @@ class GuruMeditation(object):
:param version: the version object for the current product
:param service_name: this program name used to construct logfile name
:param logdir: path to a log directory where to create a file
+ :param traceback: the traceback provided to the signal handler
"""
try:
- res = cls(version).run()
+ res = cls(version, traceback).run()
except Exception:
print("Unable to run Guru Meditation Report!",
file=sys.stderr)
@@ -172,7 +174,7 @@ class GuruMeditation(object):
pgen.PackageReportGenerator(self.version_obj))
self.add_section('Threads',
- tgen.ThreadReportGenerator())
+ tgen.ThreadReportGenerator(self.traceback))
self.add_section('Green Threads',
tgen.GreenThreadReportGenerator())
@@ -209,8 +211,10 @@ class TextGuruMeditation(GuruMeditation, report.TextReport):
- Configuration Options
:param version_obj: the version object for the current product
+ :param traceback: an (optional) frame object providing the actual
+ traceback for the current thread
"""
- def __init__(self, version_obj):
- super(TextGuruMeditation, self).__init__(version_obj,
+ def __init__(self, version_obj, traceback=None):
+ super(TextGuruMeditation, self).__init__(version_obj, traceback,
'Guru Meditation')
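Note: the handler change works because Python passes every signal handler the interrupted stack frame as its second argument; capturing it lets the report show where the signalled thread really was instead of inside the report machinery. A minimal sketch of that mechanism:

    # A Python signal handler receives (signum, frame); the frame is the
    # point at which the main thread was interrupted.
    import signal
    import traceback


    def handle_signal(signum, frame):
        traceback.print_stack(frame)

    signal.signal(signal.SIGUSR1, handle_signal)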
diff --git a/nova/openstack/common/report/models/base.py b/nova/openstack/common/report/models/base.py
index a9b070b83f..d840c5b514 100644
--- a/nova/openstack/common/report/models/base.py
+++ b/nova/openstack/common/report/models/base.py
@@ -24,6 +24,8 @@ the report serialization process.
import collections as col
import copy
+import six
+
class ReportModel(col.MutableMapping):
"""A Report Data Model
@@ -119,19 +121,42 @@ class ReportModel(col.MutableMapping):
def __iter__(self):
return self.data.__iter__()
- def set_current_view_type(self, tp):
+ def set_current_view_type(self, tp, visited=None):
"""Set the current view type
This method attempts to set the current view
type for this model and all submodels by calling
- itself recursively on all values (and ignoring the
- ones that are not themselves models)
+ itself recursively on all values, traversing
+ intervening sequences and mappings when possible,
+ and ignoring all other objects.
:param tp: the type of the view ('text', 'json', 'xml', etc)
+ :param visited: a set of object ids for which the corresponding objects
+ have already had their view type set
"""
- for key in self:
- try:
- self[key].set_current_view_type(tp)
- except AttributeError:
- pass
+ if visited is None:
+ visited = set()
+
+ def traverse_obj(obj):
+ oid = id(obj)
+
+ # don't die on recursive structures,
+ # and don't treat strings like sequences
+ if oid in visited or isinstance(obj, six.string_types):
+ return
+
+ visited.add(oid)
+
+ if hasattr(obj, 'set_current_view_type'):
+ obj.set_current_view_type(tp, visited=visited)
+
+ if isinstance(obj, col.Sequence):
+ for item in obj:
+ traverse_obj(item)
+
+ elif isinstance(obj, col.Mapping):
+ for val in six.itervalues(obj):
+ traverse_obj(val)
+
+ traverse_obj(self)
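Note: the visited set keyed by id() is the usual guard against self-referential containers; without it the traversal above would never terminate. A stripped-down illustration of the same pattern:

    # Cycle-safe traversal using object ids, mirroring the guard in
    # set_current_view_type().
    def walk(obj, visited=None):
        if visited is None:
            visited = set()
        if id(obj) in visited or isinstance(obj, str):
            return
        visited.add(id(obj))
        if isinstance(obj, dict):
            for val in obj.values():
                walk(val, visited)
        elif isinstance(obj, (list, tuple)):
            for item in obj:
                walk(item, visited)

    d = {'name': 'root'}
    d['self'] = d  # a cycle that would otherwise recurse forever
    walk(d)        # terminates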
diff --git a/nova/openstack/common/report/models/conf.py b/nova/openstack/common/report/models/conf.py
index 8a0ef3796b..1521b5bffd 100644
--- a/nova/openstack/common/report/models/conf.py
+++ b/nova/openstack/common/report/models/conf.py
@@ -43,7 +43,7 @@ class ConfigModel(mwdv.ModelWithDefaultViews):
def opt_value(opt_obj, value):
if opt_obj['opt'].secret:
- return '*******'
+ return '***'
else:
return value
diff --git a/nova/openstack/common/report/models/with_default_views.py b/nova/openstack/common/report/models/with_default_views.py
index 6ca8dbb14f..0e165721a8 100644
--- a/nova/openstack/common/report/models/with_default_views.py
+++ b/nova/openstack/common/report/models/with_default_views.py
@@ -64,9 +64,9 @@ class ModelWithDefaultViews(base_model.ReportModel):
del newargs[k]
super(ModelWithDefaultViews, self).__init__(*args, **newargs)
- def set_current_view_type(self, tp):
+ def set_current_view_type(self, tp, visited=None):
self.attached_view = self.views[tp]
- super(ModelWithDefaultViews, self).set_current_view_type(tp)
+ super(ModelWithDefaultViews, self).set_current_view_type(tp, visited)
def __getattr__(self, attrname):
if attrname[:3] == 'to_':
diff --git a/nova/openstack/common/service.py b/nova/openstack/common/service.py
index f682b2b745..616db6afa4 100644
--- a/nova/openstack/common/service.py
+++ b/nova/openstack/common/service.py
@@ -397,7 +397,7 @@ class ProcessLauncher(object):
self.running = True
self.sigcaught = None
except eventlet.greenlet.GreenletExit:
- LOG.info(_LI("Wait called after thread killed.  Cleaning up."))
+ LOG.info(_LI("Wait called after thread killed. Cleaning up."))
self.stop()
@@ -434,8 +434,8 @@ class Service(object):
def start(self):
pass
- def stop(self):
- self.tg.stop()
+ def stop(self, graceful=False):
+ self.tg.stop(graceful)
self.tg.wait()
# Signal that service cleanup is done:
if not self._done.ready():
diff --git a/nova/openstack/common/uuidutils.py b/nova/openstack/common/uuidutils.py
index 234b880c99..69a78b97c7 100644
--- a/nova/openstack/common/uuidutils.py
+++ b/nova/openstack/common/uuidutils.py
@@ -32,6 +32,6 @@ def is_uuid_like(val):
"""
try:
- return str(uuid.UUID(val)) == val
+ return str(uuid.UUID(val)).lower() == val.lower()
except (TypeError, ValueError, AttributeError):
return False
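Note: the .lower() on both sides is needed because uuid.UUID() normalises its string form to lowercase, so uppercase (but otherwise valid) UUIDs previously failed the strict round-trip comparison:

    # Why the case-insensitive comparison matters.
    import uuid

    val = 'A0A0AAAA-BBBB-CCCC-DDDD-EEEEFFFF0000'
    print(str(uuid.UUID(val)) == val)                  # False: case was lost
    print(str(uuid.UUID(val)).lower() == val.lower())  # True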
diff --git a/nova/openstack/common/versionutils.py b/nova/openstack/common/versionutils.py
index c2179a821a..8b98361634 100644
--- a/nova/openstack/common/versionutils.py
+++ b/nova/openstack/common/versionutils.py
@@ -136,7 +136,7 @@ class deprecated(object):
# TODO(tsufiev): change `functools` module to `six` as
# soon as six 1.7.4 (with fix for passing `assigned`
# argument to underlying `functools.wraps`) is released
- # and added to the nova-incubator requirements
+ # and added to the oslo-incubator requirements
@functools.wraps(orig_init, assigned=('__name__', '__doc__'))
def new_init(self, *args, **kwargs):
LOG.deprecated(msg, details)
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_extended_availability_zone.py b/nova/tests/unit/api/openstack/compute/contrib/test_extended_availability_zone.py
index 4044a30d66..35f77078b3 100644
--- a/nova/tests/unit/api/openstack/compute/contrib/test_extended_availability_zone.py
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_extended_availability_zone.py
@@ -91,7 +91,8 @@ class ExtendedAvailabilityZoneTestV21(test.TestCase):
def _make_request(self, url):
req = webob.Request.blank(url)
req.headers['Accept'] = self.content_type
- res = req.get_response(fakes.wsgi_app_v21(init_only=None))
+ res = req.get_response(fakes.wsgi_app_v21(init_only=(
+ 'servers', 'os-extended-availability-zone')))
return res
def _get_server(self, body):
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_server_group_quotas.py b/nova/tests/unit/api/openstack/compute/contrib/test_server_group_quotas.py
index d87b39d00e..81a9ebbbd8 100644
--- a/nova/tests/unit/api/openstack/compute/contrib/test_server_group_quotas.py
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_server_group_quotas.py
@@ -94,7 +94,8 @@ class ServerGroupQuotasTestV21(test.TestCase):
sgroup = server_group_template()
policies = ['anti-affinity']
sgroup['policies'] = policies
- res_dict = self.controller.create(self.req, {'server_group': sgroup})
+ res_dict = self.controller.create(self.req,
+ body={'server_group': sgroup})
self.assertEqual(res_dict['server_group']['name'], 'test')
self.assertTrue(uuidutils.is_uuid_like(res_dict['server_group']['id']))
self.assertEqual(res_dict['server_group']['policies'], policies)
@@ -106,19 +107,19 @@ class ServerGroupQuotasTestV21(test.TestCase):
sgroup['policies'] = policies
# Start by creating as many server groups as we're allowed to.
for i in range(CONF.quota_server_groups):
- self.controller.create(self.req, {'server_group': sgroup})
+ self.controller.create(self.req, body={'server_group': sgroup})
# Then, creating a server group should fail.
self.assertRaises(webob.exc.HTTPForbidden,
self.controller.create,
- self.req, {'server_group': sgroup})
+ self.req, body={'server_group': sgroup})
def test_delete_server_group_by_admin(self):
self._setup_quotas()
sgroup = server_group_template()
policies = ['anti-affinity']
sgroup['policies'] = policies
- res = self.controller.create(self.req, {'server_group': sgroup})
+ res = self.controller.create(self.req, body={'server_group': sgroup})
sg_id = res['server_group']['id']
context = self.req.environ['nova.context']
diff --git a/nova/tests/unit/api/openstack/compute/contrib/test_server_groups.py b/nova/tests/unit/api/openstack/compute/contrib/test_server_groups.py
index 5fcfcf3853..45e186c6da 100644
--- a/nova/tests/unit/api/openstack/compute/contrib/test_server_groups.py
+++ b/nova/tests/unit/api/openstack/compute/contrib/test_server_groups.py
@@ -78,6 +78,7 @@ def server_group_db(sg):
class ServerGroupTestV21(test.TestCase):
+ validation_error = exception.ValidationError
def setUp(self):
super(ServerGroupTestV21, self).setUp()
@@ -89,14 +90,15 @@ class ServerGroupTestV21(test.TestCase):
def test_create_server_group_with_no_policies(self):
sgroup = server_group_template()
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- self.req, {'server_group': sgroup})
+ self.assertRaises(self.validation_error, self.controller.create,
+ self.req, body={'server_group': sgroup})
def test_create_server_group_normal(self):
sgroup = server_group_template()
policies = ['anti-affinity']
sgroup['policies'] = policies
- res_dict = self.controller.create(self.req, {'server_group': sgroup})
+ res_dict = self.controller.create(self.req,
+ body={'server_group': sgroup})
self.assertEqual(res_dict['server_group']['name'], 'test')
self.assertTrue(uuidutils.is_uuid_like(res_dict['server_group']['id']))
self.assertEqual(res_dict['server_group']['policies'], policies)
@@ -149,88 +151,88 @@ class ServerGroupTestV21(test.TestCase):
def test_create_server_group_with_illegal_name(self):
# blank name
sgroup = server_group_template(name='', policies=['test_policy'])
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- self.req, {'server_group': sgroup})
+ self.assertRaises(self.validation_error, self.controller.create,
+ self.req, body={'server_group': sgroup})
# name with length 256
sgroup = server_group_template(name='1234567890' * 26,
policies=['test_policy'])
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- self.req, {'server_group': sgroup})
+ self.assertRaises(self.validation_error, self.controller.create,
+ self.req, body={'server_group': sgroup})
# non-string name
sgroup = server_group_template(name=12, policies=['test_policy'])
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- self.req, {'server_group': sgroup})
+ self.assertRaises(self.validation_error, self.controller.create,
+ self.req, body={'server_group': sgroup})
# name with leading spaces
sgroup = server_group_template(name=' leading spaces',
policies=['test_policy'])
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- self.req, {'server_group': sgroup})
+ self.assertRaises(self.validation_error, self.controller.create,
+ self.req, body={'server_group': sgroup})
# name with trailing spaces
sgroup = server_group_template(name='trailing space ',
policies=['test_policy'])
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- self.req, {'server_group': sgroup})
+ self.assertRaises(self.validation_error, self.controller.create,
+ self.req, body={'server_group': sgroup})
# name with all spaces
sgroup = server_group_template(name=' ',
policies=['test_policy'])
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- self.req, {'server_group': sgroup})
+ self.assertRaises(self.validation_error, self.controller.create,
+ self.req, body={'server_group': sgroup})
def test_create_server_group_with_illegal_policies(self):
# blank policy
sgroup = server_group_template(name='fake-name', policies='')
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- self.req, {'server_group': sgroup})
+ self.assertRaises(self.validation_error, self.controller.create,
+ self.req, body={'server_group': sgroup})
# policy as integer
sgroup = server_group_template(name='fake-name', policies=7)
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- self.req, {'server_group': sgroup})
+ self.assertRaises(self.validation_error, self.controller.create,
+ self.req, body={'server_group': sgroup})
# policy as string
sgroup = server_group_template(name='fake-name', policies='invalid')
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- self.req, {'server_group': sgroup})
+ self.assertRaises(self.validation_error, self.controller.create,
+ self.req, body={'server_group': sgroup})
# policy as None
sgroup = server_group_template(name='fake-name', policies=None)
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- self.req, {'server_group': sgroup})
+ self.assertRaises(self.validation_error, self.controller.create,
+ self.req, body={'server_group': sgroup})
def test_create_server_group_conflicting_policies(self):
sgroup = server_group_template()
policies = ['anti-affinity', 'affinity']
sgroup['policies'] = policies
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- self.req, {'server_group': sgroup})
+ self.assertRaises(self.validation_error, self.controller.create,
+ self.req, body={'server_group': sgroup})
def test_create_server_group_with_duplicate_policies(self):
sgroup = server_group_template()
policies = ['affinity', 'affinity']
sgroup['policies'] = policies
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- self.req, {'server_group': sgroup})
+ self.assertRaises(self.validation_error, self.controller.create,
+ self.req, body={'server_group': sgroup})
def test_create_server_group_not_supported(self):
sgroup = server_group_template()
policies = ['storage-affinity', 'anti-affinity', 'rack-affinity']
sgroup['policies'] = policies
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- self.req, {'server_group': sgroup})
+ self.assertRaises(self.validation_error, self.controller.create,
+ self.req, body={'server_group': sgroup})
def test_create_server_group_with_no_body(self):
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller.create, self.req, None)
+ self.assertRaises(self.validation_error,
+ self.controller.create, self.req, body=None)
def test_create_server_group_with_no_server_group(self):
body = {'no-instanceGroup': None}
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller.create, self.req, body)
+ self.assertRaises(self.validation_error,
+ self.controller.create, self.req, body=body)
def test_list_server_group_by_tenant(self):
groups = []
@@ -338,6 +340,7 @@ class ServerGroupTestV21(test.TestCase):
class ServerGroupTestV2(ServerGroupTestV21):
+ validation_error = webob.exc.HTTPBadRequest
def _setup_controller(self):
ext_mgr = extensions.ExtensionManager()
diff --git a/nova/tests/unit/cells/test_cells_manager.py b/nova/tests/unit/cells/test_cells_manager.py
index 3b75c58b0a..38cff5cb55 100644
--- a/nova/tests/unit/cells/test_cells_manager.py
+++ b/nova/tests/unit/cells/test_cells_manager.py
@@ -758,14 +758,22 @@ class CellsManagerClassTestCase(test.NoDBTestCase):
self.cells_manager.soft_delete_instance(self.ctxt,
instance='fake-instance')
- def test_resize_instance(self):
+ def _test_resize_instance(self, clean_shutdown=True):
self.mox.StubOutWithMock(self.msg_runner, 'resize_instance')
self.msg_runner.resize_instance(self.ctxt, 'fake-instance',
- 'fake-flavor', 'fake-updates')
+ 'fake-flavor', 'fake-updates',
+ clean_shutdown=clean_shutdown)
self.mox.ReplayAll()
self.cells_manager.resize_instance(
self.ctxt, instance='fake-instance', flavor='fake-flavor',
- extra_instance_updates='fake-updates')
+ extra_instance_updates='fake-updates',
+ clean_shutdown=clean_shutdown)
+
+ def test_resize_instance(self):
+ self._test_resize_instance()
+
+ def test_resize_instance_forced_shutdown(self):
+ self._test_resize_instance(clean_shutdown=False)
def test_live_migrate_instance(self):
self.mox.StubOutWithMock(self.msg_runner, 'live_migrate_instance')
diff --git a/nova/tests/unit/cells/test_cells_messaging.py b/nova/tests/unit/cells/test_cells_messaging.py
index e3116fad0b..e9ae693cf4 100644
--- a/nova/tests/unit/cells/test_cells_messaging.py
+++ b/nova/tests/unit/cells/test_cells_messaging.py
@@ -1309,14 +1309,22 @@ class CellsTargetedMethodsTestCase(test.TestCase):
def test_unpause_instance(self):
self._test_instance_action_method('unpause', (), {}, (), {}, False)
- def test_resize_instance(self):
+ def _test_resize_instance(self, clean_shutdown=True):
kwargs = dict(flavor=dict(id=42, flavorid='orangemocchafrappuccino'),
- extra_instance_updates=dict(cow='moo'))
- expected_kwargs = dict(flavor_id='orangemocchafrappuccino', cow='moo')
+ extra_instance_updates=dict(cow='moo'),
+ clean_shutdown=clean_shutdown)
+ expected_kwargs = dict(flavor_id='orangemocchafrappuccino', cow='moo',
+ clean_shutdown=clean_shutdown)
self._test_instance_action_method('resize', (), kwargs,
(), expected_kwargs,
False)
+ def test_resize_instance(self):
+ self._test_resize_instance()
+
+ def test_resize_instance_forced_shutdown(self):
+ self._test_resize_instance(clean_shutdown=False)
+
def test_live_migrate_instance(self):
kwargs = dict(block_migration='fake-block-mig',
disk_over_commit='fake-commit',
diff --git a/nova/tests/unit/cells/test_cells_rpcapi.py b/nova/tests/unit/cells/test_cells_rpcapi.py
index 9b095cabeb..0822af35d7 100644
--- a/nova/tests/unit/cells/test_cells_rpcapi.py
+++ b/nova/tests/unit/cells/test_cells_rpcapi.py
@@ -666,12 +666,14 @@ class CellsAPITestCase(test.NoDBTestCase):
dict(cow='moo'),
'fake-hint',
'fake-flavor',
- 'fake-reservations')
+ 'fake-reservations',
+ clean_shutdown=True)
expected_args = {'instance': 'fake-instance',
'flavor': 'fake-flavor',
- 'extra_instance_updates': dict(cow='moo')}
+ 'extra_instance_updates': dict(cow='moo'),
+ 'clean_shutdown': True}
self._check_result(call_info, 'resize_instance',
- expected_args, version='1.20')
+ expected_args, version='1.33')
def test_live_migrate_instance(self):
call_info = self._stub_rpc_method('cast', None)
diff --git a/nova/tests/unit/compute/test_compute_api.py b/nova/tests/unit/compute/test_compute_api.py
index a6c0c483f0..2a7deefadb 100644
--- a/nova/tests/unit/compute/test_compute_api.py
+++ b/nova/tests/unit/compute/test_compute_api.py
@@ -1379,7 +1379,8 @@ class _ComputeAPIUnitTestMixIn(object):
allow_mig_same_host=False,
project_id=None,
extra_kwargs=None,
- same_flavor=False):
+ same_flavor=False,
+ clean_shutdown=True):
if extra_kwargs is None:
extra_kwargs = {}
@@ -1482,16 +1483,20 @@ class _ComputeAPIUnitTestMixIn(object):
self.context, fake_inst, extra_kwargs,
scheduler_hint=scheduler_hint,
flavor=mox.IsA(objects.Flavor),
- reservations=expected_reservations)
+ reservations=expected_reservations,
+ clean_shutdown=clean_shutdown)
self.mox.ReplayAll()
if flavor_id_passed:
self.compute_api.resize(self.context, fake_inst,
flavor_id='new-flavor-id',
+ clean_shutdown=clean_shutdown,
**extra_kwargs)
else:
- self.compute_api.resize(self.context, fake_inst, **extra_kwargs)
+ self.compute_api.resize(self.context, fake_inst,
+ clean_shutdown=clean_shutdown,
+ **extra_kwargs)
def _test_migrate(self, *args, **kwargs):
self._test_resize(*args, flavor_id_passed=False, **kwargs)
@@ -1511,6 +1516,9 @@ class _ComputeAPIUnitTestMixIn(object):
def test_resize_different_project_id(self):
self._test_resize(project_id='different')
+ def test_resize_forced_shutdown(self):
+ self._test_resize(clean_shutdown=False)
+
def test_migrate(self):
self._test_migrate()
diff --git a/nova/tests/unit/compute/test_rpcapi.py b/nova/tests/unit/compute/test_rpcapi.py
index bdef05922c..7a3facd48c 100644
--- a/nova/tests/unit/compute/test_rpcapi.py
+++ b/nova/tests/unit/compute/test_rpcapi.py
@@ -302,13 +302,22 @@ class ComputeRpcAPITestCase(test.TestCase):
migrate_data=None, version='3.19')
def test_prep_resize(self):
+ self.flags(compute='3.0', group='upgrade_levels')
+ self._test_compute_api('prep_resize', 'cast',
+ instance=self.fake_instance_obj, instance_type='fake_type',
+ image='fake_image', host='host',
+ reservations=list('fake_res'),
+ request_spec='fake_spec',
+ filter_properties={'fakeprop': 'fakeval'},
+ node='node', version='3.0')
+ self.flags(compute='3.38', group='upgrade_levels')
self._test_compute_api('prep_resize', 'cast',
instance=self.fake_instance_obj, instance_type='fake_type',
image='fake_image', host='host',
reservations=list('fake_res'),
request_spec='fake_spec',
filter_properties={'fakeprop': 'fakeval'},
- node='node')
+ node='node', clean_shutdown=True, version='3.38')
def test_reboot_instance(self):
self.maxDiff = None
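
A note on the test above: pinning [upgrade_levels]compute to 3.0 and then to 3.38 exercises the RPC version negotiation for the new clean_shutdown kwarg. Below is a minimal sketch of that pattern, assuming an oslo.messaging-style RPCClient with can_send_version(); the class and argument names are illustrative, not nova's exact code.

class ComputeAPISketch(object):
    def __init__(self, client):
        self.client = client  # an oslo.messaging RPCClient is assumed

    def prep_resize(self, ctxt, image, instance, instance_type, host,
                    reservations=None, request_spec=None,
                    filter_properties=None, node=None, clean_shutdown=True):
        msg_args = dict(instance=instance, instance_type=instance_type,
                        image=image, reservations=reservations,
                        request_spec=request_spec,
                        filter_properties=filter_properties, node=node)
        if self.client.can_send_version('3.38'):
            # New enough peers understand the extra kwarg.
            version = '3.38'
            msg_args['clean_shutdown'] = clean_shutdown
        else:
            # Pinned to an older version: drop the new kwarg so the
            # cast stays wire-compatible with old computes.
            version = '3.0'
        cctxt = self.client.prepare(server=host, version=version)
        cctxt.cast(ctxt, 'prep_resize', **msg_args)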
diff --git a/nova/tests/unit/conductor/test_conductor.py b/nova/tests/unit/conductor/test_conductor.py
index 4e8aab9ef2..2aade3a63a 100644
--- a/nova/tests/unit/conductor/test_conductor.py
+++ b/nova/tests/unit/conductor/test_conductor.py
@@ -1184,7 +1184,7 @@ class _BaseTaskTestCase(object):
{'host': 'destination'}, True, False, None,
'block_migration', 'disk_over_commit')
- def test_cold_migrate(self):
+ def _test_cold_migrate(self, clean_shutdown=True):
self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
@@ -1222,7 +1222,8 @@ class _BaseTaskTestCase(object):
self.conductor_manager.compute_rpcapi.prep_resize(
self.context, 'image', mox.IsA(objects.Instance),
mox.IsA(objects.Flavor), 'host1', [], request_spec=request_spec,
- filter_properties=filter_properties, node=None)
+ filter_properties=filter_properties, node=None,
+ clean_shutdown=clean_shutdown)
self.mox.ReplayAll()
@@ -1233,11 +1234,19 @@ class _BaseTaskTestCase(object):
# The API method is actually 'resize_instance'. It gets
# converted into 'migrate_server' when doing RPC.
self.conductor.resize_instance(
- self.context, inst_obj, {}, scheduler_hint, flavor, [])
+ self.context, inst_obj, {}, scheduler_hint, flavor, [],
+ clean_shutdown)
else:
self.conductor.migrate_server(
self.context, inst_obj, scheduler_hint,
- False, False, flavor, None, None, [])
+ False, False, flavor, None, None, [],
+ clean_shutdown)
+
+ def test_cold_migrate(self):
+ self._test_cold_migrate()
+
+ def test_cold_migrate_forced_shutdown(self):
+ self._test_cold_migrate(clean_shutdown=False)
def test_build_instances(self):
system_metadata = flavors.save_flavor_info({},
@@ -1824,7 +1833,8 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
self.assertRaises(exc.NoValidHost,
self.conductor._cold_migrate,
self.context, inst_obj,
- flavor, filter_props, [resvs])
+ flavor, filter_props, [resvs],
+ clean_shutdown=True)
def test_cold_migrate_no_valid_host_back_in_stopped_state(self):
flavor = flavors.get_flavor_by_name('m1.tiny')
@@ -1885,7 +1895,8 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
self.assertRaises(exc.NoValidHost,
self.conductor._cold_migrate, self.context,
- inst_obj, flavor, filter_props, [resvs])
+ inst_obj, flavor, filter_props, [resvs],
+ clean_shutdown=True)
def test_cold_migrate_no_valid_host_error_msg(self):
flavor = flavors.get_flavor_by_name('m1.tiny')
@@ -1915,7 +1926,8 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
) as (image_mock, brs_mock, sig_mock, set_vm_mock, select_dest_mock):
nvh = self.assertRaises(exc.NoValidHost,
self.conductor._cold_migrate, self.context,
- inst_obj, flavor, filter_props, [resvs])
+ inst_obj, flavor, filter_props, [resvs],
+ clean_shutdown=True)
self.assertIn('cold migrate', nvh.message)
def test_cold_migrate_exception_host_in_error_state_and_raise(self):
@@ -1974,7 +1986,8 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
'flavor', hosts[0]['host'], [resvs],
request_spec=request_spec,
filter_properties=expected_filter_props,
- node=hosts[0]['nodename']).AndRaise(exc_info)
+ node=hosts[0]['nodename'],
+ clean_shutdown=True).AndRaise(exc_info)
updates = {'vm_state': vm_states.STOPPED,
'task_state': None}
@@ -1996,7 +2009,8 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
self.assertRaises(test.TestingException,
self.conductor._cold_migrate,
self.context, inst_obj, 'flavor',
- filter_props, [resvs])
+ filter_props, [resvs],
+ clean_shutdown=True)
def test_resize_no_valid_host_error_msg(self):
flavor = flavors.get_flavor_by_name('m1.tiny')
@@ -2028,7 +2042,7 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
nvh = self.assertRaises(exc.NoValidHost,
self.conductor._cold_migrate, self.context,
inst_obj, flavor_new, filter_props,
- [resvs])
+ [resvs], clean_shutdown=True)
self.assertIn('resize', nvh.message)
def test_build_instances_instance_not_found(self):
diff --git a/nova/tests/unit/objects/test_service.py b/nova/tests/unit/objects/test_service.py
index 122d9ddf79..2d837a676d 100644
--- a/nova/tests/unit/objects/test_service.py
+++ b/nova/tests/unit/objects/test_service.py
@@ -110,9 +110,9 @@ class _TestServiceObject(object):
db.service_create(self.context, {'host': 'fake-host'}).AndReturn(
fake_service)
self.mox.ReplayAll()
- service_obj = service.Service()
+ service_obj = service.Service(context=self.context)
service_obj.host = 'fake-host'
- service_obj.create(self.context)
+ service_obj.create()
self.assertEqual(fake_service['id'], service_obj.id)
def test_recreate_fails(self):
@@ -120,9 +120,9 @@ class _TestServiceObject(object):
db.service_create(self.context, {'host': 'fake-host'}).AndReturn(
fake_service)
self.mox.ReplayAll()
- service_obj = service.Service()
+ service_obj = service.Service(context=self.context)
service_obj.host = 'fake-host'
- service_obj.create(self.context)
+ service_obj.create()
self.assertRaises(exception.ObjectActionError, service_obj.create,
self.context)
@@ -131,16 +131,16 @@ class _TestServiceObject(object):
db.service_update(self.context, 123, {'host': 'fake-host'}).AndReturn(
fake_service)
self.mox.ReplayAll()
- service_obj = service.Service()
+ service_obj = service.Service(context=self.context)
service_obj.id = 123
service_obj.host = 'fake-host'
- service_obj.save(self.context)
+ service_obj.save()
@mock.patch.object(db, 'service_create',
return_value=fake_service)
def test_set_id_failure(self, db_mock):
- service_obj = service.Service()
- service_obj.create(self.context)
+ service_obj = service.Service(context=self.context)
+ service_obj.create()
self.assertRaises(exception.ReadOnlyFieldError, setattr,
service_obj, 'id', 124)
@@ -148,9 +148,9 @@ class _TestServiceObject(object):
self.mox.StubOutWithMock(db, 'service_destroy')
db.service_destroy(self.context, 123)
self.mox.ReplayAll()
- service_obj = service.Service()
+ service_obj = service.Service(context=self.context)
service_obj.id = 123
- service_obj.destroy(self.context)
+ service_obj.destroy()
def test_destroy(self):
# The test harness needs db.service_destroy to work,
@@ -194,10 +194,10 @@ class _TestServiceObject(object):
'get_by_metadata_key')
db.service_get_all(self.context, disabled=None).AndReturn(
[dict(fake_service, topic='compute')])
- agg = aggregate.Aggregate()
+ agg = aggregate.Aggregate(context=self.context)
agg.name = 'foo'
agg.metadata = {'availability_zone': 'test-az'}
- agg.create(self.context)
+ agg.create()
agg.hosts = [fake_service['host']]
aggregate.AggregateList.get_by_metadata_key(self.context,
'availability_zone', hosts=set(agg.hosts)).AndReturn([agg])
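
The object test updates above all follow one pattern: the request context moves from the method call (obj.create(self.context)) to the constructor (Service(context=self.context)), after which create()/save()/destroy() take no arguments. A toy sketch of that pattern, with db_create as a hypothetical stand-in for the real DB layer:

def db_create(context, values):
    # Hypothetical stand-in for the real DB call.
    return dict(values, created_with=context)


class ContextualObject(object):
    """Carries its request context from construction onwards."""

    def __init__(self, context=None):
        self._context = context

    def create(self):
        if self._context is None:
            raise RuntimeError('object has no context set')
        return db_create(self._context, {'host': getattr(self, 'host', None)})


obj = ContextualObject(context='fake-context')
obj.host = 'fake-host'
obj.create()  # no context argument needed any more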
diff --git a/nova/tests/unit/objects/test_tag.py b/nova/tests/unit/objects/test_tag.py
index 487858afb1..936319e075 100644
--- a/nova/tests/unit/objects/test_tag.py
+++ b/nova/tests/unit/objects/test_tag.py
@@ -32,8 +32,8 @@ fake_tag2 = {
fake_tag_list = [fake_tag1, fake_tag2]
-def _get_tag(resource_id, tag_name):
- t = tag.Tag()
+def _get_tag(resource_id, tag_name, context=None):
+ t = tag.Tag(context=context)
t.resource_id = resource_id
t.tag = tag_name
return t
@@ -43,8 +43,8 @@ class _TestTagObject(object):
@mock.patch('nova.db.instance_tag_add')
def test_create(self, tag_add):
tag_add.return_value = fake_tag1
- tag_obj = _get_tag(RESOURCE_ID, TAG_NAME1)
- tag_obj.create(self.context)
+ tag_obj = _get_tag(RESOURCE_ID, TAG_NAME1, context=self.context)
+ tag_obj.create()
tag_add.assert_called_once_with(self.context, RESOURCE_ID, TAG_NAME1)
self.compare_obj(tag_obj, fake_tag1)
diff --git a/nova/tests/unit/objects/test_virtual_interface.py b/nova/tests/unit/objects/test_virtual_interface.py
index 6c416315c4..8eba8fac4e 100644
--- a/nova/tests/unit/objects/test_virtual_interface.py
+++ b/nova/tests/unit/objects/test_virtual_interface.py
@@ -67,7 +67,7 @@ class _TestVirtualInterface(object):
self._compare(self, fake_vif, vif)
def test_create(self):
- vif = vif_obj.VirtualInterface()
+ vif = vif_obj.VirtualInterface(context=self.context)
vif.address = '00:00:00:00:00:00'
vif.network_id = 123
vif.instance_uuid = 'fake-uuid'
@@ -75,7 +75,7 @@ class _TestVirtualInterface(object):
with mock.patch.object(db, 'virtual_interface_create') as create:
create.return_value = fake_vif
- vif.create(self.context)
+ vif.create()
self.assertEqual(self.context, vif._context)
vif._context = None
diff --git a/nova/tests/unit/virt/hyperv/test_imagecache.py b/nova/tests/unit/virt/hyperv/test_imagecache.py
new file mode 100644
index 0000000000..72ce5cce6f
--- /dev/null
+++ b/nova/tests/unit/virt/hyperv/test_imagecache.py
@@ -0,0 +1,119 @@
+# Copyright 2014 Cloudbase Solutions Srl
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+
+import mock
+from oslo.config import cfg
+
+from nova import exception
+from nova import test
+from nova.tests.unit import fake_instance
+from nova.virt.hyperv import constants
+from nova.virt.hyperv import imagecache
+
+CONF = cfg.CONF
+
+
+class ImageCacheTestCase(test.NoDBTestCase):
+ """Unit tests for the Hyper-V ImageCache class."""
+
+ FAKE_BASE_DIR = 'fake/base/dir'
+ FAKE_FORMAT = 'fake_format'
+ FAKE_IMAGE_REF = 'fake_image_ref'
+
+ def setUp(self):
+ super(ImageCacheTestCase, self).setUp()
+
+ self.context = 'fake-context'
+ self.instance = fake_instance.fake_instance_obj(self.context)
+
+ # utilsfactory checks the host OS version via get_hostutils in
+ # order to return the proper utils class, so it must be mocked here.
+ patched_func = mock.patch.object(imagecache.utilsfactory,
+ "get_hostutils")
+ patched_get_pathutils = mock.patch.object(imagecache.utilsfactory,
+ "get_pathutils")
+ patched_func.start()
+ patched_get_pathutils.start()
+ self.addCleanup(patched_func.stop)
+ self.addCleanup(patched_get_pathutils.stop)
+
+ self.imagecache = imagecache.ImageCache()
+ self.imagecache._pathutils = mock.MagicMock()
+ self.imagecache._vhdutils = mock.MagicMock()
+
+ def _prepare_get_cached_image(self, path_exists, use_cow):
+ self.instance.image_ref = self.FAKE_IMAGE_REF
+ self.imagecache._pathutils.get_base_vhd_dir.return_value = (
+ self.FAKE_BASE_DIR)
+ self.imagecache._pathutils.exists.return_value = path_exists
+ self.imagecache._vhdutils.get_vhd_format.return_value = (
+ constants.DISK_FORMAT_VHD)
+
+ CONF.set_override('use_cow_images', use_cow)
+
+ expected_path = os.path.join(self.FAKE_BASE_DIR,
+ self.FAKE_IMAGE_REF)
+ expected_vhd_path = "%s.%s" % (expected_path,
+ constants.DISK_FORMAT_VHD.lower())
+ return (expected_path, expected_vhd_path)
+
+ @mock.patch.object(imagecache.images, 'fetch')
+ def test_get_cached_image_with_fetch(self, mock_fetch):
+ (expected_path,
+ expected_vhd_path) = self._prepare_get_cached_image(False, False)
+
+ result = self.imagecache.get_cached_image(self.context, self.instance)
+ self.assertEqual(expected_vhd_path, result)
+
+ mock_fetch.assert_called_once_with(self.context, self.FAKE_IMAGE_REF,
+ expected_path,
+ self.instance['user_id'],
+ self.instance['project_id'])
+ self.imagecache._vhdutils.get_vhd_format.assert_called_once_with(
+ expected_path)
+ self.imagecache._pathutils.rename.assert_called_once_with(
+ expected_path, expected_vhd_path)
+
+ @mock.patch.object(imagecache.images, 'fetch')
+ def test_get_cached_image_with_fetch_exception(self, mock_fetch):
+ (expected_path,
+ expected_vhd_path) = self._prepare_get_cached_image(False, False)
+
+ # The path does not exist until the image has been fetched.
+ self.imagecache._pathutils.exists.side_effect = [False, False, True]
+ mock_fetch.side_effect = exception.InvalidImageRef(
+ image_href=self.FAKE_IMAGE_REF)
+
+ self.assertRaises(exception.InvalidImageRef,
+ self.imagecache.get_cached_image,
+ self.context, self.instance)
+
+ self.imagecache._pathutils.remove.assert_called_once_with(
+ expected_path)
+
+ @mock.patch.object(imagecache.ImageCache, '_resize_and_cache_vhd')
+ def test_get_cached_image_use_cow(self, mock_resize):
+ (expected_path,
+ expected_vhd_path) = self._prepare_get_cached_image(True, True)
+
+ expected_resized_vhd_path = expected_vhd_path + 'x'
+ mock_resize.return_value = expected_resized_vhd_path
+
+ result = self.imagecache.get_cached_image(self.context, self.instance)
+ self.assertEqual(expected_resized_vhd_path, result)
+
+ mock_resize.assert_called_once_with(self.instance, expected_vhd_path)
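
The setUp above uses mock.patch.object(...).start() paired with addCleanup(stop) instead of decorators, which keeps the patch active for every test method and guarantees it is undone even if a later step of setUp fails. The same pattern in isolation, patching a stdlib function purely for illustration:

import os
import unittest

import mock


class PatchInSetUpTestCase(unittest.TestCase):
    def setUp(self):
        super(PatchInSetUpTestCase, self).setUp()
        patcher = mock.patch.object(os, 'getcwd', return_value='/tmp')
        patcher.start()
        # addCleanup fires even when setUp fails after this point or a
        # test errors, so the patch can never leak into other tests.
        self.addCleanup(patcher.stop)

    def test_patched(self):
        self.assertEqual('/tmp', os.getcwd())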
diff --git a/nova/tests/unit/virt/hyperv/test_livemigrationops.py b/nova/tests/unit/virt/hyperv/test_livemigrationops.py
index 5d330e14c1..1a0f24c256 100644
--- a/nova/tests/unit/virt/hyperv/test_livemigrationops.py
+++ b/nova/tests/unit/virt/hyperv/test_livemigrationops.py
@@ -87,6 +87,7 @@ class LiveMigrationOpsTestCase(test_base.HyperVBaseTestCase):
mock_get_cached_image,
mock_ebs_root_in_block_devices):
mock_instance = fake_instance.fake_instance_obj(self.context)
+ mock_instance.image_ref = "fake_image_ref"
mock_ebs_root_in_block_devices.return_value = None
CONF.set_override('use_cow_images', True)
self._livemigrops.pre_live_migration(
diff --git a/nova/tests/unit/virt/ironic/test_client_wrapper.py b/nova/tests/unit/virt/ironic/test_client_wrapper.py
index 025d2616dd..9d20a3eb6b 100644
--- a/nova/tests/unit/virt/ironic/test_client_wrapper.py
+++ b/nova/tests/unit/virt/ironic/test_client_wrapper.py
@@ -28,6 +28,10 @@ CONF = cfg.CONF
FAKE_CLIENT = ironic_utils.FakeClient()
+def get_new_fake_client(*args, **kwargs):
+ return ironic_utils.FakeClient()
+
+
class IronicClientWrapperTestCase(test.NoDBTestCase):
def setUp(self):
@@ -124,3 +128,28 @@ class IronicClientWrapperTestCase(test.NoDBTestCase):
def test__multi_getattr_fail(self):
self.assertRaises(AttributeError, self.ironicclient._multi_getattr,
FAKE_CLIENT, "nonexistent")
+
+ @mock.patch.object(ironic_client, 'get_client')
+ def test__client_is_cached(self, mock_get_client):
+ mock_get_client.side_effect = get_new_fake_client
+ ironicclient = client_wrapper.IronicClientWrapper()
+ first_client = ironicclient._get_client()
+ second_client = ironicclient._get_client()
+ self.assertEqual(id(first_client), id(second_client))
+
+ @mock.patch.object(ironic_client, 'get_client')
+ def test__invalidate_cached_client(self, mock_get_client):
+ mock_get_client.side_effect = get_new_fake_client
+ ironicclient = client_wrapper.IronicClientWrapper()
+ first_client = ironicclient._get_client()
+ ironicclient._invalidate_cached_client()
+ second_client = ironicclient._get_client()
+ self.assertNotEqual(id(first_client), id(second_client))
+
+ @mock.patch.object(ironic_client, 'get_client')
+ def test_call_uses_cached_client(self, mock_get_client):
+ mock_get_client.side_effect = get_new_fake_client
+ ironicclient = client_wrapper.IronicClientWrapper()
+ for n in range(0, 4):
+ ironicclient.call("node.list")
+ self.assertEqual(1, mock_get_client.call_count)
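
These tests set side_effect to a factory rather than a fixed return_value so that every call to the mocked get_client yields a distinct object; only then do the id() comparisons distinguish a cached, reused client from a freshly built one. A toy illustration:

import mock

factory = mock.Mock(side_effect=lambda: object())
first = factory()
second = factory()
assert first is not second       # a fresh object per call...
assert factory.call_count == 2   # ...while each call is still recorded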
diff --git a/nova/tests/unit/virt/ironic/test_driver.py b/nova/tests/unit/virt/ironic/test_driver.py
index 20519e4924..7d4294485a 100644
--- a/nova/tests/unit/virt/ironic/test_driver.py
+++ b/nova/tests/unit/virt/ironic/test_driver.py
@@ -1,4 +1,4 @@
-# Copyright 2014 Red Hat, Inc.
+# Copyright 2015 Red Hat, Inc.
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -79,9 +79,12 @@ FAKE_CLIENT_WRAPPER = FakeClientWrapper()
@mock.patch.object(cw, 'IronicClientWrapper', lambda *_: FAKE_CLIENT_WRAPPER)
class IronicDriverTestCase(test.NoDBTestCase):
+ @mock.patch.object(cw, 'IronicClientWrapper',
+ lambda *_: FAKE_CLIENT_WRAPPER)
def setUp(self):
super(IronicDriverTestCase, self).setUp()
self.flags(**IRONIC_FLAGS)
+
self.driver = ironic_driver.IronicDriver(None)
self.driver.virtapi = fake.FakeVirtAPI()
self.ctx = nova_context.get_admin_context()
diff --git a/nova/tests/unit/virt/libvirt/test_driver.py b/nova/tests/unit/virt/libvirt/test_driver.py
index 31b95fbeeb..c80f6d3d32 100644
--- a/nova/tests/unit/virt/libvirt/test_driver.py
+++ b/nova/tests/unit/virt/libvirt/test_driver.py
@@ -8717,7 +8717,7 @@ class LibvirtConnTestCase(test.NoDBTestCase):
cpu_0.id = 2 * i
cpu_0.socket_id = i
cpu_0.core_id = 0
- cpu_0.sibling = 2 * i
+ cpu_0.siblings = set([2 * i, 2 * i + 1])
mempages_0 = vconfig.LibvirtConfigCapsNUMAPages()
mempages_0.size = 4
mempages_0.total = 1024 * i
@@ -8725,7 +8725,7 @@ class LibvirtConnTestCase(test.NoDBTestCase):
cpu_1.id = 2 * i + 1
cpu_1.socket_id = i
cpu_1.core_id = 1
- cpu_1.sibling = 2 * i + 1
+ cpu_1.siblings = set([2 * i, 2 * i + 1])
mempages_1 = vconfig.LibvirtConfigCapsNUMAPages()
mempages_1.size = 2048
mempages_1.total = 0 + i
@@ -8777,6 +8777,14 @@ class LibvirtConnTestCase(test.NoDBTestCase):
self.assertEqual(2048, got_topo.cells[1].mempages[1].size_kb)
self.assertEqual(1, got_topo.cells[1].mempages[1].total)
+ self.assertEqual(expected_topo_dict, got_topo_dict)
+ self.assertEqual(set([]), got_topo.cells[0].pinned_cpus)
+ self.assertEqual(set([]), got_topo.cells[1].pinned_cpus)
+ self.assertEqual(set([]), got_topo.cells[2].pinned_cpus)
+ self.assertEqual(set([]), got_topo.cells[3].pinned_cpus)
+ self.assertEqual([set([0, 1])], got_topo.cells[0].siblings)
+ self.assertEqual([], got_topo.cells[1].siblings)
+
def test_get_host_numa_topology_empty(self):
caps = vconfig.LibvirtConfigCaps()
caps.host = vconfig.LibvirtConfigCapsHost()
diff --git a/nova/tests/unit/virt/libvirt/test_imagecache.py b/nova/tests/unit/virt/libvirt/test_imagecache.py
index 5eb80b5681..d2074ca1a4 100644
--- a/nova/tests/unit/virt/libvirt/test_imagecache.py
+++ b/nova/tests/unit/virt/libvirt/test_imagecache.py
@@ -334,7 +334,7 @@ class ImageCacheManagerTestCase(test.NoDBTestCase):
(base_file3, False, True)])
@contextlib.contextmanager
- def _make_base_file(self, checksum=True):
+ def _make_base_file(self, checksum=True, lock=True):
"""Make a base file for testing."""
with utils.tempdir() as tmpdir:
@@ -347,6 +347,15 @@ class ImageCacheManagerTestCase(test.NoDBTestCase):
base_file = open(fname, 'w')
base_file.write('data')
base_file.close()
+
+ if lock:
+ lockdir = os.path.join(tmpdir, 'locks')
+ lockname = os.path.join(lockdir, 'nova-aaa')
+ os.mkdir(lockdir)
+ lock_file = open(lockname, 'w')
+ lock_file.write('data')
+ lock_file.close()
+
base_file = open(fname, 'r')
if checksum:
@@ -361,9 +370,14 @@ class ImageCacheManagerTestCase(test.NoDBTestCase):
image_cache_manager._remove_base_file(fname)
info_fname = imagecache.get_info_filename(fname)
+ lock_name = 'nova-' + os.path.split(fname)[-1]
+ lock_dir = os.path.join(CONF.instances_path, 'locks')
+ lock_file = os.path.join(lock_dir, lock_name)
+
# Files are initially too new to delete
self.assertTrue(os.path.exists(fname))
self.assertTrue(os.path.exists(info_fname))
+ self.assertTrue(os.path.exists(lock_file))
# Old files get cleaned up though
os.utime(fname, (-1, time.time() - 3601))
@@ -371,6 +385,7 @@ class ImageCacheManagerTestCase(test.NoDBTestCase):
self.assertFalse(os.path.exists(fname))
self.assertFalse(os.path.exists(info_fname))
+ self.assertFalse(os.path.exists(lock_file))
def test_remove_base_file_original(self):
with self._make_base_file() as fname:
diff --git a/nova/virt/hyperv/imagecache.py b/nova/virt/hyperv/imagecache.py
index 36673ac0fb..1db61e37ee 100644
--- a/nova/virt/hyperv/imagecache.py
+++ b/nova/virt/hyperv/imagecache.py
@@ -96,7 +96,7 @@ class ImageCache(object):
return resized_vhd_path
def get_cached_image(self, context, instance):
- image_id = instance['image_ref']
+ image_id = instance.image_ref
base_vhd_dir = self._pathutils.get_base_vhd_dir()
base_vhd_path = os.path.join(base_vhd_dir, image_id)
@@ -113,8 +113,8 @@ class ImageCache(object):
if not vhd_path:
try:
images.fetch(context, image_id, base_vhd_path,
- instance['user_id'],
- instance['project_id'])
+ instance.user_id,
+ instance.project_id)
format_ext = self._vhdutils.get_vhd_format(base_vhd_path)
vhd_path = base_vhd_path + '.' + format_ext.lower()
diff --git a/nova/virt/hyperv/livemigrationops.py b/nova/virt/hyperv/livemigrationops.py
index 8cade154c0..72fef72e96 100644
--- a/nova/virt/hyperv/livemigrationops.py
+++ b/nova/virt/hyperv/livemigrationops.py
@@ -87,7 +87,7 @@ class LiveMigrationOps(object):
if CONF.use_cow_images:
boot_from_volume = self._volumeops.ebs_root_in_block_devices(
block_device_info)
- if not boot_from_volume:
+ if not boot_from_volume and instance.image_ref:
self._imagecache.get_cached_image(context, instance)
self._volumeops.initialize_volumes_connection(block_device_info)
diff --git a/nova/virt/ironic/client_wrapper.py b/nova/virt/ironic/client_wrapper.py
index bfa76bcd86..f74e0e2b17 100644
--- a/nova/virt/ironic/client_wrapper.py
+++ b/nova/virt/ironic/client_wrapper.py
@@ -50,10 +50,18 @@ class IronicClientWrapper(object):
if not hasattr(ironic, 'client'):
ironic.client = importutils.import_module(
'ironicclient.client')
+ self._cached_client = None
+
+ def _invalidate_cached_client(self):
+ """Tell the wrapper to invalidate the cached ironic-client."""
+ self._cached_client = None
def _get_client(self):
- # TODO(deva): save and reuse existing client & auth token
- # until it expires or is no longer valid
+ # If we've already constructed a valid, authed client, just return
+ # that.
+ if self._cached_client is not None:
+ return self._cached_client
+
auth_token = CONF.ironic.admin_auth_token
if auth_token is None:
kwargs = {'os_username': CONF.ironic.admin_username,
@@ -69,6 +77,10 @@ class IronicClientWrapper(object):
try:
cli = ironic.client.get_client(CONF.ironic.api_version, **kwargs)
+ # Cache the client so we don't have to reconstruct and
+ # reauthenticate it every time we need it.
+ self._cached_client = cli
+
except ironic.exc.Unauthorized:
msg = _("Unable to authenticate Ironic client.")
LOG.error(msg)
@@ -105,16 +117,28 @@ class IronicClientWrapper(object):
for attempt in range(1, num_attempts + 1):
client = self._get_client()
+
try:
return self._multi_getattr(client, method)(*args, **kwargs)
+ except ironic.exc.Unauthorized:
+ # The authorization token of the cached ironic-client has most
+ # likely expired, so invalidate the cached client; the next
+ # attempt will then start with a freshly authenticated one.
+ self._invalidate_cached_client()
+ LOG.debug("The Ironic client became unauthorized. "
+ "Will attempt to reauthorize and try again.")
except retry_excs:
- msg = (_("Error contacting Ironic server for '%(method)s'. "
- "Attempt %(attempt)d of %(total)d")
- % {'method': method,
- 'attempt': attempt,
- 'total': num_attempts})
- if attempt == num_attempts:
- LOG.error(msg)
- raise exception.NovaException(msg)
- LOG.warn(msg)
- time.sleep(CONF.ironic.api_retry_interval)
+ pass
+
+ # We want to perform this logic for all exception cases listed
+ # above.
+ msg = (_("Error contacting Ironic server for "
+ "'%(method)s'. Attempt %(attempt)d of %(total)d") %
+ {'method': method,
+ 'attempt': attempt,
+ 'total': num_attempts})
+ if attempt == num_attempts:
+ LOG.error(msg)
+ raise exception.NovaException(msg)
+ LOG.warning(msg)
+ time.sleep(CONF.ironic.api_retry_interval)
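
The rewritten call() above caches the client, drops the cache on Unauthorized, and otherwise retries with a fixed sleep until the attempts run out. A condensed, self-contained sketch of that control flow, with local exception classes standing in for the ironic client's and plain constructor arguments standing in for nova's config options:

import time


class Unauthorized(Exception):
    """Stand-in for the ironic client's Unauthorized exception."""


class ConnectionFailed(Exception):
    """Stand-in for the retryable connection errors."""


class CachedClientSketch(object):
    def __init__(self, build_client, num_attempts=3, retry_interval=1):
        self._build_client = build_client  # callable returning a new client
        self._cached_client = None
        self._num_attempts = num_attempts
        self._retry_interval = retry_interval

    def _get_client(self):
        if self._cached_client is None:
            self._cached_client = self._build_client()
        return self._cached_client

    def call(self, method, *args, **kwargs):
        for attempt in range(1, self._num_attempts + 1):
            client = self._get_client()
            try:
                return getattr(client, method)(*args, **kwargs)
            except Unauthorized:
                # The auth token most likely expired: drop the cached
                # client so the next pass builds a fresh one.
                self._cached_client = None
            except ConnectionFailed:
                pass
            # Reached for both exception cases above.
            if attempt == self._num_attempts:
                raise RuntimeError('giving up on %r after %d attempts'
                                   % (method, attempt))
            time.sleep(self._retry_interval)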
diff --git a/nova/virt/ironic/driver.py b/nova/virt/ironic/driver.py
index 6fb464bdc8..70b8585270 100644
--- a/nova/virt/ironic/driver.py
+++ b/nova/virt/ironic/driver.py
@@ -180,6 +180,8 @@ class IronicDriver(virt_driver.ComputeDriver):
logger = py_logging.getLogger('ironicclient')
logger.setLevel(level)
+ self.ironicclient = client_wrapper.IronicClientWrapper()
+
def _node_resources_unavailable(self, node_obj):
"""Determine whether the node's resources are in an acceptable state.
@@ -288,7 +290,6 @@ class IronicDriver(virt_driver.ComputeDriver):
def _add_driver_fields(self, node, instance, image_meta, flavor,
preserve_ephemeral=None):
- ironicclient = client_wrapper.IronicClientWrapper()
patch = patcher.create(node).get_deploy_patch(instance,
image_meta,
flavor,
@@ -298,7 +299,7 @@ class IronicDriver(virt_driver.ComputeDriver):
patch.append({'path': '/instance_uuid', 'op': 'add',
'value': instance.uuid})
try:
- ironicclient.call('node.update', node.uuid, patch)
+ self.ironicclient.call('node.update', node.uuid, patch)
except ironic.exc.BadRequest:
msg = (_("Failed to add deploy parameters on node %(node)s "
"when provisioning the instance %(instance)s")
@@ -308,7 +309,6 @@ class IronicDriver(virt_driver.ComputeDriver):
def _cleanup_deploy(self, context, node, instance, network_info,
flavor=None):
- ironicclient = client_wrapper.IronicClientWrapper()
if flavor is None:
# TODO(mrda): It would be better to use instance.get_flavor() here
# but right now that doesn't include extra_specs which are required
@@ -322,7 +322,7 @@ class IronicDriver(virt_driver.ComputeDriver):
# Unassociate the node
patch.append({'op': 'remove', 'path': '/instance_uuid'})
try:
- ironicclient.call('node.update', node.uuid, patch)
+ self.ironicclient.call('node.update', node.uuid, patch)
except ironic.exc.BadRequest:
LOG.error(_LE("Failed to clean up the parameters on node %(node)s "
"when unprovisioning the instance %(instance)s"),
@@ -393,9 +393,8 @@ class IronicDriver(virt_driver.ComputeDriver):
:returns: True if the instance exists. False if not.
"""
- ironicclient = client_wrapper.IronicClientWrapper()
try:
- _validate_instance_and_node(ironicclient, instance)
+ _validate_instance_and_node(self.ironicclient, instance)
return True
except exception.InstanceNotFound:
return False
@@ -406,10 +405,10 @@ class IronicDriver(virt_driver.ComputeDriver):
:returns: a list of instance names.
"""
- ironicclient = client_wrapper.IronicClientWrapper()
# NOTE(lucasagomes): limit == 0 is an indicator to continue
# pagination until there're no more values to be returned.
- node_list = ironicclient.call("node.list", associated=True, limit=0)
+ node_list = self.ironicclient.call("node.list", associated=True,
+ limit=0)
context = nova_context.get_admin_context()
return [objects.Instance.get_by_uuid(context,
i.instance_uuid).name
@@ -421,10 +420,10 @@ class IronicDriver(virt_driver.ComputeDriver):
:returns: a list of instance UUIDs.
"""
- ironicclient = client_wrapper.IronicClientWrapper()
# NOTE(lucasagomes): limit == 0 is an indicator to continue
# pagination until there're no more values to be returned.
- node_list = ironicclient.call("node.list", associated=True, limit=0)
+ node_list = self.ironicclient.call("node.list", associated=True,
+ limit=0)
return list(n.instance_uuid for n in node_list)
def node_is_available(self, nodename):
@@ -448,18 +447,16 @@ class IronicDriver(virt_driver.ComputeDriver):
# NOTE(comstud): Fallback and check Ironic. This case should be
# rare.
- ironicclient = client_wrapper.IronicClientWrapper()
try:
- ironicclient.call("node.get", nodename)
+ self.ironicclient.call("node.get", nodename)
return True
except ironic.exc.NotFound:
return False
def _refresh_cache(self):
- ironicclient = client_wrapper.IronicClientWrapper()
# NOTE(lucasagomes): limit == 0 is an indicator to continue
# pagination until there're no more values to be returned.
- node_list = ironicclient.call('node.list', detail=True, limit=0)
+ node_list = self.ironicclient.call('node.list', detail=True, limit=0)
node_cache = {}
for node in node_list:
node_cache[node.uuid] = node
@@ -512,8 +509,7 @@ class IronicDriver(virt_driver.ComputeDriver):
else:
LOG.debug("Node %(node)s not found in cache, age: %(age)s",
{'node': nodename, 'age': cache_age})
- ironicclient = client_wrapper.IronicClientWrapper()
- node = ironicclient.call("node.get", nodename)
+ node = self.ironicclient.call("node.get", nodename)
return self._node_resource(node)
def get_info(self, instance):
@@ -525,9 +521,8 @@ class IronicDriver(virt_driver.ComputeDriver):
:param instance: the instance object.
:returns: a InstanceInfo object
"""
- ironicclient = client_wrapper.IronicClientWrapper()
try:
- node = _validate_instance_and_node(ironicclient, instance)
+ node = _validate_instance_and_node(self.ironicclient, instance)
except exception.InstanceNotFound:
return hardware.InstanceInfo(
state=map_power_state(ironic_states.NOSTATE))
@@ -570,12 +565,11 @@ class IronicDriver(virt_driver.ComputeDriver):
None means 'no constraints', a set means 'these and only these
MAC addresses'.
"""
- ironicclient = client_wrapper.IronicClientWrapper()
try:
- node = ironicclient.call("node.get", instance.node)
+ node = self.ironicclient.call("node.get", instance.node)
except ironic.exc.NotFound:
return None
- ports = ironicclient.call("node.list_ports", node.uuid)
+ ports = self.ironicclient.call("node.list_ports", node.uuid)
return set([p.address for p in ports])
def spawn(self, context, instance, image_meta, injected_files,
@@ -604,8 +598,7 @@ class IronicDriver(virt_driver.ComputeDriver):
_("Ironic node uuid not supplied to "
"driver for instance %s.") % instance.uuid)
- ironicclient = client_wrapper.IronicClientWrapper()
- node = ironicclient.call("node.get", node_uuid)
+ node = self.ironicclient.call("node.get", node_uuid)
flavor = objects.Flavor.get_by_id(context,
instance.instance_type_id)
@@ -619,7 +612,7 @@ class IronicDriver(virt_driver.ComputeDriver):
instance.save()
# validate we are ready to do the deploy
- validate_chk = ironicclient.call("node.validate", node_uuid)
+ validate_chk = self.ironicclient.call("node.validate", node_uuid)
if not validate_chk.deploy or not validate_chk.power:
# something is wrong. undo what we have done
self._cleanup_deploy(context, node, instance, network_info,
@@ -646,7 +639,7 @@ class IronicDriver(virt_driver.ComputeDriver):
# trigger the node deploy
try:
- ironicclient.call("node.set_provision_state", node_uuid,
+ self.ironicclient.call("node.set_provision_state", node_uuid,
ironic_states.ACTIVE)
except Exception as e:
with excutils.save_and_reraise_exception():
@@ -659,7 +652,8 @@ class IronicDriver(virt_driver.ComputeDriver):
flavor=flavor)
timer = loopingcall.FixedIntervalLoopingCall(self._wait_for_active,
- ironicclient, instance)
+ self.ironicclient,
+ instance)
try:
timer.start(interval=CONF.ironic.api_retry_interval).wait()
except Exception:
@@ -725,9 +719,8 @@ class IronicDriver(virt_driver.ComputeDriver):
:param migrate_data: implementation specific params.
Ignored by this driver.
"""
- ironicclient = client_wrapper.IronicClientWrapper()
try:
- node = _validate_instance_and_node(ironicclient, instance)
+ node = _validate_instance_and_node(self.ironicclient, instance)
except exception.InstanceNotFound:
LOG.warning(_LW("Destroy called on non-existing instance %s."),
instance.uuid)
@@ -741,7 +734,7 @@ class IronicDriver(virt_driver.ComputeDriver):
ironic_states.DEPLOYFAIL,
ironic_states.ERROR,
ironic_states.DEPLOYWAIT):
- self._unprovision(ironicclient, instance, node)
+ self._unprovision(self.ironicclient, instance, node)
self._cleanup_deploy(context, node, instance, network_info)
@@ -766,13 +759,12 @@ class IronicDriver(virt_driver.ComputeDriver):
encountered. Ignored by this driver.
"""
- ironicclient = client_wrapper.IronicClientWrapper()
- node = _validate_instance_and_node(ironicclient, instance)
- ironicclient.call("node.set_power_state", node.uuid, 'reboot')
+ node = _validate_instance_and_node(self.ironicclient, instance)
+ self.ironicclient.call("node.set_power_state", node.uuid, 'reboot')
timer = loopingcall.FixedIntervalLoopingCall(
self._wait_for_power_state,
- ironicclient, instance, 'reboot')
+ self.ironicclient, instance, 'reboot')
timer.start(interval=CONF.ironic.api_retry_interval).wait()
def power_off(self, instance, timeout=0, retry_interval=0):
@@ -789,13 +781,12 @@ class IronicDriver(virt_driver.ComputeDriver):
:param retry_interval: How often to signal node while waiting
for it to shutdown. Ignored by this driver.
"""
- ironicclient = client_wrapper.IronicClientWrapper()
- node = _validate_instance_and_node(ironicclient, instance)
- ironicclient.call("node.set_power_state", node.uuid, 'off')
+ node = _validate_instance_and_node(self.ironicclient, instance)
+ self.ironicclient.call("node.set_power_state", node.uuid, 'off')
timer = loopingcall.FixedIntervalLoopingCall(
self._wait_for_power_state,
- ironicclient, instance, 'power off')
+ self.ironicclient, instance, 'power off')
timer.start(interval=CONF.ironic.api_retry_interval).wait()
def power_on(self, context, instance, network_info,
@@ -813,13 +804,12 @@ class IronicDriver(virt_driver.ComputeDriver):
information. Ignored by this driver.
"""
- ironicclient = client_wrapper.IronicClientWrapper()
- node = _validate_instance_and_node(ironicclient, instance)
- ironicclient.call("node.set_power_state", node.uuid, 'on')
+ node = _validate_instance_and_node(self.ironicclient, instance)
+ self.ironicclient.call("node.set_power_state", node.uuid, 'on')
timer = loopingcall.FixedIntervalLoopingCall(
self._wait_for_power_state,
- ironicclient, instance, 'power on')
+ self.ironicclient, instance, 'power on')
timer.start(interval=CONF.ironic.api_retry_interval).wait()
def refresh_security_group_rules(self, security_group_id):
@@ -888,8 +878,7 @@ class IronicDriver(virt_driver.ComputeDriver):
# start by ensuring the ports are clear
self._unplug_vifs(node, instance, network_info)
- ironicclient = client_wrapper.IronicClientWrapper()
- ports = ironicclient.call("node.list_ports", node.uuid)
+ ports = self.ironicclient.call("node.list_ports", node.uuid)
if len(network_info) > len(ports):
raise exception.VirtualInterfacePlugException(_(
@@ -908,7 +897,7 @@ class IronicDriver(virt_driver.ComputeDriver):
patch = [{'op': 'add',
'path': '/extra/vif_port_id',
'value': port_id}]
- ironicclient.call("port.update", pif.uuid, patch)
+ self.ironicclient.call("port.update", pif.uuid, patch)
def _unplug_vifs(self, node, instance, network_info):
# NOTE(PhilDay): Accessing network_info will block if the thread
@@ -919,8 +908,7 @@ class IronicDriver(virt_driver.ComputeDriver):
{'uuid': instance.uuid,
'network_info': network_info_str})
if network_info and len(network_info) > 0:
- ironicclient = client_wrapper.IronicClientWrapper()
- ports = ironicclient.call("node.list_ports", node.uuid,
+ ports = self.ironicclient.call("node.list_ports", node.uuid,
detail=True)
# not needed if no vif are defined
@@ -929,7 +917,7 @@ class IronicDriver(virt_driver.ComputeDriver):
# we can not attach a dict directly
patch = [{'op': 'remove', 'path': '/extra/vif_port_id'}]
try:
- ironicclient.call("port.update", pif.uuid, patch)
+ self.ironicclient.call("port.update", pif.uuid, patch)
except ironic.exc.BadRequest:
pass
@@ -940,8 +928,7 @@ class IronicDriver(virt_driver.ComputeDriver):
:param network_info: Instance network information.
"""
- ironicclient = client_wrapper.IronicClientWrapper()
- node = ironicclient.call("node.get", instance.node)
+ node = self.ironicclient.call("node.get", instance.node)
self._plug_vifs(node, instance, network_info)
def unplug_vifs(self, instance, network_info):
@@ -951,8 +938,7 @@ class IronicDriver(virt_driver.ComputeDriver):
:param network_info: Instance network information.
"""
- ironicclient = client_wrapper.IronicClientWrapper()
- node = ironicclient.call("node.get", instance.node)
+ node = self.ironicclient.call("node.get", instance.node)
self._unplug_vifs(node, instance, network_info)
def rebuild(self, context, instance, image_meta, injected_files,
@@ -1002,8 +988,7 @@ class IronicDriver(virt_driver.ComputeDriver):
instance.save(expected_task_state=[task_states.REBUILDING])
node_uuid = instance.node
- ironicclient = client_wrapper.IronicClientWrapper()
- node = ironicclient.call("node.get", node_uuid)
+ node = self.ironicclient.call("node.get", node_uuid)
flavor = objects.Flavor.get_by_id(context,
instance.instance_type_id)
@@ -1012,7 +997,7 @@ class IronicDriver(virt_driver.ComputeDriver):
# Trigger the node rebuild/redeploy.
try:
- ironicclient.call("node.set_provision_state",
+ self.ironicclient.call("node.set_provision_state",
node_uuid, ironic_states.REBUILD)
except (exception.NovaException, # Retry failed
ironic.exc.InternalServerError, # Validations
@@ -1025,5 +1010,6 @@ class IronicDriver(virt_driver.ComputeDriver):
# Although the target provision state is REBUILD, it will actually go
# to ACTIVE once the redeploy is finished.
timer = loopingcall.FixedIntervalLoopingCall(self._wait_for_active,
- ironicclient, instance)
+ self.ironicclient,
+ instance)
timer.start(interval=CONF.ironic.api_retry_interval).wait()
diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py
index 0d9d1eeb69..28c6ae3818 100644
--- a/nova/virt/libvirt/driver.py
+++ b/nova/virt/libvirt/driver.py
@@ -3726,6 +3726,56 @@ class LibvirtDriver(driver.ComputeDriver):
return membacking
+ def _get_flavor(self, ctxt, instance, flavor):
+ if flavor is not None:
+ return flavor
+ with utils.temporary_mutation(ctxt, read_deleted="yes"):
+ return objects.Flavor.get_by_id(ctxt, instance['instance_type_id'])
+
+ def _configure_guest_by_virt_type(self, guest, virt_type, caps, instance,
+ image_meta, root_device_name):
+ if virt_type == "xen":
+ if guest.os_type == vm_mode.HVM:
+ guest.os_loader = CONF.libvirt.xen_hvmloader_path
+ elif virt_type in ("kvm", "qemu"):
+ if caps.host.cpu.arch in (arch.I686, arch.X86_64):
+ guest.sysinfo = self._get_guest_config_sysinfo(instance)
+ guest.os_smbios = vconfig.LibvirtConfigGuestSMBIOS()
+ guest.os_mach_type = self._get_machine_type(image_meta, caps)
+ elif virt_type == "lxc":
+ guest.os_init_path = "/sbin/init"
+ guest.os_cmdline = CONSOLE
+ elif virt_type == "uml":
+ guest.os_kernel = "/usr/bin/linux"
+ guest.os_root = root_device_name
+
+ def _conf_non_lxc_uml(self, virt_type, guest, root_device_name, rescue,
+ instance, inst_path, image_meta, disk_info):
+ if rescue:
+ self._set_guest_for_rescue(rescue, guest, inst_path, virt_type,
+ root_device_name)
+ elif instance['kernel_id']:
+ self._set_guest_for_inst_kernel(instance, guest, inst_path,
+ virt_type, root_device_name,
+ image_meta)
+ else:
+ guest.os_boot_dev = blockinfo.get_boot_order(disk_info)
+
+ def _create_consoles(self, virt_type, guest, instance, flavor, image_meta,
+ caps):
+ if virt_type in ("qemu", "kvm"):
+ # Create the serial console char devices
+ self._create_serial_console_devices(guest, instance, flavor,
+ image_meta)
+ if caps.host.cpu.arch in (arch.S390, arch.S390X):
+ consolepty = vconfig.LibvirtConfigGuestConsole()
+ consolepty.target_type = "sclp"
+ else:
+ consolepty = vconfig.LibvirtConfigGuestSerial()
+ else:
+ consolepty = vconfig.LibvirtConfigGuestConsole()
+ return consolepty
+
def _get_guest_config(self, instance, network_info, image_meta,
disk_info, rescue=None, block_device_info=None,
context=None, flavor=None):
@@ -3736,10 +3786,7 @@ class LibvirtDriver(driver.ComputeDriver):
'kernel_id' if a kernel is needed for the rescue image.
"""
ctxt = context or nova_context.get_admin_context()
- if flavor is None:
- with utils.temporary_mutation(ctxt, read_deleted="yes"):
- flavor = objects.Flavor.get_by_id(ctxt,
- instance['instance_type_id'])
+ flavor = self._get_flavor(ctxt, instance, flavor)
inst_path = libvirt_utils.get_instance_path(instance)
disk_mapping = disk_info['mapping']
img_meta_prop = image_meta.get('properties', {}) if image_meta else {}
@@ -3785,38 +3832,15 @@ class LibvirtDriver(driver.ComputeDriver):
# for nova.api.ec2.cloud.CloudController.get_metadata()
instance.root_device_name = root_device_name
- guest.os_type = vm_mode.get_from_instance(instance)
-
- if guest.os_type is None:
- guest.os_type = self._get_guest_os_type(virt_type)
+ guest.os_type = (vm_mode.get_from_instance(instance) or
+ self._get_guest_os_type(virt_type))
caps = self._host.get_capabilities()
- if virt_type == "xen":
- if guest.os_type == vm_mode.HVM:
- guest.os_loader = CONF.libvirt.xen_hvmloader_path
-
- if virt_type in ("kvm", "qemu"):
- if caps.host.cpu.arch in (arch.I686, arch.X86_64):
- guest.sysinfo = self._get_guest_config_sysinfo(instance)
- guest.os_smbios = vconfig.LibvirtConfigGuestSMBIOS()
- guest.os_mach_type = self._get_machine_type(image_meta, caps)
-
- if virt_type == "lxc":
- guest.os_init_path = "/sbin/init"
- guest.os_cmdline = CONSOLE
- elif virt_type == "uml":
- guest.os_kernel = "/usr/bin/linux"
- guest.os_root = root_device_name
- else:
- if rescue:
- self._set_guest_for_rescue(rescue, guest, inst_path, virt_type,
- root_device_name)
- elif instance['kernel_id']:
- self._set_guest_for_inst_kernel(instance, guest, inst_path,
- virt_type, root_device_name,
- image_meta)
- else:
- guest.os_boot_dev = blockinfo.get_boot_order(disk_info)
+ self._configure_guest_by_virt_type(guest, virt_type, caps, instance,
+ image_meta, root_device_name)
+ if virt_type not in ('lxc', 'uml'):
+ self._conf_non_lxc_uml(virt_type, guest, root_device_name, rescue,
+ instance, inst_path, image_meta, disk_info)
self._set_features(guest, instance.os_type, caps, virt_type)
self._set_clock(guest, instance.os_type, image_meta, virt_type)
@@ -3833,18 +3857,8 @@ class LibvirtDriver(driver.ComputeDriver):
flavor, virt_type)
guest.add_device(config)
- if virt_type in ("qemu", "kvm"):
- # Create the serial console char devices
- self._create_serial_console_devices(guest, instance, flavor,
- image_meta)
- if caps.host.cpu.arch in (arch.S390, arch.S390X):
- consolepty = vconfig.LibvirtConfigGuestConsole()
- consolepty.target_type = "sclp"
- else:
- consolepty = vconfig.LibvirtConfigGuestSerial()
- else:
- consolepty = vconfig.LibvirtConfigGuestConsole()
-
+ consolepty = self._create_consoles(virt_type, guest, instance, flavor,
+ image_meta, caps)
consolepty.type = "pty"
guest.add_device(consolepty)
@@ -3868,8 +3882,8 @@ class LibvirtDriver(driver.ComputeDriver):
tablet.bus = "usb"
guest.add_device(tablet)
- if CONF.spice.enabled and CONF.spice.agent_enabled and \
- virt_type not in ('lxc', 'uml', 'xen'):
+ if (CONF.spice.enabled and CONF.spice.agent_enabled and
+ virt_type not in ('lxc', 'uml', 'xen')):
channel = vconfig.LibvirtConfigGuestChannel()
channel.target_name = "com.redhat.spice.0"
guest.add_device(channel)
@@ -3888,8 +3902,8 @@ class LibvirtDriver(driver.ComputeDriver):
guest.add_device(graphics)
add_video_driver = True
- if CONF.spice.enabled and \
- virt_type not in ('lxc', 'uml', 'xen'):
+ if (CONF.spice.enabled and
+ virt_type not in ('lxc', 'uml', 'xen')):
graphics = vconfig.LibvirtConfigGuestGraphics()
graphics.type = "spice"
graphics.keymap = CONF.spice.keymap
@@ -4588,25 +4602,35 @@ class LibvirtDriver(driver.ComputeDriver):
if topology is None or not topology.cells:
return
- topology = objects.NUMATopology(
- cells=[objects.NUMACell(
- id=cell.id,
- cpuset=set(cpu.id for cpu in cell.cpus),
- memory=cell.memory / units.Ki,
- cpu_usage=0, memory_usage=0,
- mempages=[
- objects.NUMAPagesTopology(
- size_kb=pages.size,
- total=pages.total,
- used=0)
- for pages in cell.mempages])
- for cell in topology.cells])
-
+ cells = []
allowed_cpus = hardware.get_vcpu_pin_set()
- if allowed_cpus:
- for cell in topology.cells:
- cell.cpuset &= allowed_cpus
- return topology
+
+ for cell in topology.cells:
+ cpuset = set(cpu.id for cpu in cell.cpus)
+ siblings = sorted(map(set,
+ set(tuple(cpu.siblings)
+ if cpu.siblings else ()
+ for cpu in cell.cpus)
+ ))
+ if allowed_cpus:
+ cpuset &= allowed_cpus
+ siblings = [sib & allowed_cpus for sib in siblings]
+ # Filter out singles and empty sibling sets that may be left
+ siblings = [sib for sib in siblings if len(sib) > 1]
+
+ cell = objects.NUMACell(id=cell.id, cpuset=cpuset,
+ memory=cell.memory / units.Ki,
+ cpu_usage=0, memory_usage=0,
+ siblings=siblings,
+ mempages=[
+ objects.NUMAPagesTopology(
+ size_kb=pages.size,
+ total=pages.total,
+ used=0)
+ for pages in cell.mempages])
+ cells.append(cell)
+
+ return objects.NUMATopology(cells=cells)
def get_all_volume_usage(self, context, compute_host_bdms):
"""Return usage info for volumes attached to vms on
diff --git a/nova/virt/libvirt/imagecache.py b/nova/virt/libvirt/imagecache.py
index a7e982bec7..f4ff2d9e4d 100644
--- a/nova/virt/libvirt/imagecache.py
+++ b/nova/virt/libvirt/imagecache.py
@@ -27,6 +27,7 @@ import time
from oslo.config import cfg
from oslo.serialization import jsonutils
+from oslo_concurrency import lockutils
from oslo_concurrency import processutils
from nova.i18n import _LE
@@ -440,7 +441,8 @@ class ImageCacheManager(imagecache.ImageCacheManager):
return (True, age)
- def _remove_old_enough_file(self, base_file, maxage, remove_sig=True):
+ def _remove_old_enough_file(self, base_file, maxage, remove_sig=True,
+ remove_lock=True):
"""Remove a single swap or base file if it is old enough."""
exists, age = self._get_age_of_file(base_file)
if not exists:
@@ -463,11 +465,28 @@ class ImageCacheManager(imagecache.ImageCacheManager):
{'base_file': base_file,
'error': e})
+ if remove_lock:
+ try:
+ # NOTE(jichenjc): The lock file is created the first time
+ # the image file is accessed. Its name is derived from the
+ # original file name and looks like
+ # nova-9e881789030568a317fad9daae82c5b1c65e0d4a or
+ # nova-03d8e206-6500-4d91-b47d-ee74897f9b4e.
+ lock_file = os.path.split(base_file)[-1]
+ lockutils.remove_external_lock_file(lock_file,
+ lock_file_prefix='nova-', lock_path=self.lock_path)
+ except OSError as e:
+ LOG.debug('Failed to remove %(lock_file)s, '
+ 'error was %(error)s',
+ {'lock_file': lock_file,
+ 'error': e})
+
def _remove_swap_file(self, base_file):
"""Remove a single swap base file if it is old enough."""
maxage = CONF.remove_unused_original_minimum_age_seconds
- self._remove_old_enough_file(base_file, maxage, remove_sig=False)
+ self._remove_old_enough_file(base_file, maxage, remove_sig=False,
+ remove_lock=False)
def _remove_base_file(self, base_file):
"""Remove a single base file if it is old enough."""