-rw-r--r--  etc/nova/nova.conf.sample  | 4
-rw-r--r--  etc/nova/rootwrap.d/compute.filters  | 6
-rw-r--r--  nova/api/ec2/cloud.py  | 12
-rw-r--r--  nova/api/metadata/handler.py  | 5
-rw-r--r--  nova/api/openstack/compute/contrib/aggregates.py  | 2
-rw-r--r--  nova/api/openstack/compute/contrib/floating_ips.py  | 18
-rw-r--r--  nova/api/openstack/compute/plugins/v3/aggregates.py  | 4
-rw-r--r--  nova/api/openstack/compute/views/servers.py  | 5
-rw-r--r--  nova/cells/messaging.py  | 2
-rw-r--r--  nova/compute/api.py  | 6
-rw-r--r--  nova/compute/manager.py  | 139
-rw-r--r--  nova/compute/rpcapi.py  | 140
-rw-r--r--  nova/db/sqlalchemy/api.py  | 67
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/147_no_service_zones.py  | 16
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/216_sync_quota_usages.py  | 8
-rw-r--r--  nova/exception.py  | 18
-rw-r--r--  nova/network/api.py  | 26
-rw-r--r--  nova/network/linux_net.py  | 11
-rw-r--r--  nova/network/neutronv2/__init__.py  | 61
-rw-r--r--  nova/network/neutronv2/api.py  | 45
-rw-r--r--  nova/network/security_group/neutron_driver.py  | 10
-rw-r--r--  nova/openstack/common/processutils.py  | 22
-rw-r--r--  nova/openstack/common/rpc/impl_qpid.py  | 18
-rw-r--r--  nova/openstack/common/strutils.py  | 74
-rw-r--r--  nova/tests/api/ec2/test_api.py  | 6
-rw-r--r--  nova/tests/api/ec2/test_cloud.py  | 74
-rw-r--r--  nova/tests/api/openstack/compute/contrib/test_aggregates.py  | 11
-rw-r--r--  nova/tests/api/openstack/compute/contrib/test_floating_ips.py  | 53
-rw-r--r--  nova/tests/api/openstack/compute/contrib/test_neutron_security_groups.py  | 16
-rw-r--r--  nova/tests/api/openstack/compute/contrib/test_security_groups.py  | 19
-rw-r--r--  nova/tests/api/openstack/compute/plugins/v3/test_aggregates.py  | 11
-rw-r--r--  nova/tests/api/openstack/compute/plugins/v3/test_servers.py  | 18
-rw-r--r--  nova/tests/api/openstack/compute/test_servers.py  | 21
-rw-r--r--  nova/tests/cells/test_cells_messaging.py  | 4
-rw-r--r--  nova/tests/compute/test_compute.py  | 67
-rw-r--r--  nova/tests/compute/test_compute_api.py  | 1
-rw-r--r--  nova/tests/compute/test_compute_mgr.py  | 8
-rw-r--r--  nova/tests/compute/test_shelve.py  | 20
-rw-r--r--  nova/tests/db/test_db_api.py  | 29
-rw-r--r--  nova/tests/db/test_migrations.py  | 20
-rw-r--r--  nova/tests/network/security_group/test_neutron_driver.py  | 43
-rw-r--r--  nova/tests/network/test_api.py  | 64
-rw-r--r--  nova/tests/network/test_linux_net.py  | 7
-rw-r--r--  nova/tests/network/test_neutronv2.py  | 232
-rw-r--r--  nova/tests/test_quota.py  | 14
-rw-r--r--  nova/tests/test_utils.py  | 7
-rw-r--r--  nova/tests/virt/docker/test_driver.py  | 14
-rwxr-xr-x [-rw-r--r--]  nova/tests/virt/hyperv/test_hypervapi.py  | 53
-rw-r--r--  nova/tests/virt/hyperv/test_vhdutils.py  | 44
-rw-r--r--  nova/tests/virt/hyperv/test_vhdutilsv2.py  | 84
-rw-r--r--  nova/tests/virt/hyperv/test_vmutils.py  | 43
-rw-r--r--  nova/tests/virt/hyperv/test_vmutilsv2.py  | 45
-rw-r--r--  nova/tests/virt/libvirt/fake_imagebackend.py  | 4
-rw-r--r--  nova/tests/virt/libvirt/fake_libvirt_utils.py  | 16
-rw-r--r--  nova/tests/virt/libvirt/fakelibvirt.py  | 3
-rw-r--r--  nova/tests/virt/libvirt/test_imagebackend.py  | 384
-rw-r--r--  nova/tests/virt/libvirt/test_libvirt.py  | 480
-rw-r--r--  nova/tests/virt/libvirt/test_libvirt_volume.py  | 12
-rw-r--r--  nova/tests/virt/powervm/test_powervm.py  | 22
-rw-r--r--  nova/tests/virt/test_virt.py  | 2
-rw-r--r--  nova/tests/virt/test_virt_drivers.py  | 25
-rw-r--r--  nova/tests/virt/vmwareapi/stubs.py  | 2
-rwxr-xr-x  nova/tests/virt/vmwareapi/test_vmwareapi.py  | 180
-rwxr-xr-x  nova/tests/virt/vmwareapi/test_vmwareapi_vm_util.py  | 84
-rw-r--r--  nova/tests/virt/vmwareapi/test_vmwareapi_vmops.py  | 67
-rw-r--r--  nova/tests/virt/vmwareapi/test_vmwareapi_volumeops.py  | 8
-rw-r--r--  nova/tests/virt/xenapi/test_vmops.py  | 3
-rw-r--r--  nova/tests/virt/xenapi/test_xenapi.py  | 10
-rwxr-xr-x [-rw-r--r--]  nova/utils.py  | 44
-rw-r--r--  nova/virt/configdrive.py  | 4
-rw-r--r--  nova/virt/disk/mount/api.py  | 4
-rw-r--r--  nova/virt/disk/mount/nbd.py  | 9
-rw-r--r--  nova/virt/disk/vfs/guestfs.py  | 11
-rw-r--r--  nova/virt/docker/driver.py  | 6
-rw-r--r--  nova/virt/driver.py  | 16
-rw-r--r--  nova/virt/fake.py  | 2
-rw-r--r--  nova/virt/firewall.py  | 48
-rw-r--r--  nova/virt/hyperv/__init__.py  | 4
-rw-r--r--  nova/virt/hyperv/basevolumeutils.py  | 3
-rw-r--r--  nova/virt/hyperv/driver.py  | 13
-rw-r--r--  nova/virt/hyperv/imagecache.py  | 11
-rw-r--r--  nova/virt/hyperv/migrationops.py  | 2
-rw-r--r--  nova/virt/hyperv/snapshotops.py  | 2
-rw-r--r--  nova/virt/hyperv/vhdutils.py  | 41
-rw-r--r--  nova/virt/hyperv/vhdutilsv2.py  | 100
-rw-r--r--  nova/virt/hyperv/vmops.py  | 53
-rw-r--r--  nova/virt/hyperv/vmutils.py  | 104
-rw-r--r--  nova/virt/hyperv/vmutilsv2.py  | 42
-rw-r--r--  nova/virt/hyperv/volumeops.py  | 14
-rw-r--r--  nova/virt/libvirt/driver.py  | 176
-rw-r--r--  nova/virt/libvirt/firewall.py  | 9
-rw-r--r--  nova/virt/libvirt/imagebackend.py  | 220
-rw-r--r--  nova/virt/libvirt/utils.py  | 33
-rw-r--r--  nova/virt/libvirt/volume.py  | 50
-rw-r--r--  nova/virt/powervm/driver.py  | 12
-rw-r--r--  nova/virt/vmwareapi/driver.py  | 45
-rw-r--r--  nova/virt/vmwareapi/fake.py  | 7
-rwxr-xr-x [-rw-r--r--]  nova/virt/vmwareapi/host.py  | 7
-rw-r--r--  nova/virt/vmwareapi/vim.py  | 10
-rwxr-xr-x [-rw-r--r--]  nova/virt/vmwareapi/vm_util.py  | 71
-rwxr-xr-x [-rw-r--r--]  nova/virt/vmwareapi/vmops.py  | 195
-rw-r--r--  nova/virt/xenapi/driver.py  | 5
-rw-r--r--  nova/virt/xenapi/vmops.py  | 3
-rw-r--r--  requirements.txt  | 6
-rw-r--r--  setup.cfg  | 2
-rw-r--r--  test-requirements.txt  | 2
-rw-r--r--  tox.ini  | 5
107 files changed, 3525 insertions, 915 deletions
diff --git a/etc/nova/nova.conf.sample b/etc/nova/nova.conf.sample
index daa5dcab3c..6fb6b41097 100644
--- a/etc/nova/nova.conf.sample
+++ b/etc/nova/nova.conf.sample
@@ -1914,8 +1914,8 @@
# Driver to use for controlling virtualization. Options
# include: libvirt.LibvirtDriver, xenapi.XenAPIDriver,
# fake.FakeDriver, baremetal.BareMetalDriver,
-# vmwareapi.VMwareESXDriver, vmwareapi.VMwareVCDriver (string
-# value)
+# vmwareapi.VMwareESXDriver, vmwareapi.VMwareVCDriver,
+# hyperv.HyperVDriver (string value)
#compute_driver=<None>
# The default format an ephemeral_volume will be formatted
diff --git a/etc/nova/rootwrap.d/compute.filters b/etc/nova/rootwrap.d/compute.filters
index e98c3f265d..eda7fcdf3b 100644
--- a/etc/nova/rootwrap.d/compute.filters
+++ b/etc/nova/rootwrap.d/compute.filters
@@ -31,6 +31,10 @@ qemu-nbd: CommandFilter, qemu-nbd, root
# nova/virt/disk/mount/loop.py: 'losetup', '--detach', device
losetup: CommandFilter, losetup, root
+# nova/virt/libvirt/utils.py: 'blockdev', '--getsize64', path
+# nova/virt/disk/mount/nbd.py: 'blockdev', '--flushbufs', device
+blockdev: RegExpFilter, blockdev, root, blockdev, (--getsize64|--flushbufs), /dev/.*
+
# nova/virt/disk/vfs/localfs.py: 'tee', canonpath
tee: CommandFilter, tee, root
@@ -199,7 +203,7 @@ systool: CommandFilter, systool, root
sginfo: CommandFilter, sginfo, root
sg_scan: CommandFilter, sg_scan, root
cryptsetup: CommandFilter, cryptsetup, root
-ln: RegExpFilter, ln, root, ln, --symbolic, --force, /dev/mapper/ip-.*-iscsi-iqn.2010-10.org.openstack:volume-.*, /dev/disk/by-path/ip-.*-iscsi-iqn.2010-10.org.openstack:volume-.*
+ln: RegExpFilter, ln, root, ln, --symbolic, --force, /dev/mapper/ip-.*-iscsi-iqn.*, /dev/disk/by-path/ip-.*-iscsi-iqn.*
# nova/virt/xenapi/vm_utils.py:
xenstore-read: CommandFilter, xenstore-read, root
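
The new blockdev entry is a RegExpFilter, so rootwrap only authorizes the two invocations named in the comments (--getsize64 and --flushbufs against /dev/ paths). A minimal sketch of the kind of caller this is meant to serve; the helper name and device path below are illustrative assumptions, not code from this patch:

    from nova import utils

    def get_device_size_bytes(device):
        # rootwrap matches 'blockdev --getsize64 /dev/...' against the
        # RegExpFilter above, so the command can be escalated with
        # run_as_root=True.
        out, _err = utils.execute('blockdev', '--getsize64', device,
                                  run_as_root=True)
        return int(out.strip())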
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index 94ff160bc5..e80456901f 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -30,6 +30,7 @@ from oslo.config import cfg
from nova.api.ec2 import ec2utils
from nova.api.ec2 import inst_state
from nova.api.metadata import password
+from nova.api.openstack import extensions
from nova.api import validator
from nova import availability_zones
from nova import block_device
@@ -85,6 +86,9 @@ LOG = logging.getLogger(__name__)
QUOTAS = quota.QUOTAS
+security_group_authorizer = extensions.extension_authorizer('compute',
+ 'security_groups')
+
def validate_ec2_id(val):
if not validator.validate_str()(val):
@@ -631,6 +635,8 @@ class CloudController(object):
security_group = self.security_group_api.get(context, group_name,
group_id)
+ security_group_authorizer(context, security_group)
+
prevalues = kwargs.get('ip_permissions', [kwargs])
rule_ids = []
@@ -665,6 +671,8 @@ class CloudController(object):
security_group = self.security_group_api.get(context, group_name,
group_id)
+ security_group_authorizer(context, security_group)
+
prevalues = kwargs.get('ip_permissions', [kwargs])
postvalues = []
for values in prevalues:
@@ -737,6 +745,8 @@ class CloudController(object):
security_group = self.security_group_api.get(context, group_name,
group_id)
+ security_group_authorizer(context, security_group)
+
self.security_group_api.destroy(context, security_group)
return True
@@ -888,7 +898,7 @@ class CloudController(object):
'volumeId': ec2utils.id_to_ec2_vol_id(volume_id)}
def _get_instance_from_volume(self, context, volume):
- if volume['instance_uuid']:
+ if volume.get('instance_uuid'):
try:
return db.instance_get_by_uuid(context,
volume['instance_uuid'])
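
The security_group_authorizer added above is built with extensions.extension_authorizer('compute', 'security_groups') and is now called before revoking, authorizing, or deleting a group. A simplified sketch of what such an authorizer amounts to, assuming the usual 'compute_extension:<name>' rule naming; this is not the actual extensions code:

    from nova import policy

    def security_group_authorizer(context, target):
        # Enforce the extension policy rule, using the security group
        # dict as the policy target; raises if the caller is not allowed.
        policy.enforce(context, 'compute_extension:security_groups', target)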
diff --git a/nova/api/metadata/handler.py b/nova/api/metadata/handler.py
index 7ac902304d..74bb4f7e42 100644
--- a/nova/api/metadata/handler.py
+++ b/nova/api/metadata/handler.py
@@ -31,6 +31,7 @@ from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import memorycache
+from nova import utils
from nova import wsgi
CACHE_EXPIRATION = 15 # in seconds
@@ -48,7 +49,7 @@ metadata_proxy_opts = [
cfg.StrOpt(
'neutron_metadata_proxy_shared_secret',
default='',
- deprecated_name='quantum_metadata_proxy_shared_secret',
+ deprecated_name='quantum_metadata_proxy_shared_secret', secret=True,
help='Shared secret to validate proxies Neutron metadata requests')
]
@@ -172,7 +173,7 @@ class MetadataRequestHandler(wsgi.Application):
instance_id,
hashlib.sha256).hexdigest()
- if expected_signature != signature:
+ if not utils.constant_time_compare(expected_signature, signature):
if instance_id:
LOG.warn(_('X-Instance-ID-Signature: %(signature)s does not '
'match the expected value: %(expected_signature)s '
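
The handler now compares the computed HMAC against the X-Instance-ID-Signature header with utils.constant_time_compare instead of !=, so the comparison time no longer depends on how many leading characters match. A minimal sketch of that idea; the real nova.utils helper may differ in detail:

    def constant_time_compare(first, second):
        # Touch every character pair so the running time does not leak
        # how much of the supplied signature was correct.
        if len(first) != len(second):
            return False
        result = 0
        for x, y in zip(first, second):
            result |= ord(x) ^ ord(y)
        return result == 0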
diff --git a/nova/api/openstack/compute/contrib/aggregates.py b/nova/api/openstack/compute/contrib/aggregates.py
index f381441532..0488cc9c36 100644
--- a/nova/api/openstack/compute/contrib/aggregates.py
+++ b/nova/api/openstack/compute/contrib/aggregates.py
@@ -129,6 +129,8 @@ class AggregateController(object):
try:
aggregate = self.api.update_aggregate(context, id, updates)
+ except exception.AggregateNameExists as e:
+ raise exc.HTTPConflict(explanation=e.format_message())
except exception.AggregateNotFound:
LOG.info(_('Cannot update aggregate: %s'), id)
raise exc.HTTPNotFound()
diff --git a/nova/api/openstack/compute/contrib/floating_ips.py b/nova/api/openstack/compute/contrib/floating_ips.py
index 92c42a5320..542f205699 100644
--- a/nova/api/openstack/compute/contrib/floating_ips.py
+++ b/nova/api/openstack/compute/contrib/floating_ips.py
@@ -181,16 +181,14 @@ class FloatingIPController(object):
# get the associated instance object (if any)
instance = get_instance_by_floating_ip_addr(self, context, address)
-
- # disassociate if associated
- if floating_ip.get('fixed_ip_id'):
- try:
- disassociate_floating_ip(self, context, instance, address)
- except exception.FloatingIpNotAssociated:
- LOG.info(_("Floating ip %s has been disassociated") % address)
-
- # release ip from project
- self.network_api.release_floating_ip(context, address)
+ try:
+ self.network_api.disassociate_and_release_floating_ip(
+ context, instance, floating_ip)
+ except exception.Forbidden:
+ raise webob.exc.HTTPForbidden()
+ except exception.CannotDisassociateAutoAssignedFloatingIP:
+ msg = _('Cannot disassociate auto assigned floating ip')
+ raise webob.exc.HTTPForbidden(explanation=msg)
return webob.Response(status_int=202)
def _get_ip_by_id(self, context, value):
diff --git a/nova/api/openstack/compute/plugins/v3/aggregates.py b/nova/api/openstack/compute/plugins/v3/aggregates.py
index bc1bfd1569..3615c7493a 100644
--- a/nova/api/openstack/compute/plugins/v3/aggregates.py
+++ b/nova/api/openstack/compute/plugins/v3/aggregates.py
@@ -108,7 +108,7 @@ class AggregateController(wsgi.Controller):
raise exc.HTTPNotFound(explanation=e.format_message())
return self._marshall_aggregate(aggregate)
- @extensions.expected_errors((400, 404))
+ @extensions.expected_errors((400, 404, 409))
def update(self, req, id, body):
"""Updates the name and/or availability_zone of given aggregate."""
context = _get_context(req)
@@ -133,6 +133,8 @@ class AggregateController(wsgi.Controller):
try:
aggregate = self.api.update_aggregate(context, id, updates)
+ except exception.AggregateNameExists as e:
+ raise exc.HTTPConflict(explanation=e.format_message())
except exception.AggregateNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
diff --git a/nova/api/openstack/compute/views/servers.py b/nova/api/openstack/compute/views/servers.py
index 54886e1425..36412d104f 100644
--- a/nova/api/openstack/compute/views/servers.py
+++ b/nova/api/openstack/compute/views/servers.py
@@ -47,7 +47,7 @@ class ViewBuilder(common.ViewBuilder):
)
_fault_statuses = (
- "ERROR",
+ "ERROR", "DELETED"
)
def __init__(self):
@@ -147,6 +147,9 @@ class ViewBuilder(common.ViewBuilder):
@staticmethod
def _get_vm_state(instance):
+ # If the instance is deleted the vm and task states don't really matter
+ if instance.get("deleted"):
+ return "DELETED"
return common.status_from_state(instance.get("vm_state"),
instance.get("task_state"))
diff --git a/nova/cells/messaging.py b/nova/cells/messaging.py
index 410084503a..76b5177c4c 100644
--- a/nova/cells/messaging.py
+++ b/nova/cells/messaging.py
@@ -875,7 +875,7 @@ class _TargetedMessageMethods(_BaseMessageMethods):
extra_instance_updates):
"""Resize an instance via compute_api.resize()."""
self._call_compute_api_with_obj(message.ctxt, instance, 'resize',
- flavor_id=flavor['id'],
+ flavor_id=flavor['flavorid'],
**extra_instance_updates)
def live_migrate_instance(self, message, instance, block_migration,
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 4a281d6f59..1efcfd7186 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -1924,6 +1924,7 @@ class API(base.Base):
:returns: the new image metadata
"""
image_meta['name'] = name
+ image_meta['is_public'] = False
properties = image_meta['properties']
if instance['root_device_name']:
properties['root_device_name'] = instance['root_device_name']
@@ -3192,8 +3193,8 @@ class AggregateAPI(base.Base):
# if it is none then that is just a regular aggregate and
# it is valid to have a host in multiple aggregates.
if aggregate_az and aggregate_az != host_az:
- msg = _("Host already in availability zone"
- "%s.") % host_az
+ msg = _("Host already in availability zone "
+ "%s") % host_az
action_name = "add_host_to_aggregate"
raise exception.InvalidAggregateAction(
action=action_name, aggregate_id=aggregate_id,
@@ -3700,6 +3701,7 @@ class SecurityGroupAPI(base.Base, security_group_base.SecurityGroupBase):
groups = instance.get('security_groups')
if groups:
return [{'name': group['name']} for group in groups]
+ return []
def populate_security_groups(self, instance, security_groups):
if not security_groups:
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 7d85e24fab..bcf3063bd0 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -668,9 +668,10 @@ class ComputeManager(manager.SchedulerDependentManager):
return
net_info = compute_utils.get_nw_info_for_instance(instance)
-
- self.driver.plug_vifs(instance, net_info)
-
+ try:
+ self.driver.plug_vifs(instance, net_info)
+ except NotImplementedError as e:
+ LOG.debug(e, instance=instance)
if instance['task_state'] == task_states.RESIZE_MIGRATING:
# We crashed during resize/migration, so roll back for safety
try:
@@ -683,7 +684,7 @@ class ComputeManager(manager.SchedulerDependentManager):
block_dev_info = self._get_instance_volume_block_device_info(
context, instance)
- self.driver.finish_revert_migration(
+ self.driver.finish_revert_migration(context,
instance, net_info, block_dev_info, power_on)
except Exception as e:
@@ -859,7 +860,12 @@ class ComputeManager(manager.SchedulerDependentManager):
"""
@utils.synchronized(instance['uuid'])
def _sync_refresh():
- return self.driver.refresh_instance_security_rules(instance)
+ try:
+ return self.driver.refresh_instance_security_rules(instance)
+ except NotImplementedError:
+ LOG.warning(_('Hypervisor driver does not support '
+ 'security groups.'), instance=instance)
+
return _sync_refresh()
@wrap_exception()
@@ -2351,8 +2357,10 @@ class ComputeManager(manager.SchedulerDependentManager):
msg = _("Instance disappeared during snapshot")
LOG.debug(msg, instance=instance)
except exception.ImageNotFound:
- msg = _("Image not found")
- LOG.debug(msg, instance=instance)
+ instance.task_state = None
+ instance.save()
+ msg = _("Image not found during snapshot")
+ LOG.warn(msg, instance=instance)
except exception.UnexpectedTaskStateError as e:
actual_task_state = e.kwargs.get('actual', None)
if actual_task_state == 'deleting':
@@ -2836,7 +2844,7 @@ class ComputeManager(manager.SchedulerDependentManager):
context, instance, refresh_conn_info=True)
power_on = old_vm_state != vm_states.STOPPED
- self.driver.finish_revert_migration(instance,
+ self.driver.finish_revert_migration(context, instance,
network_info,
block_device_info, power_on)
@@ -3454,7 +3462,10 @@ class ComputeManager(manager.SchedulerDependentManager):
def _unshelve_instance(self, context, instance, image):
self._notify_about_instance_usage(context, instance, 'unshelve.start')
+ compute_info = self._get_compute_info(context.elevated(), self.host)
instance.task_state = task_states.SPAWNING
+ instance.node = compute_info['hypervisor_hostname']
+ instance.host = self.host
instance.save()
network_info = self._get_instance_nw_info(context, instance)
@@ -3462,6 +3473,11 @@ class ComputeManager(manager.SchedulerDependentManager):
context, instance, legacy=False)
block_device_info = self._prep_block_device(context, instance, bdms)
scrubbed_keys = self._unshelve_instance_key_scrub(instance)
+
+ if image:
+ shelved_image_ref = instance.image_ref
+ instance.image_ref = image['id']
+
try:
self.driver.spawn(context, instance, image, injected_files=[],
admin_password=None,
@@ -3472,6 +3488,7 @@ class ComputeManager(manager.SchedulerDependentManager):
LOG.exception(_('Instance failed to spawn'), instance=instance)
if image:
+ instance.image_ref = shelved_image_ref
image_service = glance.get_default_image_service()
image_service.delete(context, image['id'])
@@ -4153,8 +4170,10 @@ class ComputeManager(manager.SchedulerDependentManager):
# but we must do it explicitly here when block_migration
# is false, as the network devices at the source must be
# torn down
- self.driver.unplug_vifs(instance_ref, network_info)
-
+ try:
+ self.driver.unplug_vifs(instance_ref, network_info)
+ except NotImplementedError as e:
+ LOG.debug(e, instance=instance_ref)
# NOTE(tr3buchet): tear down networks on source host
self.network_api.setup_networks_on_host(ctxt, instance_ref,
self.host, teardown=True)
@@ -4327,38 +4346,76 @@ class ComputeManager(manager.SchedulerDependentManager):
return
self._last_info_cache_heal = curr_time
- instance_uuids = getattr(self, '_instance_uuids_to_heal', None)
+ instance_uuids = getattr(self, '_instance_uuids_to_heal', [])
instance = None
- while not instance or instance['host'] != self.host:
- if instance_uuids:
+ LOG.debug(_('Starting heal instance info cache'))
+
+ if not instance_uuids:
+ # The list of instances to heal is empty so rebuild it
+ LOG.debug(_('Rebuilding the list of instances to heal'))
+ db_instances = instance_obj.InstanceList.get_by_host(
+ context, self.host, expected_attrs=[])
+ for inst in db_instances:
+            # We don't want to refresh the cache for instances
+ # which are building or deleting so don't put them
+ # in the list. If they are building they will get
+ # added to the list next time we build it.
+ if (inst.vm_state == vm_states.BUILDING):
+ LOG.debug(_('Skipping network cache update for instance '
+ 'because it is Building.'), instance=inst)
+ continue
+ if (inst.task_state == task_states.DELETING):
+ LOG.debug(_('Skipping network cache update for instance '
+ 'because it is being deleted.'), instance=inst)
+ continue
+
+ if not instance:
+ # Save the first one we find so we don't
+ # have to get it again
+ instance = inst
+ else:
+ instance_uuids.append(inst['uuid'])
+
+ self._instance_uuids_to_heal = instance_uuids
+ else:
+ # Find the next valid instance on the list
+ while instance_uuids:
try:
- instance = instance_obj.Instance.get_by_uuid(
- context, instance_uuids.pop(0),
- expected_attrs=['system_metadata'])
+ inst = instance_obj.Instance.get_by_uuid(
+ context, instance_uuids.pop(0),
+ expected_attrs=['system_metadata', 'info_cache'])
except exception.InstanceNotFound:
# Instance is gone. Try to grab another.
continue
- else:
- # No more in our copy of uuids. Pull from the DB.
- db_instances = instance_obj.InstanceList.get_by_host(
- context, self.host, expected_attrs=[])
- if not db_instances:
- # None.. just return.
- return
- instance = db_instances[0]
- instance_uuids = [inst['uuid'] for inst in db_instances[1:]]
- self._instance_uuids_to_heal = instance_uuids
- # We have an instance now and it's ours
- try:
- # Call to network API to get instance info.. this will
- # force an update to the instance's info_cache
- self._get_instance_nw_info(context, instance)
- LOG.debug(_('Updated the info_cache for instance'),
- instance=instance)
- except Exception as e:
- LOG.debug(_("An error occurred: %s"), e)
+ # Check the instance hasn't been migrated
+ if inst.host != self.host:
+ LOG.debug(_('Skipping network cache update for instance '
+ 'because it has been migrated to another '
+ 'host.'), instance=inst)
+                # Check the instance isn't being deleted
+ elif inst.task_state == task_states.DELETING:
+ LOG.debug(_('Skipping network cache update for instance '
+ 'because it is being deleted.'), instance=inst)
+ else:
+ instance = inst
+ break
+
+ if instance:
+ # We have an instance now to refresh
+ try:
+ # Call to network API to get instance info.. this will
+ # force an update to the instance's info_cache
+ self._get_instance_nw_info(context, instance)
+ LOG.debug(_('Updated the network info_cache for instance'),
+ instance=instance)
+ except Exception:
+ LOG.error(_('An error occurred while refreshing the network '
+ 'cache.'), instance=instance, exc_info=True)
+ else:
+ LOG.debug(_("Didn't find any instances for network info cache "
+ "update."))
@periodic_task.periodic_task
def _poll_rebooting_instances(self, context):
@@ -4763,7 +4820,6 @@ class ComputeManager(manager.SchedulerDependentManager):
vm_states.RESCUED,
vm_states.RESIZED,
vm_states.SUSPENDED,
- vm_states.PAUSED,
vm_states.ERROR):
# TODO(maoy): we ignore these vm_state for now.
pass
@@ -4826,6 +4882,17 @@ class ComputeManager(manager.SchedulerDependentManager):
LOG.exception(_("error during stop() in "
"sync_power_state."),
instance=db_instance)
+ elif vm_state == vm_states.PAUSED:
+ if vm_power_state in (power_state.SHUTDOWN,
+ power_state.CRASHED):
+ LOG.warn(_("Paused instance shutdown by itself. Calling "
+ "the stop API."), instance=db_instance)
+ try:
+ self.compute_api.force_stop(context, db_instance)
+ except Exception:
+ LOG.exception(_("error during stop() in "
+ "sync_power_state."),
+ instance=db_instance)
elif vm_state in (vm_states.SOFT_DELETED,
vm_states.DELETED):
if vm_power_state not in (power_state.NOSTATE,
diff --git a/nova/compute/rpcapi.py b/nova/compute/rpcapi.py
index f115857606..5784f67ba6 100644
--- a/nova/compute/rpcapi.py
+++ b/nova/compute/rpcapi.py
@@ -62,13 +62,17 @@ def _compute_host(host, instance):
return instance['host']
+def _icehouse_compat():
+ return CONF.upgrade_levels.compute == 'icehouse-compat'
+
+
def _get_version(version):
# NOTE(russellb) If "[upgrade_levels] compute=icehouse-compat" is set in
# the config, we switch into a special mode where we send 3.0 as the
# version number instead of 2.latest. 3.0 == 2.latest, and both Havana
# and Icehouse compute nodes support 3.0. This allows for a live
# upgrade environment with a mix of Havana and Icehouse compute nodes.
- if CONF.upgrade_levels.compute == 'icehouse-compat':
+ if _icehouse_compat():
return ComputeAPI.VERSION_ALIASES['icehouse-compat']
return version
@@ -294,28 +298,28 @@ class ComputeAPI(rpcclient.RpcProxy):
def check_can_live_migrate_destination(self, ctxt, instance, destination,
block_migration, disk_over_commit):
- if self.client.can_send_version('2.38'):
- version = '2.38'
+ if _icehouse_compat() or self.client.can_send_version('2.38'):
+ version = _get_version('2.38')
else:
version = '2.0'
instance = jsonutils.to_primitive(
objects_base.obj_to_primitive(instance))
cctxt = self.client.prepare(server=destination,
- version=_get_version(version))
+ version=version)
return cctxt.call(ctxt, 'check_can_live_migrate_destination',
instance=instance,
block_migration=block_migration,
disk_over_commit=disk_over_commit)
def check_can_live_migrate_source(self, ctxt, instance, dest_check_data):
- if self.client.can_send_version('2.38'):
- version = '2.38'
+ if _icehouse_compat() or self.client.can_send_version('2.38'):
+ version = _get_version('2.38')
else:
version = '2.0'
instance = jsonutils.to_primitive(
objects_base.obj_to_primitive(instance))
cctxt = self.client.prepare(server=_compute_host(None, instance),
- version=_get_version(version))
+ version=version)
return cctxt.call(ctxt, 'check_can_live_migrate_source',
instance=instance,
dest_check_data=dest_check_data)
@@ -330,8 +334,8 @@ class ComputeAPI(rpcclient.RpcProxy):
def confirm_resize(self, ctxt, instance, migration, host,
reservations=None, cast=True):
- if self.client.can_send_version('2.39'):
- version = '2.39'
+ if _icehouse_compat() or self.client.can_send_version('2.39'):
+ version = _get_version('2.39')
else:
instance = jsonutils.to_primitive(
objects_base.obj_to_primitive(instance))
@@ -339,7 +343,7 @@ class ComputeAPI(rpcclient.RpcProxy):
objects_base.obj_to_primitive(migration))
version = '2.7'
cctxt = self.client.prepare(server=_compute_host(host, instance),
- version=_get_version(version))
+ version=version)
rpc_method = cctxt.cast if cast else cctxt.call
return rpc_method(ctxt, 'confirm_resize',
instance=instance, migration=migration,
@@ -361,8 +365,8 @@ class ComputeAPI(rpcclient.RpcProxy):
def finish_resize(self, ctxt, instance, migration, image, disk_info,
host, reservations=None):
- if self.client.can_send_version('2.46'):
- version = '2.46'
+ if _icehouse_compat() or self.client.can_send_version('2.46'):
+ version = _get_version('2.46')
else:
instance = jsonutils.to_primitive(
objects_base.obj_to_primitive(instance))
@@ -370,15 +374,15 @@ class ComputeAPI(rpcclient.RpcProxy):
objects_base.obj_to_primitive(migration))
version = '2.8'
cctxt = self.client.prepare(server=host,
- version=_get_version(version))
+ version=version)
cctxt.cast(ctxt, 'finish_resize',
instance=instance, migration=migration,
image=image, disk_info=disk_info, reservations=reservations)
def finish_revert_resize(self, ctxt, instance, migration, host,
reservations=None):
- if self.client.can_send_version('2.47'):
- version = '2.47'
+ if _icehouse_compat() or self.client.can_send_version('2.47'):
+ version = _get_version('2.47')
else:
instance = jsonutils.to_primitive(
objects_base.obj_to_primitive(instance))
@@ -386,7 +390,7 @@ class ComputeAPI(rpcclient.RpcProxy):
objects_base.obj_to_primitive(migration))
version = '2.13'
cctxt = self.client.prepare(server=host,
- version=_get_version(version))
+ version=version)
cctxt.cast(ctxt, 'finish_revert_resize',
instance=instance, migration=migration,
reservations=reservations)
@@ -466,14 +470,14 @@ class ComputeAPI(rpcclient.RpcProxy):
file_contents=file_contents)
def inject_network_info(self, ctxt, instance):
- if self.client.can_send_version('2.41'):
- version = '2.41'
+ if _icehouse_compat() or self.client.can_send_version('2.41'):
+ version = _get_version('2.41')
else:
instance = jsonutils.to_primitive(
objects_base.obj_to_primitive(instance))
version = '2.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
- version=_get_version(version))
+ version=version)
cctxt.cast(ctxt, 'inject_network_info', instance=instance)
def live_migration(self, ctxt, instance, dest, block_migration, host,
@@ -486,14 +490,14 @@ class ComputeAPI(rpcclient.RpcProxy):
migrate_data=migrate_data)
def pause_instance(self, ctxt, instance):
- if self.client.can_send_version('2.36'):
- version = '2.36'
+ if _icehouse_compat() or self.client.can_send_version('2.36'):
+ version = _get_version('2.36')
else:
version = '2.0'
instance = jsonutils.to_primitive(
objects_base.obj_to_primitive(instance))
cctxt = self.client.prepare(server=_compute_host(None, instance),
- version=_get_version(version))
+ version=version)
cctxt.cast(ctxt, 'pause_instance', instance=instance)
def post_live_migration_at_destination(self, ctxt, instance,
@@ -530,8 +534,8 @@ class ComputeAPI(rpcclient.RpcProxy):
def prep_resize(self, ctxt, image, instance, instance_type, host,
reservations=None, request_spec=None,
filter_properties=None, node=None):
- if self.client.can_send_version('2.43'):
- version = '2.43'
+ if _icehouse_compat() or self.client.can_send_version('2.43'):
+ version = _get_version('2.43')
else:
instance = jsonutils.to_primitive(
objects_base.obj_to_primitive(instance))
@@ -539,7 +543,7 @@ class ComputeAPI(rpcclient.RpcProxy):
instance_type_p = jsonutils.to_primitive(instance_type)
image_p = jsonutils.to_primitive(image)
cctxt = self.client.prepare(server=host,
- version=_get_version(version))
+ version=version)
cctxt.cast(ctxt, 'prep_resize',
instance=instance,
instance_type=instance_type_p,
@@ -550,14 +554,14 @@ class ComputeAPI(rpcclient.RpcProxy):
def reboot_instance(self, ctxt, instance, block_device_info,
reboot_type):
- if not self.client.can_send_version('2.32'):
+ if _icehouse_compat() or self.client.can_send_version('2.32'):
+ version = _get_version('2.32')
+ else:
version = '2.23'
instance = jsonutils.to_primitive(
objects_base.obj_to_primitive(instance))
- else:
- version = '2.32'
cctxt = self.client.prepare(server=_compute_host(None, instance),
- version=_get_version(version))
+ version=version)
cctxt.cast(ctxt, 'reboot_instance',
instance=instance,
block_device_info=block_device_info,
@@ -623,20 +627,20 @@ class ComputeAPI(rpcclient.RpcProxy):
rescue_password=rescue_password)
def reset_network(self, ctxt, instance):
- if self.client.can_send_version('2.40'):
- version = '2.40'
+ if _icehouse_compat() or self.client.can_send_version('2.40'):
+ version = _get_version('2.40')
else:
instance = jsonutils.to_primitive(
objects_base.obj_to_primitive(instance))
version = '2.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
- version=_get_version(version))
+ version=version)
cctxt.cast(ctxt, 'reset_network', instance=instance)
def resize_instance(self, ctxt, instance, migration, image, instance_type,
reservations=None):
- if self.client.can_send_version('2.45'):
- version = '2.45'
+ if _icehouse_compat() or self.client.can_send_version('2.45'):
+ version = _get_version('2.45')
else:
instance = jsonutils.to_primitive(
objects_base.obj_to_primitive(instance))
@@ -645,27 +649,27 @@ class ComputeAPI(rpcclient.RpcProxy):
version = '2.16'
instance_type_p = jsonutils.to_primitive(instance_type)
cctxt = self.client.prepare(server=_compute_host(None, instance),
- version=_get_version(version))
+ version=version)
cctxt.cast(ctxt, 'resize_instance',
instance=instance, migration=migration,
image=image, reservations=reservations,
instance_type=instance_type_p)
def resume_instance(self, ctxt, instance):
- if self.client.can_send_version('2.33'):
- version = '2.33'
+ if _icehouse_compat() or self.client.can_send_version('2.33'):
+ version = _get_version('2.33')
else:
version = '2.0'
instance = jsonutils.to_primitive(
objects_base.obj_to_primitive(instance))
cctxt = self.client.prepare(server=_compute_host(None, instance),
- version=_get_version(version))
+ version=version)
cctxt.cast(ctxt, 'resume_instance', instance=instance)
def revert_resize(self, ctxt, instance, migration, host,
reservations=None):
- if self.client.can_send_version('2.39'):
- version = '2.39'
+ if _icehouse_compat() or self.client.can_send_version('2.39'):
+ version = _get_version('2.39')
else:
instance = jsonutils.to_primitive(
objects_base.obj_to_primitive(instance))
@@ -673,7 +677,7 @@ class ComputeAPI(rpcclient.RpcProxy):
objects_base.obj_to_primitive(migration))
version = '2.12'
cctxt = self.client.prepare(server=_compute_host(host, instance),
- version=_get_version(version))
+ version=version)
cctxt.cast(ctxt, 'revert_resize',
instance=instance, migration=migration,
reservations=reservations)
@@ -697,13 +701,13 @@ class ComputeAPI(rpcclient.RpcProxy):
'admin_password': admin_password,
'is_first_time': is_first_time, 'node': node}
- if self.client.can_send_version('2.37'):
- version = '2.37'
+ if _icehouse_compat() or self.client.can_send_version('2.37'):
+ version = _get_version('2.37')
msg_kwargs['legacy_bdm_in_spec'] = legacy_bdm_in_spec
else:
version = '2.19'
cctxt = self.client.prepare(server=host,
- version=_get_version(version))
+ version=version)
cctxt.cast(ctxt, 'run_instance', **msg_kwargs)
def set_admin_password(self, ctxt, instance, new_pass):
@@ -747,8 +751,8 @@ class ComputeAPI(rpcclient.RpcProxy):
def backup_instance(self, ctxt, instance, image_id, backup_type,
rotation):
- if self.client.can_send_version('2.42'):
- version = '2.42'
+ if _icehouse_compat() or self.client.can_send_version('2.42'):
+ version = _get_version('2.42')
method = 'backup_instance'
extra_kwargs = dict()
else:
@@ -758,7 +762,7 @@ class ComputeAPI(rpcclient.RpcProxy):
extra_kwargs = dict(image_type='backup')
version = '2.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
- version=_get_version(version))
+ version=version)
cctxt.cast(ctxt, method,
instance=instance,
image_id=image_id,
@@ -767,8 +771,8 @@ class ComputeAPI(rpcclient.RpcProxy):
**extra_kwargs)
def snapshot_instance(self, ctxt, instance, image_id):
- if self.client.can_send_version('2.42'):
- version = '2.42'
+ if _icehouse_compat() or self.client.can_send_version('2.42'):
+ version = _get_version('2.42')
extra_kwargs = dict()
else:
instance = jsonutils.to_primitive(
@@ -778,68 +782,68 @@ class ComputeAPI(rpcclient.RpcProxy):
rotation=None)
version = '2.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
- version=_get_version(version))
+ version=version)
cctxt.cast(ctxt, 'snapshot_instance',
instance=instance,
image_id=image_id,
**extra_kwargs)
def start_instance(self, ctxt, instance):
- if self.client.can_send_version('2.29'):
- version = '2.29'
+ if _icehouse_compat() or self.client.can_send_version('2.29'):
+ version = _get_version('2.29')
else:
version = '2.0'
instance = jsonutils.to_primitive(
objects_base.obj_to_primitive(instance))
cctxt = self.client.prepare(server=_compute_host(None, instance),
- version=_get_version(version))
+ version=version)
cctxt.cast(ctxt, 'start_instance', instance=instance)
def stop_instance(self, ctxt, instance, do_cast=True):
- if self.client.can_send_version('2.29'):
- version = '2.29'
+ if _icehouse_compat() or self.client.can_send_version('2.29'):
+ version = _get_version('2.29')
else:
version = '2.0'
instance = jsonutils.to_primitive(
objects_base.obj_to_primitive(instance))
cctxt = self.client.prepare(server=_compute_host(None, instance),
- version=_get_version(version))
+ version=version)
rpc_method = cctxt.cast if do_cast else cctxt.call
return rpc_method(ctxt, 'stop_instance', instance=instance)
def suspend_instance(self, ctxt, instance):
- if self.client.can_send_version('2.33'):
- version = '2.33'
+ if _icehouse_compat() or self.client.can_send_version('2.33'):
+ version = _get_version('2.33')
else:
version = '2.0'
instance = jsonutils.to_primitive(
objects_base.obj_to_primitive(instance))
cctxt = self.client.prepare(server=_compute_host(None, instance),
- version=_get_version(version))
+ version=version)
cctxt.cast(ctxt, 'suspend_instance', instance=instance)
def terminate_instance(self, ctxt, instance, bdms, reservations=None):
- if self.client.can_send_version('2.35'):
- version = '2.35'
+ if _icehouse_compat() or self.client.can_send_version('2.35'):
+ version = _get_version('2.35')
else:
version = '2.27'
instance = jsonutils.to_primitive(instance)
bdms_p = jsonutils.to_primitive(bdms)
cctxt = self.client.prepare(server=_compute_host(None, instance),
- version=_get_version(version))
+ version=version)
cctxt.cast(ctxt, 'terminate_instance',
instance=instance, bdms=bdms_p,
reservations=reservations)
def unpause_instance(self, ctxt, instance):
- if self.client.can_send_version('2.36'):
- version = '2.36'
+ if _icehouse_compat() or self.client.can_send_version('2.36'):
+ version = _get_version('2.36')
else:
version = '2.0'
instance = jsonutils.to_primitive(
objects_base.obj_to_primitive(instance))
cctxt = self.client.prepare(server=_compute_host(None, instance),
- version=_get_version(version))
+ version=version)
cctxt.cast(ctxt, 'unpause_instance', instance=instance)
def unrescue_instance(self, ctxt, instance):
@@ -849,13 +853,13 @@ class ComputeAPI(rpcclient.RpcProxy):
cctxt.cast(ctxt, 'unrescue_instance', instance=instance_p)
def soft_delete_instance(self, ctxt, instance, reservations=None):
- if self.client.can_send_version('2.35'):
- version = '2.35'
+ if _icehouse_compat() or self.client.can_send_version('2.35'):
+ version = _get_version('2.35')
else:
version = '2.27'
instance = jsonutils.to_primitive(instance)
cctxt = self.client.prepare(server=_compute_host(None, instance),
- version=_get_version(version))
+ version=version)
cctxt.cast(ctxt, 'soft_delete_instance',
instance=instance, reservations=reservations)
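
Every method above now repeats the same negotiation: honour icehouse-compat first, otherwise fall back to can_send_version before choosing between the new and the legacy message format. A condensed sketch of that pattern; the wrapper name is illustrative, only _icehouse_compat() and _get_version() come from this patch:

    def _negotiate_version(client, preferred, fallback):
        # In icehouse-compat mode the wire version is pinned by
        # _get_version(); otherwise use the newer version only when the
        # client reports it can send it.
        if _icehouse_compat() or client.can_send_version(preferred):
            return _get_version(preferred)
        return fallback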
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 04a77bb46a..d06e81966c 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -475,6 +475,7 @@ def service_create(context, values):
@require_admin_context
+@_retry_on_deadlock
def service_update(context, service_id, values):
session = get_session()
with session.begin():
@@ -658,6 +659,7 @@ def _update_stats(context, new_stats, compute_id, session, prune_stats=False):
@require_admin_context
+@_retry_on_deadlock
def compute_node_update(context, compute_id, values, prune_stats=False):
"""Updates the ComputeNode record with the most recent data."""
stats = values.pop('stats', {})
@@ -3005,38 +3007,26 @@ def _reservation_create(context, uuid, usage, project_id, user_id, resource,
# code always acquires the lock on quota_usages before acquiring the lock
# on reservations.
-def _get_user_quota_usages(context, session, project_id, user_id):
- # Broken out for testability
- rows = model_query(context, models.QuotaUsage,
- read_deleted="no",
- session=session).\
- filter_by(project_id=project_id).\
- filter(or_(models.QuotaUsage.user_id == user_id,
- models.QuotaUsage.user_id == None)).\
- with_lockmode('update').\
- all()
- return dict((row.resource, row) for row in rows)
-
-
-def _get_project_quota_usages(context, session, project_id):
+def _get_project_user_quota_usages(context, session, project_id,
+ user_id):
rows = model_query(context, models.QuotaUsage,
read_deleted="no",
session=session).\
filter_by(project_id=project_id).\
with_lockmode('update').\
all()
- result = dict()
+ proj_result = dict()
+ user_result = dict()
# Get the total count of in_use,reserved
for row in rows:
- if row.resource in result:
- result[row.resource]['in_use'] += row.in_use
- result[row.resource]['reserved'] += row.reserved
- result[row.resource]['total'] += (row.in_use + row.reserved)
- else:
- result[row.resource] = dict(in_use=row.in_use,
- reserved=row.reserved,
- total=row.in_use + row.reserved)
- return result
+ proj_result.setdefault(row.resource,
+ dict(in_use=0, reserved=0, total=0))
+ proj_result[row.resource]['in_use'] += row.in_use
+ proj_result[row.resource]['reserved'] += row.reserved
+ proj_result[row.resource]['total'] += (row.in_use + row.reserved)
+ if row.user_id is None or row.user_id == user_id:
+ user_result[row.resource] = row
+ return proj_result, user_result
@require_context
@@ -3054,10 +3044,8 @@ def quota_reserve(context, resources, project_quotas, user_quotas, deltas,
user_id = context.user_id
# Get the current usages
- user_usages = _get_user_quota_usages(context, session,
- project_id, user_id)
- project_usages = _get_project_quota_usages(context, session,
- project_id)
+ project_usages, user_usages = _get_project_user_quota_usages(
+ context, session, project_id, user_id)
# Handle usage refresh
work = set(deltas.keys())
@@ -3253,11 +3241,12 @@ def _quota_reservations_query(session, context, reservations):
def reservation_commit(context, reservations, project_id=None, user_id=None):
session = get_session()
with session.begin():
- usages = _get_user_quota_usages(context, session, project_id, user_id)
+ _project_usages, user_usages = _get_project_user_quota_usages(
+ context, session, project_id, user_id)
reservation_query = _quota_reservations_query(session, context,
reservations)
for reservation in reservation_query.all():
- usage = usages[reservation.resource]
+ usage = user_usages[reservation.resource]
if reservation.delta >= 0:
usage.reserved -= reservation.delta
usage.in_use += reservation.delta
@@ -3269,11 +3258,12 @@ def reservation_commit(context, reservations, project_id=None, user_id=None):
def reservation_rollback(context, reservations, project_id=None, user_id=None):
session = get_session()
with session.begin():
- usages = _get_user_quota_usages(context, session, project_id, user_id)
+ _project_usages, user_usages = _get_project_user_quota_usages(
+ context, session, project_id, user_id)
reservation_query = _quota_reservations_query(session, context,
reservations)
for reservation in reservation_query.all():
- usage = usages[reservation.resource]
+ usage = user_usages[reservation.resource]
if reservation.delta >= 0:
usage.reserved -= reservation.delta
reservation_query.soft_delete(synchronize_session=False)
@@ -4316,6 +4306,7 @@ def flavor_get_by_flavor_id(context, flavor_id, read_deleted):
"""Returns a dict describing specific flavor_id."""
result = _instance_type_get_query(context, read_deleted=read_deleted).\
filter_by(flavorid=flavor_id).\
+ order_by(asc("deleted"), asc("id")).\
first()
if not result:
raise exception.FlavorNotFound(flavor_id=flavor_id)
@@ -5042,6 +5033,18 @@ def aggregate_host_get_by_metadata_key(context, key):
@require_admin_context
def aggregate_update(context, aggregate_id, values):
session = get_session()
+
+ if "name" in values:
+ aggregate_by_name = (_aggregate_get_query(context,
+ models.Aggregate,
+ models.Aggregate.name,
+ values['name'],
+ session=session,
+ read_deleted='no').first())
+ if aggregate_by_name and aggregate_by_name.id != aggregate_id:
+ # there is another aggregate with the new name
+ raise exception.AggregateNameExists(aggregate_name=values['name'])
+
aggregate = (_aggregate_get_query(context,
models.Aggregate,
models.Aggregate.id,
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/147_no_service_zones.py b/nova/db/sqlalchemy/migrate_repo/versions/147_no_service_zones.py
index 0f62e39b98..422e35890c 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/147_no_service_zones.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/147_no_service_zones.py
@@ -83,7 +83,7 @@ def downgrade(migrate_engine):
# Only need to update nova-compute availability_zones
if rec['binary'] != 'nova-compute':
continue
- result = select([aggregate_metadata.c.value],
+ query = select([aggregate_metadata.c.value],
from_obj=aggregate_metadata.join(
agg_hosts,
agg_hosts.c.aggregate_id == aggregate_metadata.c.aggregate_id
@@ -96,8 +96,12 @@ def downgrade(migrate_engine):
agg_hosts.c.host == rec['host']
)
- services.update().values(
- availability_zone=list(result.execute())[0][0]
- ).where(
- services.c.id == rec['id']
- )
+ result = list(query.execute())
+ # Check that there used to be an availability_zone. It is possible to
+ # have no aggregates.
+ if len(result) > 0:
+ services.update().values(
+ availability_zone=result[0][0]
+ ).where(
+ services.c.id == rec['id']
+ )
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/216_sync_quota_usages.py b/nova/db/sqlalchemy/migrate_repo/versions/216_sync_quota_usages.py
index 9a11150c42..f4d50946f9 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/216_sync_quota_usages.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/216_sync_quota_usages.py
@@ -29,16 +29,16 @@ def upgrade(migrate_engine):
quota_usages = Table('quota_usages', meta, autoload=True)
reservations = Table('reservations', meta, autoload=True)
+ resource_tuples = select(columns=[quota_usages.c.resource],
+ distinct=True).execute().fetchall()
+ resources = [resource[0] for resource in resource_tuples]
+
for resource in ['instances', 'cores', 'ram', 'security_groups']:
delete_null_rows(resource, quota_usages, reservations)
for resource in ['fixed_ips', 'floating_ips', 'networks']:
delete_per_user_rows(resource, quota_usages, reservations)
- resource_tuples = select(columns=[quota_usages.c.resource],
- distinct=True).execute().fetchall()
- resources = [resource[0] for resource in resource_tuples]
-
if 'instances' in resources:
sync_instances(meta, quota_usages)
diff --git a/nova/exception.py b/nova/exception.py
index b0b6909124..443bf99e33 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -449,6 +449,14 @@ class InvalidDiskFormat(Invalid):
msg_fmt = _("Disk format %(disk_format)s is not acceptable")
+class InvalidDiskInfo(Invalid):
+ msg_fmt = _("Disk info file is invalid: %(reason)s")
+
+
+class DiskInfoReadWriteFail(Invalid):
+ msg_fmt = _("Failed to read or write disk info file: %(reason)s")
+
+
class ImageUnacceptable(Invalid):
msg_fmt = _("Image %(image_id)s is unacceptable: %(reason)s")
@@ -585,6 +593,11 @@ class NetworkAmbiguous(Invalid):
"network ID(s) to select which one(s) to connect to,")
+class ExternalNetworkAttachForbidden(NotAuthorized):
+ msg_fmt = _("It is not allowed to create an interface on "
+ "external network %(network_uuid)s")
+
+
class DatastoreNotFound(NotFound):
msg_fmt = _("Could not find the datastore reference(s) which the VM uses.")
@@ -891,6 +904,11 @@ class InstanceTypeNotFoundByName(InstanceTypeNotFound):
"could not be found.")
+class ConsolePortRangeExhausted(NovaException):
+ msg_fmt = _("The console port range %(min_port)d-%(max_port)d is "
+ "exhausted.")
+
+
class FlavorNotFound(NotFound):
msg_fmt = _("Flavor %(flavor_id)s could not be found.")
diff --git a/nova/network/api.py b/nova/network/api.py
index cd3d6d08ee..17d8c13154 100644
--- a/nova/network/api.py
+++ b/nova/network/api.py
@@ -28,6 +28,7 @@ from nova.network import floating_ips
from nova.network import model as network_model
from nova.network import rpcapi as network_rpcapi
from nova.objects import instance_info_cache as info_cache_obj
+from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova import policy
@@ -71,7 +72,7 @@ def update_instance_cache_with_nw_info(api, context, instance, nw_info=None,
LOG.debug(_('Updating cache with info: %s'), nw_info)
if not isinstance(nw_info, network_model.NetworkInfo):
nw_info = None
- if not nw_info:
+ if nw_info is None:
nw_info = api._get_instance_nw_info(context, instance)
# NOTE(comstud): The save() method actually handles updating or
# creating the instance. We don't need to retrieve the object
@@ -81,7 +82,8 @@ def update_instance_cache_with_nw_info(api, context, instance, nw_info=None,
ic.network_info = nw_info
ic.save(update_cells=update_cells)
except Exception:
- LOG.exception(_('Failed storing info cache'), instance=instance)
+ with excutils.save_and_reraise_exception():
+ LOG.exception(_('Failed storing info cache'), instance=instance)
def wrap_check_policy(func):
@@ -224,6 +226,26 @@ class API(base.Base):
return self.floating_manager.deallocate_floating_ip(context, address,
affect_auto_assigned)
+ def disassociate_and_release_floating_ip(self, context, instance,
+ floating_ip):
+ """Removes (deallocates) and deletes the floating ip.
+
+ This api call was added to allow this to be done in one operation
+ if using neutron.
+ """
+
+ address = floating_ip['address']
+ if floating_ip.get('fixed_ip_id'):
+ try:
+ self.disassociate_floating_ip(context, instance, address)
+ except exception.FloatingIpNotAssociated:
+ msg = ("Floating ip %s has already been disassociated, "
+ "perhaps by another concurrent action.") % address
+ LOG.debug(msg)
+
+ # release ip from project
+ return self.release_floating_ip(context, address)
+
@wrap_check_policy
@refresh_cache
def associate_floating_ip(self, context, instance,
diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py
index d984e5e7f7..39eb838597 100644
--- a/nova/network/linux_net.py
+++ b/nova/network/linux_net.py
@@ -185,6 +185,12 @@ class IptablesTable(object):
self.remove_chains = set()
self.dirty = True
+ def has_chain(self, name, wrap=True):
+ if wrap:
+ return name in self.chains
+ else:
+ return name in self.unwrapped_chains
+
def add_chain(self, name, wrap=True):
"""Adds a named chain to the table.
@@ -1028,8 +1034,7 @@ def restart_dhcp(context, dev, network_ref):
write_to_file(optsfile, get_dhcp_opts(context, network_ref))
os.chmod(optsfile, 0o644)
- if network_ref['multi_host']:
- _add_dhcp_mangle_rule(dev)
+ _add_dhcp_mangle_rule(dev)
# Make sure dnsmasq can actually read it (it setuid()s to "nobody")
os.chmod(conffile, 0o644)
@@ -1531,7 +1536,7 @@ class LinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver):
for line in out.split('\n'):
fields = line.split()
if fields and fields[0] == 'inet':
- if fields[-2] == 'secondary':
+ if fields[-2] in ('secondary', 'dynamic', ):
params = fields[1:-2]
else:
params = fields[1:-1]
diff --git a/nova/network/neutronv2/__init__.py b/nova/network/neutronv2/__init__.py
index 9bd2e5f821..7169890e19 100644
--- a/nova/network/neutronv2/__init__.py
+++ b/nova/network/neutronv2/__init__.py
@@ -15,57 +15,60 @@
# License for the specific language governing permissions and limitations
# under the License.
-from neutronclient import client
from neutronclient.common import exceptions
from neutronclient.v2_0 import client as clientv20
from oslo.config import cfg
-from nova.openstack.common import excutils
-from nova.openstack.common.gettextutils import _
+from nova.openstack.common import local
from nova.openstack.common import log as logging
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
-def _get_auth_token():
- try:
- httpclient = client.HTTPClient(
- username=CONF.neutron_admin_username,
- tenant_name=CONF.neutron_admin_tenant_name,
- region_name=CONF.neutron_region_name,
- password=CONF.neutron_admin_password,
- auth_url=CONF.neutron_admin_auth_url,
- timeout=CONF.neutron_url_timeout,
- auth_strategy=CONF.neutron_auth_strategy,
- ca_cert=CONF.neutron_ca_certificates_file,
- insecure=CONF.neutron_api_insecure)
- httpclient.authenticate()
- return httpclient.auth_token
- except exceptions.NeutronClientException as e:
- with excutils.save_and_reraise_exception():
- LOG.error(_('Neutron client authentication failed: %s'), e)
-
-
def _get_client(token=None):
- if not token and CONF.neutron_auth_strategy:
- token = _get_auth_token()
params = {
'endpoint_url': CONF.neutron_url,
'timeout': CONF.neutron_url_timeout,
'insecure': CONF.neutron_api_insecure,
'ca_cert': CONF.neutron_ca_certificates_file,
}
+
if token:
params['token'] = token
- else:
params['auth_strategy'] = None
+ else:
+ params['username'] = CONF.neutron_admin_username
+ params['tenant_name'] = CONF.neutron_admin_tenant_name
+ params['password'] = CONF.neutron_admin_password
+ params['auth_url'] = CONF.neutron_admin_auth_url
+ params['auth_strategy'] = CONF.neutron_auth_strategy
return clientv20.Client(**params)
def get_client(context, admin=False):
- if admin:
- token = None
- else:
+ # NOTE(dprince): In the case where no auth_token is present
+ # we allow use of neutron admin tenant credentials if
+ # it is an admin context.
+ # This is to support some services (metadata API) where
+ # an admin context is used without an auth token.
+ if admin or (context.is_admin and not context.auth_token):
+ # NOTE(dims): We need to use admin token, let us cache a
+ # thread local copy for re-using this client
+ # multiple times and to avoid excessive calls
+ # to neutron to fetch tokens. Some of the hackiness in this code
+ # will go away once BP auth-plugins is implemented.
+ # That blue print will ensure that tokens can be shared
+ # across clients as well
+ if not hasattr(local.strong_store, 'neutron_client'):
+ local.strong_store.neutron_client = _get_client(token=None)
+ return local.strong_store.neutron_client
+
+ # We got a user token that we can use that as-is
+ if context.auth_token:
token = context.auth_token
- return _get_client(token=token)
+ return _get_client(token=token)
+
+ # We did not get a user token and we should not be using
+ # an admin token so log an error
+ raise exceptions.Unauthorized()
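
get_client() now reuses one admin-authenticated Neutron client per thread (cached on local.strong_store) instead of fetching a fresh token for every call. A stand-alone sketch of the same caching pattern using the standard library; _build_admin_client is an assumed placeholder, not a Nova function:

    import threading

    _local = threading.local()

    def get_admin_client():
        # Build the admin client once per thread and reuse it, so
        # repeated lookups do not re-authenticate against Neutron.
        if not hasattr(_local, 'neutron_client'):
            _local.neutron_client = _build_admin_client()  # assumed helper
        return _local.neutron_client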
diff --git a/nova/network/neutronv2/api.py b/nova/network/neutronv2/api.py
index 3efe77ec6b..b169ceb9fa 100644
--- a/nova/network/neutronv2/api.py
+++ b/nova/network/neutronv2/api.py
@@ -144,6 +144,15 @@ class API(base.Base):
nets,
net_ids)
+ if not context.is_admin:
+ for net in nets:
+ # Perform this check here rather than in validate_networks to
+ # ensure the check is performed everytime allocate_for_instance
+ # is invoked
+ if net.get('router:external'):
+ raise exception.ExternalNetworkAttachForbidden(
+ network_uuid=net['id'])
+
return nets
def _create_port(self, port_client, instance, network_id, port_req_body,
@@ -372,7 +381,8 @@ class API(base.Base):
if (not self.last_neutron_extension_sync or
((time.time() - self.last_neutron_extension_sync)
>= CONF.neutron_extension_sync_interval)):
- neutron = neutronv2.get_client(context.get_admin_context())
+ neutron = neutronv2.get_client(context.get_admin_context(),
+ admin=True)
extensions_list = neutron.list_extensions()['extensions']
self.last_neutron_extension_sync = time.time()
self.extensions.clear()
@@ -409,6 +419,15 @@ class API(base.Base):
requested_networks = kwargs.get('requested_networks') or {}
ports_to_skip = [port_id for nets, fips, port_id in requested_networks]
ports = set(ports) - set(ports_to_skip)
+ # Reset device_id and device_owner for the ports that are skipped
+ for port in ports_to_skip:
+ port_req_body = {'port': {'device_id': '', 'device_owner': ''}}
+ try:
+ neutronv2.get_client(context).update_port(port,
+ port_req_body)
+ except Exception:
+ LOG.info(_('Unable to reset device ID for port %s'), port,
+ instance=instance)
for port in ports:
try:
@@ -421,6 +440,13 @@ class API(base.Base):
LOG.exception(_("Failed to delete neutron port %s"),
port)
+ # NOTE(arosen): This clears out the network_cache only if the instance
+ # hasn't already been deleted. This is needed when an instance fails to
+ # launch and is rescheduled onto another compute node. If the instance
+ # has already been deleted this call does nothing.
+ update_instance_info_cache(self, context, instance,
+ network_model.NetworkInfo([]))
+
def allocate_port_for_instance(self, context, instance, port_id,
network_id=None, requested_ip=None,
conductor_api=None):
@@ -865,9 +891,24 @@ class API(base.Base):
# since it is not used anywhere in nova code and I could
# find why this parameter exists.
+ self._release_floating_ip(context, address)
+
+ def disassociate_and_release_floating_ip(self, context, instance,
+ floating_ip):
+ """Removes (deallocates) and deletes the floating ip.
+
+ This api call was added to allow this to be done in one operation
+ if using neutron.
+ """
+ self._release_floating_ip(context, floating_ip['address'],
+ raise_if_associated=False)
+
+ def _release_floating_ip(self, context, address,
+ raise_if_associated=True):
client = neutronv2.get_client(context)
fip = self._get_floating_ip_by_address(client, address)
- if fip['port_id']:
+
+ if raise_if_associated and fip['port_id']:
raise exception.FloatingIpAssociated(address=address)
client.delete_floatingip(fip['id'])
diff --git a/nova/network/security_group/neutron_driver.py b/nova/network/security_group/neutron_driver.py
index 8f3e7c250f..705f7f68bd 100644
--- a/nova/network/security_group/neutron_driver.py
+++ b/nova/network/security_group/neutron_driver.py
@@ -112,12 +112,12 @@ class SecurityGroupAPI(security_group_base.SecurityGroupBase):
nova_rule['protocol'] = rule['protocol']
if (nova_rule['protocol'] and rule.get('port_range_min') is None and
rule.get('port_range_max') is None):
- if nova_rule['protocol'].upper() == 'ICMP':
- nova_rule['from_port'] = -1
- nova_rule['to_port'] = -1
- elif rule['protocol'].upper() in ['TCP', 'UDP']:
+ if rule['protocol'].upper() in ['TCP', 'UDP']:
nova_rule['from_port'] = 1
nova_rule['to_port'] = 65535
+ else:
+ nova_rule['from_port'] = -1
+ nova_rule['to_port'] = -1
else:
nova_rule['from_port'] = rule.get('port_range_min')
nova_rule['to_port'] = rule.get('port_range_max')
@@ -381,7 +381,7 @@ class SecurityGroupAPI(security_group_base.SecurityGroupBase):
servers = [{'id': instance_uuid}]
sg_bindings = self.get_instances_security_groups_bindings(
context, servers, detailed)
- return sg_bindings.get(instance_uuid)
+ return sg_bindings.get(instance_uuid, [])
def _has_security_group_requirements(self, port):
port_security_enabled = port.get('port_security_enabled', True)
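The reordered fallback above means only TCP and UDP get the implicit 1-65535 range; ICMP and any other protocol number (such as '51', covered by a new test further down) now default to -1/-1. A standalone restatement of that mapping:

    def default_port_range(protocol):
        # Mirrors the new fallback: full range for TCP/UDP, -1/-1 otherwise.
        if protocol and protocol.upper() in ('TCP', 'UDP'):
            return 1, 65535
        return -1, -1

    assert default_port_range('tcp') == (1, 65535)
    assert default_port_range('ICMP') == (-1, -1)
    assert default_port_range('51') == (-1, -1)
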
diff --git a/nova/openstack/common/processutils.py b/nova/openstack/common/processutils.py
index 0ead82f983..cc27e642c8 100644
--- a/nova/openstack/common/processutils.py
+++ b/nova/openstack/common/processutils.py
@@ -19,7 +19,7 @@
System-level utilities and helper functions.
"""
-import logging as stdlib_logging
+import logging
import os
import random
import shlex
@@ -29,7 +29,7 @@ from eventlet.green import subprocess
from eventlet import greenthread
from nova.openstack.common.gettextutils import _ # noqa
-from nova.openstack.common import log as logging
+from nova.openstack.common import strutils
LOG = logging.getLogger(__name__)
@@ -104,8 +104,7 @@ def execute(*cmd, **kwargs):
execute this command. Defaults to false.
:type shell: boolean
:param loglevel: log level for execute commands.
- :type loglevel: int. (Should be stdlib_logging.DEBUG or
- stdlib_logging.INFO)
+ :type loglevel: int. (Should be logging.DEBUG or logging.INFO)
:returns: (stdout, stderr) from process execution
:raises: :class:`UnknownArgumentError` on
receiving unknown arguments
@@ -120,7 +119,7 @@ def execute(*cmd, **kwargs):
run_as_root = kwargs.pop('run_as_root', False)
root_helper = kwargs.pop('root_helper', '')
shell = kwargs.pop('shell', False)
- loglevel = kwargs.pop('loglevel', stdlib_logging.DEBUG)
+ loglevel = kwargs.pop('loglevel', logging.DEBUG)
if isinstance(check_exit_code, bool):
ignore_exit_code = not check_exit_code
@@ -140,11 +139,12 @@ def execute(*cmd, **kwargs):
cmd = shlex.split(root_helper) + list(cmd)
cmd = map(str, cmd)
+ sanitized_cmd = strutils.mask_password(' '.join(cmd))
while attempts > 0:
attempts -= 1
try:
- LOG.log(loglevel, _('Running cmd (subprocess): %s'), ' '.join(cmd))
+ LOG.log(loglevel, _('Running cmd (subprocess): %s'), sanitized_cmd)
_PIPE = subprocess.PIPE # pylint: disable=E1101
if os.name == 'nt':
@@ -172,16 +172,18 @@ def execute(*cmd, **kwargs):
LOG.log(loglevel, _('Result was %s') % _returncode)
if not ignore_exit_code and _returncode not in check_exit_code:
(stdout, stderr) = result
+ sanitized_stdout = strutils.mask_password(stdout)
+ sanitized_stderr = strutils.mask_password(stderr)
raise ProcessExecutionError(exit_code=_returncode,
- stdout=stdout,
- stderr=stderr,
- cmd=' '.join(cmd))
+ stdout=sanitized_stdout,
+ stderr=sanitized_stderr,
+ cmd=sanitized_cmd)
return result
except ProcessExecutionError:
if not attempts:
raise
else:
- LOG.log(loglevel, _('%r failed. Retrying.'), cmd)
+ LOG.log(loglevel, _('%r failed. Retrying.'), sanitized_cmd)
if delay_on_retry:
greenthread.sleep(random.randint(20, 200) / 100.0)
finally:
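With the changes above, the command line and any captured stdout/stderr are run through strutils.mask_password before they are logged or attached to ProcessExecutionError. A small illustration of the effect; the exact output depends on the patterns defined in strutils below:

    from nova.openstack.common import strutils

    cmd = ['mysql', '--user', 'nova', '--password', 's3cr3t', '-e', 'show tables']
    sanitized_cmd = strutils.mask_password(' '.join(cmd))
    # Roughly: 'mysql --user nova --password *** -e show tables'
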
diff --git a/nova/openstack/common/rpc/impl_qpid.py b/nova/openstack/common/rpc/impl_qpid.py
index a403e04a0f..fcc16eadf3 100644
--- a/nova/openstack/common/rpc/impl_qpid.py
+++ b/nova/openstack/common/rpc/impl_qpid.py
@@ -369,7 +369,7 @@ class DirectPublisher(Publisher):
"""Init a 'direct' publisher."""
if conf.qpid_topology_version == 1:
- node_name = msg_id
+ node_name = "%s/%s" % (msg_id, msg_id)
node_opts = {"type": "direct"}
elif conf.qpid_topology_version == 2:
node_name = "amq.direct/%s" % msg_id
@@ -468,6 +468,10 @@ class Connection(object):
self.brokers = params['qpid_hosts']
self.username = params['username']
self.password = params['password']
+
+ brokers_count = len(self.brokers)
+ self.next_broker_indices = itertools.cycle(range(brokers_count))
+
self.connection_create(self.brokers[0])
self.reconnect()
@@ -495,29 +499,27 @@ class Connection(object):
def reconnect(self):
"""Handles reconnecting and re-establishing sessions and queues."""
- attempt = 0
delay = 1
while True:
# Close the session if necessary
if self.connection.opened():
try:
self.connection.close()
- except qpid_exceptions.ConnectionError:
+ except qpid_exceptions.MessagingError:
pass
- broker = self.brokers[attempt % len(self.brokers)]
- attempt += 1
+ broker = self.brokers[next(self.next_broker_indices)]
try:
self.connection_create(broker)
self.connection.open()
- except qpid_exceptions.ConnectionError as e:
+ except qpid_exceptions.MessagingError as e:
msg_dict = dict(e=e, delay=delay)
msg = _("Unable to connect to AMQP server: %(e)s. "
"Sleeping %(delay)s seconds") % msg_dict
LOG.error(msg)
time.sleep(delay)
- delay = min(2 * delay, 60)
+ delay = min(delay + 1, 5)
else:
LOG.info(_('Connected to AMQP server on %s'), broker)
break
@@ -539,7 +541,7 @@ class Connection(object):
try:
return method(*args, **kwargs)
except (qpid_exceptions.Empty,
- qpid_exceptions.ConnectionError) as e:
+ qpid_exceptions.MessagingError) as e:
if error_callback:
error_callback(e)
self.reconnect()
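The reconnect rework above does two things: broker selection becomes a round-robin driven by itertools.cycle instead of a local attempt counter, and the retry delay grows linearly to a 5 second cap instead of doubling toward 60 seconds. A tiny sketch of both behaviours:

    import itertools

    brokers = ['amqp-1:5672', 'amqp-2:5672', 'amqp-3:5672']
    next_broker_indices = itertools.cycle(range(len(brokers)))

    delay = 1
    for attempt in range(6):
        broker = brokers[next(next_broker_indices)]  # round-robin selection
        # ... try to connect to broker; on failure sleep `delay` seconds ...
        delay = min(delay + 1, 5)                    # 1, 2, 3, 4, 5, 5, ...
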
diff --git a/nova/openstack/common/strutils.py b/nova/openstack/common/strutils.py
index 62e547e601..3c05cf31f5 100644
--- a/nova/openstack/common/strutils.py
+++ b/nova/openstack/common/strutils.py
@@ -23,6 +23,8 @@ import re
import sys
import unicodedata
+import six
+
from nova.openstack.common.gettextutils import _
@@ -44,6 +46,39 @@ SLUGIFY_STRIP_RE = re.compile(r"[^\w\s-]")
SLUGIFY_HYPHENATE_RE = re.compile(r"[-\s]+")
+# NOTE(flaper87): The following globals are used by `mask_password`
+_SANITIZE_KEYS = ['adminPass', 'admin_pass', 'password', 'admin_password']
+
+# NOTE(ldbragst): Let's build a list of regex objects using the list of
+# _SANITIZE_KEYS we already have. This way, we only have to add the new key
+# to the list of _SANITIZE_KEYS and we can generate regular expressions
+# for XML and JSON automatically.
+_SANITIZE_PATTERNS_2 = []
+_SANITIZE_PATTERNS_1 = []
+
+# NOTE(amrith): Some regular expressions have only one capture group, some
+# have two. Use different lists of patterns here.
+_FORMAT_PATTERNS_1 = [r'(%(key)s\s*[=]\s*)[^\s^\'^\"]+']
+_FORMAT_PATTERNS_2 = [r'(%(key)s\s*[=]\s*[\"\']).*?([\"\'])',
+ r'(%(key)s\s+[\"\']).*?([\"\'])',
+ r'([-]{2}%(key)s\s+)[^\'^\"^=^\s]+([\s]*)',
+ r'(<%(key)s>).*?(</%(key)s>)',
+ r'([\"\']%(key)s[\"\']\s*:\s*[\"\']).*?([\"\'])',
+ r'([\'"].*?%(key)s[\'"]\s*:\s*u?[\'"]).*?([\'"])',
+ r'([\'"].*?%(key)s[\'"]\s*,\s*\'--?[A-z]+\'\s*,\s*u?'
+ '[\'"]).*?([\'"])',
+ r'(%(key)s\s*--?[A-z]+\s*)\S+(\s*)']
+
+for key in _SANITIZE_KEYS:
+ for pattern in _FORMAT_PATTERNS_2:
+ reg_ex = re.compile(pattern % {'key': key}, re.DOTALL)
+ _SANITIZE_PATTERNS_2.append(reg_ex)
+
+ for pattern in _FORMAT_PATTERNS_1:
+ reg_ex = re.compile(pattern % {'key': key}, re.DOTALL)
+ _SANITIZE_PATTERNS_1.append(reg_ex)
+
+
def int_from_bool_as_string(subject):
"""Interpret a string as a boolean and return either 1 or 0.
@@ -214,3 +249,42 @@ def to_slug(value, incoming=None, errors="strict"):
"ascii", "ignore").decode("ascii")
value = SLUGIFY_STRIP_RE.sub("", value).strip().lower()
return SLUGIFY_HYPHENATE_RE.sub("-", value)
+
+
+def mask_password(message, secret="***"):
+ """Replace password with 'secret' in message.
+
+ :param message: The string which includes security information.
+ :param secret: value with which to replace passwords.
+ :returns: The unicode value of message with the password fields masked.
+
+ For example:
+
+ >>> mask_password("'adminPass' : 'aaaaa'")
+ "'adminPass' : '***'"
+ >>> mask_password("'admin_pass' : 'aaaaa'")
+ "'admin_pass' : '***'"
+ >>> mask_password('"password" : "aaaaa"')
+ '"password" : "***"'
+ >>> mask_password("'original_password' : 'aaaaa'")
+ "'original_password' : '***'"
+ >>> mask_password("u'original_password' : u'aaaaa'")
+ "u'original_password' : u'***'"
+ """
+ message = six.text_type(message)
+
+ # NOTE(ldbragst): Check to see if anything in message contains any key
+ # specified in _SANITIZE_KEYS, if not then just return the message since
+ # we don't have to mask any passwords.
+ if not any(key in message for key in _SANITIZE_KEYS):
+ return message
+
+ substitute = r'\g<1>' + secret + r'\g<2>'
+ for pattern in _SANITIZE_PATTERNS_2:
+ message = re.sub(pattern, substitute, message)
+
+ substitute = r'\g<1>' + secret
+ for pattern in _SANITIZE_PATTERNS_1:
+ message = re.sub(pattern, substitute, message)
+
+ return message
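Besides the dict-style cases in the docstring, the _FORMAT_PATTERNS_* lists above also cover XML elements, key=value assignments and --flag style arguments. A few illustrative calls, hedged in the sense that the exact output is determined by the regexes defined earlier in this file:

    mask_password('<adminPass>MySecret</adminPass>')
    # -> '<adminPass>***</adminPass>'
    mask_password('admin_password = MySecret')
    # -> 'admin_password = ***'
    mask_password('nova-manage --password MySecret')
    # -> 'nova-manage --password ***'
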
diff --git a/nova/tests/api/ec2/test_api.py b/nova/tests/api/ec2/test_api.py
index 0acea1f7e3..0cb9220ca5 100644
--- a/nova/tests/api/ec2/test_api.py
+++ b/nova/tests/api/ec2/test_api.py
@@ -18,6 +18,7 @@
"""Unit tests for the API endpoint."""
+import pkg_resources
import random
import re
import StringIO
@@ -272,10 +273,11 @@ class ApiEc2TestCase(test.TestCase):
self.http = FakeHttplibConnection(
self.app, '%s:8773' % (self.host), False)
# pylint: disable=E1103
- if boto.Version >= '2.14':
+ boto_version = pkg_resources.parse_version(boto.Version)
+ if boto_version >= pkg_resources.parse_version('2.14'):
self.ec2.new_http_connection(host or self.host, 8773,
is_secure).AndReturn(self.http)
- elif boto.Version >= '2':
+ elif boto_version >= pkg_resources.parse_version('2'):
self.ec2.new_http_connection(host or '%s:8773' % (self.host),
is_secure).AndReturn(self.http)
else:
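The move to pkg_resources.parse_version matters because plain string comparison orders versions lexicographically, which breaks as soon as a component reaches two digits:

    import pkg_resources

    '2.9' >= '2.14'                                  # True  (lexicographic, wrong)
    (pkg_resources.parse_version('2.9') >=
     pkg_resources.parse_version('2.14'))            # False (numeric, correct)
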
diff --git a/nova/tests/api/ec2/test_cloud.py b/nova/tests/api/ec2/test_cloud.py
index 3070c9fded..b932408e1b 100644
--- a/nova/tests/api/ec2/test_cloud.py
+++ b/nova/tests/api/ec2/test_cloud.py
@@ -23,11 +23,13 @@ import copy
import datetime
import functools
import iso8601
+import mock
import os
import string
import tempfile
import fixtures
+import mock
from oslo.config import cfg
from nova.api.ec2 import cloud
@@ -47,7 +49,9 @@ from nova.image import s3
from nova.network import api as network_api
from nova.network import neutronv2
from nova.openstack.common import log as logging
+from nova.openstack.common import policy as common_policy
from nova.openstack.common import timeutils
+from nova.openstack.common import uuidutils
from nova import test
from nova.tests.api.openstack.compute.contrib import (
test_neutron_security_groups as test_neutron)
@@ -304,6 +308,16 @@ class CloudTestCase(test.TestCase):
'floating_ips': []})
self.stubs.Set(network_api.API, 'get_instance_id_by_floating_address',
lambda *args: 1)
+
+ def fake_update_instance_cache_with_nw_info(api, context, instance,
+ nw_info=None,
+ update_cells=True):
+
+ return
+
+ self.stubs.Set(network_api, "update_instance_cache_with_nw_info",
+ fake_update_instance_cache_with_nw_info)
+
self.cloud.associate_address(self.context,
instance_id=ec2_id,
public_ip=address)
@@ -461,6 +475,34 @@ class CloudTestCase(test.TestCase):
delete = self.cloud.delete_security_group
self.assertRaises(exception.MissingParameter, delete, self.context)
+ def test_delete_security_group_policy_not_allowed(self):
+ rules = common_policy.Rules(
+ {'compute_extension:security_groups':
+ common_policy.parse_rule('project_id:%(project_id)s')})
+ common_policy.set_rules(rules)
+
+ with mock.patch.object(self.cloud.security_group_api,
+ 'get') as get:
+ get.return_value = {'project_id': 'invalid'}
+
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.cloud.delete_security_group, self.context,
+ 'fake-name', 'fake-id')
+
+ def test_authorize_security_group_ingress_policy_not_allowed(self):
+ rules = common_policy.Rules(
+ {'compute_extension:security_groups':
+ common_policy.parse_rule('project_id:%(project_id)s')})
+ common_policy.set_rules(rules)
+
+ with mock.patch.object(self.cloud.security_group_api,
+ 'get') as get:
+ get.return_value = {'project_id': 'invalid'}
+
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.cloud.authorize_security_group_ingress, self.context,
+ 'fake-name', 'fake-id')
+
def test_authorize_security_group_ingress(self):
kwargs = {'project_id': self.context.project_id, 'name': 'test'}
sec = db.security_group_create(self.context, kwargs)
@@ -565,6 +607,20 @@ class CloudTestCase(test.TestCase):
db.security_group_destroy(self.context, sec2['id'])
db.security_group_destroy(self.context, sec1['id'])
+ def test_revoke_security_group_ingress_policy_not_allowed(self):
+ rules = common_policy.Rules(
+ {'compute_extension:security_groups':
+ common_policy.parse_rule('project_id:%(project_id)s')})
+ common_policy.set_rules(rules)
+
+ with mock.patch.object(self.cloud.security_group_api,
+ 'get') as get:
+ get.return_value = {'project_id': 'invalid'}
+
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.cloud.revoke_security_group_ingress, self.context,
+ 'fake-name', 'fake-id')
+
def test_revoke_security_group_ingress(self):
kwargs = {'project_id': self.context.project_id, 'name': 'test'}
sec = db.security_group_create(self.context, kwargs)
@@ -2823,6 +2879,24 @@ class CloudTestCase(test.TestCase):
ec2utils.resource_type_from_id(self.context, 'x-12345'),
None)
+ @mock.patch.object(ec2utils, 'ec2_vol_id_to_uuid',
+ side_effect=lambda
+ ec2_volume_id: uuidutils.generate_uuid())
+ def test_detach_volume_unattached_error(self, mock_ec2_vol_id_to_uuid):
+ # Validates that VolumeUnattached is raised if the volume doesn't
+ # have an instance_uuid value.
+ ec2_volume_id = 'vol-987654321'
+
+ with mock.patch.object(self.cloud.volume_api, 'get',
+ side_effect=lambda context, volume_id:
+ {'id': volume_id}) as mock_get:
+ self.assertRaises(exception.VolumeUnattached,
+ self.cloud.detach_volume,
+ self.context,
+ ec2_volume_id)
+ mock_get.assert_called_once_with(self.context, mock.ANY)
+ mock_ec2_vol_id_to_uuid.assert_called_once_with(ec2_volume_id)
+
class CloudTestCaseNeutronProxy(test.TestCase):
def setUp(self):
diff --git a/nova/tests/api/openstack/compute/contrib/test_aggregates.py b/nova/tests/api/openstack/compute/contrib/test_aggregates.py
index cafc613973..3d78f44735 100644
--- a/nova/tests/api/openstack/compute/contrib/test_aggregates.py
+++ b/nova/tests/api/openstack/compute/contrib/test_aggregates.py
@@ -233,6 +233,17 @@ class AggregateTestCase(test.NoDBTestCase):
self.assertRaises(exc.HTTPNotFound, self.controller.update,
self.req, "2", body=test_metadata)
+ def test_update_with_duplicated_name(self):
+ test_metadata = {"aggregate": {"name": "test_name"}}
+
+ def stub_update_aggregate(context, aggregate, metadata):
+ raise exception.AggregateNameExists(aggregate_name="test_name")
+
+ self.stubs.Set(self.controller.api, "update_aggregate",
+ stub_update_aggregate)
+ self.assertRaises(exc.HTTPConflict, self.controller.update,
+ self.req, "2", body=test_metadata)
+
def test_invalid_action(self):
body = {"append_host": {"host": "host1"}}
self.assertRaises(exc.HTTPBadRequest,
diff --git a/nova/tests/api/openstack/compute/contrib/test_floating_ips.py b/nova/tests/api/openstack/compute/contrib/test_floating_ips.py
index a781c6bf2b..7e2feee892 100644
--- a/nova/tests/api/openstack/compute/contrib/test_floating_ips.py
+++ b/nova/tests/api/openstack/compute/contrib/test_floating_ips.py
@@ -14,9 +14,11 @@
# License for the specific language governing permissions and limitations
# under the License.
+import contextlib
import uuid
from lxml import etree
+import mock
import webob
from nova.api.openstack.compute.contrib import floating_ips
@@ -101,6 +103,38 @@ def get_instance_by_floating_ip_addr(self, context, address):
return None
+class FloatingIpTestNeutron(test.NoDBTestCase):
+
+ def setUp(self):
+ super(FloatingIpTestNeutron, self).setUp()
+ self.flags(network_api_class='nova.network.neutronv2.api.API')
+ self.controller = floating_ips.FloatingIPController()
+
+ def test_floatingip_delete(self):
+ req = fakes.HTTPRequest.blank('/v2/fake/os-floating-ips/1')
+ fip_val = {'address': '1.1.1.1', 'fixed_ip_id': '192.168.1.2'}
+ with contextlib.nested(
+ mock.patch.object(self.controller.network_api,
+ 'disassociate_floating_ip'),
+ mock.patch.object(self.controller.network_api,
+ 'disassociate_and_release_floating_ip'),
+ mock.patch.object(self.controller.network_api,
+ 'release_floating_ip'),
+ mock.patch.object(self.controller.network_api,
+ 'get_instance_id_by_floating_address',
+ return_value=None),
+ mock.patch.object(self.controller.network_api,
+ 'get_floating_ip',
+ return_value=fip_val)) as (
+ disoc_fip, dis_and_del, rel_fip, _, _):
+ self.controller.delete(req, 1)
+ self.assertFalse(disoc_fip.called)
+ self.assertFalse(rel_fip.called)
+ # Only disassociate_and_release_floating_ip is
+ # called if using neutron
+ self.assertTrue(dis_and_del.called)
+
+
class FloatingIpTest(test.TestCase):
floating_ip = "10.10.10.10"
floating_ip_2 = "10.10.10.11"
@@ -163,6 +197,25 @@ class FloatingIpTest(test.TestCase):
self._delete_floating_ip()
super(FloatingIpTest, self).tearDown()
+ def test_floatingip_delete(self):
+ req = fakes.HTTPRequest.blank('/v2/fake/os-floating-ips/1')
+ fip_val = {'address': '1.1.1.1', 'fixed_ip_id': '192.168.1.2'}
+ with contextlib.nested(
+ mock.patch.object(self.controller.network_api,
+ 'disassociate_floating_ip'),
+ mock.patch.object(self.controller.network_api,
+ 'release_floating_ip'),
+ mock.patch.object(self.controller.network_api,
+ 'get_instance_id_by_floating_address',
+ return_value=None),
+ mock.patch.object(self.controller.network_api,
+ 'get_floating_ip',
+ return_value=fip_val)) as (
+ disoc_fip, rel_fip, _, _):
+ self.controller.delete(req, 1)
+ self.assertTrue(disoc_fip.called)
+ self.assertTrue(rel_fip.called)
+
def test_translate_floating_ip_view(self):
floating_ip_address = self.floating_ip
floating_ip = db.floating_ip_get_by_address(self.context,
diff --git a/nova/tests/api/openstack/compute/contrib/test_neutron_security_groups.py b/nova/tests/api/openstack/compute/contrib/test_neutron_security_groups.py
index 6b20c377d3..4f9b2f36a2 100644
--- a/nova/tests/api/openstack/compute/contrib/test_neutron_security_groups.py
+++ b/nova/tests/api/openstack/compute/contrib/test_neutron_security_groups.py
@@ -16,10 +16,10 @@
# under the License.
#
# @author: Aaron Rosen, Nicira Networks, Inc.
-
import uuid
from lxml import etree
+import mock
from neutronclient.common import exceptions as n_exc
from oslo.config import cfg
import webob
@@ -315,6 +315,20 @@ class TestNeutronSecurityGroups(
context.get_admin_context(), test_security_groups.FAKE_UUID1)
self.assertEquals(sgs, expected)
+ @mock.patch('nova.network.security_group.neutron_driver.SecurityGroupAPI.'
+ 'get_instances_security_groups_bindings')
+ def test_get_security_group_empty_for_instance(self, neutron_sg_bind_mock):
+ servers = [{'id': test_security_groups.FAKE_UUID1}]
+ neutron_sg_bind_mock.return_value = {}
+
+ security_group_api = self.controller.security_group_api
+ ctx = context.get_admin_context()
+ sgs = security_group_api.get_instance_security_groups(ctx,
+ test_security_groups.FAKE_UUID1)
+
+ neutron_sg_bind_mock.assert_called_once_with(ctx, servers, False)
+ self.assertEqual([], sgs)
+
def test_create_port_with_sg_and_port_security_enabled_true(self):
sg1 = self._create_sg_template(name='test1').get('security_group')
net = self._create_network()
diff --git a/nova/tests/api/openstack/compute/contrib/test_security_groups.py b/nova/tests/api/openstack/compute/contrib/test_security_groups.py
index 9a2c9eb86f..be91f4b5a3 100644
--- a/nova/tests/api/openstack/compute/contrib/test_security_groups.py
+++ b/nova/tests/api/openstack/compute/contrib/test_security_groups.py
@@ -16,6 +16,7 @@
# under the License.
from lxml import etree
+import mock
import mox
from oslo.config import cfg
import webob
@@ -372,6 +373,24 @@ class TestSecurityGroups(test.TestCase):
self.assertEquals(res_dict, expected)
+ @mock.patch('nova.db.instance_get_by_uuid')
+ @mock.patch('nova.db.security_group_get_by_instance', return_value=[])
+ def test_get_security_group_empty_for_instance(self, mock_sec_group,
+ mock_db_get_ins):
+ expected = {'security_groups': []}
+
+ def return_instance(context, server_id,
+ columns_to_join=None, use_slave=False):
+ self.assertEqual(server_id, FAKE_UUID1)
+ return return_server_by_uuid(context, server_id)
+ mock_db_get_ins.side_effect = return_instance
+ req = fakes.HTTPRequest.blank('/v2/%s/servers/%s/os-security-groups' %
+ ('fake', FAKE_UUID1))
+ res_dict = self.server_controller.index(req, FAKE_UUID1)
+ self.assertEqual(expected, res_dict)
+ mock_sec_group.assert_called_once_with(req.environ['nova.context'],
+ FAKE_UUID1)
+
def test_get_security_group_by_instance_non_existing(self):
self.stubs.Set(nova.db, 'instance_get', return_server_nonexistent)
self.stubs.Set(nova.db, 'instance_get_by_uuid',
diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_aggregates.py b/nova/tests/api/openstack/compute/plugins/v3/test_aggregates.py
index 384eceafc3..b4f4775774 100644
--- a/nova/tests/api/openstack/compute/plugins/v3/test_aggregates.py
+++ b/nova/tests/api/openstack/compute/plugins/v3/test_aggregates.py
@@ -220,6 +220,17 @@ class AggregateTestCase(test.NoDBTestCase):
self.assertRaises(exc.HTTPNotFound, self.controller.update,
self.req, "2", body=test_metadata)
+ def test_update_with_duplicated_name(self):
+ test_metadata = {"aggregate": {"name": "test_name"}}
+
+ def stub_update_aggregate(context, aggregate, metadata):
+ raise exception.AggregateNameExists(aggregate_name="test_name")
+
+ self.stubs.Set(self.controller.api, "update_aggregate",
+ stub_update_aggregate)
+ self.assertRaises(exc.HTTPConflict, self.controller.update,
+ self.req, "2", body=test_metadata)
+
def test_update_with_invalid_request(self):
test_metadata = {"aggregate": 1}
self.assertRaises(exc.HTTPBadRequest, self.controller.update,
diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_servers.py b/nova/tests/api/openstack/compute/plugins/v3/test_servers.py
index e473f359c2..057fbf850e 100644
--- a/nova/tests/api/openstack/compute/plugins/v3/test_servers.py
+++ b/nova/tests/api/openstack/compute/plugins/v3/test_servers.py
@@ -2999,6 +2999,24 @@ class ServersViewBuilderTest(test.TestCase):
output = self.view_builder.show(self.request, self.instance)
self.assertThat(output, matchers.DictMatches(expected_server))
+ def test_build_server_detail_with_fault_that_has_been_deleted(self):
+ self.instance['deleted'] = 1
+ self.instance['vm_state'] = vm_states.ERROR
+ fault = fake_instance.fake_fault_obj(self.uuid, code=500,
+ message="No valid host was found")
+ self.instance['fault'] = fault
+
+ expected_fault = {"code": 500,
+ "created": "2010-10-10T12:00:00Z",
+ "message": "No valid host was found"}
+
+ self.request.context = context.RequestContext('fake', 'fake')
+ output = self.view_builder.show(self.request, self.instance)
+ # Regardless of vm_state, deleted servers should be DELETED
+ self.assertEqual("DELETED", output['server']['status'])
+ self.assertThat(output['server']['fault'],
+ matchers.DictMatches(expected_fault))
+
def test_build_server_detail_with_fault_no_details_not_admin(self):
self.instance['vm_state'] = vm_states.ERROR
self.instance['fault'] = fake_instance.fake_fault_obj(
diff --git a/nova/tests/api/openstack/compute/test_servers.py b/nova/tests/api/openstack/compute/test_servers.py
index 2fa9d5d60b..82823a7d2f 100644
--- a/nova/tests/api/openstack/compute/test_servers.py
+++ b/nova/tests/api/openstack/compute/test_servers.py
@@ -3863,6 +3863,27 @@ class ServersViewBuilderTest(test.TestCase):
self.assertThat(output,
matchers.DictMatches(self.expected_detailed_server))
+ def test_build_server_detail_with_fault_that_has_been_deleted(self):
+ self.instance['deleted'] = 1
+ self.instance['vm_state'] = vm_states.ERROR
+ fault = fake_instance.fake_fault_obj(self.uuid, code=500,
+ message="No valid host was found")
+ self.instance['fault'] = fault
+
+ # Regardless of the vm_state, deleted servers should have DELETED status
+ self.expected_detailed_server["server"]["status"] = "DELETED"
+ self.expected_detailed_server["server"]["fault"] = {
+ "code": 500,
+ "created": "2010-10-10T12:00:00Z",
+ "message": "No valid host was found",
+ }
+ del self.expected_detailed_server["server"]["progress"]
+
+ self.request.context = context.RequestContext('fake', 'fake')
+ output = self.view_builder.show(self.request, self.instance)
+ self.assertThat(output,
+ matchers.DictMatches(self.expected_detailed_server))
+
def test_build_server_detail_with_fault_no_details_not_admin(self):
self.instance['vm_state'] = vm_states.ERROR
self.instance['fault'] = fake_instance.fake_fault_obj(
diff --git a/nova/tests/cells/test_cells_messaging.py b/nova/tests/cells/test_cells_messaging.py
index 59de188812..131dc5edba 100644
--- a/nova/tests/cells/test_cells_messaging.py
+++ b/nova/tests/cells/test_cells_messaging.py
@@ -1246,9 +1246,9 @@ class CellsTargetedMethodsTestCase(test.TestCase):
self._test_instance_action_method('unpause', (), {}, (), {}, False)
def test_resize_instance(self):
- kwargs = dict(flavor=dict(id=42),
+ kwargs = dict(flavor=dict(id=42, flavorid='orangemocchafrappuccino'),
extra_instance_updates=dict(cow='moo'))
- expected_kwargs = dict(flavor_id=42, cow='moo')
+ expected_kwargs = dict(flavor_id='orangemocchafrappuccino', cow='moo')
self._test_instance_action_method('resize', (), kwargs,
(), expected_kwargs,
False)
diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py
index 069b26ddc0..9fcf841720 100644
--- a/nova/tests/compute/test_compute.py
+++ b/nova/tests/compute/test_compute.py
@@ -2480,6 +2480,20 @@ class ComputeTestCase(BaseTestCase):
def test_snapshot_fails_cleanup_ignores_exception(self):
self._test_snapshot_fails(True)
+ def test_snapshot_fails_with_glance_error(self):
+ def fake_snapshot(*args, **kwargs):
+ raise exception.ImageNotFound(image_id='xxx')
+
+ self.stubs.Set(self.compute.driver, 'snapshot', fake_snapshot)
+ fake_image.stub_out_image_service(self.stubs)
+
+ inst_obj = self._get_snapshotting_instance()
+
+ self.compute.snapshot_instance(
+ self.context, image_id='fakesnap',
+ instance=inst_obj)
+ self._assert_state({'task_state': None})
+
def test_snapshot_handles_cases_when_instance_is_deleted(self):
inst_obj = self._get_snapshotting_instance()
inst_obj.task_state = task_states.DELETING
@@ -4172,7 +4186,7 @@ class ComputeTestCase(BaseTestCase):
def fake_finish_revert_migration_driver(*args, **kwargs):
# Confirm the instance uses the old type in finish_revert_resize
- inst = args[0]
+ inst = args[1]
sys_meta = inst.system_metadata
self.assertEqual(sys_meta['instance_type_flavorid'], '1')
@@ -4996,7 +5010,7 @@ class ComputeTestCase(BaseTestCase):
instance_map = {}
instances = []
- for x in xrange(5):
+ for x in xrange(8):
inst_uuid = 'fake-uuid-%s' % x
instance_map[inst_uuid] = fake_instance.fake_db_instance(
uuid=inst_uuid, host=CONF.host, created_at=None)
@@ -5015,7 +5029,8 @@ class ComputeTestCase(BaseTestCase):
if instance_uuid not in instance_map:
raise exception.InstanceNotFound(instance_id=instance_uuid)
call_info['get_by_uuid'] += 1
- self.assertEqual(['system_metadata'], columns_to_join)
+ self.assertEqual(['system_metadata', 'info_cache'],
+ columns_to_join)
return instance_map[instance_uuid]
# NOTE(comstud): Override the stub in setUp()
@@ -5035,40 +5050,50 @@ class ComputeTestCase(BaseTestCase):
self.stubs.Set(self.compute, '_get_instance_nw_info',
fake_get_instance_nw_info)
- call_info['expected_instance'] = instances[0]
+ # Make an instance appear to be still Building
+ instances[0]['vm_state'] = vm_states.BUILDING
+ # Make an instance appear to be Deleting
+ instances[1]['task_state'] = task_states.DELETING
+ # '0', '1' should be skipped..
+ call_info['expected_instance'] = instances[2]
self.compute._heal_instance_info_cache(ctxt)
self.assertEqual(1, call_info['get_all_by_host'])
self.assertEqual(0, call_info['get_by_uuid'])
self.assertEqual(1, call_info['get_nw_info'])
- call_info['expected_instance'] = instances[1]
+ call_info['expected_instance'] = instances[3]
self.compute._heal_instance_info_cache(ctxt)
self.assertEqual(1, call_info['get_all_by_host'])
self.assertEqual(1, call_info['get_by_uuid'])
self.assertEqual(2, call_info['get_nw_info'])
# Make an instance switch hosts
- instances[2]['host'] = 'not-me'
+ instances[4]['host'] = 'not-me'
# Make an instance disappear
- instance_map.pop(instances[3]['uuid'])
- # '2' and '3' should be skipped..
- call_info['expected_instance'] = instances[4]
+ instance_map.pop(instances[5]['uuid'])
+ # Make an instance switch to be Deleting
+ instances[6]['task_state'] = task_states.DELETING
+ # '4', '5', and '6' should be skipped..
+ call_info['expected_instance'] = instances[7]
self.compute._heal_instance_info_cache(ctxt)
- self.assertEqual(call_info['get_all_by_host'], 1)
- # Incremented for '2' and '4'.. '3' caused a raise above.
- self.assertEqual(call_info['get_by_uuid'], 3)
- self.assertEqual(call_info['get_nw_info'], 3)
+ self.assertEqual(1, call_info['get_all_by_host'])
+ self.assertEqual(4, call_info['get_by_uuid'])
+ self.assertEqual(3, call_info['get_nw_info'])
# Should be no more left.
- self.assertEqual(len(self.compute._instance_uuids_to_heal), 0)
+ self.assertEqual(0, len(self.compute._instance_uuids_to_heal))
+
+ # This should cause a DB query now, so get a list of instances
+ # where none can be processed to make sure we handle that case
+ # cleanly. Use just '0' (Building) and '1' (Deleting)
+ instances = instances[0:2]
- # This should cause a DB query now so we get first instance
- # back again
- call_info['expected_instance'] = instances[0]
self.compute._heal_instance_info_cache(ctxt)
- self.assertEqual(call_info['get_all_by_host'], 2)
- # Stays the same, because the instance came from the DB
- self.assertEqual(call_info['get_by_uuid'], 3)
- self.assertEqual(call_info['get_nw_info'], 4)
+ # Should have called the list once more
+ self.assertEqual(2, call_info['get_all_by_host'])
+ # Stays the same because we remove invalid entries from the list
+ self.assertEqual(4, call_info['get_by_uuid'])
+ # Stays the same because we didn't find anything to process
+ self.assertEqual(3, call_info['get_nw_info'])
def test_poll_rescued_instances(self):
timed_out_time = timeutils.utcnow() - datetime.timedelta(minutes=5)
diff --git a/nova/tests/compute/test_compute_api.py b/nova/tests/compute/test_compute_api.py
index 3cd7d68169..21215f640c 100644
--- a/nova/tests/compute/test_compute_api.py
+++ b/nova/tests/compute/test_compute_api.py
@@ -1397,6 +1397,7 @@ class _ComputeAPIUnitTestMixIn(object):
'name': 'test-snapshot',
'properties': {'root_device_name': 'vda', 'mappings': 'DONTCARE'},
'size': 0,
+ 'is_public': False
}
def fake_get_instance_bdms(context, instance):
diff --git a/nova/tests/compute/test_compute_mgr.py b/nova/tests/compute/test_compute_mgr.py
index f9366bfdb5..2269be8275 100644
--- a/nova/tests/compute/test_compute_mgr.py
+++ b/nova/tests/compute/test_compute_mgr.py
@@ -311,7 +311,8 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
utils.instance_sys_meta(instance).AndReturn(sys_meta)
self.compute._get_instance_volume_block_device_info(
self.context, instance).AndReturn([])
- self.compute.driver.finish_revert_migration(instance, [], [], power_on)
+ self.compute.driver.finish_revert_migration(self.context, instance,
+ [], [], power_on)
self.compute._instance_update(self.context, instance['uuid'],
task_state=None).AndReturn(fixed)
self.compute.driver.get_info(fixed).AndReturn(
@@ -466,6 +467,11 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
for ps in (power_state.SHUTDOWN, power_state.CRASHED,
power_state.SUSPENDED):
self._test_sync_to_stop(power_state.RUNNING, vm_states.ACTIVE, ps)
+
+ for ps in (power_state.SHUTDOWN, power_state.CRASHED):
+ self._test_sync_to_stop(power_state.PAUSED, vm_states.PAUSED, ps,
+ force=True)
+
self._test_sync_to_stop(power_state.SHUTDOWN, vm_states.STOPPED,
power_state.RUNNING, force=True)
diff --git a/nova/tests/compute/test_shelve.py b/nova/tests/compute/test_shelve.py
index 804f2fc196..b36bff88e3 100644
--- a/nova/tests/compute/test_shelve.py
+++ b/nova/tests/compute/test_shelve.py
@@ -148,11 +148,14 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
sys_meta['shelved_at'] = timeutils.strtime(at=cur_time)
sys_meta['shelved_image_id'] = image['id']
sys_meta['shelved_host'] = host
+ hypervisor_hostname = 'fake_hypervisor_hostname'
+ fake_compute_info = {'hypervisor_hostname': hypervisor_hostname}
self.mox.StubOutWithMock(self.compute, '_notify_about_instance_usage')
self.mox.StubOutWithMock(self.compute, '_prep_block_device')
self.mox.StubOutWithMock(self.compute.driver, 'spawn')
self.mox.StubOutWithMock(self.compute, '_get_power_state')
+ self.mox.StubOutWithMock(self.compute, '_get_compute_info')
self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
self.deleted_image_id = None
@@ -165,8 +168,12 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
self.compute._notify_about_instance_usage(self.context, instance,
'unshelve.start')
+ self.compute._get_compute_info(mox.IgnoreArg(),
+ mox.IgnoreArg()).AndReturn(
+ fake_compute_info)
db.instance_update_and_get_original(self.context, instance['uuid'],
- {'task_state': task_states.SPAWNING},
+ {'task_state': task_states.SPAWNING, 'host': host,
+ 'node': hypervisor_hostname},
update_cells=False,
columns_to_join=['metadata', 'system_metadata'],
).AndReturn((db_instance, db_instance))
@@ -183,6 +190,7 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
{'power_state': 123,
'vm_state': vm_states.ACTIVE,
'task_state': None,
+ 'image_ref': instance['image_ref'],
'key_data': None,
'auto_disk_config': False,
'expected_task_state': task_states.SPAWNING,
@@ -197,6 +205,7 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
self.compute.unshelve_instance(self.context, instance,
image=image)
self.assertEqual(image['id'], self.deleted_image_id)
+ self.assertEqual(instance.host, self.compute.host)
self.mox.VerifyAll()
self.mox.UnsetStubs()
@@ -219,17 +228,24 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
sys_meta['shelved_at'] = timeutils.strtime(at=cur_time)
sys_meta['shelved_image_id'] = None
sys_meta['shelved_host'] = host
+ hypervisor_hostname = 'fake_hypervisor_hostname'
+ fake_compute_info = {'hypervisor_hostname': hypervisor_hostname}
self.mox.StubOutWithMock(self.compute, '_notify_about_instance_usage')
self.mox.StubOutWithMock(self.compute, '_prep_block_device')
self.mox.StubOutWithMock(self.compute.driver, 'spawn')
self.mox.StubOutWithMock(self.compute, '_get_power_state')
+ self.mox.StubOutWithMock(self.compute, '_get_compute_info')
self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
self.compute._notify_about_instance_usage(self.context, instance,
'unshelve.start')
+ self.compute._get_compute_info(mox.IgnoreArg(),
+ mox.IgnoreArg()).AndReturn(
+ fake_compute_info)
db.instance_update_and_get_original(self.context, instance['uuid'],
- {'task_state': task_states.SPAWNING},
+ {'task_state': task_states.SPAWNING, 'host': host,
+ 'node': hypervisor_hostname},
update_cells=False,
columns_to_join=['metadata', 'system_metadata']
).AndReturn((db_instance, db_instance))
diff --git a/nova/tests/db/test_db_api.py b/nova/tests/db/test_db_api.py
index a9a34542d6..c2ebc58892 100644
--- a/nova/tests/db/test_db_api.py
+++ b/nova/tests/db/test_db_api.py
@@ -156,6 +156,9 @@ class DecoratorTestCase(test.TestCase):
def test_require_admin_context_decorator_wraps_functions_properly(self):
self._test_decorator_wraps_helper(sqlalchemy_api.require_admin_context)
+ def test_require_deadlock_retry_wraps_functions_properly(self):
+ self._test_decorator_wraps_helper(sqlalchemy_api._retry_on_deadlock)
+
def _get_fake_aggr_values():
return {'name': 'fake_aggregate'}
@@ -494,6 +497,17 @@ class AggregateDBApiTestCase(test.TestCase):
self.assertRaises(exception.AggregateNotFound,
db.aggregate_update, ctxt, aggregate_id, new_values)
+ def test_aggregate_update_raise_name_exist(self):
+ ctxt = context.get_admin_context()
+ _create_aggregate(context=ctxt, values={'name': 'test1'},
+ metadata={'availability_zone': 'fake_avail_zone'})
+ _create_aggregate(context=ctxt, values={'name': 'test2'},
+ metadata={'availability_zone': 'fake_avail_zone'})
+ aggregate_id = 1
+ new_values = {'name': 'test2'}
+ self.assertRaises(exception.AggregateNameExists,
+ db.aggregate_update, ctxt, aggregate_id, new_values)
+
def test_aggregate_get_all(self):
ctxt = context.get_admin_context()
counter = 3
@@ -2720,6 +2734,21 @@ class InstanceTypeTestCase(BaseInstanceTypeTestCase):
inst_type['flavorid'], read_deleted='yes')
self.assertEqual(inst_type['id'], inst_type_by_fid['id'])
+ def test_flavor_get_by_flavor_id_deleted_and_recreate(self):
+ # NOTE(wingwj): Aims to test difference between mysql and postgresql
+ # for bug 1288636
+ param_dict = {'name': 'abc', 'flavorid': '123'}
+
+ self._create_inst_type(param_dict)
+ db.flavor_destroy(self.ctxt, 'abc')
+
+ # Recreate the flavor with the same params
+ flavor = self._create_inst_type(param_dict)
+
+ flavor_by_fid = db.flavor_get_by_flavor_id(self.ctxt,
+ flavor['flavorid'], read_deleted='yes')
+ self.assertEqual(flavor['id'], flavor_by_fid['id'])
+
class InstanceTypeExtraSpecsTestCase(BaseInstanceTypeTestCase):
diff --git a/nova/tests/db/test_migrations.py b/nova/tests/db/test_migrations.py
index fa33aec863..89796b1c2b 100644
--- a/nova/tests/db/test_migrations.py
+++ b/nova/tests/db/test_migrations.py
@@ -3088,12 +3088,16 @@ class TestNovaMigrations(BaseMigrationTestCase, CommonTestsMixIn):
'vcpus': 2, 'memory_mb': 256, 'uuid': 'uuid1',
'deleted': 0},
{'user_id': '234', 'project_id': '5678',
- 'vcpus': 1, 'memory_mb': 256, 'deleted': 0}],
+ 'vcpus': 1, 'memory_mb': 256, 'deleted': 0},
+ {'user_id': '4321', 'project_id': '1234',
+ 'vcpus': 1, 'memory_mb': 512, 'deleted': 0}],
'security_groups': [{'user_id': '1234', 'project_id': '5678',
'deleted': 0},
{'user_id': '234', 'project_id': '5678',
'deleted': 0},
{'user_id': '234', 'project_id': '5678',
+ 'deleted': 0},
+ {'user_id': '4321', 'project_id': '1234',
'deleted': 0}],
'floating_ips': [{'deleted': 0, 'project_id': '5678',
'auto_assigned': False},
@@ -3107,10 +3111,14 @@ class TestNovaMigrations(BaseMigrationTestCase, CommonTestsMixIn):
'resource': 'instances', 'in_use': 1, 'reserved': 0},
{'user_id': None, 'project_id': '5678',
'resource': 'instances', 'in_use': 1, 'reserved': 0},
+ {'user_id': None, 'project_id': '1234',
+ 'resource': 'instances', 'in_use': 1, 'reserved': 0},
{'user_id': '1234', 'project_id': '5678',
'resource': 'security_groups', 'in_use': 1, 'reserved': 0},
{'user_id': '234', 'project_id': '5678',
'resource': 'security_groups', 'in_use': 2, 'reserved': 0},
+ {'user_id': None, 'project_id': '1234',
+ 'resource': 'security_groups', 'in_use': 1, 'reserved': 0},
{'user_id': None, 'project_id': '5678',
'resource': 'security_groups', 'in_use': 1, 'reserved': 0},
{'user_id': '1234', 'project_id': '5678',
@@ -3157,7 +3165,9 @@ class TestNovaMigrations(BaseMigrationTestCase, CommonTestsMixIn):
per_user = {'1234': {'instances': 1, 'cores': 2, 'ram': 256,
'security_groups': 1},
'234': {'instances': 1, 'cores': 1, 'ram': 256,
- 'security_groups': 2}}
+ 'security_groups': 2},
+ '4321': {'instances': 1, 'cores': 1, 'ram': 512,
+ 'security_groups': 1}}
per_project = {'floating_ips': 2, 'fixed_ips': 1, 'networks': 1}
@@ -3168,6 +3178,12 @@ class TestNovaMigrations(BaseMigrationTestCase, CommonTestsMixIn):
).fetchall()
self.assertEqual(0, len(rows))
+ rows = quota_usages.select().where(
+ quota_usages.c.user_id == '4321').where(
+ quota_usages.c.resource == resource).execute(
+ ).fetchall()
+ self.assertEqual(1, len(rows))
+
for user in per_user.keys():
rows = quota_usages.select().where(
quota_usages.c.user_id == user).where(
diff --git a/nova/tests/network/security_group/test_neutron_driver.py b/nova/tests/network/security_group/test_neutron_driver.py
index a8fc365ef1..2819bccb76 100644
--- a/nova/tests/network/security_group/test_neutron_driver.py
+++ b/nova/tests/network/security_group/test_neutron_driver.py
@@ -84,6 +84,39 @@ class TestNeutronDriver(test.NoDBTestCase):
self.assertRaises(exception.SecurityGroupLimitExceeded,
sg_api.add_rules, self.context, None, name, [vals])
+ def test_list_security_group_with_no_port_range_and_not_tcp_udp_icmp(self):
+ sg1 = {'description': 'default',
+ 'id': '07f1362f-34f6-4136-819a-2dcde112269e',
+ 'name': 'default',
+ 'tenant_id': 'c166d9316f814891bcb66b96c4c891d6',
+ 'security_group_rules':
+ [{'direction': 'ingress',
+ 'ethertype': 'IPv4',
+ 'id': '0a4647f1-e1aa-488d-90e1-97a7d0293beb',
+ 'port_range_max': None,
+ 'port_range_min': None,
+ 'protocol': '51',
+ 'remote_group_id': None,
+ 'remote_ip_prefix': None,
+ 'security_group_id':
+ '07f1362f-34f6-4136-819a-2dcde112269e',
+ 'tenant_id': 'c166d9316f814891bcb66b96c4c891d6'}]}
+
+ self.moxed_client.list_security_groups().AndReturn(
+ {'security_groups': [sg1]})
+ self.mox.ReplayAll()
+ sg_api = neutron_driver.SecurityGroupAPI()
+ result = sg_api.list(self.context)
+ expected = [{'rules':
+ [{'from_port': -1, 'protocol': '51', 'to_port': -1,
+ 'parent_group_id': '07f1362f-34f6-4136-819a-2dcde112269e',
+ 'cidr': '0.0.0.0/0', 'group_id': None,
+ 'id': '0a4647f1-e1aa-488d-90e1-97a7d0293beb'}],
+ 'project_id': 'c166d9316f814891bcb66b96c4c891d6',
+ 'id': '07f1362f-34f6-4136-819a-2dcde112269e',
+ 'name': 'default', 'description': 'default'}]
+ self.assertEqual(expected, result)
+
def test_instances_security_group_bindings(self):
server_id = 'c5a20e8d-c4b0-47cf-9dca-ebe4f758acb1'
port1_id = '4c505aec-09aa-47bc-bcc0-940477e84dc0'
@@ -179,3 +212,13 @@ class TestNeutronDriver(test.NoDBTestCase):
result = sg_api.get_instances_security_groups_bindings(
self.context, servers)
self.assertEquals(result, sg_bindings)
+
+ def test_instance_empty_security_groups(self):
+
+ port_list = {'ports': [{'id': 1, 'device_id': '1',
+ 'security_groups': []}]}
+ self.moxed_client.list_ports(device_id=['1']).AndReturn(port_list)
+ self.mox.ReplayAll()
+ sg_api = neutron_driver.SecurityGroupAPI()
+ result = sg_api.get_instance_security_groups(self.context, '1')
+ self.assertEqual([], result)
diff --git a/nova/tests/network/test_api.py b/nova/tests/network/test_api.py
index a3ebc9127d..e57e3873d4 100644
--- a/nova/tests/network/test_api.py
+++ b/nova/tests/network/test_api.py
@@ -24,11 +24,13 @@ import mox
from nova.compute import flavors
from nova import context
+from nova import db
from nova import exception
from nova import network
from nova.network import api
from nova.network import floating_ips
from nova.network import rpcapi as network_rpcapi
+from nova.network.model import NetworkInfo, VIF
from nova import policy
from nova import test
from nova import utils
@@ -129,6 +131,14 @@ class ApiTestCase(test.TestCase):
self.stubs.Set(self.network_api.db, 'instance_info_cache_update',
fake_instance_info_cache_update)
+ def fake_update_instance_cache_with_nw_info(api, context, instance,
+ nw_info=None,
+ update_cells=True):
+ return
+
+ self.stubs.Set(api, "update_instance_cache_with_nw_info",
+ fake_update_instance_cache_with_nw_info)
+
self.network_api.associate_floating_ip(self.context,
new_instance,
'172.24.4.225',
@@ -272,3 +282,57 @@ class ApiTestCase(test.TestCase):
self.stubs.Set(self.network_api, 'get', fake_get)
self.network_api.associate(self.context, FAKE_UUID, project=None)
+
+
+class TestUpdateInstanceCache(test.TestCase):
+ def setUp(self):
+ super(TestUpdateInstanceCache, self).setUp()
+ self.context = context.get_admin_context()
+ self.instance = {'uuid': FAKE_UUID}
+ self.impl = self.mox.CreateMock(api.API)
+ self.nw_info = NetworkInfo([VIF(id='super_vif')])
+ self.is_nw_info = mox.Func(lambda d: 'super_vif' in d['network_info'])
+
+ def expect_cache_update(self, nw_info):
+ self.mox.StubOutWithMock(db, 'instance_info_cache_update')
+ db.instance_info_cache_update(self.context,
+ self.instance['uuid'],
+ nw_info)
+
+ def test_update_nw_info_none(self):
+ self.impl._get_instance_nw_info(self.context, self.instance)\
+ .AndReturn(self.nw_info)
+ self.expect_cache_update(self.is_nw_info)
+ self.mox.ReplayAll()
+ api.update_instance_cache_with_nw_info(self.impl, self.context,
+ self.instance, None)
+
+ def test_update_nw_info_one_network(self):
+ self.expect_cache_update(self.is_nw_info)
+ self.mox.ReplayAll()
+ api.update_instance_cache_with_nw_info(self.impl, self.context,
+ self.instance, self.nw_info)
+
+ def test_update_nw_info_empty_list(self):
+ self.expect_cache_update({'network_info': '[]'})
+ self.mox.ReplayAll()
+ api.update_instance_cache_with_nw_info(self.impl, self.context,
+ self.instance, NetworkInfo([]))
+
+ def test_decorator_return_object(self):
+ @api.refresh_cache
+ def func(self, context, instance):
+ return NetworkInfo([])
+ self.expect_cache_update({'network_info': '[]'})
+ self.mox.ReplayAll()
+ func(self.impl, self.context, self.instance)
+
+ def test_decorator_return_none(self):
+ @api.refresh_cache
+ def func(self, context, instance):
+ pass
+ self.impl._get_instance_nw_info(self.context, self.instance)\
+ .AndReturn(self.nw_info)
+ self.expect_cache_update(self.is_nw_info)
+ self.mox.ReplayAll()
+ func(self.impl, self.context, self.instance)
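These tests pin down the contract of the refresh_cache decorator: if the wrapped call returns network info it is cached as returned, and only a None result triggers a fetch via _get_instance_nw_info before the cache update. A hedged sketch of a decorator with that behaviour (not nova's actual implementation):

    import functools

    from nova.network.api import update_instance_cache_with_nw_info

    def refresh_cache_sketch(func):
        @functools.wraps(func)
        def wrapper(self, context, instance, *args, **kwargs):
            res = func(self, context, instance, *args, **kwargs)
            # Cache whatever the function returned; fall back to fetching
            # the network info only when nothing was returned.
            nw_info = res if res is not None else \
                self._get_instance_nw_info(context, instance)
            update_instance_cache_with_nw_info(self, context, instance,
                                               nw_info)
            return res
        return wrapper
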
diff --git a/nova/tests/network/test_linux_net.py b/nova/tests/network/test_linux_net.py
index a87e77e6dc..a0392282f2 100644
--- a/nova/tests/network/test_linux_net.py
+++ b/nova/tests/network/test_linux_net.py
@@ -516,7 +516,12 @@ class LinuxNetworkTestCase(test.NoDBTestCase):
executes.append(args)
return "", ""
+ def fake_add_dhcp_mangle_rule(*args, **kwargs):
+ executes.append(args)
+
self.stubs.Set(linux_net, '_execute', fake_execute)
+ self.stubs.Set(linux_net, '_add_dhcp_mangle_rule',
+ fake_add_dhcp_mangle_rule)
self.stubs.Set(os, 'chmod', lambda *a, **kw: None)
self.stubs.Set(linux_net, 'write_to_file', lambda *a, **kw: None)
@@ -552,7 +557,7 @@ class LinuxNetworkTestCase(test.NoDBTestCase):
if extra_expected:
expected += extra_expected
- self.assertEqual([tuple(expected)], executes)
+ self.assertEqual([(dev,), tuple(expected)], executes)
def test_dnsmasq_execute(self):
self._test_dnsmasq_execute()
diff --git a/nova/tests/network/test_neutronv2.py b/nova/tests/network/test_neutronv2.py
index a6d150b5c6..331ae68a04 100644
--- a/nova/tests/network/test_neutronv2.py
+++ b/nova/tests/network/test_neutronv2.py
@@ -33,6 +33,7 @@ from nova.network import neutronv2
from nova.network.neutronv2 import api as neutronapi
from nova.network.neutronv2 import constants
from nova.openstack.common import jsonutils
+from nova.openstack.common import local
from nova import test
from nova import utils
@@ -101,6 +102,7 @@ class TestNeutronClient(test.TestCase):
auth_token='token')
self.mox.StubOutWithMock(client.Client, "__init__")
client.Client.__init__(
+ auth_strategy=None,
endpoint_url=CONF.neutron_url,
token=my_context.auth_token,
timeout=CONF.neutron_url_timeout,
@@ -109,29 +111,41 @@ class TestNeutronClient(test.TestCase):
self.mox.ReplayAll()
neutronv2.get_client(my_context)
- def test_withouttoken_keystone_connection_error(self):
- self.flags(neutron_auth_strategy='keystone')
- self.flags(neutron_url='http://anyhost/')
+ def test_withouttoken(self):
my_context = context.RequestContext('userid', 'my_tenantid')
- self.assertRaises(NEUTRON_CLIENT_EXCEPTION,
+ self.assertRaises(exceptions.Unauthorized,
neutronv2.get_client,
my_context)
- def test_withouttoken_keystone_not_auth(self):
- self.flags(neutron_auth_strategy=None)
+ def test_withtoken_context_is_admin(self):
self.flags(neutron_url='http://anyhost/')
self.flags(neutron_url_timeout=30)
- my_context = context.RequestContext('userid', 'my_tenantid')
+ my_context = context.RequestContext('userid',
+ 'my_tenantid',
+ auth_token='token',
+ is_admin=True)
self.mox.StubOutWithMock(client.Client, "__init__")
client.Client.__init__(
- endpoint_url=CONF.neutron_url,
auth_strategy=None,
+ endpoint_url=CONF.neutron_url,
+ token=my_context.auth_token,
timeout=CONF.neutron_url_timeout,
insecure=False,
ca_cert=None).AndReturn(None)
self.mox.ReplayAll()
+ # Note that although we have admin set in the context we
+ # are not asking for an admin client, and so we auth with
+ # our own token
neutronv2.get_client(my_context)
+ def test_withouttoken_keystone_connection_error(self):
+ self.flags(neutron_auth_strategy='keystone')
+ self.flags(neutron_url='http://anyhost/')
+ my_context = context.RequestContext('userid', 'my_tenantid')
+ self.assertRaises(NEUTRON_CLIENT_EXCEPTION,
+ neutronv2.get_client,
+ my_context)
+
class TestNeutronv2Base(test.TestCase):
@@ -166,8 +180,13 @@ class TestNeutronv2Base(test.TestCase):
self.nets4 = [{'id': 'his_netid4',
'name': 'his_netname4',
'tenant_id': 'his_tenantid'}]
-
- self.nets = [self.nets1, self.nets2, self.nets3, self.nets4]
+ # A network request with external networks
+ self.nets5 = self.nets1 + [{'id': 'the-external-one',
+ 'name': 'out-of-this-world',
+ 'router:external': True,
+ 'tenant_id': 'should-be-an-admin'}]
+ self.nets = [self.nets1, self.nets2, self.nets3,
+ self.nets4, self.nets5]
self.port_address = '10.0.1.2'
self.port_data1 = [{'network_id': 'my_netid1',
@@ -596,6 +615,12 @@ class TestNeutronv2(TestNeutronv2Base):
def test_refresh_neutron_extensions_cache(self):
api = neutronapi.API()
+
+ # Note: Don't want the default get_client from setUp()
+ self.mox.ResetAll()
+ neutronv2.get_client(mox.IgnoreArg(),
+ admin=True).AndReturn(
+ self.moxed_client)
self.moxed_client.list_extensions().AndReturn(
{'extensions': [{'name': 'nvp-qos'}]})
self.mox.ReplayAll()
@@ -604,6 +629,12 @@ class TestNeutronv2(TestNeutronv2Base):
def test_populate_neutron_extension_values_rxtx_factor(self):
api = neutronapi.API()
+
+ # Note: Don't want the default get_client from setUp()
+ self.mox.ResetAll()
+ neutronv2.get_client(mox.IgnoreArg(),
+ admin=True).AndReturn(
+ self.moxed_client)
self.moxed_client.list_extensions().AndReturn(
{'extensions': [{'name': 'nvp-qos'}]})
self.mox.ReplayAll()
@@ -797,6 +828,9 @@ class TestNeutronv2(TestNeutronv2Base):
{'networks': self.nets2})
self.moxed_client.list_networks(shared=True).AndReturn(
{'networks': []})
+ neutronv2.get_client(mox.IgnoreArg(),
+ admin=True).AndReturn(
+ self.moxed_client)
port_req_body = {
'port': {
'network_id': self.nets2[0]['id'],
@@ -846,18 +880,48 @@ class TestNeutronv2(TestNeutronv2Base):
api.allocate_for_instance, self.context,
self.instance, requested_networks=requested_networks)
- def _deallocate_for_instance(self, number):
+ def _deallocate_for_instance(self, number, requested_networks=None):
+ api = neutronapi.API()
port_data = number == 1 and self.port_data1 or self.port_data2
+ ret_data = copy.deepcopy(port_data)
+ if requested_networks:
+ for net, fip, port in requested_networks:
+ ret_data.append({'network_id': net,
+ 'device_id': self.instance['uuid'],
+ 'device_owner': 'compute:nova',
+ 'id': port,
+ 'status': 'DOWN',
+ 'admin_state_up': True,
+ 'fixed_ips': [],
+ 'mac_address': 'fake_mac', })
self.moxed_client.list_ports(
device_id=self.instance['uuid']).AndReturn(
- {'ports': port_data})
+ {'ports': ret_data})
+ if requested_networks:
+ for net, fip, port in requested_networks:
+ self.moxed_client.update_port(port)
for port in reversed(port_data):
self.moxed_client.delete_port(port['id'])
+ self.mox.StubOutWithMock(api.db, 'instance_info_cache_update')
+ api.db.instance_info_cache_update(self.context,
+ self.instance['uuid'],
+ {'network_info': '[]'})
self.mox.ReplayAll()
api = neutronapi.API()
- api.deallocate_for_instance(self.context, self.instance)
+ api.deallocate_for_instance(self.context, self.instance,
+ requested_networks=requested_networks)
+
+ def test_deallocate_for_instance_1_with_requested(self):
+ requested = [('fake-net', 'fake-fip', 'fake-port')]
+ # Test to deallocate in one port env.
+ self._deallocate_for_instance(1, requested_networks=requested)
+
+ def test_deallocate_for_instance_2_with_requested(self):
+ requested = [('fake-net', 'fake-fip', 'fake-port')]
+ # Test to deallocate in a two port env.
+ self._deallocate_for_instance(2, requested_networks=requested)
def test_deallocate_for_instance_1(self):
# Test to deallocate in one port env.
@@ -1121,7 +1185,8 @@ class TestNeutronv2(TestNeutronv2Base):
api.get_fixed_ip_by_address,
self.context, address)
- def _get_available_networks(self, prv_nets, pub_nets, req_ids=None):
+ def _get_available_networks(self, prv_nets, pub_nets,
+ req_ids=None, context=None):
api = neutronapi.API()
nets = prv_nets + pub_nets
if req_ids:
@@ -1138,9 +1203,10 @@ class TestNeutronv2(TestNeutronv2Base):
**mox_list_params).AndReturn({'networks': pub_nets})
self.mox.ReplayAll()
- rets = api._get_available_networks(self.context,
- self.instance['project_id'],
- req_ids)
+ rets = api._get_available_networks(
+ context if context else self.context,
+ self.instance['project_id'],
+ req_ids)
self.assertEqual(rets, nets)
def test_get_available_networks_all_private(self):
@@ -1159,6 +1225,20 @@ class TestNeutronv2(TestNeutronv2Base):
req_ids = [net['id'] for net in (self.nets3[0], self.nets3[-1])]
self._get_available_networks(prv_nets, pub_nets, req_ids)
+ def test_get_available_networks_with_externalnet_fails(self):
+ req_ids = [net['id'] for net in self.nets5]
+ self.assertRaises(
+ exception.ExternalNetworkAttachForbidden,
+ self._get_available_networks,
+ self.nets5, pub_nets=[], req_ids=req_ids)
+
+ def test_get_available_networks_with_externalnet_admin_ctx(self):
+ admin_ctx = context.RequestContext('userid', 'my_tenantid',
+ is_admin=True)
+ req_ids = [net['id'] for net in self.nets5]
+ self._get_available_networks(self.nets5, pub_nets=[],
+ req_ids=req_ids, context=admin_ctx)
+
def test_get_floating_ip_pools(self):
api = neutronapi.API()
search_opts = {'router:external': True}
@@ -1356,6 +1436,19 @@ class TestNeutronv2(TestNeutronv2Base):
self.mox.ReplayAll()
api.release_floating_ip(self.context, address)
+ def test_disassociate_and_release_floating_ip(self):
+ api = neutronapi.API()
+ address = self.fip_unassociated['floating_ip_address']
+ fip_id = self.fip_unassociated['id']
+ floating_ip = {'address': address}
+
+ self.moxed_client.list_floatingips(floating_ip_address=address).\
+ AndReturn({'floatingips': [self.fip_unassociated]})
+ self.moxed_client.delete_floatingip(fip_id)
+ self.mox.ReplayAll()
+ api.disassociate_and_release_floating_ip(self.context, None,
+ floating_ip)
+
def test_release_floating_ip_associated(self):
api = neutronapi.API()
address = self.fip_associated['floating_ip_address']
@@ -1709,7 +1802,7 @@ class TestNeutronv2Portbinding(TestNeutronv2Base):
def test_populate_neutron_extension_values_binding(self):
api = neutronapi.API()
- neutronv2.get_client(mox.IgnoreArg()).AndReturn(
+ neutronv2.get_client(mox.IgnoreArg(), admin=True).AndReturn(
self.moxed_client)
self.moxed_client.list_extensions().AndReturn(
{'extensions': [{'name': constants.PORTBINDING_EXT}]})
@@ -1795,3 +1888,106 @@ class TestNeutronv2ExtraDhcpOpts(TestNeutronv2Base):
self._allocate_for_instance(1, dhcp_options=dhcp_opts)
CONF.set_override('dhcp_options_enabled', False)
+
+
+class TestNeutronClientForAdminScenarios(test.TestCase):
+ def test_get_cached_neutron_client_for_admin(self):
+ self.flags(neutron_url='http://anyhost/')
+ self.flags(neutron_url_timeout=30)
+ my_context = context.RequestContext('userid',
+ 'my_tenantid',
+ auth_token='token')
+
+ # Make multiple calls and ensure we get the same
+ # client back again and again
+ client = neutronv2.get_client(my_context, True)
+ client2 = neutronv2.get_client(my_context, True)
+ client3 = neutronv2.get_client(my_context, True)
+ self.assertEqual(client, client2)
+ self.assertEqual(client, client3)
+
+ # clear the cache
+ local.strong_store.neutron_client = None
+
+ # A new client should be created now
+ client4 = neutronv2.get_client(my_context, True)
+ self.assertNotEqual(client, client4)
+
+ def test_get_neutron_client_for_non_admin(self):
+ self.flags(neutron_url='http://anyhost/')
+ self.flags(neutron_url_timeout=30)
+ my_context = context.RequestContext('userid',
+ 'my_tenantid',
+ auth_token='token')
+
+ # Multiple calls should return different clients
+ client = neutronv2.get_client(my_context)
+ client2 = neutronv2.get_client(my_context)
+ self.assertNotEqual(client, client2)
+
+ def test_get_neutron_client_for_non_admin_and_no_token(self):
+ self.flags(neutron_url='http://anyhost/')
+ self.flags(neutron_url_timeout=30)
+ my_context = context.RequestContext('userid',
+ 'my_tenantid')
+
+ self.assertRaises(exceptions.Unauthorized,
+ neutronv2.get_client,
+ my_context)
+
+ def test_get_client_for_admin(self):
+
+ self.flags(neutron_auth_strategy=None)
+ self.flags(neutron_url='http://anyhost/')
+ self.flags(neutron_url_timeout=30)
+ my_context = context.RequestContext('userid', 'my_tenantid',
+ auth_token='token')
+ self.mox.StubOutWithMock(client.Client, "__init__")
+ client.Client.__init__(
+ auth_url=CONF.neutron_admin_auth_url,
+ password=CONF.neutron_admin_password,
+ tenant_name=CONF.neutron_admin_tenant_name,
+ username=CONF.neutron_admin_username,
+ endpoint_url=CONF.neutron_url,
+ auth_strategy=None,
+ timeout=CONF.neutron_url_timeout,
+ insecure=False,
+ ca_cert=None).AndReturn(None)
+ self.mox.ReplayAll()
+
+ # clear the cache
+ if hasattr(local.strong_store, 'neutron_client'):
+ delattr(local.strong_store, 'neutron_client')
+
+ # Note that the context is not elevated, but True is passed in,
+ # which forces an elevation to admin credentials even though
+ # the context has an auth_token.
+ neutronv2.get_client(my_context, True)
+
+ def test_get_client_for_admin_context(self):
+
+ self.flags(neutron_auth_strategy=None)
+ self.flags(neutron_url='http://anyhost/')
+ self.flags(neutron_url_timeout=30)
+ my_context = context.get_admin_context()
+ self.mox.StubOutWithMock(client.Client, "__init__")
+ client.Client.__init__(
+ auth_url=CONF.neutron_admin_auth_url,
+ password=CONF.neutron_admin_password,
+ tenant_name=CONF.neutron_admin_tenant_name,
+ username=CONF.neutron_admin_username,
+ endpoint_url=CONF.neutron_url,
+ auth_strategy=None,
+ timeout=CONF.neutron_url_timeout,
+ insecure=False,
+ ca_cert=None).AndReturn(None)
+ self.mox.ReplayAll()
+
+ # clear the cache
+ if hasattr(local.strong_store, 'neutron_client'):
+ delattr(local.strong_store, 'neutron_client')
+
+ # Note that the context does not contain a token but is
+ # an admin context, which forces an elevation to admin
+ # credentials.
+ neutronv2.get_client(my_context)
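Taken together, these scenarios describe the caching contract: admin clients are stashed on the thread-local store and reused, non-admin clients are built per request, and a non-admin context without an auth_token is rejected. A minimal sketch of that contract (the _make_admin_client/_make_token_client helpers are placeholders, not the real construction code):

    def get_client(context, admin=False):
        if admin or (context.is_admin and not context.auth_token):
            # Reuse the admin client cached on the thread-local store.
            if getattr(local.strong_store, 'neutron_client', None) is None:
                local.strong_store.neutron_client = _make_admin_client()
            return local.strong_store.neutron_client
        if not context.auth_token:
            raise exceptions.Unauthorized()
        # Non-admin callers get a fresh, token-scoped client every time.
        return _make_token_client(context)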
diff --git a/nova/tests/test_quota.py b/nova/tests/test_quota.py
index 1a2b108323..6cdab7a076 100644
--- a/nova/tests/test_quota.py
+++ b/nova/tests/test_quota.py
@@ -2049,11 +2049,9 @@ class QuotaReserveSqlAlchemyTestCase(test.TestCase):
def fake_get_session():
return FakeSession()
- def fake_get_project_quota_usages(context, session, project_id):
- return self.usages.copy()
-
- def fake_get_user_quota_usages(context, session, project_id, user_id):
- return self.usages.copy()
+ def fake_get_project_user_quota_usages(context, session, project_id,
+ user_id):
+ return self.usages.copy(), self.usages.copy()
def fake_quota_usage_create(context, project_id, user_id, resource,
in_use, reserved, until_refresh,
@@ -2078,10 +2076,8 @@ class QuotaReserveSqlAlchemyTestCase(test.TestCase):
return reservation_ref
self.stubs.Set(sqa_api, 'get_session', fake_get_session)
- self.stubs.Set(sqa_api, '_get_project_quota_usages',
- fake_get_project_quota_usages)
- self.stubs.Set(sqa_api, '_get_user_quota_usages',
- fake_get_user_quota_usages)
+ self.stubs.Set(sqa_api, '_get_project_user_quota_usages',
+ fake_get_project_user_quota_usages)
self.stubs.Set(sqa_api, '_quota_usage_create', fake_quota_usage_create)
self.stubs.Set(sqa_api, '_reservation_create', fake_reservation_create)
diff --git a/nova/tests/test_utils.py b/nova/tests/test_utils.py
index b38ea50461..820fe09bca 100644
--- a/nova/tests/test_utils.py
+++ b/nova/tests/test_utils.py
@@ -1083,3 +1083,10 @@ class GetImageFromSystemMetadataTestCase(test.NoDBTestCase):
# Verify that the foo1 key has not been inherited
self.assertTrue("foo1" not in image)
+
+
+class ConstantTimeCompareTestCase(test.NoDBTestCase):
+ def test_constant_time_compare(self):
+ self.assertTrue(utils.constant_time_compare("abcd1234", "abcd1234"))
+ self.assertFalse(utils.constant_time_compare("abcd1234", "a"))
+ self.assertFalse(utils.constant_time_compare("abcd1234", "ABCD234"))
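constant_time_compare has to examine every byte before answering, so the comparison time does not reveal where two strings first differ. A minimal sketch of such a helper (illustrative; not necessarily the exact nova.utils implementation):

    def constant_time_compare(first, second):
        if len(first) != len(second):
            return False
        result = 0
        # Accumulate differences instead of returning early on the first
        # mismatching byte.
        for x, y in zip(first, second):
            result |= ord(x) ^ ord(y)
        return result == 0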
diff --git a/nova/tests/virt/docker/test_driver.py b/nova/tests/virt/docker/test_driver.py
index 602e022176..daa1931741 100644
--- a/nova/tests/virt/docker/test_driver.py
+++ b/nova/tests/virt/docker/test_driver.py
@@ -71,3 +71,17 @@ class DockerDriverTestCase(_VirtDriverTestCase, test.TestCase):
self.connection.get_host_stats()['host_hostname'])
self.assertEqual('foo',
self.connection.get_host_stats()['host_hostname'])
+
+ def test_plug_vifs(self):
+ # Check to make sure the method raises NotImplementedError.
+ self.assertRaises(NotImplementedError,
+ self.connection.plug_vifs,
+ instance=utils.get_test_instance(),
+ network_info=None)
+
+ def test_unplug_vifs(self):
+ # Check to make sure the method raises NotImplementedError.
+ self.assertRaises(NotImplementedError,
+ self.connection.unplug_vifs,
+ instance=utils.get_test_instance(),
+ network_info=None)
diff --git a/nova/tests/virt/hyperv/test_hypervapi.py b/nova/tests/virt/hyperv/test_hypervapi.py
index ed03badda2..5d75fe5085 100644..100755
--- a/nova/tests/virt/hyperv/test_hypervapi.py
+++ b/nova/tests/virt/hyperv/test_hypervapi.py
@@ -415,7 +415,7 @@ class HyperVAPITestCase(test.NoDBTestCase):
mox.IsA(str))
m.WithSideEffects(self._add_ide_disk)
- def _test_spawn_config_drive(self, use_cdrom):
+ def _test_spawn_config_drive(self, use_cdrom, format_error=False):
self.flags(force_config_drive=True)
self.flags(config_drive_cdrom=use_cdrom, group='hyperv')
self.flags(mkisofs_cmd='mkisofs.exe')
@@ -427,14 +427,25 @@ class HyperVAPITestCase(test.NoDBTestCase):
expected_ide_disks = 2
expected_ide_dvds = 0
- self._test_spawn_instance(expected_ide_disks=expected_ide_disks,
- expected_ide_dvds=expected_ide_dvds,
- config_drive=True,
- use_cdrom=use_cdrom)
+ if format_error:
+ self.assertRaises(vmutils.UnsupportedConfigDriveFormatException,
+ self._test_spawn_instance,
+ with_exception=True,
+ config_drive=True,
+ use_cdrom=use_cdrom)
+ else:
+ self._test_spawn_instance(expected_ide_disks=expected_ide_disks,
+ expected_ide_dvds=expected_ide_dvds,
+ config_drive=True,
+ use_cdrom=use_cdrom)
def test_spawn_config_drive(self):
self._test_spawn_config_drive(False)
+ def test_spawn_config_drive_format_error(self):
+ CONF.set_override('config_drive_format', 'wrong_format')
+ self._test_spawn_config_drive(True, True)
+
def test_spawn_config_drive_cdrom(self):
self._test_spawn_config_drive(True)
@@ -977,7 +988,8 @@ class HyperVAPITestCase(test.NoDBTestCase):
mox.IsA(str), mox.IsA(object))
m.AndReturn(1025)
- vhdutils.VHDUtils.resize_vhd(mox.IsA(str), mox.IsA(object))
+ vhdutils.VHDUtils.resize_vhd(mox.IsA(str), mox.IsA(object),
+ is_file_max_size=False)
def _setup_spawn_instance_mocks(self, cow, setup_vif_mocks_func=None,
with_exception=False,
@@ -1017,7 +1029,8 @@ class HyperVAPITestCase(test.NoDBTestCase):
m = vhdutils.VHDUtils.get_internal_vhd_size_by_file_size(
mox.IsA(str), mox.IsA(object))
m.AndReturn(1025)
- vhdutils.VHDUtils.resize_vhd(mox.IsA(str), mox.IsA(object))
+ vhdutils.VHDUtils.resize_vhd(mox.IsA(str), mox.IsA(object),
+ is_file_max_size=False)
self._setup_check_admin_permissions_mocks(
admin_permissions=admin_permissions)
@@ -1032,7 +1045,7 @@ class HyperVAPITestCase(test.NoDBTestCase):
block_device_info,
ephemeral_storage=ephemeral_storage)
- if config_drive:
+ if config_drive and not with_exception:
self._setup_spawn_config_drive_mocks(use_cdrom)
# TODO(alexpilotti) Based on where the exception is thrown
@@ -1289,7 +1302,7 @@ class HyperVAPITestCase(test.NoDBTestCase):
mount_point = '/dev/sdc'
def fake_login_storage_target(connection_info):
- raise Exception('Fake connection exception')
+ raise vmutils.HyperVException('Fake connection exception')
self.stubs.Set(self._conn._volumeops, '_login_storage_target',
fake_login_storage_target)
@@ -1597,7 +1610,8 @@ class HyperVAPITestCase(test.NoDBTestCase):
constants.HYPERV_VM_STATE_ENABLED)
self._mox.ReplayAll()
- self._conn.finish_revert_migration(instance, network_info, None,
+ self._conn.finish_revert_migration(self._context, instance,
+ network_info, None,
power_on)
self._mox.VerifyAll()
@@ -1616,6 +1630,11 @@ class HyperVAPITestCase(test.NoDBTestCase):
def test_finish_revert_migration_with_ephemeral_storage(self):
self._test_finish_revert_migration(False, ephemeral_storage=True)
+ def test_refresh_instance_security_rules(self):
+ self.assertRaises(NotImplementedError,
+ self._conn.refresh_instance_security_rules,
+ instance=None)
+
class VolumeOpsTestCase(HyperVAPITestCase):
"""Unit tests for VolumeOps class."""
@@ -1664,3 +1683,17 @@ class VolumeOpsTestCase(HyperVAPITestCase):
self.assertRaises(exception.NotFound,
self.volumeops._get_mounted_disk_from_lun,
target_iqn, target_lun)
+
+ def test_plug_vifs(self):
+ # Check to make sure the method raises NotImplementedError.
+ self.assertRaises(NotImplementedError,
+ self._conn.plug_vifs,
+ instance=self._test_spawn_instance,
+ network_info=None)
+
+ def test_unplug_vifs(self):
+ # Check to make sure the method raises NotImplementedError.
+ self.assertRaises(NotImplementedError,
+ self._conn.unplug_vifs,
+ instance=self._test_spawn_instance,
+ network_info=None)
diff --git a/nova/tests/virt/hyperv/test_vhdutils.py b/nova/tests/virt/hyperv/test_vhdutils.py
index 937c098996..0902c55f00 100644
--- a/nova/tests/virt/hyperv/test_vhdutils.py
+++ b/nova/tests/virt/hyperv/test_vhdutils.py
@@ -88,3 +88,47 @@ class VHDUtilsTestCase(test.NoDBTestCase):
self.assertRaises(vmutils.HyperVException,
vhdutil.get_internal_vhd_size_by_file_size,
None, root_vhd_size)
+
+ def test_get_vhd_format_vhdx(self):
+ with mock.patch('nova.virt.hyperv.vhdutils.open',
+ mock.mock_open(read_data=vhdutils.VHDX_SIGNATURE),
+ create=True) as mock_open:
+
+ format = self._vhdutils.get_vhd_format(self._FAKE_VHD_PATH)
+
+ self.assertEqual(constants.DISK_FORMAT_VHDX, format)
+
+ def test_get_vhd_format_vhd(self):
+ with mock.patch('nova.virt.hyperv.vhdutils.open',
+ mock.mock_open(read_data=vhdutils.VHD_SIGNATURE),
+ create=True) as mock_open:
+ f = mock_open.return_value
+ f.tell.return_value = 1024
+
+ format = self._vhdutils.get_vhd_format(self._FAKE_VHD_PATH)
+
+ self.assertEqual(constants.DISK_FORMAT_VHD, format)
+
+ def test_get_vhd_format_invalid_format(self):
+ with mock.patch('nova.virt.hyperv.vhdutils.open',
+ mock.mock_open(read_data='invalid'),
+ create=True) as mock_open:
+ f = mock_open.return_value
+ f.tell.return_value = 1024
+
+ self.assertRaises(vmutils.HyperVException,
+ self._vhdutils.get_vhd_format,
+ self._FAKE_VHD_PATH)
+
+ def test_get_vhd_format_zero_length_file(self):
+ with mock.patch('nova.virt.hyperv.vhdutils.open',
+ mock.mock_open(read_data=''),
+ create=True) as mock_open:
+ f = mock_open.return_value
+ f.tell.return_value = 0
+
+ self.assertRaises(vmutils.HyperVException,
+ self._vhdutils.get_vhd_format,
+ self._FAKE_VHD_PATH)
+
+ f.seek.assert_called_once_with(0, 2)
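The four cases above pin down how the disk format is detected: a VHDX signature at the start of the file, a VHD signature in the footer at the end, and a HyperVException for anything else, including a zero-length file found via seek(0, 2)/tell(). A rough sketch of that detection, reusing the VHD_SIGNATURE and VHDX_SIGNATURE constants the tests reference:

    def get_vhd_format(self, vhd_path):
        with open(vhd_path, 'rb') as f:
            # VHDX stores its signature at the very start of the file.
            if f.read(len(VHDX_SIGNATURE)) == VHDX_SIGNATURE:
                return constants.DISK_FORMAT_VHDX
            # VHD stores its signature in the 512-byte footer at the end.
            f.seek(0, 2)
            file_size = f.tell()
            if file_size >= 512:
                f.seek(file_size - 512)
                if f.read(len(VHD_SIGNATURE)) == VHD_SIGNATURE:
                    return constants.DISK_FORMAT_VHD
        raise vmutils.HyperVException('Unsupported virtual disk format')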
diff --git a/nova/tests/virt/hyperv/test_vhdutilsv2.py b/nova/tests/virt/hyperv/test_vhdutilsv2.py
index 51f7a280ec..ecae6c2aa7 100644
--- a/nova/tests/virt/hyperv/test_vhdutilsv2.py
+++ b/nova/tests/virt/hyperv/test_vhdutilsv2.py
@@ -28,16 +28,24 @@ class VHDUtilsV2TestCase(test.NoDBTestCase):
_FAKE_VHD_PATH = "C:\\fake_path.vhdx"
_FAKE_PARENT_VHD_PATH = "C:\\fake_parent_path.vhdx"
_FAKE_FORMAT = 3
- _FAKE_MAK_INTERNAL_SIZE = 1000
+ _FAKE_MAK_INTERNAL_SIZE = 1 << 30
_FAKE_TYPE = 3
_FAKE_JOB_PATH = 'fake_job_path'
_FAKE_RET_VAL = 0
+ _FAKE_VHD_FORMAT = 'vhdx'
+ _FAKE_BLOCK_SIZE = 33554432
+ _FAKE_LOG_SIZE = 1048576
+ _FAKE_LOGICAL_SECTOR_SIZE = 4096
+ _FAKE_METADATA_SIZE = 1048576
def setUp(self):
self._vhdutils = vhdutilsv2.VHDUtilsV2()
self._vhdutils._conn = mock.MagicMock()
self._vhdutils._vmutils = mock.MagicMock()
+ self._vhdutils.get_vhd_format = mock.MagicMock(
+ return_value=self._FAKE_VHD_FORMAT)
+ self._fake_file_handle = mock.MagicMock()
self._fake_vhd_info_xml = (
'<INSTANCE CLASSNAME="Msvm_VirtualHardDiskSettingData">'
'<PROPERTY NAME="BlockSize" TYPE="uint32">'
@@ -146,6 +154,8 @@ class VHDUtilsV2TestCase(test.NoDBTestCase):
mock_img_svc = self._vhdutils._conn.Msvm_ImageManagementService()[0]
mock_img_svc.ResizeVirtualHardDisk.return_value = (self._FAKE_JOB_PATH,
self._FAKE_RET_VAL)
+ self._vhdutils.get_internal_vhd_size_by_file_size = mock.MagicMock(
+ return_value=self._FAKE_MAK_INTERNAL_SIZE)
self._vhdutils.resize_vhd(self._FAKE_VHD_PATH,
self._FAKE_MAK_INTERNAL_SIZE)
@@ -153,3 +163,75 @@ class VHDUtilsV2TestCase(test.NoDBTestCase):
mock_img_svc.ResizeVirtualHardDisk.assert_called_once_with(
Path=self._FAKE_VHD_PATH,
MaxInternalSize=self._FAKE_MAK_INTERNAL_SIZE)
+
+ self.mock_get = self._vhdutils.get_internal_vhd_size_by_file_size
+ self.mock_get.assert_called_once_with(self._FAKE_VHD_PATH,
+ self._FAKE_MAK_INTERNAL_SIZE)
+
+ def test_get_vhdx_internal_size(self):
+ self._vhdutils.get_vhd_info = mock.MagicMock(
+ return_value={'ParentPath': self._FAKE_PARENT_VHD_PATH,
+ 'Format': self._FAKE_FORMAT,
+ 'BlockSize': self._FAKE_BLOCK_SIZE,
+ 'LogicalSectorSize': self._FAKE_LOGICAL_SECTOR_SIZE,
+ 'Type': self._FAKE_TYPE})
+ self._vhdutils._get_vhdx_log_size = mock.MagicMock(
+ return_value=self._FAKE_LOG_SIZE)
+ self._vhdutils._get_vhdx_metadata_size_and_offset = mock.MagicMock(
+ return_value=(self._FAKE_METADATA_SIZE, 1024))
+ self._vhdutils._get_vhdx_block_size = mock.MagicMock(
+ return_value=self._FAKE_BLOCK_SIZE)
+
+ file_mock = mock.MagicMock()
+ with mock.patch('__builtin__.open', file_mock):
+ internal_size = (
+ self._vhdutils.get_internal_vhd_size_by_file_size(
+ self._FAKE_VHD_PATH, self._FAKE_MAK_INTERNAL_SIZE))
+
+ self.assertEqual(self._FAKE_MAK_INTERNAL_SIZE - self._FAKE_BLOCK_SIZE,
+ internal_size)
+
+ def test_get_vhdx_current_header(self):
+ VHDX_HEADER_OFFSETS = [64 * 1024, 128 * 1024]
+ fake_sequence_numbers = ['\x01\x00\x00\x00\x00\x00\x00\x00',
+ '\x02\x00\x00\x00\x00\x00\x00\x00']
+ self._fake_file_handle.read = mock.MagicMock(
+ side_effect=fake_sequence_numbers)
+
+ offset = self._vhdutils._get_vhdx_current_header_offset(
+ self._fake_file_handle)
+ self.assertEqual(offset, VHDX_HEADER_OFFSETS[1])
+
+ def test_get_vhdx_metadata_size(self):
+ fake_metadata_offset = '\x01\x00\x00\x00\x00\x00\x00\x00'
+ fake_metadata_size = '\x01\x00\x00\x00'
+ self._fake_file_handle.read = mock.MagicMock(
+ side_effect=[fake_metadata_offset, fake_metadata_size])
+
+ metadata_size, metadata_offset = (
+ self._vhdutils._get_vhdx_metadata_size_and_offset(
+ self._fake_file_handle))
+ self.assertEqual(metadata_size, 1)
+ self.assertEqual(metadata_offset, 1)
+
+ def test_get_block_size(self):
+ self._vhdutils._get_vhdx_metadata_size_and_offset = mock.MagicMock(
+ return_value=(self._FAKE_METADATA_SIZE, 1024))
+ fake_block_size = '\x01\x00\x00\x00'
+ self._fake_file_handle.read = mock.MagicMock(
+ return_value=fake_block_size)
+
+ block_size = self._vhdutils._get_vhdx_block_size(
+ self._fake_file_handle)
+ self.assertEqual(block_size, 1)
+
+ def test_get_log_size(self):
+ fake_current_header_offset = 64 * 1024
+ self._vhdutils._get_vhdx_current_header_offset = mock.MagicMock(
+ return_value=fake_current_header_offset)
+ fake_log_size = '\x01\x00\x00\x00'
+ self._fake_file_handle.read = mock.MagicMock(
+ return_value=fake_log_size)
+
+ log_size = self._vhdutils._get_vhdx_log_size(self._fake_file_handle)
+ self.assertEqual(log_size, 1)
diff --git a/nova/tests/virt/hyperv/test_vmutils.py b/nova/tests/virt/hyperv/test_vmutils.py
index d44af6a40a..0dad728ff6 100644
--- a/nova/tests/virt/hyperv/test_vmutils.py
+++ b/nova/tests/virt/hyperv/test_vmutils.py
@@ -28,6 +28,9 @@ class VMUtilsTestCase(test.NoDBTestCase):
_FAKE_VM_NAME = 'fake_vm'
_FAKE_MEMORY_MB = 2
_FAKE_VM_PATH = "fake_vm_path"
+ _FAKE_VHD_PATH = "fake_vhd_path"
+ _FAKE_DVD_PATH = "fake_dvd_path"
+ _FAKE_VOLUME_DRIVE_PATH = "fake_volume_drive_path"
def setUp(self):
self._vmutils = vmutils.VMUtils()
@@ -75,3 +78,43 @@ class VMUtilsTestCase(test.NoDBTestCase):
self.assertTrue(mock_s.DynamicMemoryEnabled)
else:
self.assertFalse(mock_s.DynamicMemoryEnabled)
+
+ @mock.patch('nova.virt.hyperv.vmutils.VMUtils._get_vm_disks')
+ def test_get_vm_storage_paths(self, mock_get_vm_disks):
+ self._lookup_vm()
+ mock_rasds = self._create_mock_disks()
+ mock_get_vm_disks.return_value = ([mock_rasds[0]], [mock_rasds[1]])
+
+ storage = self._vmutils.get_vm_storage_paths(self._FAKE_VM_NAME)
+ (disk_files, volume_drives) = storage
+
+ self.assertEqual([self._FAKE_VHD_PATH], disk_files)
+ self.assertEqual([self._FAKE_VOLUME_DRIVE_PATH], volume_drives)
+
+ def test_get_vm_disks(self):
+ mock_vm = self._lookup_vm()
+ mock_vmsettings = [mock.MagicMock()]
+ mock_vm.associators.return_value = mock_vmsettings
+
+ mock_rasds = self._create_mock_disks()
+ mock_vmsettings[0].associators.return_value = mock_rasds
+
+ (disks, volumes) = self._vmutils._get_vm_disks(mock_vm)
+
+ mock_vm.associators.assert_called_with(
+ wmi_result_class=self._vmutils._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)
+ mock_vmsettings[0].associators.assert_called_with(
+ wmi_result_class=self._vmutils._STORAGE_ALLOC_SETTING_DATA_CLASS)
+ self.assertEqual([mock_rasds[0]], disks)
+ self.assertEqual([mock_rasds[1]], volumes)
+
+ def _create_mock_disks(self):
+ mock_rasd1 = mock.MagicMock()
+ mock_rasd1.ResourceSubType = self._vmutils._IDE_DISK_RES_SUB_TYPE
+ mock_rasd1.Connection = [self._FAKE_VHD_PATH]
+
+ mock_rasd2 = mock.MagicMock()
+ mock_rasd2.ResourceSubType = self._vmutils._PHYS_DISK_RES_SUB_TYPE
+ mock_rasd2.HostResource = [self._FAKE_VOLUME_DRIVE_PATH]
+
+ return [mock_rasd1, mock_rasd2]
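Both assertions rely on _get_vm_disks walking the WMI associations from the VM to its storage allocation settings and splitting the results by resource subtype. Roughly (a sketch of the traversal the mocks mirror, not the exact implementation):

    def _get_vm_disks(self, vm):
        vmsettings = vm.associators(
            wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)
        rasds = vmsettings[0].associators(
            wmi_result_class=self._STORAGE_ALLOC_SETTING_DATA_CLASS)
        # VHD-backed disk files vs. passthrough volume drives.
        disks = [r for r in rasds
                 if r.ResourceSubType == self._IDE_DISK_RES_SUB_TYPE]
        volumes = [r for r in rasds
                   if r.ResourceSubType == self._PHYS_DISK_RES_SUB_TYPE]
        return disks, volumes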
diff --git a/nova/tests/virt/hyperv/test_vmutilsv2.py b/nova/tests/virt/hyperv/test_vmutilsv2.py
index 969ff794f6..95bb681b83 100644
--- a/nova/tests/virt/hyperv/test_vmutilsv2.py
+++ b/nova/tests/virt/hyperv/test_vmutilsv2.py
@@ -39,6 +39,8 @@ class VMUtilsV2TestCase(test.NoDBTestCase):
_FAKE_RES_DATA = "fake_res_data"
_FAKE_RES_PATH = "fake_res_path"
_FAKE_DYNAMIC_MEMORY_RATIO = 1.0
+ _FAKE_VHD_PATH = "fake_vhd_path"
+ _FAKE_VOLUME_DRIVE_PATH = "fake_volume_drive_path"
def setUp(self):
self._vmutils = vmutilsv2.VMUtilsV2()
@@ -115,6 +117,32 @@ class VMUtilsV2TestCase(test.NoDBTestCase):
self.assertTrue(self._vmutils._add_virt_resource.called)
+ def test_get_vm_storage_paths(self):
+ mock_vm = self._lookup_vm()
+
+ mock_vmsettings = [mock.MagicMock()]
+ mock_vm.associators.return_value = mock_vmsettings
+ mock_sasds = []
+ mock_sasd1 = mock.MagicMock()
+ mock_sasd1.ResourceSubType = self._vmutils._IDE_DISK_RES_SUB_TYPE
+ mock_sasd1.HostResource = [self._FAKE_VHD_PATH]
+ mock_sasd2 = mock.MagicMock()
+ mock_sasd2.ResourceSubType = self._vmutils._PHYS_DISK_RES_SUB_TYPE
+ mock_sasd2.HostResource = [self._FAKE_VOLUME_DRIVE_PATH]
+ mock_sasds.append(mock_sasd1)
+ mock_sasds.append(mock_sasd2)
+ mock_vmsettings[0].associators.return_value = mock_sasds
+
+ storage = self._vmutils.get_vm_storage_paths(self._FAKE_VM_NAME)
+ (disk_files, volume_drives) = storage
+
+ mock_vm.associators.assert_called_with(
+ wmi_result_class='Msvm_VirtualSystemSettingData')
+ mock_vmsettings[0].associators.assert_called_with(
+ wmi_result_class='Msvm_StorageAllocationSettingData')
+ self.assertEqual([self._FAKE_VHD_PATH], disk_files)
+ self.assertEqual([self._FAKE_VOLUME_DRIVE_PATH], volume_drives)
+
def test_destroy(self):
self._lookup_vm()
@@ -214,25 +242,30 @@ class VMUtilsV2TestCase(test.NoDBTestCase):
mock_svc.RemoveResourceSettings.assert_called_with(
[self._FAKE_RES_PATH])
- def test_enable_vm_metrics_collection(self):
+ @mock.patch('nova.virt.hyperv.vmutils.VMUtils._get_vm_disks')
+ def test_enable_vm_metrics_collection(self, mock_get_vm_disks):
self._lookup_vm()
mock_svc = self._vmutils._conn.Msvm_MetricService()[0]
metric_def = mock.MagicMock()
+ mock_disk = mock.MagicMock()
+ mock_disk.path_.return_value = self._FAKE_RES_PATH
+ mock_get_vm_disks.return_value = ([mock_disk], [mock_disk])
- fake_metric_def_paths = ["fake_0", "fake_1", "fake_2"]
- metric_def.path_.side_effect = fake_metric_def_paths
+ fake_metric_def_paths = ["fake_0", None]
+ fake_metric_resource_paths = [self._FAKE_VM_PATH, self._FAKE_RES_PATH]
+ metric_def.path_.side_effect = fake_metric_def_paths
self._vmutils._conn.CIM_BaseMetricDefinition.return_value = [
metric_def]
self._vmutils.enable_vm_metrics_collection(self._FAKE_VM_NAME)
calls = []
- for fake_metric_def_path in fake_metric_def_paths:
+ for i in range(len(fake_metric_def_paths)):
calls.append(mock.call(
- Subject=self._FAKE_VM_PATH,
- Definition=fake_metric_def_path,
+ Subject=fake_metric_resource_paths[i],
+ Definition=fake_metric_def_paths[i],
MetricCollectionEnabled=self._vmutils._METRIC_ENABLED))
mock_svc.ControlMetrics.assert_has_calls(calls, any_order=True)
diff --git a/nova/tests/virt/libvirt/fake_imagebackend.py b/nova/tests/virt/libvirt/fake_imagebackend.py
index 23672dc415..96600d69ce 100644
--- a/nova/tests/virt/libvirt/fake_imagebackend.py
+++ b/nova/tests/virt/libvirt/fake_imagebackend.py
@@ -54,7 +54,7 @@ class Backend(object):
return FakeImage(instance, name)
- def snapshot(self, path, name, image_type=''):
+ def snapshot(self, path, image_type=''):
# NOTE(bfilippov): this is done in favor of the
# snapshot tests in test_libvirt.LibvirtConnTestCase
- return imagebackend.Backend(True).snapshot(path, name, image_type)
+ return imagebackend.Backend(True).snapshot(path, image_type)
diff --git a/nova/tests/virt/libvirt/fake_libvirt_utils.py b/nova/tests/virt/libvirt/fake_libvirt_utils.py
index 479983760c..21ee7c47b9 100644
--- a/nova/tests/virt/libvirt/fake_libvirt_utils.py
+++ b/nova/tests/virt/libvirt/fake_libvirt_utils.py
@@ -138,15 +138,7 @@ def chown(path, owner):
pass
-def create_snapshot(disk_path, snapshot_name):
- pass
-
-
-def delete_snapshot(disk_path, snapshot_name):
- pass
-
-
-def extract_snapshot(disk_path, source_fmt, snapshot_name, out_path, dest_fmt):
+def extract_snapshot(disk_path, source_fmt, out_path, dest_fmt):
files[out_path] = ''
@@ -211,8 +203,10 @@ def pick_disk_driver_name(hypervisor_version, is_block_dev=False):
def list_rbd_volumes(pool):
- fake_volumes = ['fakeinstancename.local', 'fakeinstancename.swap',
- 'fakeinstancename', 'wronginstancename']
+ fake_volumes = ['875a8070-d0b9-4949-8b31-104d125c9a64.local',
+ '875a8070-d0b9-4949-8b31-104d125c9a64.swap',
+ '875a8070-d0b9-4949-8b31-104d125c9a64',
+ 'wrong875a8070-d0b9-4949-8b31-104d125c9a64']
return fake_volumes
diff --git a/nova/tests/virt/libvirt/fakelibvirt.py b/nova/tests/virt/libvirt/fakelibvirt.py
index ce129f0834..7ddabd4f76 100644
--- a/nova/tests/virt/libvirt/fakelibvirt.py
+++ b/nova/tests/virt/libvirt/fakelibvirt.py
@@ -636,6 +636,9 @@ class Connection(object):
def domainEventRegisterAny(self, dom, eventid, callback, opaque):
self._event_callbacks[eventid] = [callback, opaque]
+ def registerCloseCallback(self, cb, opaque):
+ pass
+
def getCapabilities(self):
"""Return spoofed capabilities."""
return '''<capabilities>
diff --git a/nova/tests/virt/libvirt/test_imagebackend.py b/nova/tests/virt/libvirt/test_imagebackend.py
index 2455ec892f..3935fe404e 100644
--- a/nova/tests/virt/libvirt/test_imagebackend.py
+++ b/nova/tests/virt/libvirt/test_imagebackend.py
@@ -16,22 +16,27 @@
# under the License.
import os
+import shutil
+import tempfile
import fixtures
+import mock
from oslo.config import cfg
+from inspect import getargspec
+
from nova import exception
from nova.openstack.common import uuidutils
from nova import test
from nova.tests import fake_processutils
from nova.tests.virt.libvirt import fake_libvirt_utils
+from nova import utils
from nova.virt.libvirt import imagebackend
CONF = cfg.CONF
class _ImageTestCase(object):
- INSTANCES_PATH = '/instances_path'
def mock_create_image(self, image):
def create_image(fn, base, size, *args, **kwargs):
@@ -40,10 +45,13 @@ class _ImageTestCase(object):
def setUp(self):
super(_ImageTestCase, self).setUp()
+ self.INSTANCES_PATH = tempfile.mkdtemp(suffix='instances')
self.flags(disable_process_locking=True,
instances_path=self.INSTANCES_PATH)
self.INSTANCE = {'name': 'instance',
'uuid': uuidutils.generate_uuid()}
+ self.DISK_INFO_PATH = os.path.join(self.INSTANCES_PATH,
+ self.INSTANCE['uuid'], 'disk.info')
self.NAME = 'fake.vm'
self.TEMPLATE = 'template'
@@ -61,6 +69,78 @@ class _ImageTestCase(object):
'nova.virt.libvirt.imagebackend.libvirt_utils',
fake_libvirt_utils))
+ def fake_chown(path, owner_uid=None):
+ return None
+ self.stubs.Set(utils, 'chown', fake_chown)
+
+ def tearDown(self):
+ super(_ImageTestCase, self).tearDown()
+ shutil.rmtree(self.INSTANCES_PATH)
+
+ def test_prealloc_image(self):
+ CONF.set_override('preallocate_images', 'space')
+
+ fake_processutils.fake_execute_clear_log()
+ fake_processutils.stub_out_processutils_execute(self.stubs)
+ image = self.image_class(self.INSTANCE, self.NAME)
+
+ def fake_fetch(target, *args, **kwargs):
+ return
+
+ self.stubs.Set(os.path, 'exists', lambda _: True)
+ self.stubs.Set(os, 'access', lambda p, w: True)
+
+ # Call cache() twice to verify the fallocate capability probe runs
+ # only once.
+ image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
+ image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
+
+ self.assertEqual(fake_processutils.fake_execute_get_log(),
+ ['fallocate -n -l 1 %s.fallocate_test' % self.PATH,
+ 'fallocate -n -l %s %s' % (self.SIZE, self.PATH),
+ 'fallocate -n -l %s %s' % (self.SIZE, self.PATH)])
+
+ def test_prealloc_image_without_write_access(self):
+ CONF.set_override('preallocate_images', 'space')
+
+ fake_processutils.fake_execute_clear_log()
+ fake_processutils.stub_out_processutils_execute(self.stubs)
+ image = self.image_class(self.INSTANCE, self.NAME)
+
+ def fake_fetch(target, *args, **kwargs):
+ return
+
+ self.stubs.Set(image, 'check_image_exists', lambda: True)
+ self.stubs.Set(image, '_can_fallocate', lambda: True)
+ self.stubs.Set(os.path, 'exists', lambda _: True)
+ self.stubs.Set(os, 'access', lambda p, w: False)
+
+ # The fallocate capability probe only runs when the user has write
+ # access.
+ image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
+
+ self.assertEqual(fake_processutils.fake_execute_get_log(), [])
+
+
+class RawTestCase(_ImageTestCase, test.NoDBTestCase):
+
+ SIZE = 1024
+
+ def setUp(self):
+ self.image_class = imagebackend.Raw
+ super(RawTestCase, self).setUp()
+ self.stubs.Set(imagebackend.Raw, 'correct_format', lambda _: None)
+
+ def fake_chown(path, owner_uid=None):
+ return None
+ self.stubs.Set(utils, 'chown', fake_chown)
+
+ def prepare_mocks(self):
+ fn = self.mox.CreateMockAnything()
+ self.mox.StubOutWithMock(imagebackend.utils.synchronized,
+ '__call__')
+ self.mox.StubOutWithMock(imagebackend.libvirt_utils, 'copy_image')
+ self.mox.StubOutWithMock(imagebackend.disk, 'extend')
+ return fn
+
def test_cache(self):
self.mox.StubOutWithMock(os.path, 'exists')
if self.OLD_STYLE_INSTANCE_PATH:
@@ -128,66 +208,6 @@ class _ImageTestCase(object):
self.mox.VerifyAll()
- def test_prealloc_image(self):
- CONF.set_override('preallocate_images', 'space')
-
- fake_processutils.fake_execute_clear_log()
- fake_processutils.stub_out_processutils_execute(self.stubs)
- image = self.image_class(self.INSTANCE, self.NAME)
-
- def fake_fetch(target, *args, **kwargs):
- return
-
- self.stubs.Set(os.path, 'exists', lambda _: True)
- self.stubs.Set(os, 'access', lambda p, w: True)
-
- # Call twice to verify testing fallocate is only called once.
- image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
- image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
-
- self.assertEqual(fake_processutils.fake_execute_get_log(),
- ['fallocate -n -l 1 %s.fallocate_test' % self.PATH,
- 'fallocate -n -l %s %s' % (self.SIZE, self.PATH),
- 'fallocate -n -l %s %s' % (self.SIZE, self.PATH)])
-
- def test_prealloc_image_without_write_access(self):
- CONF.set_override('preallocate_images', 'space')
-
- fake_processutils.fake_execute_clear_log()
- fake_processutils.stub_out_processutils_execute(self.stubs)
- image = self.image_class(self.INSTANCE, self.NAME)
-
- def fake_fetch(target, *args, **kwargs):
- return
-
- self.stubs.Set(image, 'check_image_exists', lambda: True)
- self.stubs.Set(image, '_can_fallocate', lambda: True)
- self.stubs.Set(os.path, 'exists', lambda _: True)
- self.stubs.Set(os, 'access', lambda p, w: False)
-
- # Testing fallocate is only called when user has write access.
- image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
-
- self.assertEqual(fake_processutils.fake_execute_get_log(), [])
-
-
-class RawTestCase(_ImageTestCase, test.NoDBTestCase):
-
- SIZE = 1024
-
- def setUp(self):
- self.image_class = imagebackend.Raw
- super(RawTestCase, self).setUp()
- self.stubs.Set(imagebackend.Raw, 'correct_format', lambda _: None)
-
- def prepare_mocks(self):
- fn = self.mox.CreateMockAnything()
- self.mox.StubOutWithMock(imagebackend.utils.synchronized,
- '__call__')
- self.mox.StubOutWithMock(imagebackend.libvirt_utils, 'copy_image')
- self.mox.StubOutWithMock(imagebackend.disk, 'extend')
- return fn
-
def test_create_image(self):
fn = self.prepare_mocks()
fn(target=self.TEMPLATE_PATH, max_size=None, image_id=None)
@@ -222,16 +242,21 @@ class RawTestCase(_ImageTestCase, test.NoDBTestCase):
self.mox.VerifyAll()
def test_correct_format(self):
- info = self.mox.CreateMockAnything()
self.stubs.UnsetAll()
self.mox.StubOutWithMock(os.path, 'exists')
self.mox.StubOutWithMock(imagebackend.images, 'qemu_img_info')
+ def fake_chown(path, owner_uid=None):
+ return None
+ self.stubs.Set(utils, 'chown', fake_chown)
+
os.path.exists(self.PATH).AndReturn(True)
+ os.path.exists(self.DISK_INFO_PATH).AndReturn(False)
info = self.mox.CreateMockAnything()
info.file_format = 'foo'
imagebackend.images.qemu_img_info(self.PATH).AndReturn(info)
+ os.path.exists(CONF.instances_path).AndReturn(True)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME, path=self.PATH)
@@ -239,6 +264,11 @@ class RawTestCase(_ImageTestCase, test.NoDBTestCase):
self.mox.VerifyAll()
+ def test_resolve_driver_format(self):
+ image = self.image_class(self.INSTANCE, self.NAME)
+ driver_format = image.resolve_driver_format()
+ self.assertEqual(driver_format, 'raw')
+
class Qcow2TestCase(_ImageTestCase, test.NoDBTestCase):
SIZE = 1024 * 1024 * 1024
@@ -249,6 +279,10 @@ class Qcow2TestCase(_ImageTestCase, test.NoDBTestCase):
self.QCOW2_BASE = (self.TEMPLATE_PATH +
'_%d' % (self.SIZE / (1024 * 1024 * 1024)))
+ def fake_chown(path, owner_uid=None):
+ return None
+ self.stubs.Set(utils, 'chown', fake_chown)
+
def prepare_mocks(self):
fn = self.mox.CreateMockAnything()
self.mox.StubOutWithMock(imagebackend.utils.synchronized,
@@ -259,6 +293,80 @@ class Qcow2TestCase(_ImageTestCase, test.NoDBTestCase):
self.mox.StubOutWithMock(imagebackend.disk, 'extend')
return fn
+ def test_cache(self):
+ self.mox.StubOutWithMock(os.path, 'exists')
+ if self.OLD_STYLE_INSTANCE_PATH:
+ os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
+ os.path.exists(self.DISK_INFO_PATH).AndReturn(False)
+ os.path.exists(CONF.instances_path).AndReturn(True)
+ os.path.exists(self.TEMPLATE_DIR).AndReturn(False)
+ os.path.exists(self.INSTANCES_PATH).AndReturn(True)
+ os.path.exists(self.PATH).AndReturn(False)
+ os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
+ fn = self.mox.CreateMockAnything()
+ fn(target=self.TEMPLATE_PATH)
+ self.mox.ReplayAll()
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+ self.mock_create_image(image)
+ image.cache(fn, self.TEMPLATE)
+
+ self.mox.VerifyAll()
+
+ def test_cache_image_exists(self):
+ self.mox.StubOutWithMock(os.path, 'exists')
+ if self.OLD_STYLE_INSTANCE_PATH:
+ os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
+ os.path.exists(self.DISK_INFO_PATH).AndReturn(False)
+ os.path.exists(self.INSTANCES_PATH).AndReturn(True)
+ os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
+ os.path.exists(self.PATH).AndReturn(True)
+ os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
+ self.mox.ReplayAll()
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+ image.cache(None, self.TEMPLATE)
+
+ self.mox.VerifyAll()
+
+ def test_cache_base_dir_exists(self):
+ self.mox.StubOutWithMock(os.path, 'exists')
+ if self.OLD_STYLE_INSTANCE_PATH:
+ os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
+ os.path.exists(self.DISK_INFO_PATH).AndReturn(False)
+ os.path.exists(self.INSTANCES_PATH).AndReturn(True)
+ os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
+ os.path.exists(self.PATH).AndReturn(False)
+ os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
+ fn = self.mox.CreateMockAnything()
+ fn(target=self.TEMPLATE_PATH)
+ self.mox.ReplayAll()
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+ self.mock_create_image(image)
+ image.cache(fn, self.TEMPLATE)
+
+ self.mox.VerifyAll()
+
+ def test_cache_template_exists(self):
+ self.mox.StubOutWithMock(os.path, 'exists')
+ if self.OLD_STYLE_INSTANCE_PATH:
+ os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
+ os.path.exists(self.DISK_INFO_PATH).AndReturn(False)
+ os.path.exists(self.INSTANCES_PATH).AndReturn(True)
+ os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
+ os.path.exists(self.PATH).AndReturn(False)
+ os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
+ fn = self.mox.CreateMockAnything()
+ fn(target=self.TEMPLATE_PATH)
+ self.mox.ReplayAll()
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+ self.mock_create_image(image)
+ image.cache(fn, self.TEMPLATE)
+
+ self.mox.VerifyAll()
+
def test_create_image(self):
fn = self.prepare_mocks()
fn(max_size=None, target=self.TEMPLATE_PATH)
@@ -277,6 +385,8 @@ class Qcow2TestCase(_ImageTestCase, test.NoDBTestCase):
self.mox.StubOutWithMock(os.path, 'exists')
if self.OLD_STYLE_INSTANCE_PATH:
os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
+ os.path.exists(self.DISK_INFO_PATH).AndReturn(False)
+ os.path.exists(self.INSTANCES_PATH).AndReturn(True)
os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
os.path.exists(self.PATH).AndReturn(False)
os.path.exists(self.PATH).AndReturn(False)
@@ -296,6 +406,8 @@ class Qcow2TestCase(_ImageTestCase, test.NoDBTestCase):
self.mox.StubOutWithMock(imagebackend.disk, 'get_disk_size')
if self.OLD_STYLE_INSTANCE_PATH:
os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
+ os.path.exists(self.DISK_INFO_PATH).AndReturn(False)
+ os.path.exists(self.INSTANCES_PATH).AndReturn(True)
os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
imagebackend.disk.get_disk_size(self.TEMPLATE_PATH
).AndReturn(self.SIZE)
@@ -314,6 +426,8 @@ class Qcow2TestCase(_ImageTestCase, test.NoDBTestCase):
'get_disk_backing_file')
if self.OLD_STYLE_INSTANCE_PATH:
os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
+ os.path.exists(self.DISK_INFO_PATH).AndReturn(False)
+ os.path.exists(CONF.instances_path).AndReturn(True)
os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
os.path.exists(self.PATH).AndReturn(True)
@@ -340,6 +454,9 @@ class Qcow2TestCase(_ImageTestCase, test.NoDBTestCase):
'get_disk_backing_file')
if self.OLD_STYLE_INSTANCE_PATH:
os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
+ os.path.exists(self.DISK_INFO_PATH).AndReturn(False)
+ os.path.exists(self.INSTANCES_PATH).AndReturn(True)
+
os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
os.path.exists(self.PATH).AndReturn(True)
@@ -353,6 +470,53 @@ class Qcow2TestCase(_ImageTestCase, test.NoDBTestCase):
self.mox.VerifyAll()
+ def test_resolve_driver_format(self):
+ image = self.image_class(self.INSTANCE, self.NAME)
+ driver_format = image.resolve_driver_format()
+ self.assertEqual(driver_format, 'qcow2')
+
+ def test_prealloc_image(self):
+ CONF.set_override('preallocate_images', 'space')
+
+ fake_processutils.fake_execute_clear_log()
+ fake_processutils.stub_out_processutils_execute(self.stubs)
+ image = self.image_class(self.INSTANCE, self.NAME)
+
+ def fake_fetch(target, *args, **kwargs):
+ return
+
+ self.stubs.Set(os.path, 'exists', lambda _: True)
+ self.stubs.Set(os, 'access', lambda p, w: True)
+
+ # Call cache() twice to verify the fallocate capability probe runs
+ # only once.
+ image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
+ image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
+
+ self.assertEqual(fake_processutils.fake_execute_get_log(),
+ ['fallocate -n -l 1 %s.fallocate_test' % self.PATH,
+ 'fallocate -n -l %s %s' % (self.SIZE, self.PATH),
+ 'fallocate -n -l %s %s' % (self.SIZE, self.PATH)])
+
+ def test_prealloc_image_without_write_access(self):
+ CONF.set_override('preallocate_images', 'space')
+
+ fake_processutils.fake_execute_clear_log()
+ fake_processutils.stub_out_processutils_execute(self.stubs)
+ image = self.image_class(self.INSTANCE, self.NAME)
+
+ def fake_fetch(target, *args, **kwargs):
+ return
+
+ self.stubs.Set(image, 'check_image_exists', lambda: True)
+ self.stubs.Set(image, '_can_fallocate', lambda: True)
+ self.stubs.Set(os.path, 'exists', lambda _: True)
+ self.stubs.Set(os, 'access', lambda p, w: False)
+
+ # The fallocate capability probe only runs when the user has write
+ # access.
+ image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
+
+ self.assertEqual(fake_processutils.fake_execute_get_log(), [])
+
class LvmTestCase(_ImageTestCase, test.NoDBTestCase):
VG = 'FakeVG'
@@ -429,6 +593,58 @@ class LvmTestCase(_ImageTestCase, test.NoDBTestCase):
self.mox.VerifyAll()
+ def test_cache(self):
+ self.mox.StubOutWithMock(os.path, 'exists')
+ if self.OLD_STYLE_INSTANCE_PATH:
+ os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
+ os.path.exists(self.TEMPLATE_DIR).AndReturn(False)
+ os.path.exists(self.PATH).AndReturn(False)
+ os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
+
+ fn = self.mox.CreateMockAnything()
+ fn(target=self.TEMPLATE_PATH)
+ self.mox.StubOutWithMock(imagebackend.fileutils, 'ensure_tree')
+ imagebackend.fileutils.ensure_tree(self.TEMPLATE_DIR)
+ self.mox.ReplayAll()
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+ self.mock_create_image(image)
+ image.cache(fn, self.TEMPLATE)
+
+ self.mox.VerifyAll()
+
+ def test_cache_image_exists(self):
+ self.mox.StubOutWithMock(os.path, 'exists')
+ if self.OLD_STYLE_INSTANCE_PATH:
+ os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
+ os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
+ os.path.exists(self.PATH).AndReturn(True)
+ os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
+ self.mox.ReplayAll()
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+ image.cache(None, self.TEMPLATE)
+
+ self.mox.VerifyAll()
+
+ def test_cache_base_dir_exists(self):
+ self.mox.StubOutWithMock(os.path, 'exists')
+ if self.OLD_STYLE_INSTANCE_PATH:
+ os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
+ os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
+ os.path.exists(self.PATH).AndReturn(False)
+ os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
+ fn = self.mox.CreateMockAnything()
+ fn(target=self.TEMPLATE_PATH)
+ self.mox.StubOutWithMock(imagebackend.fileutils, 'ensure_tree')
+ self.mox.ReplayAll()
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+ self.mock_create_image(image)
+ image.cache(fn, self.TEMPLATE)
+
+ self.mox.VerifyAll()
+
def test_create_image(self):
self._create_image(False)
@@ -521,10 +737,12 @@ class RbdTestCase(_ImageTestCase, test.NoDBTestCase):
self.libvirt_utils = imagebackend.libvirt_utils
self.utils = imagebackend.utils
self.rbd = self.mox.CreateMockAnything()
+ self.rados = self.mox.CreateMockAnything()
def prepare_mocks(self):
fn = self.mox.CreateMockAnything()
self.mox.StubOutWithMock(imagebackend, 'rbd')
+ self.mox.StubOutWithMock(imagebackend, 'rados')
return fn
def test_cache(self):
@@ -594,12 +812,30 @@ class RbdTestCase(_ImageTestCase, test.NoDBTestCase):
self.mox.VerifyAll()
+ def test_cache_base_dir_exists(self):
+ self.mox.StubOutWithMock(os.path, 'exists')
+ os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
+ os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
+ fn = self.mox.CreateMockAnything()
+ fn(target=self.TEMPLATE_PATH)
+ self.mox.StubOutWithMock(imagebackend.fileutils, 'ensure_tree')
+ self.mox.ReplayAll()
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+ self.mock_create_image(image)
+ image.cache(fn, self.TEMPLATE)
+
+ self.mox.VerifyAll()
+
def test_create_image(self):
fn = self.prepare_mocks()
fn(max_size=None, rbd=self.rbd, target=self.TEMPLATE_PATH)
self.rbd.RBD_FEATURE_LAYERING = 1
+ self.mox.StubOutWithMock(imagebackend.disk, 'get_disk_size')
+ imagebackend.disk.get_disk_size(self.TEMPLATE_PATH
+ ).AndReturn(self.SIZE)
rbd_name = "%s/%s" % (self.INSTANCE['name'], self.NAME)
cmd = ('--pool', self.POOL, self.TEMPLATE_PATH,
rbd_name, '--new-format', '--id', self.USER,
@@ -618,11 +854,15 @@ class RbdTestCase(_ImageTestCase, test.NoDBTestCase):
fake_processutils.fake_execute_clear_log()
fake_processutils.stub_out_processutils_execute(self.stubs)
self.mox.StubOutWithMock(imagebackend, 'rbd')
+ self.mox.StubOutWithMock(imagebackend, 'rados')
image = self.image_class(self.INSTANCE, self.NAME)
def fake_fetch(target, *args, **kwargs):
return
+ def fake_resize(rbd_name, size):
+ return
+
self.stubs.Set(os.path, 'exists', lambda _: True)
self.stubs.Set(image, 'check_image_exists', lambda: True)
@@ -630,12 +870,32 @@ class RbdTestCase(_ImageTestCase, test.NoDBTestCase):
self.assertEqual(fake_processutils.fake_execute_get_log(), [])
+ def test_parent_compatible(self):
+ self.assertEqual(getargspec(imagebackend.Image.libvirt_info),
+ getargspec(self.image_class.libvirt_info))
+
+ def test_resize(self):
+ image = self.image_class(self.INSTANCE, self.NAME)
+ with mock.patch.object(imagebackend, "RBDVolumeProxy") as mock_proxy:
+ volume_mock = mock.Mock()
+ mock_proxy.side_effect = [mock_proxy]
+ mock_proxy.__enter__.side_effect = [volume_mock]
+ image._resize(image.rbd_name, self.SIZE)
+ volume_mock.resize.assert_called_once_with(self.SIZE)
+
class BackendTestCase(test.NoDBTestCase):
INSTANCE = {'name': 'fake-instance',
'uuid': uuidutils.generate_uuid()}
NAME = 'fake-name.suffix'
+ def setUp(self):
+ super(BackendTestCase, self).setUp()
+
+ def fake_chown(path, owner_uid=None):
+ return None
+ self.stubs.Set(utils, 'chown', fake_chown)
+
def get_image(self, use_cow, image_type):
return imagebackend.Backend(use_cow).image(self.INSTANCE,
self.NAME,
diff --git a/nova/tests/virt/libvirt/test_libvirt.py b/nova/tests/virt/libvirt/test_libvirt.py
index ba842ad68a..0957bbd482 100644
--- a/nova/tests/virt/libvirt/test_libvirt.py
+++ b/nova/tests/virt/libvirt/test_libvirt.py
@@ -136,6 +136,14 @@ _fake_NodeDevXml = \
</device>"""}
+def mocked_bdm(id, bdm_info):
+ bdm_mock = mock.MagicMock()
+ bdm_mock.__getitem__ = lambda s, k: bdm_info[k]
+ bdm_mock.get = lambda *k, **kw: bdm_info.get(*k, **kw)
+ bdm_mock.id = id
+ return bdm_mock
+
+
def _concurrency(signal, wait, done, target):
signal.send()
wait.wait()
@@ -339,6 +347,16 @@ class FakeVolumeDriver(object):
def get_xml(self, *args):
return ""
+ def connect_volume(self, *args):
+ """Connect the volume to a fake device."""
+ conf = vconfig.LibvirtConfigGuestDisk()
+ conf.source_type = "network"
+ conf.source_protocol = "fake"
+ conf.source_name = "fake"
+ conf.target_dev = "fake"
+ conf.target_bus = "fake"
+ return conf
+
class FakeConfigGuestDisk(object):
def __init__(self, *args, **kwargs):
@@ -390,6 +408,9 @@ class LibvirtConnTestCase(test.TestCase):
self.stubs.Set(libvirt_driver.disk, 'extend', fake_extend)
+ self.stubs.Set(imagebackend.Image, 'resolve_driver_format',
+ imagebackend.Image._get_driver_format)
+
class FakeConn():
def getCapabilities(self):
"""Ensure standard capabilities being returned."""
@@ -403,6 +424,9 @@ class LibvirtConnTestCase(test.TestCase):
def getLibVersion(self):
return (0 * 1000 * 1000) + (9 * 1000) + 11
+ def domainEventRegisterAny(self, *args, **kwargs):
+ pass
+
def registerCloseCallback(self, cb, opaque):
pass
@@ -649,8 +673,15 @@ class LibvirtConnTestCase(test.TestCase):
self.assertThat(expected, matchers.DictMatches(result))
def test_close_callback(self):
- def get_lib_version_stub():
- return (1 * 1000 * 1000) + (0 * 1000) + 1
+ class FakeConn(object):
+ def getLibVersion(self):
+ return (1 * 1000 * 1000) + (0 * 1000) + 1
+
+ def domainEventRegisterAny(self, *args, **kwargs):
+ pass
+
+ def registerCloseCallback(self, cb, opaque):
+ pass
self.close_callback = None
@@ -658,26 +689,71 @@ class LibvirtConnTestCase(test.TestCase):
self.close_callback = cb
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- self.stubs.Set(self.conn, "getLibVersion", get_lib_version_stub)
+ conn._wrapped_conn = FakeConn()
+
self.mox.StubOutWithMock(conn, '_connect')
- self.mox.StubOutWithMock(self.conn, 'registerCloseCallback')
-
- conn._connect(mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(self.conn)
- self.conn.registerCloseCallback(
- mox.IgnoreArg(), mox.IgnoreArg()).WithSideEffects(
- set_close_callback)
- conn._connect(mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(self.conn)
- self.conn.registerCloseCallback(mox.IgnoreArg(), mox.IgnoreArg())
+ self.mox.StubOutWithMock(conn._conn, 'registerCloseCallback')
+
+ conn._connect(mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(conn._conn)
+ conn._conn.registerCloseCallback(
+ mox.IgnoreArg(), mox.IgnoreArg()
+ ).WithSideEffects(set_close_callback)
+ conn._connect(mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(conn._conn)
+ conn._conn.registerCloseCallback(mox.IgnoreArg(), mox.IgnoreArg())
self.mox.ReplayAll()
# verify that the driver registers for the close callback
# and re-connects after receiving the callback
- conn._get_connection()
+ conn._get_new_connection()
self.assertTrue(self.close_callback)
- self.close_callback(self.conn, 1, None)
+ self.close_callback(conn._conn, 1, None)
conn._get_connection()
+ self.mox.UnsetStubs()
+
+ def test_close_callback_bad_signature(self):
+ class FakeConn(object):
+ def getLibVersion(self):
+ return (1 * 1000 * 1000) + (0 * 1000) + 0
+
+ def domainEventRegisterAny(self, *args, **kwargs):
+ pass
+
+ def registerCloseCallback(self, cb, opaque, *args, **kwargs):
+ pass
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ conn._wrapped_conn = FakeConn()
+
+ self.mox.StubOutWithMock(conn, '_connect')
+ self.mox.StubOutWithMock(conn._conn, 'registerCloseCallback')
+
+ conn._connect(mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(conn._conn)
+ conn._conn.registerCloseCallback(
+ mox.IgnoreArg(), mox.IgnoreArg()).AndRaise(TypeError)
+
+ self.mox.ReplayAll()
+ conn._get_new_connection()
+
+ def test_close_callback_not_defined(self):
+ class FakeConn():
+ def getLibVersion(self):
+ return (0 * 1000 * 1000) + (9 * 1000) + 0
+
+ def domainEventRegisterAny(self, *args, **kwargs):
+ pass
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ conn._wrapped_conn = FakeConn()
+
+ self.mox.StubOutWithMock(conn, '_connect')
+
+ conn._connect(mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
+ conn._wrapped_conn)
+
+ self.mox.ReplayAll()
+ conn._get_new_connection()
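The three close-callback tests cover a binding that supports registerCloseCallback, one whose call raises TypeError because of a signature mismatch, and one that predates the call altogether (getLibVersion 0.9.0). A guarded registration along these lines would satisfy all three; the version threshold here is only inferred from the fake getLibVersion values used above:

    if self._conn.getLibVersion() >= 1000000:  # i.e. libvirt >= 1.0.0
        try:
            self._conn.registerCloseCallback(self._close_callback, None)
        except TypeError:
            # Some python-libvirt builds expose the call with an
            # incompatible signature; treat it as unsupported.
            LOG.debug('Connection close callback is not supported.')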
def test_get_guest_config(self):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
@@ -828,8 +904,11 @@ class LibvirtConnTestCase(test.TestCase):
instance_ref = db.instance_create(self.context, self.test_instance)
conn_info = {'driver_volume_type': 'fake'}
info = {'block_device_mapping': [
- {'connection_info': conn_info, 'mount_device': '/dev/vdc'},
- {'connection_info': conn_info, 'mount_device': '/dev/vdd'}]}
+ mocked_bdm(1, {'connection_info': conn_info,
+ 'mount_device': '/dev/vdc'}),
+ mocked_bdm(1, {'connection_info': conn_info,
+ 'mount_device': '/dev/vdd'}),
+ ]}
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance_ref, info)
@@ -3515,8 +3594,8 @@ class LibvirtConnTestCase(test.TestCase):
block_device_info = {'root_device_name': '/dev/vda',
'block_device_mapping': [
- {'mount_device': 'vda',
- 'boot_index': 0}
+ mocked_bdm(1, {'mount_device': 'vda',
+ 'boot_index': 0}),
]
}
@@ -3537,6 +3616,7 @@ class LibvirtConnTestCase(test.TestCase):
instance_ref['image_ref'] = 'my_fake_image'
instance_ref['root_device_name'] = '/dev/vda'
instance_ref['uuid'] = uuidutils.generate_uuid()
+ block_device_info['block_device_mapping'][0].id = 2
instance = db.instance_create(self.context, instance_ref)
conn.spawn(self.context, instance, None, [], None,
@@ -4277,14 +4357,82 @@ class LibvirtConnTestCase(test.TestCase):
conn._create_images_and_backing(self.context, instance,
libvirt_utils.get_instance_path(instance),
disk_info_json)
- conn._create_domain_and_network(dummyxml, instance,
+ conn._create_domain_and_network(self.context, dummyxml, instance,
network_info, block_device_info,
- context=self.context, reboot=True)
+ reboot=True)
self.mox.ReplayAll()
conn._hard_reboot(self.context, instance, network_info,
block_device_info)
+ def test_power_on(self):
+
+ def _check_xml_bus(name, xml, block_info):
+ tree = etree.fromstring(xml)
+ got_disks = tree.findall('./devices/disk')
+ got_disk_targets = tree.findall('./devices/disk/target')
+ system_meta = utils.instance_sys_meta(instance)
+ image_meta = utils.get_image_from_system_metadata(system_meta)
+ want_device_bus = image_meta.get('hw_disk_bus')
+ if not want_device_bus:
+ want_device_bus = self.fake_img['properties']['hw_disk_bus']
+ got_device_bus = got_disk_targets[0].get('bus')
+ self.assertEqual(got_device_bus, want_device_bus)
+
+ def fake_get_info(instance_name):
+ called['count'] += 1
+ if called['count'] == 1:
+ state = power_state.SHUTDOWN
+ else:
+ state = power_state.RUNNING
+ return dict(state=state)
+
+ def _get_inst(with_meta=True):
+ inst_ref = self.test_instance
+ inst_ref['uuid'] = uuidutils.generate_uuid()
+ if with_meta:
+ inst_ref['system_metadata']['image_hw_disk_bus'] = 'ide'
+ instance = db.instance_create(self.context, inst_ref)
+ instance['image_ref'] = '70a599e0-31e7-49b7-b260-868f221a761e'
+ return instance
+
+ called = {'count': 0}
+ self.fake_img = {'id': '70a599e0-31e7-49b7-b260-868f221a761e',
+ 'name': 'myfakeimage',
+ 'created_at': '',
+ 'updated_at': '',
+ 'deleted_at': None,
+ 'deleted': False,
+ 'status': 'active',
+ 'is_public': False,
+ 'container_format': 'bare',
+ 'disk_format': 'qcow2',
+ 'size': '74185822',
+ 'properties': {'hw_disk_bus': 'ide'}}
+
+ instance = _get_inst()
+ network_info = _fake_network_info(self.stubs, 1)
+ block_device_info = None
+ image_service_mock = mock.Mock()
+ image_service_mock.show.return_value = self.fake_img
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ with contextlib.nested(
+ mock.patch.object(conn, '_destroy', return_value=None),
+ mock.patch.object(conn, '_create_images_and_backing'),
+ mock.patch.object(conn, '_create_domain_and_network'),
+ mock.patch('nova.image.glance.get_remote_image_service',
+ return_value=(image_service_mock,
+ instance['image_ref']))):
+ conn.get_info = fake_get_info
+ conn.get_instance_disk_info = _check_xml_bus
+ conn._hard_reboot(self.context, instance, network_info,
+ block_device_info)
+
+ instance = _get_inst(with_meta=False)
+ conn._hard_reboot(self.context, instance, network_info,
+ block_device_info)
+
def test_resume(self):
dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
"<devices>"
@@ -4313,10 +4461,10 @@ class LibvirtConnTestCase(test.TestCase):
block_device_info)
_get_existing_domain_xml.assert_has_calls([mock.call(instance,
network_info, block_device_info)])
- _create_domain_and_network.assert_has_calls([mock.call(dummyxml,
- instance, network_info,
- block_device_info=block_device_info,
- context=self.context)])
+ _create_domain_and_network.assert_has_calls([mock.call(
+ self.context, dummyxml,
+ instance, network_info,
+ block_device_info=block_device_info)])
_attach_pci_devices.assert_has_calls([mock.call('fake_dom',
'fake_pci_devs')])
@@ -4356,8 +4504,10 @@ class LibvirtConnTestCase(test.TestCase):
def fake_get_info(instance_name):
return {'state': power_state.SHUTDOWN, 'id': -1}
- fake_volumes = ['fakeinstancename.local', 'fakeinstancename.swap',
- 'fakeinstancename', 'wronginstancename']
+ fake_volumes = ['875a8070-d0b9-4949-8b31-104d125c9a64.local',
+ '875a8070-d0b9-4949-8b31-104d125c9a64.swap',
+ '875a8070-d0b9-4949-8b31-104d125c9a64',
+ 'wrong875a8070-d0b9-4949-8b31-104d125c9a64']
fake_pool = 'fake_pool'
fake_instance = {'name': 'fakeinstancename', 'id': 'instanceid',
'uuid': '875a8070-d0b9-4949-8b31-104d125c9a64'}
@@ -5094,9 +5244,32 @@ class LibvirtConnTestCase(test.TestCase):
conn.lookupByID(2).AndReturn(DiagFakeDomain(5))
self.mox.ReplayAll()
-
self.assertEqual(5, driver.get_vcpu_used())
+ def test_failing_vcpu_count_none(self):
+ """Domain will return zero if the current number of vcpus used
+ is None. This is in case of VM state starting up or shutting
+ down. None type returned is counted as zero.
+ """
+
+ class DiagFakeDomain(object):
+ def __init__(self):
+ pass
+
+ def vcpus(self):
+ return None
+
+ driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ conn = driver._conn
+ self.mox.StubOutWithMock(driver, 'list_instance_ids')
+ conn.lookupByID = self.mox.CreateMockAnything()
+
+ driver.list_instance_ids().AndReturn([1])
+ conn.lookupByID(1).AndReturn(DiagFakeDomain())
+
+ self.mox.ReplayAll()
+ self.assertEqual(0, driver.get_vcpu_used())
+
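A None result from vcpus() is therefore folded into the total as zero. A sketch of the counting loop both vcpu tests imply (list_instance_ids and lookupByID match the stubs above; the tuple handling follows libvirt's virDomain.vcpus() convention):

    def get_vcpu_used(self):
        total = 0
        for dom_id in self.list_instance_ids():
            dom = self._conn.lookupByID(dom_id)
            vcpus = dom.vcpus()
            if vcpus is not None:
                # vcpus() returns (per-vcpu info, cpumap); a domain that
                # is starting or stopping can return None instead.
                total += len(vcpus[1])
        return total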
def test_get_instance_capabilities(self):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
@@ -5549,6 +5722,169 @@ class LibvirtConnTestCase(test.TestCase):
self.assertEqual('foo', conn.get_hypervisor_hostname())
self.assertEqual('foo', conn.get_hypervisor_hostname())
+ def test_get_connection_serial(self):
+
+ def get_conn_currency(driver):
+ driver._conn.getLibVersion()
+
+ def connect_with_block(*a, **k):
+ # enough to allow another connect to run
+ eventlet.sleep(0)
+ self.connect_calls += 1
+ return self.conn
+
+ def fake_register(*a, **k):
+ self.register_calls += 1
+
+ self.connect_calls = 0
+ self.register_calls = 0
+ self.stubs.Set(libvirt_driver.LibvirtDriver,
+ '_connect', connect_with_block)
+ driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ self.stubs.Set(self.conn, 'domainEventRegisterAny', fake_register)
+
+ # call serially
+ get_conn_currency(driver)
+ get_conn_currency(driver)
+ self.assertEqual(self.connect_calls, 1)
+ self.assertEqual(self.register_calls, 1)
+
+ def test_get_connection_concurrency(self):
+
+ def get_conn_currency(driver):
+ driver._conn.getLibVersion()
+
+ def connect_with_block(*a, **k):
+ # enough to allow another connect to run
+ eventlet.sleep(0)
+ self.connect_calls += 1
+ return self.conn
+
+ def fake_register(*a, **k):
+ self.register_calls += 1
+
+ self.connect_calls = 0
+ self.register_calls = 0
+ self.stubs.Set(libvirt_driver.LibvirtDriver,
+ '_connect', connect_with_block)
+ driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ self.stubs.Set(self.conn, 'domainEventRegisterAny', fake_register)
+
+ # call concurrently
+ thr1 = eventlet.spawn(get_conn_currency, driver=driver)
+ thr2 = eventlet.spawn(get_conn_currency, driver=driver)
+
+ # let threads run
+ eventlet.sleep(0)
+
+ thr1.wait()
+ thr2.wait()
+ self.assertEqual(self.connect_calls, 1)
+ self.assertEqual(self.register_calls, 1)
+
+ def test_post_live_migration_at_destination_with_block_device_info(self):
+ # Preparing mocks
+ dummyxml = ("<domain type='kvm'><name>instance-00000001</name>"
+ "<devices>"
+ "<graphics type='vnc' port='5900'/>"
+ "</devices></domain>")
+ mock_domain = self.mox.CreateMock(libvirt.virDomain)
+ self.mox.StubOutWithMock(mock_domain, "XMLDesc")
+ mock_domain.XMLDesc(0).AndReturn(dummyxml)
+ self.resultXML = None
+
+ def fake_none(*args, **kwargs):
+ return
+
+ def fake_getLibVersion():
+ return 9007
+
+ def fake_getCapabilities():
+ return """
+ <capabilities>
+ <host>
+ <uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid>
+ <cpu>
+ <arch>x86_64</arch>
+ <model>Penryn</model>
+ <vendor>Intel</vendor>
+ <topology sockets='1' cores='2' threads='1'/>
+ <feature name='xtpr'/>
+ </cpu>
+ </host>
+ </capabilities>
+ """
+
+ def fake_to_xml(context, instance, network_info, disk_info,
+ image_meta=None, rescue=None,
+ block_device_info=None, write_to_disk=False):
+ conf = conn.get_guest_config(instance, network_info, image_meta,
+ disk_info, rescue, block_device_info)
+ self.resultXML = conf.to_xml()
+ return self.resultXML
+
+ def fake_lookup_name(instance_name):
+ return mock_domain
+
+ def fake_defineXML(xml):
+ return
+
+ def fake_baselineCPU(cpu, flag):
+ return """<cpu mode='custom' match='exact'>
+ <model fallback='allow'>Westmere</model>
+ <vendor>Intel</vendor>
+ <feature policy='require' name='aes'/>
+ </cpu>
+ """
+
+ network_info = _fake_network_info(self.stubs, 1)
+ self.create_fake_libvirt_mock(getLibVersion=fake_getLibVersion,
+ getCapabilities=fake_getCapabilities,
+ getVersion=lambda: 1005001)
+ instance_ref = self.test_instance
+ instance_ref['image_ref'] = 123456 # we send an int to test sha1 call
+ instance_type = db.flavor_get(self.context,
+ instance_ref['instance_type_id'])
+ sys_meta = flavors.save_flavor_info({}, instance_type)
+ instance_ref['system_metadata'] = sys_meta
+ instance = db.instance_create(self.context, instance_ref)
+
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
+ libvirt_driver.LibvirtDriver._conn.listDefinedDomains = lambda: []
+ libvirt_driver.LibvirtDriver._conn.getCapabilities = \
+ fake_getCapabilities
+ libvirt_driver.LibvirtDriver._conn.getVersion = lambda: 1005001
+ libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup_name
+ libvirt_driver.LibvirtDriver._conn.defineXML = fake_defineXML
+ libvirt_driver.LibvirtDriver._conn.baselineCPU = fake_baselineCPU
+
+ self.mox.ReplayAll()
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.stubs.Set(conn,
+ 'to_xml',
+ fake_to_xml)
+ self.stubs.Set(conn,
+ '_lookup_by_name',
+ fake_lookup_name)
+ bdm = {'guest_format': None,
+ 'boot_index': 0,
+ 'mount_device': '/dev/vda',
+ 'connection_info':
+ {'driver_volume_type': 'iscsi'},
+ 'disk_bus': 'virtio',
+ 'device_type': 'disk',
+ 'delete_on_termination': False,
+ }
+ block_device_info = {
+ 'block_device_mapping': [mocked_bdm(1, bdm)]
+ }
+
+ conn.post_live_migration_at_destination(self.context, instance,
+ network_info, True,
+ block_device_info=block_device_info)
+ self.assertTrue('fake' in self.resultXML)
+
class HostStateTestCase(test.TestCase):
@@ -5920,21 +6256,46 @@ class IptablesFirewallTestCase(test.TestCase):
self.mox.StubOutWithMock(self.fw,
'add_filters_for_instance',
use_mock_anything=True)
+ self.mox.StubOutWithMock(self.fw.iptables.ipv4['filter'],
+ 'has_chain')
self.fw.instance_rules(instance_ref,
mox.IgnoreArg()).AndReturn((None, None))
self.fw.add_filters_for_instance(instance_ref, mox.IgnoreArg(),
- mox.IgnoreArg())
+ mox.IgnoreArg(), mox.IgnoreArg())
self.fw.instance_rules(instance_ref,
mox.IgnoreArg()).AndReturn((None, None))
+ self.fw.iptables.ipv4['filter'].has_chain(mox.IgnoreArg()
+ ).AndReturn(True)
self.fw.add_filters_for_instance(instance_ref, mox.IgnoreArg(),
- mox.IgnoreArg())
+ mox.IgnoreArg(), mox.IgnoreArg())
self.mox.ReplayAll()
self.fw.prepare_instance_filter(instance_ref, mox.IgnoreArg())
- self.fw.instances[instance_ref['id']] = instance_ref
+ self.fw.instance_info[instance_ref['id']] = (instance_ref, None)
self.fw.do_refresh_security_group_rules("fake")
+ def test_do_refresh_security_group_rules_instance_disappeared(self):
+ instance1 = {'id': 0, 'uuid': 'fake-uuid1'}
+ instance2 = {'id': 1, 'uuid': 'fake-uuid2'}
+ network_infos = _fake_network_info(self.stubs, 2)
+ self.fw.instance_info[instance1['id']] = (instance1, network_infos[0])
+ self.fw.instance_info[instance2['id']] = (instance2, network_infos[1])
+ mock_filter = mock.MagicMock()
+ with mock.patch.dict(self.fw.iptables.ipv4, {'filter': mock_filter}):
+ mock_filter.has_chain.return_value = False
+ with mock.patch.object(self.fw, 'instance_rules') as mock_ir:
+ mock_ir.return_value = (None, None)
+ self.fw.do_refresh_security_group_rules('secgroup')
+ self.assertEqual(2, mock_ir.call_count)
+ # NOTE(danms): Make sure that it is checking has_chain each time,
+ # continuing to process all the instances, and never adding the
+ # new chains back if has_chain() is False
+ mock_filter.has_chain.assert_has_calls([mock.call('inst-0'),
+ mock.call('inst-1')],
+ any_order=True)
+ self.assertEqual(0, mock_filter.add_chain.call_count)
+
def test_unfilter_instance_undefines_nwfilter(self):
admin_ctxt = context.get_admin_context()
@@ -6395,12 +6756,12 @@ disk size: 4.4M''', ''))
def _do_test_extract_snapshot(self, dest_format='raw', out_format='raw'):
self.mox.StubOutWithMock(utils, 'execute')
utils.execute('qemu-img', 'convert', '-f', 'qcow2', '-O', out_format,
- '-s', 'snap1', '/path/to/disk/image', '/extracted/snap')
+ '/path/to/disk/image', '/extracted/snap')
# Start test
self.mox.ReplayAll()
libvirt_utils.extract_snapshot('/path/to/disk/image', 'qcow2',
- 'snap1', '/extracted/snap', dest_format)
+ '/extracted/snap', dest_format)
def test_extract_snapshot_raw(self):
self._do_test_extract_snapshot()
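
The updated extract_snapshot tests above reflect that the helper no longer receives a snapshot id, so qemu-img apparently converts the whole backing image rather than a named internal snapshot. A minimal sketch of the resulting call, using the same illustrative paths as the test:

    # Command issued by libvirt_utils.extract_snapshot() after this change
    # (paths are the illustrative values used in the test above):
    cmd = ('qemu-img', 'convert', '-f', 'qcow2', '-O', 'raw',
           '/path/to/disk/image', '/extracted/snap')
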
@@ -6890,8 +7251,9 @@ class LibvirtDriverTestCase(test.TestCase):
f = open(libvirt_xml_path, 'w')
f.close()
- self.libvirtconnection.finish_revert_migration(ins_ref, None,
- None, power_on)
+ self.libvirtconnection.finish_revert_migration(
+ context.get_admin_context(), ins_ref,
+ None, None, power_on)
self.assertTrue(self.fake_create_domain_called)
def test_finish_revert_migration_power_on(self):
@@ -6908,6 +7270,7 @@ class LibvirtDriverTestCase(test.TestCase):
def wait(self):
return None
+ context = 'fake_context'
self.mox.StubOutWithMock(libvirt_utils, 'get_instance_path')
self.mox.StubOutWithMock(os.path, 'exists')
@@ -6933,7 +7296,7 @@ class LibvirtDriverTestCase(test.TestCase):
self.mox.ReplayAll()
- self.libvirtconnection.finish_revert_migration({}, [])
+ self.libvirtconnection.finish_revert_migration(context, {}, [])
def test_finish_revert_migration_after_crash(self):
self._test_finish_revert_migration_after_crash(backup_made=True)
@@ -6971,11 +7334,14 @@ class LibvirtDriverTestCase(test.TestCase):
def fake_os_path_exists(path):
return True
- def fake_shutil_rmtree(target):
- pass
-
self.stubs.Set(os.path, 'exists', fake_os_path_exists)
- self.stubs.Set(shutil, 'rmtree', fake_shutil_rmtree)
+
+ self.mox.StubOutWithMock(libvirt_utils, 'get_instance_path')
+ self.mox.StubOutWithMock(utils, 'execute')
+
+ libvirt_utils.get_instance_path(ins_ref).AndReturn('/fake/inst')
+ utils.execute('rm', '-rf', '/fake/inst_resize', delay_on_retry=True,
+ attempts=5)
self.mox.ReplayAll()
self.libvirtconnection._cleanup_resize(ins_ref,
@@ -6988,9 +7354,6 @@ class LibvirtDriverTestCase(test.TestCase):
def fake_os_path_exists(path):
return True
- def fake_shutil_rmtree(target):
- pass
-
def fake_undefine_domain(instance):
pass
@@ -7001,7 +7364,6 @@ class LibvirtDriverTestCase(test.TestCase):
pass
self.stubs.Set(os.path, 'exists', fake_os_path_exists)
- self.stubs.Set(shutil, 'rmtree', fake_shutil_rmtree)
self.stubs.Set(self.libvirtconnection, '_undefine_domain',
fake_undefine_domain)
self.stubs.Set(self.libvirtconnection, 'unplug_vifs',
@@ -7009,6 +7371,13 @@ class LibvirtDriverTestCase(test.TestCase):
self.stubs.Set(self.libvirtconnection.firewall_driver,
'unfilter_instance', fake_unfilter_instance)
+ self.mox.StubOutWithMock(libvirt_utils, 'get_instance_path')
+ self.mox.StubOutWithMock(utils, 'execute')
+
+ libvirt_utils.get_instance_path(ins_ref).AndReturn('/fake/inst')
+ utils.execute('rm', '-rf', '/fake/inst_resize', delay_on_retry=True,
+ attempts=5)
+
self.mox.ReplayAll()
self.libvirtconnection._cleanup_resize(ins_ref,
_fake_network_info(self.stubs, 1))
@@ -7194,6 +7563,33 @@ class LibvirtNonblockingTestCase(test.TestCase):
connection = libvirt_driver.LibvirtDriver('')
jsonutils.to_primitive(connection._conn, convert_instances=True)
+ def test_tpool_execute_calls_libvirt(self):
+ conn = libvirt.virConnect()
+ conn.is_expected = True
+
+ self.mox.StubOutWithMock(eventlet.tpool, 'execute')
+ eventlet.tpool.execute(
+ libvirt.openAuth,
+ 'test:///default',
+ mox.IgnoreArg(),
+ mox.IgnoreArg()).AndReturn(conn)
+ eventlet.tpool.execute(
+ conn.domainEventRegisterAny,
+ None,
+ libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE,
+ mox.IgnoreArg(),
+ mox.IgnoreArg())
+ if hasattr(libvirt.virConnect, 'registerCloseCallback'):
+ eventlet.tpool.execute(
+ conn.registerCloseCallback,
+ mox.IgnoreArg(),
+ mox.IgnoreArg())
+ self.mox.ReplayAll()
+
+ driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ c = driver._get_connection()
+ self.assertEqual(True, c.is_expected)
+
class LibvirtVolumeSnapshotTestCase(test.TestCase):
"""Tests for libvirtDriver.volume_snapshot_create/delete."""
diff --git a/nova/tests/virt/libvirt/test_libvirt_volume.py b/nova/tests/virt/libvirt/test_libvirt_volume.py
index a3f3ad1232..e3074eed97 100644
--- a/nova/tests/virt/libvirt/test_libvirt_volume.py
+++ b/nova/tests/virt/libvirt/test_libvirt_volume.py
@@ -441,6 +441,9 @@ class LibvirtVolumeTestCase(test.NoDBTestCase):
connection_info['data']['device_path'] = mpdev_filepath
target_portals = ['fake_portal1', 'fake_portal2']
libvirt_driver._get_multipath_device_name = lambda x: mpdev_filepath
+ self.stubs.Set(libvirt_driver,
+ '_get_target_portals_from_iscsiadm_output',
+ lambda x: [[self.location, self.iqn]])
conf = libvirt_driver.connect_volume(connection_info, self.disk_info)
tree = conf.format_dom()
self.assertEqual(tree.find('./source').get('dev'), mpdev_filepath)
@@ -462,6 +465,9 @@ class LibvirtVolumeTestCase(test.NoDBTestCase):
mpdev_filepath = '/dev/mapper/foo'
target_portals = ['fake_portal1', 'fake_portal2']
libvirt_driver._get_multipath_device_name = lambda x: mpdev_filepath
+ self.stubs.Set(libvirt_driver,
+ '_get_target_portals_from_iscsiadm_output',
+ lambda x: [['fake_portal1', 'fake_iqn1']])
conf = libvirt_driver.connect_volume(connection_info, self.disk_info)
tree = conf.format_dom()
self.assertEqual(tree.find('./source').get('dev'), mpdev_filepath)
@@ -487,6 +493,9 @@ class LibvirtVolumeTestCase(test.NoDBTestCase):
}
target_portals = ['fake_portal1', 'fake_portal2']
libvirt_driver._get_multipath_device_name = lambda x: mpdev_filepath
+ self.stubs.Set(libvirt_driver,
+ '_get_target_portals_from_iscsiadm_output',
+ lambda x: [[location, iqn]])
conf = libvirt_driver.connect_volume(connection_info, disk_info)
tree = conf.format_dom()
self.assertEqual(tree.find('./source').get('dev'), mpdev_filepath)
@@ -517,6 +526,9 @@ class LibvirtVolumeTestCase(test.NoDBTestCase):
}
target_portals = ['fake_portal1', 'fake_portal2']
libvirt_driver._get_multipath_device_name = lambda x: mpdev_filepath
+ self.stubs.Set(libvirt_driver,
+ '_get_target_portals_from_iscsiadm_output',
+ lambda x: [['fake_portal1', 'fake_iqn1']])
conf = libvirt_driver.connect_volume(connection_info, disk_info)
tree = conf.format_dom()
self.assertEqual(tree.find('./source').get('dev'), mpdev_filepath)
diff --git a/nova/tests/virt/powervm/test_powervm.py b/nova/tests/virt/powervm/test_powervm.py
index f2e7f80460..16a5595850 100644
--- a/nova/tests/virt/powervm/test_powervm.py
+++ b/nova/tests/virt/powervm/test_powervm.py
@@ -439,9 +439,11 @@ class PowerVMDriverTestCase(test.TestCase):
self.mox.ReplayAll()
- self.powervm_connection.finish_revert_migration(inst, network_info,
- block_device_info=None,
- power_on=power_on)
+ self.powervm_connection.finish_revert_migration(
+ context.get_admin_context(),
+ inst, network_info,
+ block_device_info=None,
+ power_on=power_on)
def test_finish_revert_migration_after_crash(self):
self._test_finish_revert_migration_after_crash(True, True, True)
@@ -819,10 +821,16 @@ class PowerVMDriverTestCase(test.TestCase):
aggregate={'name': 'foo'}, host='fake')
def test_plug_vifs(self):
- # Check to make sure the method passes (does nothing) since
- # it simply passes in the powervm driver but it raises a
- # NotImplementedError in the base driver class.
- self.powervm_connection.plug_vifs(self.instance, None)
+ # Check to make sure the method raises NotImplementedError.
+ self.assertRaises(NotImplementedError,
+ self.powervm_connection.plug_vifs,
+ instance=self.instance, network_info=None)
+
+ def test_unplug_vifs(self):
+ # Check to make sure the method raises NotImplementedError.
+ self.assertRaises(NotImplementedError,
+ self.powervm_connection.unplug_vifs,
+ instance=self.instance, network_info=None)
def test_manage_image_cache(self):
# Check to make sure the method passes (does nothing) since
diff --git a/nova/tests/virt/test_virt.py b/nova/tests/virt/test_virt.py
index 5048fc7562..6fa1a63cc6 100644
--- a/nova/tests/virt/test_virt.py
+++ b/nova/tests/virt/test_virt.py
@@ -125,12 +125,14 @@ class TestVirtDisk(test.NoDBTestCase):
disk_api.teardown_container('/mnt/nbd/nopart')
expected_commands += [
+ ('blockdev', '--flushbufs', '/dev/nbd15'),
('umount', '/dev/nbd15'),
('qemu-nbd', '-d', '/dev/nbd15'),
]
disk_api.teardown_container('/mnt/nbd/part')
expected_commands += [
+ ('blockdev', '--flushbufs', '/dev/nbd15'),
('umount', '/dev/mapper/nbd15p1'),
('kpartx', '-d', '/dev/nbd15'),
('qemu-nbd', '-d', '/dev/nbd15'),
diff --git a/nova/tests/virt/test_virt_drivers.py b/nova/tests/virt/test_virt_drivers.py
index 832d9d6f25..f6775fa8cc 100644
--- a/nova/tests/virt/test_virt_drivers.py
+++ b/nova/tests/virt/test_virt_drivers.py
@@ -28,8 +28,10 @@ from nova import test
from nova.tests.image import fake as fake_image
from nova.tests import utils as test_utils
from nova.tests.virt.libvirt import fake_libvirt_utils
+from nova.tests.virt.libvirt import test_libvirt
from nova.virt import event as virtevent
from nova.virt import fake
+from nova.virt.libvirt import imagebackend
LOG = logging.getLogger(__name__)
@@ -201,6 +203,12 @@ class _VirtDriverTestCase(_FakeDriverBackendTestCase):
fake.FakeVirtAPI())
self.ctxt = test_utils.get_test_admin_context()
self.image_service = fake_image.FakeImageService()
+ # NOTE(dripton): resolve_driver_format does some file reading and
+ # writing and chowning that complicate testing too much by requiring
+ # real directories with proper permissions. Just stub it out
+ # here; we test it in test_imagebackend.py
+ self.stubs.Set(imagebackend.Image, 'resolve_driver_format',
+ imagebackend.Image._get_driver_format)
def _get_running_instance(self):
instance_ref = test_utils.get_test_instance()
@@ -451,11 +459,7 @@ class _VirtDriverTestCase(_FakeDriverBackendTestCase):
self.connection.attach_volume(None, connection_info, instance_ref,
'/dev/sda')
- bdm = {
- 'root_device_name': None,
- 'swap': None,
- 'ephemerals': [],
- 'block_device_mapping': [{
+ bdm_data = {
'instance_uuid': instance_ref['uuid'],
'connection_info': {'driver_volume_type': 'fake'},
'mount_device': '/dev/sda',
@@ -464,8 +468,15 @@ class _VirtDriverTestCase(_FakeDriverBackendTestCase):
'snapshot_id': None,
'volume_id': 'abcdedf',
'volume_size': None,
- 'no_device': None
- }]
+ 'no_device': None,
+ }
+ bdm = {
+ 'block_device_mapping': [
+ test_libvirt.mocked_bdm(1, bdm_data),
+ ],
+ 'root_device_name': None,
+ 'swap': None,
+ 'ephemerals': [],
}
self.connection.power_on(self.ctxt, instance_ref, network_info, bdm)
self.connection.detach_volume(connection_info,
diff --git a/nova/tests/virt/vmwareapi/stubs.py b/nova/tests/virt/vmwareapi/stubs.py
index b6ea4aa68b..d5b2a3d8c5 100644
--- a/nova/tests/virt/vmwareapi/stubs.py
+++ b/nova/tests/virt/vmwareapi/stubs.py
@@ -23,7 +23,6 @@ from nova.virt.vmwareapi import driver
from nova.virt.vmwareapi import error_util
from nova.virt.vmwareapi import fake
from nova.virt.vmwareapi import network_util
-from nova.virt.vmwareapi import vmops
from nova.virt.vmwareapi import vmware_images
@@ -50,7 +49,6 @@ def fake_temp_session_exception():
def set_stubs(stubs):
"""Set the stubs."""
- stubs.Set(vmops.VMwareVMOps, 'plug_vifs', fake.fake_plug_vifs)
stubs.Set(network_util, 'get_network_with_the_name',
fake.fake_get_network)
stubs.Set(vmware_images, 'fetch_image', fake.fake_fetch_image)
diff --git a/nova/tests/virt/vmwareapi/test_vmwareapi.py b/nova/tests/virt/vmwareapi/test_vmwareapi.py
index 7058dd9170..e8e0afefd3 100755
--- a/nova/tests/virt/vmwareapi/test_vmwareapi.py
+++ b/nova/tests/virt/vmwareapi/test_vmwareapi.py
@@ -21,15 +21,20 @@
Test suite for VMwareAPI.
"""
+import collections
import contextlib
+import copy
+
import mock
import mox
from oslo.config import cfg
+import suds
from nova import block_device
from nova.compute import api as compute_api
from nova.compute import power_state
from nova.compute import task_states
+from nova.compute import vm_states
from nova import context
from nova import db
from nova import exception
@@ -138,6 +143,46 @@ class VMwareSessionTestCase(test.NoDBTestCase):
*args, **kwargs)
+class fake_vm_ref(object):
+ def __init__(self):
+ self.value = 4
+ self._type = 'VirtualMachine'
+
+
+class fake_service_content(object):
+ def __init__(self):
+ self.ServiceContent = vmwareapi_fake.DataObject()
+ self.ServiceContent.fake = 'fake'
+
+
+class VMwareSudsTest(test.NoDBTestCase):
+
+ def setUp(self):
+ super(VMwareSudsTest, self).setUp()
+
+ def new_client_init(self, url, **kwargs):
+ return
+
+ mock.patch.object(suds.client.Client,
+ '__init__', new=new_client_init).start()
+ self.vim = self._vim_create()
+ self.addCleanup(mock.patch.stopall)
+
+ def _vim_create(self):
+
+ def fake_retrieve_service_content(fake):
+ return fake_service_content()
+
+ self.stubs.Set(vim.Vim, 'retrieve_service_content',
+ fake_retrieve_service_content)
+ return vim.Vim()
+
+ def test_exception_with_deepcopy(self):
+ self.assertIsNotNone(self.vim)
+ self.assertRaises(error_util.VimException,
+ copy.deepcopy, self.vim)
+
+
class VMwareAPIConfTestCase(test.NoDBTestCase):
"""Unit tests for VMWare API configurations."""
def setUp(self):
@@ -241,6 +286,32 @@ class VMwareAPIVMTestCase(test.NoDBTestCase):
self.conn = driver.VMwareAPISession()
self.assertEqual(self.attempts, 2)
+ def test_wait_for_task_exception(self):
+ self.flags(task_poll_interval=1, group='vmware')
+ self.login_session = vmwareapi_fake.FakeVim()._login()
+ self.stop_called = 0
+
+ def _fake_login(_self):
+ return self.login_session
+
+ self.stubs.Set(vmwareapi_fake.FakeVim, '_login', _fake_login)
+
+ def fake_poll_task(instance_uuid, task_ref, done):
+ done.send_exception(exception.NovaException('fake exception'))
+
+ def fake_stop_loop(loop):
+ self.stop_called += 1
+ return loop.stop()
+
+ self.conn = driver.VMwareAPISession()
+ self.stubs.Set(self.conn, "_poll_task",
+ fake_poll_task)
+ self.stubs.Set(self.conn, "_stop_loop",
+ fake_stop_loop)
+ self.assertRaises(exception.NovaException,
+ self.conn._wait_for_task, 'fake-id', 'fake-ref')
+ self.assertEqual(self.stop_called, 1)
+
def _create_instance_in_the_db(self, node=None, set_image_ref=True,
uuid=None):
if not node:
@@ -723,6 +794,31 @@ class VMwareAPIVMTestCase(test.NoDBTestCase):
'node': self.instance_node})
self._check_vm_info(info, power_state.RUNNING)
+ def destroy_rescued(self, fake_method):
+ self._rescue()
+ with (
+ mock.patch.object(self.conn._volumeops, "detach_disk_from_vm",
+ fake_method)
+ ):
+ self.instance['vm_state'] = vm_states.RESCUED
+ self.conn.destroy(self.instance, self.network_info)
+ inst_path = '[%s] %s/%s.vmdk' % (self.ds, self.uuid, self.uuid)
+ self.assertFalse(vmwareapi_fake.get_file(inst_path))
+ rescue_file_path = '[%s] %s-rescue/%s-rescue.vmdk' % (self.ds,
+ self.uuid,
+ self.uuid)
+ self.assertFalse(vmwareapi_fake.get_file(rescue_file_path))
+
+ def test_destroy_rescued(self):
+ def fake_detach_disk_from_vm(*args, **kwargs):
+ pass
+ self.destroy_rescued(fake_detach_disk_from_vm)
+
+ def test_destroy_rescued_with_exception(self):
+ def fake_detach_disk_from_vm(*args, **kwargs):
+ raise exception.NovaException('Here is my fake exception')
+ self.destroy_rescued(fake_detach_disk_from_vm)
+
def test_destroy(self):
self._create_vm()
info = self.conn.get_info({'uuid': self.uuid,
@@ -739,10 +835,19 @@ class VMwareAPIVMTestCase(test.NoDBTestCase):
self.assertEquals(self.conn.destroy(self.instance, self.network_info),
None)
- def _rescue(self):
+ def _rescue(self, config_drive=False):
def fake_attach_disk_to_vm(*args, **kwargs):
pass
+ if config_drive:
+ def fake_create_config_drive(instance, injected_files, password,
+ data_store_name, dc_name,
+ instance_uuid, cookies):
+ self.assertTrue(uuidutils.is_uuid_like(instance['uuid']))
+
+ self.stubs.Set(self.conn._vmops, '_create_config_drive',
+ fake_create_config_drive)
+
self._create_vm()
info = self.conn.get_info({'name': 1, 'uuid': self.uuid,
'node': self.instance_node})
@@ -750,7 +855,7 @@ class VMwareAPIVMTestCase(test.NoDBTestCase):
fake_attach_disk_to_vm)
self.conn.rescue(self.context, self.instance, self.network_info,
self.image, 'fake-password')
- info = self.conn.get_info({'name-rescue': 1,
+ info = self.conn.get_info({'name': '1-rescue',
'uuid': '%s-rescue' % self.uuid,
'node': self.instance_node})
self._check_vm_info(info, power_state.RUNNING)
@@ -761,9 +866,37 @@ class VMwareAPIVMTestCase(test.NoDBTestCase):
def test_rescue(self):
self._rescue()
+ def test_rescue_with_config_drive(self):
+ self.flags(force_config_drive=True)
+ self._rescue(config_drive=True)
+
def test_unrescue(self):
self._rescue()
- self.conn.unrescue(self.instance, None)
+ self.test_vm_ref = None
+ self.test_device_name = None
+
+ def fake_power_off_vm_ref(vm_ref):
+ self.test_vm_ref = vm_ref
+ self.assertIsNotNone(vm_ref)
+
+ def fake_detach_disk_from_vm(vm_ref, instance,
+ device_name, destroy_disk=False):
+ self.test_device_name = device_name
+ info = self.conn.get_info(instance)
+ self._check_vm_info(info, power_state.SHUTDOWN)
+
+ with contextlib.nested(
+ mock.patch.object(self.conn._vmops, "_power_off_vm_ref",
+ side_effect=fake_power_off_vm_ref),
+ mock.patch.object(self.conn._volumeops, "detach_disk_from_vm",
+ side_effect=fake_detach_disk_from_vm),
+ ) as (poweroff, detach):
+ self.conn.unrescue(self.instance, None)
+ poweroff.assert_called_once_with(self.test_vm_ref)
+ detach.assert_called_once_with(self.test_vm_ref, mock.ANY,
+ self.test_device_name)
+ self.test_vm_ref = None
+ self.test_device_name = None
info = self.conn.get_info({'name': 1, 'uuid': self.uuid,
'node': self.instance_node})
self._check_vm_info(info, power_state.RUNNING)
@@ -875,7 +1008,8 @@ class VMwareAPIVMTestCase(test.NoDBTestCase):
fake_wait_for_task)
# perform the revert on our stubbed methods
- self.conn.finish_revert_migration(instance=self.instance,
+ self.conn.finish_revert_migration(self.context,
+ instance=self.instance,
network_info=None,
power_on=power_on)
@@ -908,11 +1042,12 @@ class VMwareAPIVMTestCase(test.NoDBTestCase):
def _test_get_vnc_console(self):
self._create_vm()
fake_vm = vmwareapi_fake._get_objects("VirtualMachine").objects[0]
- fake_vm_id = int(fake_vm.obj.value.replace('vm-', ''))
+ OptionValue = collections.namedtuple('OptionValue', ['key', 'value'])
+ opt_val = OptionValue(key='', value=5906)
+ fake_vm.set(vm_util.VNC_CONFIG_KEY, opt_val)
vnc_dict = self.conn.get_vnc_console(self.instance)
- self.assertEquals(vnc_dict['host'], self.vnc_host)
- self.assertEquals(vnc_dict['port'], cfg.CONF.vmware.vnc_port +
- fake_vm_id % cfg.CONF.vmware.vnc_port_total)
+ self.assertEqual(vnc_dict['host'], self.vnc_host)
+ self.assertEqual(vnc_dict['port'], 5906)
def test_get_vnc_console(self):
self._test_get_vnc_console()
@@ -921,6 +1056,13 @@ class VMwareAPIVMTestCase(test.NoDBTestCase):
self.flags(vnc_password='vmware', group='vmware')
self._test_get_vnc_console()
+ def test_get_vnc_console_noport(self):
+ self._create_vm()
+ fake_vm = vmwareapi_fake._get_objects("VirtualMachine").objects[0]
+ self.assertRaises(exception.ConsoleTypeUnavailable,
+ self.conn.get_vnc_console,
+ self.instance)
+
def test_host_ip_addr(self):
self.assertEquals(self.conn.get_host_ip_addr(), "test_url")
@@ -1105,6 +1247,11 @@ class VMwareAPIVMTestCase(test.NoDBTestCase):
self.assertEqual(connector['initiator'], 'iscsi-name')
self.assertNotIn('instance', connector)
+ def test_refresh_instance_security_rules(self):
+ self.assertRaises(NotImplementedError,
+ self.conn.refresh_instance_security_rules,
+ instance=None)
+
class VMwareAPIHostTestCase(test.NoDBTestCase):
"""Unit tests for Vmware API host calls."""
@@ -1130,6 +1277,7 @@ class VMwareAPIHostTestCase(test.NoDBTestCase):
self.assertEquals(stats['disk_used'], 1024 - 500)
self.assertEquals(stats['host_memory_total'], 1024)
self.assertEquals(stats['host_memory_free'], 1024 - 500)
+ self.assertEquals(stats['hypervisor_version'], 5000000)
supported_instances = [('i686', 'vmware', 'hvm'),
('x86_64', 'vmware', 'hvm')]
self.assertEquals(stats['supported_instances'], supported_instances)
@@ -1199,7 +1347,7 @@ class VMwareAPIVCDriverTestCase(VMwareAPIVMTestCase):
self.assertEquals(stats['memory_mb'], 1000)
self.assertEquals(stats['memory_mb_used'], 500)
self.assertEquals(stats['hypervisor_type'], 'VMware vCenter Server')
- self.assertEquals(stats['hypervisor_version'], '5.1.0')
+ self.assertEquals(stats['hypervisor_version'], 5001000)
self.assertEquals(stats['hypervisor_hostname'], self.node_name)
self.assertEquals(stats['cpu_info'], jsonutils.dumps(cpu_info))
self.assertEquals(stats['supported_instances'],
@@ -1362,3 +1510,17 @@ class VMwareAPIVCDriverTestCase(VMwareAPIVMTestCase):
self._create_vm()
# currently there are 2 data stores
self.assertEqual(2, len(vmops._datastore_dc_mapping))
+
+ def test_plug_vifs(self):
+ # Check to make sure the method raises NotImplementedError.
+ self._create_instance_in_the_db()
+ self.assertRaises(NotImplementedError,
+ self.conn.plug_vifs,
+ instance=self.instance, network_info=None)
+
+ def test_unplug_vifs(self):
+ # Check to make sure the method raises NotImplementedError.
+ self._create_instance_in_the_db()
+ self.assertRaises(NotImplementedError,
+ self.conn.unplug_vifs,
+ instance=self.instance, network_info=None)
diff --git a/nova/tests/virt/vmwareapi/test_vmwareapi_vm_util.py b/nova/tests/virt/vmwareapi/test_vmwareapi_vm_util.py
index 5ed7b5104a..fa0cccd14a 100755
--- a/nova/tests/virt/vmwareapi/test_vmwareapi_vm_util.py
+++ b/nova/tests/virt/vmwareapi/test_vmwareapi_vm_util.py
@@ -21,6 +21,7 @@ import re
from nova import exception
from nova.openstack.common.gettextutils import _
+from nova.openstack.common import uuidutils
from nova import test
from nova.virt.vmwareapi import fake
from nova.virt.vmwareapi import vm_util
@@ -285,21 +286,46 @@ class VMwareVMUtilTestCase(test.NoDBTestCase):
self.assertEqual("ns0:VirtualLsiLogicSASController",
config_spec.device.obj_name)
- def test_get_vmdk_path_and_adapter_type(self):
+ def _vmdk_path_and_adapter_type_devices(self, filename, parent=None):
# Test the adapter_type returned for a lsiLogic sas controller
controller_key = 1000
- filename = '[test_datastore] test_file.vmdk'
disk = fake.VirtualDisk()
disk.controllerKey = controller_key
disk_backing = fake.VirtualDiskFlatVer2BackingInfo()
disk_backing.fileName = filename
+ if parent:
+ disk_backing.parent = parent
disk.backing = disk_backing
controller = fake.VirtualLsiLogicSASController()
controller.key = controller_key
devices = [disk, controller]
+ return devices
+
+ def test_get_vmdk_path_and_adapter_type(self):
+ filename = '[test_datastore] test_file.vmdk'
+ devices = self._vmdk_path_and_adapter_type_devices(filename)
vmdk_info = vm_util.get_vmdk_path_and_adapter_type(devices)
adapter_type = vmdk_info[2]
self.assertEqual('lsiLogicsas', adapter_type)
+ self.assertEqual(vmdk_info[0], filename)
+
+ def test_get_vmdk_path_and_adapter_type_with_match(self):
+ n_filename = '[test_datastore] uuid/uuid.vmdk'
+ devices = self._vmdk_path_and_adapter_type_devices(n_filename)
+ vmdk_info = vm_util.get_vmdk_path_and_adapter_type(
+ devices, uuid='uuid')
+ adapter_type = vmdk_info[2]
+ self.assertEqual('lsiLogicsas', adapter_type)
+ self.assertEqual(n_filename, vmdk_info[0])
+
+ def test_get_vmdk_path_and_adapter_type_with_nomatch(self):
+ n_filename = '[test_datastore] diuu/diuu.vmdk'
+ devices = self._vmdk_path_and_adapter_type_devices(n_filename)
+ vmdk_info = vm_util.get_vmdk_path_and_adapter_type(
+ devices, uuid='uuid')
+ adapter_type = vmdk_info[2]
+ self.assertEqual('lsiLogicsas', adapter_type)
+ self.assertIsNone(vmdk_info[0])
def test_get_vmdk_adapter_type(self):
# Test for the adapter_type to be used in vmdk descriptor
@@ -348,6 +374,31 @@ class VMwareVMUtilTestCase(test.NoDBTestCase):
result = re.sub(r'\s+', '', repr(result))
self.assertEqual(expected, result)
+ def _create_fake_vms(self):
+ fake_vms = fake.FakeRetrieveResult()
+ OptionValue = collections.namedtuple('OptionValue', ['key', 'value'])
+ for i in range(10):
+ vm = fake.ManagedObject()
+ opt_val = OptionValue(key='', value=5900 + i)
+ vm.set(vm_util.VNC_CONFIG_KEY, opt_val)
+ fake_vms.add_object(vm)
+ return fake_vms
+
+ def test_get_vnc_port(self):
+ fake_vms = self._create_fake_vms()
+ self.flags(vnc_port=5900, group='vmware')
+ self.flags(vnc_port_total=10000, group='vmware')
+ actual = vm_util.get_vnc_port(fake_session(fake_vms))
+ self.assertEqual(actual, 5910)
+
+ def test_get_vnc_port_exhausted(self):
+ fake_vms = self._create_fake_vms()
+ self.flags(vnc_port=5900, group='vmware')
+ self.flags(vnc_port_total=10, group='vmware')
+ self.assertRaises(exception.ConsolePortRangeExhausted,
+ vm_util.get_vnc_port,
+ fake_session(fake_vms))
+
def test_get_all_cluster_refs_by_name_none(self):
fake_objects = fake.FakeRetrieveResult()
refs = vm_util.get_all_cluster_refs_by_name(fake_session(fake_objects),
@@ -413,3 +464,32 @@ class VMwareVMUtilTestCase(test.NoDBTestCase):
def test_detach_virtual_disk_destroy_spec(self):
self._test_detach_virtual_disk_spec(destroy_disk=True)
+
+ def test_get_vm_create_spec(self):
+ instance_uuid = uuidutils.generate_uuid()
+ fake_instance = {'id': 7, 'name': 'fake!',
+ 'uuid': instance_uuid,
+ 'vcpus': 2, 'memory_mb': 2048}
+ result = vm_util.get_vm_create_spec(fake.FakeFactory(),
+ fake_instance, 'fake-name',
+ 'fake-datastore', [])
+ expected = """{
+ 'files': {'vmPathName': '[fake-datastore]',
+ 'obj_name': 'ns0:VirtualMachineFileInfo'},
+ 'name': 'fake-name', 'deviceChange': [],
+ 'extraConfig': [{'value': '%s',
+ 'key': 'nvp.vm-uuid',
+ 'obj_name': 'ns0:OptionValue'}],
+ 'memoryMB': 2048,
+ 'obj_name': 'ns0:VirtualMachineConfigSpec',
+ 'guestId': 'otherGuest',
+ 'tools': {'beforeGuestStandby': True,
+ 'beforeGuestReboot': True,
+ 'beforeGuestShutdown': True,
+ 'afterResume': True,
+ 'afterPowerOn': True,
+ 'obj_name': 'ns0:ToolsConfigInfo'},
+ 'numCPUs': 2}""" % instance_uuid
+ expected = re.sub(r'\s+', '', expected)
+ result = re.sub(r'\s+', '', repr(result))
+ self.assertEqual(expected, result)
diff --git a/nova/tests/virt/vmwareapi/test_vmwareapi_vmops.py b/nova/tests/virt/vmwareapi/test_vmwareapi_vmops.py
index fe7fbeffaf..3bd3ba3dde 100644
--- a/nova/tests/virt/vmwareapi/test_vmwareapi_vmops.py
+++ b/nova/tests/virt/vmwareapi/test_vmwareapi_vmops.py
@@ -15,12 +15,30 @@
# License for the specific language governing permissions and limitations
# under the License.
+import mock
+
from nova.network import model as network_model
from nova import test
from nova import utils
+from nova.virt.vmwareapi import fake as vmwareapi_fake
+from nova.virt.vmwareapi import vim_util
from nova.virt.vmwareapi import vmops
+class fake_session(object):
+ def __init__(self, ret=None):
+ self.ret = ret
+
+ def _get_vim(self):
+ return vmwareapi_fake.FakeVim()
+
+ def _call_method(self, module, method, *args, **kwargs):
+ return self.ret
+
+ def _wait_for_task(self, task_ref):
+ return
+
+
class VMwareVMOpsTestCase(test.NoDBTestCase):
def setUp(self):
super(VMwareVMOpsTestCase, self).setUp()
@@ -55,6 +73,7 @@ class VMwareVMOpsTestCase(test.NoDBTestCase):
rxtx_cap=3)
])
utils.reset_is_neutron()
+ self._session = fake_session()
def test_get_machine_id_str(self):
result = vmops.VMwareVMOps._get_machine_id_str(self.network_info)
@@ -103,3 +122,51 @@ class VMwareVMOpsTestCase(test.NoDBTestCase):
value = vmops.VMwareVMOps.decide_linked_clone("yes", False)
self.assertTrue(value,
"image level metadata failed to override global")
+
+ def _test_get_datacenter_ref_and_name(self, ds_ref_exists=False):
+ instance_ds_ref = mock.Mock()
+ instance_ds_ref.value = "ds-1"
+ _vcvmops = vmops.VMwareVCVMOps(self._session, None, None)
+ if ds_ref_exists:
+ ds_ref = mock.Mock()
+ ds_ref.value = "ds-1"
+ else:
+ ds_ref = None
+
+ def fake_call_method(module, method, *args, **kwargs):
+ fake_object1 = vmwareapi_fake.FakeRetrieveResult()
+ fake_object1.add_object(vmwareapi_fake.Datacenter(
+ ds_ref=ds_ref))
+ if not ds_ref:
+ # Token is set for the fake_object1, so it will continue to
+ # fetch the next object.
+ setattr(fake_object1, 'token', 'token-0')
+ if method == "continue_to_get_objects":
+ fake_object2 = vmwareapi_fake.FakeRetrieveResult()
+ fake_object2.add_object(vmwareapi_fake.Datacenter())
+ return fake_object2
+
+ return fake_object1
+
+ with mock.patch.object(self._session, '_call_method',
+ side_effect=fake_call_method) as fake_call:
+ dc_info = _vcvmops.get_datacenter_ref_and_name(instance_ds_ref)
+
+ if ds_ref:
+ self.assertEqual(1, len(_vcvmops._datastore_dc_mapping))
+ fake_call.assert_called_once_with(vim_util, "get_objects",
+ "Datacenter", ["name", "datastore", "vmFolder"])
+ self.assertEqual("ha-datacenter", dc_info.name)
+ else:
+ calls = [mock.call(vim_util, "get_objects", "Datacenter",
+ ["name", "datastore", "vmFolder"]),
+ mock.call(vim_util, "continue_to_get_objects",
+ "token-0")]
+ fake_call.assert_has_calls(calls)
+ self.assertIsNone(dc_info)
+
+ def test_get_datacenter_ref_and_name(self):
+ self._test_get_datacenter_ref_and_name(ds_ref_exists=True)
+
+ def test_get_datacenter_ref_and_name_with_no_datastore(self):
+ self._test_get_datacenter_ref_and_name()
diff --git a/nova/tests/virt/vmwareapi/test_vmwareapi_volumeops.py b/nova/tests/virt/vmwareapi/test_vmwareapi_volumeops.py
index a4eefd11c8..3d5d831380 100644
--- a/nova/tests/virt/vmwareapi/test_vmwareapi_volumeops.py
+++ b/nova/tests/virt/vmwareapi/test_vmwareapi_volumeops.py
@@ -27,11 +27,17 @@ from nova.virt.vmwareapi import volumeops
class VMwareVolumeOpsTestCase(test.NoDBTestCase):
def setUp(self):
+
+ def fake_del():
+ return
+
super(VMwareVolumeOpsTestCase, self).setUp()
vmwareapi_fake.reset()
stubs.set_stubs(self.stubs)
-
self._session = driver.VMwareAPISession()
+ self.stubs.Set(self._session, '__del__',
+ fake_del)
+
self._volumeops = volumeops.VMwareVolumeOps(self._session)
self.instance = {'name': 'fake_name', 'uuid': 'fake_uuid'}
diff --git a/nova/tests/virt/xenapi/test_vmops.py b/nova/tests/virt/xenapi/test_vmops.py
index 3222094ba6..188e6eb395 100644
--- a/nova/tests/virt/xenapi/test_vmops.py
+++ b/nova/tests/virt/xenapi/test_vmops.py
@@ -80,6 +80,7 @@ class VMOpsTestCase(test.NoDBTestCase):
def _test_finish_revert_migration_after_crash(self, backup_made, new_made):
instance = {'name': 'foo',
'task_state': task_states.RESIZE_MIGRATING}
+ context = 'fake_context'
self.mox.StubOutWithMock(vm_utils, 'lookup')
self.mox.StubOutWithMock(self._vmops, '_destroy')
@@ -100,7 +101,7 @@ class VMOpsTestCase(test.NoDBTestCase):
self.mox.ReplayAll()
- self._vmops.finish_revert_migration(instance, [])
+ self._vmops.finish_revert_migration(context, instance, [])
def test_finish_revert_migration_after_crash(self):
self._test_finish_revert_migration_after_crash(True, True)
diff --git a/nova/tests/virt/xenapi/test_xenapi.py b/nova/tests/virt/xenapi/test_xenapi.py
index 9a45bf0a43..631d382a5c 100644
--- a/nova/tests/virt/xenapi/test_xenapi.py
+++ b/nova/tests/virt/xenapi/test_xenapi.py
@@ -1309,13 +1309,13 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
def __init__(self):
self.finish_revert_migration_called = False
- def finish_revert_migration(self, instance, block_info,
+ def finish_revert_migration(self, context, instance, block_info,
power_on):
self.finish_revert_migration_called = True
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
conn._vmops = VMOpsMock()
- conn.finish_revert_migration(instance, None)
+ conn.finish_revert_migration(self.context, instance, None)
self.assertTrue(conn._vmops.finish_revert_migration_called)
def test_reboot_hard(self):
@@ -1717,6 +1717,7 @@ class XenAPIMigrateInstance(stubs.XenAPITestBase):
self.called = False
self.fake_vm_start_called = False
self.fake_finish_revert_migration_called = False
+ context = 'fake_context'
def fake_vm_start(*args, **kwargs):
self.fake_vm_start_called = True
@@ -1750,7 +1751,7 @@ class XenAPIMigrateInstance(stubs.XenAPITestBase):
self.assertEqual(self.called, True)
self.assertEqual(self.fake_vm_start_called, power_on)
- conn.finish_revert_migration(instance, network_info)
+ conn.finish_revert_migration(context, instance, network_info)
self.assertEqual(self.fake_finish_revert_migration_called, True)
def test_revert_migrate_power_on(self):
@@ -2759,7 +2760,8 @@ class XenAPIDom0IptablesFirewallTestCase(stubs.XenAPITestBase):
db.instance_add_security_group(admin_ctxt, instance_ref['uuid'],
secgroup['id'])
self.fw.prepare_instance_filter(instance_ref, network_info)
- self.fw.instances[instance_ref['id']] = instance_ref
+ self.fw.instance_info[instance_ref['id']] = (instance_ref,
+ network_info)
self._validate_security_group()
# add a rule to the security group
db.security_group_rule_create(admin_ctxt,
diff --git a/nova/utils.py b/nova/utils.py
index 065d9ff89c..5f10a8a21f 100644..100755
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -23,6 +23,7 @@ import contextlib
import datetime
import functools
import hashlib
+import hmac
import inspect
import os
import pyclbr
@@ -924,6 +925,20 @@ def temporary_chown(path, owner_uid=None):
execute('chown', orig_uid, path, run_as_root=True)
+def chown(path, owner_uid=None):
+ """chown a path.
+
+ :param owner_uid: UID of owner (defaults to current user)
+ """
+ if owner_uid is None:
+ owner_uid = os.getuid()
+
+ orig_uid = os.stat(path).st_uid
+
+ if orig_uid != owner_uid:
+ execute('chown', owner_uid, path, run_as_root=True)
+
+
@contextlib.contextmanager
def tempdir(**kwargs):
argdict = kwargs.copy()
@@ -1165,7 +1180,17 @@ def is_none_string(val):
def convert_version_to_int(version):
- return version[0] * 1000000 + version[1] * 1000 + version[2]
+ try:
+ if type(version) == str:
+ version = convert_version_to_tuple(version)
+ if type(version) == tuple:
+ return reduce(lambda x, y: (x * 1000) + y, version)
+ except Exception:
+ raise exception.NovaException(message="Hypervisor version invalid.")
+
+
+def convert_version_to_tuple(version_str):
+ return tuple(int(part) for part in version_str.split('.'))
def is_neutron():
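
As a quick check on the rewritten version helper, a minimal usage sketch; the values mirror the hypervisor_version and getVersion expectations in the VMware and libvirt tests earlier in this patch:

    from nova import utils

    # '5.1.0' -> (5, 1, 0) -> ((5 * 1000) + 1) * 1000 + 0
    assert utils.convert_version_to_int('5.1.0') == 5001000
    assert utils.convert_version_to_int((1, 5, 1)) == 1005001
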
@@ -1278,3 +1303,20 @@ def get_boolean(value):
return value
else:
return strutils.bool_from_string(value)
+
+if hasattr(hmac, 'compare_digest'):
+ constant_time_compare = hmac.compare_digest
+else:
+ def constant_time_compare(first, second):
+ """Returns True if both string inputs are equal, otherwise False.
+
+ This function should take a constant amount of time regardless of
+ how many characters in the strings match.
+
+ """
+ if len(first) != len(second):
+ return False
+ result = 0
+ for x, y in zip(first, second):
+ result |= ord(x) ^ ord(y)
+ return result == 0
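
A short usage sketch of the new comparison helper, assuming nothing beyond the fallback shown above: both inputs must be the same length to compare equal, and the pure-Python branch XORs every character so the running time does not leak where the first mismatch occurs.

    from nova import utils

    assert utils.constant_time_compare('fake-token', 'fake-token')
    assert not utils.constant_time_compare('fake-token', 'fake-tokeX')
    assert not utils.constant_time_compare('fake-token', 'fake')
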
diff --git a/nova/virt/configdrive.py b/nova/virt/configdrive.py
index 3c3e430732..1ad040ff27 100644
--- a/nova/virt/configdrive.py
+++ b/nova/virt/configdrive.py
@@ -88,7 +88,7 @@ class ConfigDriveBuilder(object):
filepath = os.path.join(self.tempdir, path)
dirname = os.path.dirname(filepath)
fileutils.ensure_tree(dirname)
- with open(filepath, 'w') as f:
+ with open(filepath, 'wb') as f:
f.write(data)
def add_instance_metadata(self, instance_md):
@@ -122,7 +122,7 @@ class ConfigDriveBuilder(object):
def _make_vfat(self, path):
# NOTE(mikal): This is a little horrible, but I couldn't find an
# equivalent to genisoimage for vfat filesystems.
- with open(path, 'w') as f:
+ with open(path, 'wb') as f:
f.truncate(CONFIGDRIVESIZE_BYTES)
utils.mkfs('vfat', path, label='config-2')
diff --git a/nova/virt/disk/mount/api.py b/nova/virt/disk/mount/api.py
index fb0ba8d5fe..6a545b7000 100644
--- a/nova/virt/disk/mount/api.py
+++ b/nova/virt/disk/mount/api.py
@@ -205,10 +205,14 @@ class Mount(object):
"""Unmount the device from the file system."""
if not self.mounted:
return
+ self.flush_dev()
LOG.debug(_("Umount %s") % self.mapped_device)
utils.execute('umount', self.mapped_device, run_as_root=True)
self.mounted = False
+ def flush_dev(self):
+ pass
+
def do_mount(self):
"""Call the get, map and mnt operations."""
status = False
diff --git a/nova/virt/disk/mount/nbd.py b/nova/virt/disk/mount/nbd.py
index 801e25eba3..cc33bc64b4 100644
--- a/nova/virt/disk/mount/nbd.py
+++ b/nova/virt/disk/mount/nbd.py
@@ -129,3 +129,12 @@ class NbdMount(api.Mount):
utils.execute('qemu-nbd', '-d', self.device, run_as_root=True)
self.linked = False
self.device = None
+
+ def flush_dev(self):
+ """flush NBD block device buffer."""
+ # Perform an explicit BLKFLSBUF to support older qemu-nbd(s).
+ # Without this flush, when a nbd device gets re-used the
+ # qemu-nbd intermittently hangs.
+ if self.device:
+ utils.execute('blockdev', '--flushbufs',
+ self.device, run_as_root=True)
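
For reference, a sketch of the teardown ordering this hook produces for a partitioned NBD-backed container, matching the expected_commands added to TestVirtDisk earlier in this patch (the device name is illustrative):

    expected_commands = [
        ('blockdev', '--flushbufs', '/dev/nbd15'),  # new flush_dev() step
        ('umount', '/dev/mapper/nbd15p1'),
        ('kpartx', '-d', '/dev/nbd15'),
        ('qemu-nbd', '-d', '/dev/nbd15'),
    ]
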
diff --git a/nova/virt/disk/vfs/guestfs.py b/nova/virt/disk/vfs/guestfs.py
index 3824fe2076..bb1f986a52 100644
--- a/nova/virt/disk/vfs/guestfs.py
+++ b/nova/virt/disk/vfs/guestfs.py
@@ -122,13 +122,18 @@ class VFSGuestFS(vfs.VFS):
self.handle.aug_init("/", 0)
except RuntimeError as e:
- # dereference object and implicitly close()
- self.handle = None
+ # explicitly teardown instead of implicit close()
+ # to prevent orphaned VMs in cases when an implicit
+ # close() is not enough
+ self.teardown()
raise exception.NovaException(
_("Error mounting %(imgfile)s with libguestfs (%(e)s)") %
{'imgfile': self.imgfile, 'e': e})
except Exception:
- self.handle = None
+ # explicitly teardown instead of implicit close()
+ # to prevent orphaned VMs in cases when an implicit
+ # close() is not enough
+ self.teardown()
raise
def teardown(self):
diff --git a/nova/virt/docker/driver.py b/nova/virt/docker/driver.py
index 2bdaaffabc..f6b802a756 100644
--- a/nova/virt/docker/driver.py
+++ b/nova/virt/docker/driver.py
@@ -99,11 +99,13 @@ class DockerDriver(driver.ComputeDriver):
def plug_vifs(self, instance, network_info):
"""Plug VIFs into networks."""
- pass
+ msg = _("VIF plugging is not supported by the Docker driver.")
+ raise NotImplementedError(msg)
def unplug_vifs(self, instance, network_info):
"""Unplug VIFs from networks."""
- pass
+ msg = _("VIF unplugging is not supported by the Docker driver.")
+ raise NotImplementedError(msg)
def find_container_by_name(self, name):
for info in self.list_instances(inspect=True):
diff --git a/nova/virt/driver.py b/nova/virt/driver.py
index 973b9037f5..66d5077245 100644
--- a/nova/virt/driver.py
+++ b/nova/virt/driver.py
@@ -37,7 +37,8 @@ driver_opts = [
help='Driver to use for controlling virtualization. Options '
'include: libvirt.LibvirtDriver, xenapi.XenAPIDriver, '
'fake.FakeDriver, baremetal.BareMetalDriver, '
- 'vmwareapi.VMwareESXDriver, vmwareapi.VMwareVCDriver'),
+ 'vmwareapi.VMwareESXDriver, vmwareapi.VMwareVCDriver, '
+ 'hyperv.HyperVDriver'),
cfg.StrOpt('default_ephemeral_format',
help='The default format an ephemeral_volume will be '
'formatted with on creation.'),
@@ -401,11 +402,12 @@ class ComputeDriver(object):
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
- def finish_revert_migration(self, instance, network_info,
+ def finish_revert_migration(self, context, instance, network_info,
block_device_info=None, power_on=True):
"""
Finish reverting a resize.
+ :param context: the request context for the operation
:param instance: the instance being migrated/resized
:param network_info:
:py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
@@ -413,7 +415,6 @@ class ComputeDriver(object):
:param power_on: True if the instance should be powered on, False
otherwise
"""
- # TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def pause(self, instance):
@@ -681,6 +682,15 @@ class ComputeDriver(object):
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
+ def refresh_instance_security_rules(self, instance):
+ """Refresh security group rules
+
+ Gets called when an instance gets added to or removed from
+ the security group the instance is a member of, or when the
+ group gains or loses a rule.
+ """
+ raise NotImplementedError()
+
def reset_network(self, instance):
"""reset networking for specified instance."""
# TODO(Vek): Need to pass context in for access to auth_token
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index 41825a94f2..b9f8fb4185 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -172,7 +172,7 @@ class FakeDriver(driver.ComputeDriver):
block_device_info=None):
pass
- def finish_revert_migration(self, instance, network_info,
+ def finish_revert_migration(self, context, instance, network_info,
block_device_info=None, power_on=True):
pass
diff --git a/nova/virt/firewall.py b/nova/virt/firewall.py
index 1c1cd0aab1..10d46ea9ec 100644
--- a/nova/virt/firewall.py
+++ b/nova/virt/firewall.py
@@ -141,8 +141,7 @@ class IptablesFirewallDriver(FirewallDriver):
def __init__(self, virtapi, **kwargs):
super(IptablesFirewallDriver, self).__init__(virtapi)
self.iptables = linux_net.iptables_manager
- self.instances = {}
- self.network_infos = {}
+ self.instance_info = {}
self.basically_filtered = False
# Flags for DHCP request rule
@@ -168,9 +167,7 @@ class IptablesFirewallDriver(FirewallDriver):
self.iptables.defer_apply_off()
def unfilter_instance(self, instance, network_info):
- if self.instances.pop(instance['id'], None):
- # NOTE(vish): use the passed info instead of the stored info
- self.network_infos.pop(instance['id'])
+ if self.instance_info.pop(instance['id'], None):
self.remove_filters_for_instance(instance)
self.iptables.apply()
else:
@@ -178,10 +175,10 @@ class IptablesFirewallDriver(FirewallDriver):
'filtered'), instance=instance)
def prepare_instance_filter(self, instance, network_info):
- self.instances[instance['id']] = instance
- self.network_infos[instance['id']] = network_info
+ self.instance_info[instance['id']] = (instance, network_info)
ipv4_rules, ipv6_rules = self.instance_rules(instance, network_info)
- self.add_filters_for_instance(instance, ipv4_rules, ipv6_rules)
+ self.add_filters_for_instance(instance, network_info, ipv4_rules,
+ ipv6_rules)
LOG.debug(_('Filters added to instance'), instance=instance)
self.refresh_provider_fw_rules()
LOG.debug(_('Provider Firewall Rules refreshed'), instance=instance)
@@ -238,9 +235,8 @@ class IptablesFirewallDriver(FirewallDriver):
for rule in ipv6_rules:
self.iptables.ipv6['filter'].add_rule(chain_name, rule)
- def add_filters_for_instance(self, instance, inst_ipv4_rules,
+ def add_filters_for_instance(self, instance, network_info, inst_ipv4_rules,
inst_ipv6_rules):
- network_info = self.network_infos[instance['id']]
chain_name = self._instance_chain_name(instance)
if CONF.use_ipv6:
self.iptables.ipv6['filter'].add_chain(chain_name)
@@ -439,22 +435,38 @@ class IptablesFirewallDriver(FirewallDriver):
self.iptables.apply()
@utils.synchronized('iptables', external=True)
- def _inner_do_refresh_rules(self, instance, ipv4_rules,
- ipv6_rules):
+ def _inner_do_refresh_rules(self, instance, network_info, ipv4_rules,
+ ipv6_rules):
+ chain_name = self._instance_chain_name(instance)
+ if not self.iptables.ipv4['filter'].has_chain(chain_name):
+ LOG.info(
+ _('instance chain %s disappeared during refresh, '
+ 'skipping') % chain_name,
+ instance=instance)
+ return
self.remove_filters_for_instance(instance)
- self.add_filters_for_instance(instance, ipv4_rules, ipv6_rules)
+ self.add_filters_for_instance(instance, network_info, ipv4_rules,
+ ipv6_rules)
def do_refresh_security_group_rules(self, security_group):
- for instance in self.instances.values():
- network_info = self.network_infos[instance['id']]
+ id_list = self.instance_info.keys()
+ for instance_id in id_list:
+ try:
+ instance, network_info = self.instance_info[instance_id]
+ except KeyError:
+ # NOTE(danms): instance cache must have been modified,
+ # ignore this deleted instance and move on
+ continue
ipv4_rules, ipv6_rules = self.instance_rules(instance,
network_info)
- self._inner_do_refresh_rules(instance, ipv4_rules, ipv6_rules)
+ self._inner_do_refresh_rules(instance, network_info, ipv4_rules,
+ ipv6_rules)
def do_refresh_instance_rules(self, instance):
- network_info = self.network_infos[instance['id']]
+ _instance, network_info = self.instance_info[instance['id']]
ipv4_rules, ipv6_rules = self.instance_rules(instance, network_info)
- self._inner_do_refresh_rules(instance, ipv4_rules, ipv6_rules)
+ self._inner_do_refresh_rules(instance, network_info, ipv4_rules,
+ ipv6_rules)
def refresh_provider_fw_rules(self):
"""See :class:`FirewallDriver` docs."""
diff --git a/nova/virt/hyperv/__init__.py b/nova/virt/hyperv/__init__.py
index 090fc0639e..0774b31a9b 100644
--- a/nova/virt/hyperv/__init__.py
+++ b/nova/virt/hyperv/__init__.py
@@ -14,3 +14,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+
+from nova.virt.hyperv import driver
+
+HyperVDriver = driver.HyperVDriver
diff --git a/nova/virt/hyperv/basevolumeutils.py b/nova/virt/hyperv/basevolumeutils.py
index ff8e6722f2..47cda5f118 100644
--- a/nova/virt/hyperv/basevolumeutils.py
+++ b/nova/virt/hyperv/basevolumeutils.py
@@ -71,8 +71,9 @@ class BaseVolumeUtils(object):
except Exception:
LOG.info(_("The ISCSI initiator name can't be found. "
"Choosing the default one"))
- computer_system = self._conn_cimv2.Win32_ComputerSystem()[0]
initiator_name = "iqn.1991-05.com.microsoft:" + hostname.lower()
+ if computer_system.PartofDomain:
+ initiator_name += '.' + computer_system.Domain.lower()
return initiator_name
def volume_in_mapping(self, mount_device, block_device_info):
diff --git a/nova/virt/hyperv/driver.py b/nova/virt/hyperv/driver.py
index d8908bd13a..a914bc75b6 100644
--- a/nova/virt/hyperv/driver.py
+++ b/nova/virt/hyperv/driver.py
@@ -151,10 +151,14 @@ class HyperVDriver(driver.ComputeDriver):
ctxt, instance_ref, dest_check_data)
def plug_vifs(self, instance, network_info):
- LOG.debug(_("plug_vifs called"), instance=instance)
+ """Plug VIFs into networks."""
+ msg = _("VIF plugging is not supported by the Hyper-V driver.")
+ raise NotImplementedError(msg)
def unplug_vifs(self, instance, network_info):
- LOG.debug(_("unplug_vifs called"), instance=instance)
+ """Unplug VIFs from networks."""
+ msg = _("VIF unplugging is not supported by the Hyper-V driver.")
+ raise NotImplementedError(msg)
def ensure_filtering_rules_for_instance(self, instance_ref, network_info):
LOG.debug(_("ensure_filtering_rules_for_instance called"),
@@ -175,9 +179,10 @@ class HyperVDriver(driver.ComputeDriver):
def confirm_migration(self, migration, instance, network_info):
self._migrationops.confirm_migration(migration, instance, network_info)
- def finish_revert_migration(self, instance, network_info,
+ def finish_revert_migration(self, context, instance, network_info,
block_device_info=None, power_on=True):
- self._migrationops.finish_revert_migration(instance, network_info,
+ self._migrationops.finish_revert_migration(context, instance,
+ network_info,
block_device_info, power_on)
def finish_migration(self, context, migration, instance, disk_info,
diff --git a/nova/virt/hyperv/imagecache.py b/nova/virt/hyperv/imagecache.py
index f9c1ce937f..0465502f72 100644
--- a/nova/virt/hyperv/imagecache.py
+++ b/nova/virt/hyperv/imagecache.py
@@ -27,7 +27,6 @@ from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova import utils
from nova.virt.hyperv import utilsfactory
-from nova.virt.hyperv import vhdutilsv2
from nova.virt.hyperv import vmutils
from nova.virt import images
@@ -66,14 +65,9 @@ class ImageCache(object):
root_vhd_size_gb = self._get_root_vhd_size_gb(instance)
root_vhd_size = root_vhd_size_gb * 1024 ** 3
- # NOTE(lpetrut): Checking the namespace is needed as the following
- # method is not yet implemented in the vhdutilsv2 module.
- if not isinstance(self._vhdutils, vhdutilsv2.VHDUtilsV2):
- root_vhd_internal_size = (
+ root_vhd_internal_size = (
self._vhdutils.get_internal_vhd_size_by_file_size(
vhd_path, root_vhd_size))
- else:
- root_vhd_internal_size = root_vhd_size
if root_vhd_internal_size < vhd_size:
raise vmutils.HyperVException(
@@ -102,7 +96,8 @@ class ImageCache(object):
{'resized_vhd_path': resized_vhd_path,
'root_vhd_size': root_vhd_size})
self._vhdutils.resize_vhd(resized_vhd_path,
- root_vhd_size)
+ root_vhd_internal_size,
+ is_file_max_size=False)
except Exception:
with excutils.save_and_reraise_exception():
if self._pathutils.exists(resized_vhd_path):
diff --git a/nova/virt/hyperv/migrationops.py b/nova/virt/hyperv/migrationops.py
index e2b12d45f7..e041e5effb 100644
--- a/nova/virt/hyperv/migrationops.py
+++ b/nova/virt/hyperv/migrationops.py
@@ -142,7 +142,7 @@ class MigrationOps(object):
instance_name)
self._pathutils.rename(revert_path, instance_path)
- def finish_revert_migration(self, instance, network_info,
+ def finish_revert_migration(self, context, instance, network_info,
block_device_info=None, power_on=True):
LOG.debug(_("finish_revert_migration called"), instance=instance)
diff --git a/nova/virt/hyperv/snapshotops.py b/nova/virt/hyperv/snapshotops.py
index ec10341f16..f29c5b1226 100644
--- a/nova/virt/hyperv/snapshotops.py
+++ b/nova/virt/hyperv/snapshotops.py
@@ -104,7 +104,7 @@ class SnapshotOps(object):
LOG.debug(_("Updating Glance image %(name)s with content from "
"merged disk %(image_vhd_path)s"),
- {'image_id': name, 'image_vhd_path': image_vhd_path})
+ {'name': name, 'image_vhd_path': image_vhd_path})
update_task_state(task_state=task_states.IMAGE_UPLOADING,
expected_state=task_states.IMAGE_PENDING_UPLOAD)
self._save_glance_image(context, name, image_vhd_path)
diff --git a/nova/virt/hyperv/vhdutils.py b/nova/virt/hyperv/vhdutils.py
index 36956d2ebc..b48b7a92a7 100644
--- a/nova/virt/hyperv/vhdutils.py
+++ b/nova/virt/hyperv/vhdutils.py
@@ -15,6 +15,16 @@
# License for the specific language governing permissions and limitations
# under the License.
+"""
+Utility class for VHD related operations.
+
+Official VHD format specs can be retrieved at:
+http://technet.microsoft.com/en-us/library/bb676673.aspx
+See "Download the Specifications Without Registering"
+
+Official VHDX format specs can be retrieved at:
+http://www.microsoft.com/en-us/download/details.aspx?id=34750
+"""
import struct
import sys
@@ -34,6 +44,9 @@ VHD_HEADER_SIZE_DYNAMIC = 512
VHD_FOOTER_SIZE_DYNAMIC = 512
VHD_BLK_SIZE_OFFSET = 544
+VHD_SIGNATURE = 'conectix'
+VHDX_SIGNATURE = 'vhdxfile'
+
class VHDUtils(object):
@@ -84,6 +97,10 @@ class VHDUtils(object):
DestinationPath=dest_vhd_path)
self._vmutils.check_ret_val(ret_val, job_path)
+ def _get_resize_method(self):
+ image_man_svc = self._conn.Msvm_ImageManagementService()[0]
+ return image_man_svc.ExpandVirtualHardDisk
+
def resize_vhd(self, vhd_path, new_max_size, is_file_max_size=True):
if is_file_max_size:
new_internal_max_size = self.get_internal_vhd_size_by_file_size(
@@ -91,9 +108,9 @@ class VHDUtils(object):
else:
new_internal_max_size = new_max_size
- image_man_svc = self._conn.Msvm_ImageManagementService()[0]
+ resize = self._get_resize_method()
- (job_path, ret_val) = image_man_svc.ExpandVirtualHardDisk(
+ (job_path, ret_val) = resize(
Path=vhd_path, MaxInternalSize=new_internal_max_size)
self._vmutils.check_ret_val(ret_val, job_path)
@@ -178,13 +195,19 @@ class VHDUtils(object):
def get_vhd_format(self, path):
with open(path, 'rb') as f:
- signature = f.read(8)
- if signature == 'vhdxfile':
- return constants.DISK_FORMAT_VHDX
- elif signature == 'conectix':
- return constants.DISK_FORMAT_VHD
- else:
- raise vmutils.HyperVException(_('Unsupported virtual disk format'))
+ # Read header
+ if f.read(8) == VHDX_SIGNATURE:
+ return constants.DISK_FORMAT_VHDX
+
+ # Read footer
+ f.seek(0, 2)
+ file_size = f.tell()
+ if file_size >= 512:
+ f.seek(-512, 2)
+ if f.read(8) == VHD_SIGNATURE:
+ return constants.DISK_FORMAT_VHD
+
+ raise vmutils.HyperVException(_('Unsupported virtual disk format'))
def get_best_supported_vhd_format(self):
return constants.DISK_FORMAT_VHD
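
A standalone sketch of the signature-based detection added above, assuming only the two magic strings defined in the hunk: a VHDX file starts with 'vhdxfile', while a VHD file carries 'conectix' in the 512-byte footer at the end of the file.

    VHD_SIGNATURE = 'conectix'
    VHDX_SIGNATURE = 'vhdxfile'

    def detect_disk_format(path):
        with open(path, 'rb') as f:
            if f.read(8) == VHDX_SIGNATURE:
                return 'vhdx'
            f.seek(0, 2)                 # end of file
            if f.tell() >= 512:
                f.seek(-512, 2)          # footer is the last 512 bytes
                if f.read(8) == VHD_SIGNATURE:
                    return 'vhd'
        raise ValueError('Unsupported virtual disk format')
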
diff --git a/nova/virt/hyperv/vhdutilsv2.py b/nova/virt/hyperv/vhdutilsv2.py
index 946758b3f4..4017c1b720 100644
--- a/nova/virt/hyperv/vhdutilsv2.py
+++ b/nova/virt/hyperv/vhdutilsv2.py
@@ -20,6 +20,7 @@ Utility class for VHD related operations.
Based on the "root/virtualization/v2" namespace available starting with
Hyper-V Server / Windows Server 2012.
"""
+import struct
import sys
if sys.platform == 'win32':
@@ -33,6 +34,15 @@ from nova.virt.hyperv import vmutilsv2
from xml.etree import ElementTree
+VHDX_BAT_ENTRY_SIZE = 8
+VHDX_HEADER_OFFSETS = [64 * 1024, 128 * 1024]
+VHDX_HEADER_SECTION_SIZE = 1 << 20
+VHDX_LOG_LENGTH_OFFSET = 68
+VHDX_METADATA_SIZE_OFFSET = 64
+VHDX_REGION_TABLE_OFFSET = 192 * 1024
+VHDX_BS_METADATA_ENTRY_OFFSET = 48
+
+
class VHDUtilsV2(vhdutils.VHDUtils):
_VHD_TYPE_DYNAMIC = 3
@@ -99,13 +109,91 @@ class VHDUtilsV2(vhdutils.VHDUtils):
self._vmutils.check_ret_val(ret_val, job_path)
- def resize_vhd(self, vhd_path, new_max_size):
+ def _get_resize_method(self):
image_man_svc = self._conn.Msvm_ImageManagementService()[0]
-
- (job_path, ret_val) = image_man_svc.ResizeVirtualHardDisk(
- Path=vhd_path, MaxInternalSize=new_max_size)
-
- self._vmutils.check_ret_val(ret_val, job_path)
+ return image_man_svc.ResizeVirtualHardDisk
+
+ def get_internal_vhd_size_by_file_size(self, vhd_path,
+ new_vhd_file_size):
+ """VHDX Size = Header (1 MB)
+ + Log
+ + Metadata Region
+ + BAT
+ + Payload Blocks
+ Chunk size = maximum number of bytes described by an SB block
+ = 2 ** 23 * LogicalSectorSize
+ """
+ vhd_format = self.get_vhd_format(vhd_path)
+ if vhd_format == constants.DISK_FORMAT_VHD:
+ return super(VHDUtilsV2,
+ self).get_internal_vhd_size_by_file_size(
+ vhd_path, new_vhd_file_size)
+ else:
+ vhd_info = self.get_vhd_info(vhd_path)
+ vhd_type = vhd_info['Type']
+ if vhd_type == self._VHD_TYPE_DIFFERENCING:
+ raise vmutils.HyperVException(_("Differencing VHDX images "
+ "are not supported"))
+ else:
+ try:
+ with open(vhd_path, 'rb') as f:
+ hs = VHDX_HEADER_SECTION_SIZE
+ bes = VHDX_BAT_ENTRY_SIZE
+
+ lss = vhd_info['LogicalSectorSize']
+ bs = self._get_vhdx_block_size(f)
+ ls = self._get_vhdx_log_size(f)
+ ms = self._get_vhdx_metadata_size_and_offset(f)[0]
+
+ chunk_ratio = (1 << 23) * lss / bs
+ size = new_vhd_file_size
+
+ max_internal_size = (bs * chunk_ratio * (size - hs -
+ ls - ms - bes - bes / chunk_ratio) / (bs *
+ chunk_ratio + bes * chunk_ratio + bes))
+
+ return max_internal_size - (max_internal_size % bs)
+
+ except IOError as ex:
+ raise vmutils.HyperVException(_("Unable to obtain "
+ "internal size from VHDX: "
+ "%(vhd_path)s. Exception: "
+ "%(ex)s") %
+ {"vhd_path": vhd_path,
+ "ex": ex})
+
+ def _get_vhdx_current_header_offset(self, vhdx_file):
+ sequence_numbers = []
+ for offset in VHDX_HEADER_OFFSETS:
+ vhdx_file.seek(offset + 8)
+ sequence_numbers.append(struct.unpack('<Q',
+ vhdx_file.read(8))[0])
+ current_header = sequence_numbers.index(max(sequence_numbers))
+ return VHDX_HEADER_OFFSETS[current_header]
+
+ def _get_vhdx_log_size(self, vhdx_file):
+ current_header_offset = self._get_vhdx_current_header_offset(vhdx_file)
+ offset = current_header_offset + VHDX_LOG_LENGTH_OFFSET
+ vhdx_file.seek(offset)
+ log_size = struct.unpack('<I', vhdx_file.read(4))[0]
+ return log_size
+
+ def _get_vhdx_metadata_size_and_offset(self, vhdx_file):
+ offset = VHDX_METADATA_SIZE_OFFSET + VHDX_REGION_TABLE_OFFSET
+ vhdx_file.seek(offset)
+ metadata_offset = struct.unpack('<Q', vhdx_file.read(8))[0]
+ metadata_size = struct.unpack('<I', vhdx_file.read(4))[0]
+ return metadata_size, metadata_offset
+
+ def _get_vhdx_block_size(self, vhdx_file):
+ metadata_offset = self._get_vhdx_metadata_size_and_offset(vhdx_file)[1]
+ offset = metadata_offset + VHDX_BS_METADATA_ENTRY_OFFSET
+ vhdx_file.seek(offset)
+ file_parameter_offset = struct.unpack('<I', vhdx_file.read(4))[0]
+
+ vhdx_file.seek(file_parameter_offset + metadata_offset)
+ block_size = struct.unpack('<I', vhdx_file.read(4))[0]
+ return block_size
def _get_vhd_info_xml(self, image_man_svc, vhd_path):
(job_path,
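
The new _get_vhdx_* helpers walk the raw VHDX layout with struct: the two header copies at 64 KB and 128 KB each carry a little-endian 64-bit sequence number at offset 8, the copy with the higher sequence number is the current one, and the log length is then read at a fixed offset inside that header. A minimal sketch of the sequence-number comparison, assuming the same fixed offsets as the patch (field layout per the VHDX specification):

    import struct

    HEADER_OFFSETS = (64 * 1024, 128 * 1024)   # the two VHDX header copies
    LOG_LENGTH_OFFSET = 68                     # LogLength field inside a header

    def current_header_offset(vhdx_file):
        """Pick the active VHDX header by comparing sequence numbers."""
        sequence_numbers = []
        for offset in HEADER_OFFSETS:
            vhdx_file.seek(offset + 8)                       # skip signature and checksum
            (seq,) = struct.unpack('<Q', vhdx_file.read(8))  # little-endian uint64
            sequence_numbers.append(seq)
        return HEADER_OFFSETS[sequence_numbers.index(max(sequence_numbers))]

    def log_size(vhdx_file):
        """Read the 32-bit log length from the active header."""
        vhdx_file.seek(current_header_offset(vhdx_file) + LOG_LENGTH_OFFSET)
        return struct.unpack('<I', vhdx_file.read(4))[0]
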
diff --git a/nova/virt/hyperv/vmops.py b/nova/virt/hyperv/vmops.py
index a1faf645b2..b78fade6f2 100644
--- a/nova/virt/hyperv/vmops.py
+++ b/nova/virt/hyperv/vmops.py
@@ -36,7 +36,6 @@ from nova.virt import configdrive
from nova.virt.hyperv import constants
from nova.virt.hyperv import imagecache
from nova.virt.hyperv import utilsfactory
-from nova.virt.hyperv import vhdutilsv2
from nova.virt.hyperv import vmutils
from nova.virt.hyperv import volumeops
@@ -161,24 +160,25 @@ class VMOps(object):
base_vhd_size = base_vhd_info['MaxInternalSize']
root_vhd_size = instance['root_gb'] * 1024 ** 3
- # NOTE(lpetrut): Checking the namespace is needed as the
- # following method is not yet implemented in vhdutilsv2.
- if not isinstance(self._vhdutils, vhdutilsv2.VHDUtilsV2):
- root_vhd_internal_size = (
+ root_vhd_internal_size = (
self._vhdutils.get_internal_vhd_size_by_file_size(
root_vhd_path, root_vhd_size))
- else:
- root_vhd_internal_size = root_vhd_size
if root_vhd_internal_size < base_vhd_size:
- raise vmutils.HyperVException(_("Cannot resize a VHD to a "
- "smaller size"))
+ error_msg = _("Cannot resize a VHD to a smaller size, the"
+ " original size is %(base_vhd_size)s, the"
+ " newer size is %(root_vhd_size)s"
+ ) % {'base_vhd_size': base_vhd_size,
+ 'root_vhd_size': root_vhd_internal_size}
+ raise vmutils.HyperVException(error_msg)
elif root_vhd_internal_size > base_vhd_size:
LOG.debug(_("Resizing VHD %(root_vhd_path)s to new "
"size %(root_vhd_size)s"),
- {'base_vhd_path': base_vhd_path,
+ {'root_vhd_size': root_vhd_internal_size,
'root_vhd_path': root_vhd_path})
- self._vhdutils.resize_vhd(root_vhd_path, root_vhd_size)
+ self._vhdutils.resize_vhd(root_vhd_path,
+ root_vhd_internal_size,
+ is_file_max_size=False)
except Exception:
with excutils.save_and_reraise_exception():
if self._pathutils.exists(root_vhd_path):
@@ -226,10 +226,9 @@ class VMOps(object):
admin_password)
self.power_on(instance)
- except Exception as ex:
- LOG.exception(ex)
- self.destroy(instance)
- raise vmutils.HyperVException(_('Spawn instance failed'))
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ self.destroy(instance)
def create_instance(self, instance, network_info, block_device_info,
root_vhd_path, eph_vhd_path):
@@ -275,8 +274,9 @@ class VMOps(object):
def _create_config_drive(self, instance, injected_files, admin_password):
if CONF.config_drive_format != 'iso9660':
- vmutils.HyperVException(_('Invalid config_drive_format "%s"') %
- CONF.config_drive_format)
+ raise vmutils.UnsupportedConfigDriveFormatException(
+ _('Invalid config_drive_format "%s"') %
+ CONF.config_drive_format)
LOG.info(_('Using config drive for instance: %s'), instance=instance)
@@ -352,10 +352,10 @@ class VMOps(object):
if destroy_disks:
self._delete_disk_files(instance_name)
- except Exception as ex:
- LOG.exception(ex)
- raise vmutils.HyperVException(_('Failed to destroy instance: %s') %
- instance_name)
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ LOG.exception(_('Failed to destroy instance: %s'),
+ instance_name)
def reboot(self, instance, network_info, reboot_type):
"""Reboot the specified instance."""
@@ -405,9 +405,8 @@ class VMOps(object):
LOG.debug(_("Successfully changed state of VM %(vm_name)s"
" to: %(req_state)s"),
{'vm_name': vm_name, 'req_state': req_state})
- except Exception as ex:
- LOG.exception(ex)
- msg = (_("Failed to change vm state of %(vm_name)s"
- " to %(req_state)s") %
- {'vm_name': vm_name, 'req_state': req_state})
- raise vmutils.HyperVException(msg)
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ LOG.error(_("Failed to change vm state of %(vm_name)s"
+ " to %(req_state)s"),
+ {'vm_name': vm_name, 'req_state': req_state})
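
Several hunks in this file replace catch-log-and-rewrap blocks with oslo's excutils.save_and_reraise_exception context manager, which lets cleanup (logging, destroying the half-created instance) run while the original exception and traceback still propagate to the caller. A minimal sketch of the usage pattern, assuming the Nova tree and its oslo-incubator excutils copy are importable; the wrapper function and its arguments are illustrative:

    from nova.openstack.common import excutils

    def spawn_with_cleanup(instance, create_fn, destroy_fn):
        """Sketch of the error-handling shape used in VMOps.spawn above."""
        try:
            create_fn(instance)
        except Exception:
            # save_and_reraise_exception remembers the in-flight exception,
            # runs the cleanup in the with-block, then re-raises the original
            # error with its traceback intact instead of wrapping it.
            with excutils.save_and_reraise_exception():
                destroy_fn(instance)
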
diff --git a/nova/virt/hyperv/vmutils.py b/nova/virt/hyperv/vmutils.py
index 5e5872bdf3..7df8169fd2 100644
--- a/nova/virt/hyperv/vmutils.py
+++ b/nova/virt/hyperv/vmutils.py
@@ -56,6 +56,11 @@ class HyperVAuthorizationException(HyperVException):
super(HyperVException, self).__init__(message)
+class UnsupportedConfigDriveFormatException(HyperVException):
+ def __init__(self, message=None):
+ super(HyperVException, self).__init__(message)
+
+
class VMUtils(object):
# These constants can be overridden by inherited classes
@@ -67,6 +72,16 @@ class VMUtils(object):
_IDE_CTRL_RES_SUB_TYPE = 'Microsoft Emulated IDE Controller'
_SCSI_CTRL_RES_SUB_TYPE = 'Microsoft Synthetic SCSI Controller'
+ _SETTINGS_DEFINE_STATE_CLASS = 'Msvm_SettingsDefineState'
+ _VIRTUAL_SYSTEM_SETTING_DATA_CLASS = 'Msvm_VirtualSystemSettingData'
+ _RESOURCE_ALLOC_SETTING_DATA_CLASS = 'Msvm_ResourceAllocationSettingData'
+ _PROCESSOR_SETTING_DATA_CLASS = 'Msvm_ProcessorSettingData'
+ _MEMORY_SETTING_DATA_CLASS = 'Msvm_MemorySettingData'
+ _STORAGE_ALLOC_SETTING_DATA_CLASS = _RESOURCE_ALLOC_SETTING_DATA_CLASS
+ _SYNTHETIC_ETHERNET_PORT_SETTING_DATA_CLASS = \
+ 'Msvm_SyntheticEthernetPortSettingData'
+ _AFFECTED_JOB_ELEMENT_CLASS = "Msvm_AffectedJobElement"
+
_vm_power_states_map = {constants.HYPERV_VM_STATE_ENABLED: 2,
constants.HYPERV_VM_STATE_DISABLED: 3,
constants.HYPERV_VM_STATE_REBOOT: 10,
@@ -95,8 +110,8 @@ class VMUtils(object):
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
vmsettings = vm.associators(
- wmi_association_class='Msvm_SettingsDefineState',
- wmi_result_class='Msvm_VirtualSystemSettingData')
+ wmi_association_class=self._SETTINGS_DEFINE_STATE_CLASS,
+ wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)
settings_paths = [v.path_() for v in vmsettings]
#See http://msdn.microsoft.com/en-us/library/cc160706%28VS.85%29.aspx
(ret_val, summary_info) = vs_man_svc.GetSummaryInformation(
@@ -146,13 +161,13 @@ class VMUtils(object):
def _get_vm_setting_data(self, vm):
vmsettings = vm.associators(
- wmi_result_class='Msvm_VirtualSystemSettingData')
+ wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)
# Avoid snapshots
return [s for s in vmsettings if s.SettingType == 3][0]
def _set_vm_memory(self, vm, vmsetting, memory_mb, dynamic_memory_ratio):
mem_settings = vmsetting.associators(
- wmi_result_class='Msvm_MemorySettingData')[0]
+ wmi_result_class=self._MEMORY_SETTING_DATA_CLASS)[0]
max_mem = long(memory_mb)
mem_settings.Limit = max_mem
@@ -175,7 +190,7 @@ class VMUtils(object):
def _set_vm_vcpus(self, vm, vmsetting, vcpus_num, limit_cpu_features):
procsetting = vmsetting.associators(
- wmi_result_class='Msvm_ProcessorSettingData')[0]
+ wmi_result_class=self._PROCESSOR_SETTING_DATA_CLASS)[0]
vcpus = long(vcpus_num)
procsetting.VirtualQuantity = vcpus
procsetting.Reservation = vcpus
@@ -229,18 +244,18 @@ class VMUtils(object):
vm = self._lookup_vm_check(vm_name)
vmsettings = vm.associators(
- wmi_result_class='Msvm_VirtualSystemSettingData')
+ wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)
rasds = vmsettings[0].associators(
- wmi_result_class='MSVM_ResourceAllocationSettingData')
+ wmi_result_class=self._RESOURCE_ALLOC_SETTING_DATA_CLASS)
res = [r for r in rasds
if r.ResourceSubType == self._SCSI_CTRL_RES_SUB_TYPE][0]
return res.path_()
def _get_vm_ide_controller(self, vm, ctrller_addr):
vmsettings = vm.associators(
- wmi_result_class='Msvm_VirtualSystemSettingData')
+ wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)
rasds = vmsettings[0].associators(
- wmi_result_class='MSVM_ResourceAllocationSettingData')
+ wmi_result_class=self._RESOURCE_ALLOC_SETTING_DATA_CLASS)
return [r for r in rasds
if r.ResourceSubType == self._IDE_CTRL_RES_SUB_TYPE
and r.Address == str(ctrller_addr)][0].path_()
@@ -250,12 +265,13 @@ class VMUtils(object):
return self._get_vm_ide_controller(vm, ctrller_addr)
def get_attached_disks_count(self, scsi_controller_path):
- volumes = self._conn.query("SELECT * FROM "
- "Msvm_ResourceAllocationSettingData "
+ volumes = self._conn.query("SELECT * FROM %(class_name)s "
"WHERE ResourceSubType = "
"'%(res_sub_type)s' AND "
"Parent = '%(parent)s'" %
- {'res_sub_type':
+ {"class_name":
+ self._RESOURCE_ALLOC_SETTING_DATA_CLASS,
+ 'res_sub_type':
self._PHYS_DISK_RES_SUB_TYPE,
'parent':
scsi_controller_path.replace("'", "''")})
@@ -265,9 +281,10 @@ class VMUtils(object):
return self._conn.query("SELECT * FROM %s WHERE InstanceID "
"LIKE '%%\\Default'" % class_name)[0]
- def _get_new_resource_setting_data(
- self, resource_sub_type,
- class_name='Msvm_ResourceAllocationSettingData'):
+ def _get_new_resource_setting_data(self, resource_sub_type,
+ class_name=None):
+ if class_name is None:
+ class_name = self._RESOURCE_ALLOC_SETTING_DATA_CLASS
return self._conn.query("SELECT * FROM %(class_name)s "
"WHERE ResourceSubType = "
"'%(res_sub_type)s' AND "
@@ -349,7 +366,7 @@ class VMUtils(object):
"""Create a (synthetic) nic and attach it to the vm."""
#Create a new nic
new_nic_data = self._get_new_setting_data(
- 'Msvm_SyntheticEthernetPortSettingData')
+ self._SYNTHETIC_ETHERNET_PORT_SETTING_DATA_CLASS)
#Configure the nic
new_nic_data.ElementName = nic_name
@@ -374,19 +391,12 @@ class VMUtils(object):
"to %(req_state)s"),
{'vm_name': vm_name, 'req_state': req_state})
+ def _get_disk_resource_disk_path(self, disk_resource):
+ return disk_resource.Connection
+
def get_vm_storage_paths(self, vm_name):
vm = self._lookup_vm_check(vm_name)
-
- vmsettings = vm.associators(
- wmi_result_class='Msvm_VirtualSystemSettingData')
- rasds = vmsettings[0].associators(
- wmi_result_class='Msvm_ResourceAllocationSettingData')
- disk_resources = [r for r in rasds
- if r.ResourceSubType ==
- self._IDE_DISK_RES_SUB_TYPE]
- volume_resources = [r for r in rasds
- if r.ResourceSubType ==
- self._PHYS_DISK_RES_SUB_TYPE]
+ (disk_resources, volume_resources) = self._get_vm_disks(vm)
volume_drives = []
for volume_resource in volume_resources:
@@ -395,10 +405,25 @@ class VMUtils(object):
disk_files = []
for disk_resource in disk_resources:
- disk_files.extend([c for c in disk_resource.Connection])
+ disk_files.extend(
+ [c for c in self._get_disk_resource_disk_path(disk_resource)])
return (disk_files, volume_drives)
+ def _get_vm_disks(self, vm):
+ vmsettings = vm.associators(
+ wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)
+ rasds = vmsettings[0].associators(
+ wmi_result_class=self._STORAGE_ALLOC_SETTING_DATA_CLASS)
+ disk_resources = [r for r in rasds if
+ r.ResourceSubType in
+ [self._IDE_DISK_RES_SUB_TYPE,
+ self._IDE_DVD_RES_SUB_TYPE]]
+ volume_resources = [r for r in rasds if
+ r.ResourceSubType == self._PHYS_DISK_RES_SUB_TYPE]
+
+ return (disk_resources, volume_resources)
+
def destroy_vm(self, vm_name):
vm = self._lookup_vm_check(vm_name)
@@ -511,7 +536,7 @@ class VMUtils(object):
job_wmi_path = job_path.replace('\\', '/')
job = wmi.WMI(moniker=job_wmi_path)
snp_setting_data = job.associators(
- wmi_result_class='Msvm_VirtualSystemSettingData')[0]
+ wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)[0]
return snp_setting_data.path_()
def remove_vm_snapshot(self, snapshot_path):
@@ -528,10 +553,12 @@ class VMUtils(object):
self._remove_virt_resource(physical_disk, vm.path_())
def _get_mounted_disk_resource_from_path(self, disk_path):
- physical_disks = self._conn.query("SELECT * FROM "
- "Msvm_ResourceAllocationSettingData"
- " WHERE ResourceSubType = '%s'" %
- self._PHYS_DISK_RES_SUB_TYPE)
+ physical_disks = self._conn.query("SELECT * FROM %(class_name)s "
+ "WHERE ResourceSubType = '%(res_sub_type)s'" %
+ {"class_name":
+ self._RESOURCE_ALLOC_SETTING_DATA_CLASS,
+ 'res_sub_type':
+ self._PHYS_DISK_RES_SUB_TYPE})
for physical_disk in physical_disks:
if physical_disk.HostResource:
if physical_disk.HostResource[0].lower() == disk_path.lower():
@@ -545,12 +572,15 @@ class VMUtils(object):
return mounted_disks[0].path_()
def get_controller_volume_paths(self, controller_path):
- disks = self._conn.query("SELECT * FROM "
- "Msvm_ResourceAllocationSettingData "
+ disks = self._conn.query("SELECT * FROM %(class_name)s "
"WHERE ResourceSubType = '%(res_sub_type)s' "
"AND Parent='%(parent)s'" %
- {"res_sub_type": self._PHYS_DISK_RES_SUB_TYPE,
- "parent": controller_path})
+ {"class_name":
+ self._RESOURCE_ALLOC_SETTING_DATA_CLASS,
+ "res_sub_type":
+ self._PHYS_DISK_RES_SUB_TYPE,
+ "parent":
+ controller_path})
disk_data = {}
for disk in disks:
if disk.HostResource:
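
The vmutils.py changes above move hard-coded WMI class names ('Msvm_ResourceAllocationSettingData', 'Msvm_VirtualSystemSettingData', and so on) into class attributes, so the v2-namespace subclass can swap in different classes by overriding one constant instead of re-implementing every query. A small sketch of that pattern; the WMI connection object is an injected stand-in and the method names are illustrative:

    class BaseUtils(object):
        # Subclasses override only the constant, not the query methods.
        _RESOURCE_CLASS = 'Msvm_ResourceAllocationSettingData'

        def __init__(self, conn):
            self._conn = conn

        def query_disks(self, res_sub_type, parent):
            # Parent paths may contain quotes, so escape them for the WQL string.
            return self._conn.query(
                "SELECT * FROM %(class_name)s "
                "WHERE ResourceSubType = '%(res_sub_type)s' "
                "AND Parent = '%(parent)s'" %
                {'class_name': self._RESOURCE_CLASS,
                 'res_sub_type': res_sub_type,
                 'parent': parent.replace("'", "''")})

    class V2Utils(BaseUtils):
        # The v2 namespace keeps disks in a dedicated storage allocation class.
        _RESOURCE_CLASS = 'Msvm_StorageAllocationSettingData'
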
diff --git a/nova/virt/hyperv/vmutilsv2.py b/nova/virt/hyperv/vmutilsv2.py
index ff830654b7..a34553abdb 100644
--- a/nova/virt/hyperv/vmutilsv2.py
+++ b/nova/virt/hyperv/vmutilsv2.py
@@ -53,11 +53,12 @@ class VMUtilsV2(vmutils.VMUtils):
_SNAPSHOT_FULL = 2
_METRIC_AGGR_CPU_AVG = 'Aggregated Average CPU Utilization'
- _METRIC_AGGR_DISK_R = 'Aggregated Disk Data Read'
- _METRIC_AGGR_DISK_W = 'Aggregated Disk Data Written'
-
_METRIC_ENABLED = 2
+ _STORAGE_ALLOC_SETTING_DATA_CLASS = 'Msvm_StorageAllocationSettingData'
+ _ETHERNET_PORT_ALLOCATION_SETTING_DATA_CLASS = \
+ 'Msvm_EthernetPortAllocationSettingData'
+
_vm_power_states_map = {constants.HYPERV_VM_STATE_ENABLED: 2,
constants.HYPERV_VM_STATE_DISABLED: 3,
constants.HYPERV_VM_STATE_REBOOT: 11,
@@ -81,12 +82,12 @@ class VMUtilsV2(vmutils.VMUtils):
SystemSettings=vs_data.GetText_(1))
job = self.check_ret_val(ret_val, job_path)
if not vm_path and job:
- vm_path = job.associators("Msvm_AffectedJobElement")[0]
+ vm_path = job.associators(self._AFFECTED_JOB_ELEMENT_CLASS)[0]
return self._get_wmi_obj(vm_path)
def _get_vm_setting_data(self, vm):
vmsettings = vm.associators(
- wmi_result_class='Msvm_VirtualSystemSettingData')
+ wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)
# Avoid snapshots
return [s for s in vmsettings if
s.VirtualSystemType == self._VIRTUAL_SYSTEM_TYPE_REALIZED][0]
@@ -120,7 +121,7 @@ class VMUtilsV2(vmutils.VMUtils):
res_sub_type = self._IDE_DVD_RES_SUB_TYPE
res = self._get_new_resource_setting_data(
- res_sub_type, 'Msvm_StorageAllocationSettingData')
+ res_sub_type, self._STORAGE_ALLOC_SETTING_DATA_CLASS)
res.Parent = drive_path
res.HostResource = [path]
@@ -152,6 +153,9 @@ class VMUtilsV2(vmutils.VMUtils):
vm = self._lookup_vm_check(vm_name)
self._add_virt_resource(scsicontrl, vm.path_())
+ def _get_disk_resource_disk_path(self, disk_resource):
+ return disk_resource.HostResource
+
def destroy_vm(self, vm_name):
vm = self._lookup_vm_check(vm_name)
@@ -202,7 +206,7 @@ class VMUtilsV2(vmutils.VMUtils):
job_wmi_path = job_path.replace('\\', '/')
job = wmi.WMI(moniker=job_wmi_path)
snp_setting_data = job.associators(
- wmi_result_class='Msvm_VirtualSystemSettingData')[0]
+ wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)[0]
return snp_setting_data.path_()
@@ -215,7 +219,7 @@ class VMUtilsV2(vmutils.VMUtils):
nic_data = self._get_nic_data_by_name(nic_name)
eth_port_data = self._get_new_setting_data(
- 'Msvm_EthernetPortAllocationSettingData')
+ self._ETHERNET_PORT_ALLOCATION_SETTING_DATA_CLASS)
eth_port_data.HostResource = [vswitch_conn_data]
eth_port_data.Parent = nic_data.path_()
@@ -224,19 +228,27 @@ class VMUtilsV2(vmutils.VMUtils):
self._add_virt_resource(eth_port_data, vm.path_())
def enable_vm_metrics_collection(self, vm_name):
- metric_names = [self._METRIC_AGGR_CPU_AVG,
- self._METRIC_AGGR_DISK_R,
- self._METRIC_AGGR_DISK_W]
+ metric_names = [self._METRIC_AGGR_CPU_AVG]
vm = self._lookup_vm_check(vm_name)
metric_svc = self._conn.Msvm_MetricService()[0]
+ (disks, volumes) = self._get_vm_disks(vm)
+ filtered_disks = [d for d in disks if
+ d.ResourceSubType is not self._IDE_DVD_RES_SUB_TYPE]
+
+ # enable metrics for disk.
+ for disk in filtered_disks:
+ self._enable_metrics(metric_svc, disk)
for metric_name in metric_names:
metric_def = self._conn.CIM_BaseMetricDefinition(Name=metric_name)
if not metric_def:
LOG.debug(_("Metric not found: %s") % metric_name)
else:
- metric_svc.ControlMetrics(
- Subject=vm.path_(),
- Definition=metric_def[0].path_(),
- MetricCollectionEnabled=self._METRIC_ENABLED)
+ self._enable_metrics(metric_svc, vm, metric_def[0].path_())
+
+ def _enable_metrics(self, metric_svc, element, definition_path=None):
+ metric_svc.ControlMetrics(
+ Subject=element.path_(),
+ Definition=definition_path,
+ MetricCollectionEnabled=self._METRIC_ENABLED)
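
With this hunk, enable_vm_metrics_collection turns metrics on per disk resource (skipping DVD drives) and keeps only the aggregated CPU metric at VM level, funnelling both through the shared _enable_metrics helper that calls Msvm_MetricService.ControlMetrics. A simplified sketch of that flow, with the WMI service object treated as an injected dependency and the helper names illustrative:

    METRIC_ENABLED = 2   # MetricCollectionEnabled value meaning "enable"

    def enable_metrics(metric_svc, element_path, definition_path=None):
        # ControlMetrics enables collection either for a whole element (VM or
        # disk) or for a single metric definition when definition_path is set.
        metric_svc.ControlMetrics(Subject=element_path,
                                  Definition=definition_path,
                                  MetricCollectionEnabled=METRIC_ENABLED)

    def enable_vm_metrics(metric_svc, vm_path, disk_paths, cpu_metric_def_path):
        for disk_path in disk_paths:
            enable_metrics(metric_svc, disk_path)                  # all disk metrics
        enable_metrics(metric_svc, vm_path, cpu_metric_def_path)   # CPU average only
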
diff --git a/nova/virt/hyperv/volumeops.py b/nova/virt/hyperv/volumeops.py
index f73d6015c7..474beedb2c 100644
--- a/nova/virt/hyperv/volumeops.py
+++ b/nova/virt/hyperv/volumeops.py
@@ -24,11 +24,11 @@ import time
from oslo.config import cfg
from nova import exception
+from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.virt import driver
from nova.virt.hyperv import utilsfactory
-from nova.virt.hyperv import vmutils
LOG = logging.getLogger(__name__)
@@ -145,12 +145,12 @@ class VolumeOps(object):
ctrller_path,
slot,
mounted_disk_path)
- except Exception as exn:
- LOG.exception(_('Attach volume failed: %s'), exn)
- if target_iqn:
- self._volutils.logout_storage_target(target_iqn)
- raise vmutils.HyperVException(_('Unable to attach volume '
- 'to instance %s') % instance_name)
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ LOG.error(_('Unable to attach volume to instance %s'),
+ instance_name)
+ if target_iqn:
+ self._volutils.logout_storage_target(target_iqn)
def _get_free_controller_slot(self, scsi_controller_path):
#Slots start from 0, so the length of the disks gives us the free slot
diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py
index a789444dcd..a819636cdf 100644
--- a/nova/virt/libvirt/driver.py
+++ b/nova/virt/libvirt/driver.py
@@ -285,7 +285,6 @@ MIN_LIBVIRT_VERSION = (0, 9, 6)
# When the above version matches/exceeds this version
# delete it & corresponding code using it
MIN_LIBVIRT_HOST_CPU_VERSION = (0, 9, 10)
-MIN_LIBVIRT_CLOSE_CALLBACK_VERSION = (1, 0, 1)
MIN_LIBVIRT_DEVICE_CALLBACK_VERSION = (1, 1, 1)
# Live snapshot requirements
REQ_HYPERVISOR_LIVESNAPSHOT = "QEMU"
@@ -398,20 +397,21 @@ class LibvirtDriver(driver.ComputeDriver):
driver_cache)
conf.driver_cache = cache_mode
- def has_min_version(self, lv_ver=None, hv_ver=None, hv_type=None):
+ @staticmethod
+ def _has_min_version(conn, lv_ver=None, hv_ver=None, hv_type=None):
try:
if lv_ver is not None:
- libvirt_version = self._conn.getLibVersion()
+ libvirt_version = conn.getLibVersion()
if libvirt_version < utils.convert_version_to_int(lv_ver):
return False
if hv_ver is not None:
- hypervisor_version = self._conn.getVersion()
+ hypervisor_version = conn.getVersion()
if hypervisor_version < utils.convert_version_to_int(hv_ver):
return False
if hv_type is not None:
- hypervisor_type = self._conn.getType()
+ hypervisor_type = conn.getType()
if hypervisor_type != hv_type:
return False
@@ -419,6 +419,9 @@ class LibvirtDriver(driver.ComputeDriver):
except Exception:
return False
+ def has_min_version(self, lv_ver=None, hv_ver=None, hv_type=None):
+ return self._has_min_version(self._conn, lv_ver, hv_ver, hv_type)
+
def _native_thread(self):
"""Receives async events coming in from libvirtd.
@@ -576,42 +579,50 @@ class LibvirtDriver(driver.ComputeDriver):
self._init_events()
- def _get_connection(self):
- with self._wrapped_conn_lock:
- wrapped_conn = self._wrapped_conn
+ def _get_new_connection(self):
+ # call with _wrapped_conn_lock held
+ LOG.debug(_('Connecting to libvirt: %s'), self.uri())
+ wrapped_conn = self._connect(self.uri(), self.read_only)
- if not wrapped_conn or not self._test_connection(wrapped_conn):
- LOG.debug(_('Connecting to libvirt: %s'), self.uri())
- if not CONF.libvirt_nonblocking:
- wrapped_conn = self._connect(self.uri(), self.read_only)
- else:
- wrapped_conn = tpool.proxy_call(
- (libvirt.virDomain, libvirt.virConnect),
- self._connect, self.uri(), self.read_only)
- with self._wrapped_conn_lock:
- self._wrapped_conn = wrapped_conn
+ self._wrapped_conn = wrapped_conn
- try:
- LOG.debug(_("Registering for lifecycle events %s") %
- str(self))
- wrapped_conn.domainEventRegisterAny(
- None,
- libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE,
- self._event_lifecycle_callback,
- self)
- except Exception:
- LOG.warn(_("URI %s does not support events"),
- self.uri())
+ try:
+ LOG.debug(_("Registering for lifecycle events %s"), self)
+ wrapped_conn.domainEventRegisterAny(
+ None,
+ libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE,
+ self._event_lifecycle_callback,
+ self)
+ except Exception as e:
+ LOG.warn(_("URI %(uri)s does not support events: %(error)s"),
+ {'uri': self.uri(), 'error': e})
- if self.has_min_version(MIN_LIBVIRT_CLOSE_CALLBACK_VERSION):
- try:
- LOG.debug(_("Registering for connection events: %s") %
- str(self))
- wrapped_conn.registerCloseCallback(
- self._close_callback, None)
- except libvirt.libvirtError:
- LOG.debug(_("URI %s does not support connection events"),
- self.uri())
+ try:
+ LOG.debug(_("Registering for connection events: %s") %
+ str(self))
+ wrapped_conn.registerCloseCallback(self._close_callback, None)
+ except (TypeError, AttributeError) as e:
+ # NOTE: The registerCloseCallback of python-libvirt 1.0.1+
+ # is defined with 3 arguments, and the above registerClose-
+ # Callback succeeds. However, the one of python-libvirt 1.0.0
+ # is defined with 4 arguments and TypeError happens here.
+ # Then python-libvirt 0.9 does not define a method register-
+ # CloseCallback.
+ LOG.debug(_("The version of python-libvirt does not support "
+ "registerCloseCallback or is too old: %s"), e)
+ except libvirt.libvirtError as e:
+ LOG.warn(_("URI %(uri)s does not support connection"
+ " events: %(error)s"),
+ {'uri': self.uri(), 'error': e})
+
+ return wrapped_conn
+
+ def _get_connection(self):
+ # multiple concurrent connections are protected by _wrapped_conn_lock
+ with self._wrapped_conn_lock:
+ wrapped_conn = self._wrapped_conn
+ if not wrapped_conn or not self._test_connection(wrapped_conn):
+ wrapped_conn = self._get_new_connection()
return wrapped_conn
@@ -674,7 +685,15 @@ class LibvirtDriver(driver.ComputeDriver):
flags = 0
if read_only:
flags = libvirt.VIR_CONNECT_RO
- return libvirt.openAuth(uri, auth, flags)
+ if not CONF.libvirt_nonblocking:
+ return libvirt.openAuth(uri, auth, flags)
+ else:
+ # tpool.proxy_call creates a native thread. Due to limitations
+ # with eventlet locking we cannot use the logging API inside
+ # the called function.
+ return tpool.proxy_call(
+ (libvirt.virDomain, libvirt.virConnect),
+ libvirt.openAuth, uri, auth, flags)
except libvirt.libvirtError as ex:
LOG.exception(_("Connection to libvirt failed: %s"), ex)
payload = dict(ip=LibvirtDriver.get_host_ip_addr(),
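
The _connect change above concentrates the blocking/non-blocking decision in one place: when libvirt_nonblocking is set, libvirt.openAuth runs through eventlet's tpool.proxy_call so the C-level call executes in a native worker thread instead of stalling every greenthread (and, as the note says, logging inside the proxied call is off-limits). A minimal sketch of the proxy_call pattern with a stand-in blocking function; only the empty autowrap tuple differs from the driver's usage:

    from eventlet import tpool

    def blocking_probe(path):
        # Stand-in for a C-level call that can block the whole process (as
        # libvirt.openAuth can): it runs outside the eventlet hub's control.
        with open(path, 'rb') as f:
            return len(f.read())

    def probe_without_blocking_hub(path):
        # tpool.proxy_call runs the function in a native worker thread and
        # lets other greenthreads run until it completes.  The first argument
        # lists result types to wrap in proxies; an empty tuple returns the
        # result as-is.
        return tpool.proxy_call((), blocking_probe, path)
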
@@ -952,7 +971,7 @@ class LibvirtDriver(driver.ComputeDriver):
def _cleanup_rbd(self, instance):
pool = CONF.libvirt_images_rbd_pool
volumes = libvirt_utils.list_rbd_volumes(pool)
- pattern = instance['name']
+ pattern = instance['uuid']
def belongs_to_instance(disk):
return disk.startswith(pattern)
@@ -1025,7 +1044,11 @@ class LibvirtDriver(driver.ComputeDriver):
def _cleanup_resize(self, instance, network_info):
target = libvirt_utils.get_instance_path(instance) + "_resize"
if os.path.exists(target):
- shutil.rmtree(target)
+ # Deletion can fail over NFS, so retry the deletion as required.
+            # Set maximum attempts to 5; most cases can remove the directory
+            # on the second attempt.
+ utils.execute('rm', '-rf', target, delay_on_retry=True,
+ attempts=5)
if instance['host'] != CONF.host:
self._undefine_domain(instance)
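
The _cleanup_resize hunk retries the recursive delete because an NFS-backed instance directory can stay briefly busy after a guest shuts down. The patch shells out to 'rm -rf' via utils.execute with attempts and delay_on_retry; the sketch below is a pure-Python analogue of the same retry idea, not the code path Nova takes:

    import shutil
    import time

    def remove_tree_with_retries(target, attempts=5, delay=1.0):
        # NFS can briefly hold the directory busy (silly-renamed .nfsXXXX
        # files), so retry the recursive delete before giving up.
        for attempt in range(1, attempts + 1):
            try:
                shutil.rmtree(target)
                return
            except OSError:
                if attempt == attempts:
                    raise
                time.sleep(delay)
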
@@ -1384,7 +1407,6 @@ class LibvirtDriver(driver.ComputeDriver):
virt_dom.managedSave(0)
snapshot_backend = self.image_backend.snapshot(disk_path,
- snapshot_name,
image_type=source_format)
if live_snapshot:
@@ -1393,7 +1415,6 @@ class LibvirtDriver(driver.ComputeDriver):
else:
LOG.info(_("Beginning cold snapshot process"),
instance=instance)
- snapshot_backend.snapshot_create()
update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)
snapshot_directory = CONF.libvirt_snapshots_directory
@@ -1409,8 +1430,6 @@ class LibvirtDriver(driver.ComputeDriver):
else:
snapshot_backend.snapshot_extract(out_path, image_format)
finally:
- if not live_snapshot:
- snapshot_backend.snapshot_delete()
new_dom = None
# NOTE(dkang): because previous managedSave is not called
# for LXC, _create_domain must not be called.
@@ -1513,7 +1532,7 @@ class LibvirtDriver(driver.ComputeDriver):
# Convert the delta (CoW) image with a backing file to a flat
# image with no backing file.
- libvirt_utils.extract_snapshot(disk_delta, 'qcow2', None,
+ libvirt_utils.extract_snapshot(disk_delta, 'qcow2',
out_path, image_format)
def _volume_snapshot_update_status(self, context, snapshot_id, status):
@@ -1906,9 +1925,25 @@ class LibvirtDriver(driver.ComputeDriver):
"""
self._destroy(instance)
+
+ # Get the system metadata from the instance
+ system_meta = utils.instance_sys_meta(instance)
+
+ # Convert the system metadata to image metadata
+ image_meta = utils.get_image_from_system_metadata(system_meta)
+ if not image_meta:
+ image_ref = instance.get('image_ref')
+ service, image_id = glance.get_remote_image_service(context,
+ image_ref)
+ image_meta = compute_utils.get_image_metadata(context,
+ service,
+ image_id,
+ instance)
+
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance,
- block_device_info)
+ block_device_info,
+ image_meta)
# NOTE(vish): This could generate the wrong device_format if we are
# using the raw backend and the images don't exist yet.
# The create_images_and_backing below doesn't properly
@@ -1928,9 +1963,8 @@ class LibvirtDriver(driver.ComputeDriver):
# Initialize all the necessary networking, block devices and
# start the instance.
- self._create_domain_and_network(xml, instance, network_info,
- block_device_info, context=context,
- reboot=True)
+ self._create_domain_and_network(context, xml, instance, network_info,
+ block_device_info, reboot=True)
self._prepare_pci_devices_for_use(
pci_manager.get_instance_pci_devs(instance))
@@ -1979,8 +2013,8 @@ class LibvirtDriver(driver.ComputeDriver):
"""resume the specified instance."""
xml = self._get_existing_domain_xml(instance, network_info,
block_device_info)
- dom = self._create_domain_and_network(xml, instance, network_info,
- block_device_info=block_device_info, context=context)
+ dom = self._create_domain_and_network(context, xml, instance,
+ network_info, block_device_info=block_device_info)
self._attach_pci_devices(dom,
pci_manager.get_instance_pci_devs(instance))
@@ -2087,8 +2121,8 @@ class LibvirtDriver(driver.ComputeDriver):
block_device_info=block_device_info,
write_to_disk=True)
- self._create_domain_and_network(xml, instance, network_info,
- block_device_info, context=context)
+ self._create_domain_and_network(context, xml, instance, network_info,
+ block_device_info)
LOG.debug(_("Instance is running"), instance=instance)
def _wait_for_boot():
@@ -2776,6 +2810,9 @@ class LibvirtDriver(driver.ComputeDriver):
connection_info,
info)
devices.append(cfg)
+ self.virtapi.block_device_mapping_update(
+ nova_context.get_admin_context(), vol.id,
+ {'connection_info': jsonutils.dumps(connection_info)})
if 'disk.config' in disk_mapping:
diskconfig = self.get_guest_disk_config(instance,
@@ -3211,9 +3248,9 @@ class LibvirtDriver(driver.ComputeDriver):
return domain
- def _create_domain_and_network(self, xml, instance, network_info,
+ def _create_domain_and_network(self, context, xml, instance, network_info,
block_device_info=None, power_on=True,
- context=None, reboot=False):
+ reboot=False):
"""Do required network setup and create domain."""
block_device_mapping = driver.block_device_info_get_mapping(
@@ -3433,7 +3470,7 @@ class LibvirtDriver(driver.ComputeDriver):
def get_vcpu_used(self):
"""Get vcpu usage number of physical computer.
- :returns: The total number of vcpu that currently used.
+ :returns: The total number of vcpu(s) that are currently being used.
"""
@@ -3452,7 +3489,9 @@ class LibvirtDriver(driver.ComputeDriver):
" %(id)s, exception: %(ex)s") %
{"id": dom_id, "ex": e})
else:
- total += len(vcpus[1])
+ if vcpus is not None and len(vcpus) > 1:
+ total += len(vcpus[1])
+
except exception.InstanceNotFound:
LOG.info(_("libvirt can't find a domain with id: %s") % dom_id)
continue
@@ -4294,10 +4333,11 @@ class LibvirtDriver(driver.ComputeDriver):
if instance["name"] not in dom_list:
# In case of block migration, destination does not have
# libvirt.xml
- disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
- instance)
+ disk_info = blockinfo.get_disk_info(
+ CONF.libvirt_type, instance, block_device_info)
self.to_xml(context, instance, network_info, disk_info,
- block_device_info, write_to_disk=True)
+ block_device_info=block_device_info,
+ write_to_disk=True)
# libvirt.xml should be made by to_xml(), but libvirt
# does not accept to_xml() result, since uuid is not
# included in to_xml() result.
@@ -4605,9 +4645,8 @@ class LibvirtDriver(driver.ComputeDriver):
xml = self.to_xml(context, instance, network_info, disk_info,
block_device_info=block_device_info,
write_to_disk=True)
- self._create_domain_and_network(xml, instance, network_info,
- block_device_info, power_on,
- context=context)
+ self._create_domain_and_network(context, xml, instance, network_info,
+ block_device_info, power_on)
if power_on:
timer = loopingcall.FixedIntervalLoopingCall(
self._wait_for_running,
@@ -4624,7 +4663,7 @@ class LibvirtDriver(driver.ComputeDriver):
if e.errno != errno.ENOENT:
raise
- def finish_revert_migration(self, instance, network_info,
+ def finish_revert_migration(self, context, instance, network_info,
block_device_info=None, power_on=True):
LOG.debug(_("Starting finish_revert_migration"),
instance=instance)
@@ -4643,10 +4682,9 @@ class LibvirtDriver(driver.ComputeDriver):
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
instance,
block_device_info)
- xml = self.to_xml(nova_context.get_admin_context(),
- instance, network_info, disk_info,
+ xml = self.to_xml(context, instance, network_info, disk_info,
block_device_info=block_device_info)
- self._create_domain_and_network(xml, instance, network_info,
+ self._create_domain_and_network(context, xml, instance, network_info,
block_device_info, power_on)
if power_on:
@@ -4758,7 +4796,7 @@ class LibvirtDriver(driver.ComputeDriver):
def _delete_instance_files(self, instance):
# NOTE(mikal): a shim to handle this file not using instance objects
# everywhere. Remove this when that conversion happens.
- context = nova_context.get_admin_context()
+ context = nova_context.get_admin_context(read_deleted='yes')
inst_obj = instance_obj.Instance.get_by_uuid(context, instance['uuid'])
# NOTE(mikal): this code should be pushed up a layer when this shim is
diff --git a/nova/virt/libvirt/firewall.py b/nova/virt/libvirt/firewall.py
index 1cbba788a8..808beb0f82 100644
--- a/nova/virt/libvirt/firewall.py
+++ b/nova/virt/libvirt/firewall.py
@@ -219,9 +219,8 @@ class NWFilterFirewall(base_firewall.FirewallDriver):
filter_set = ['no-mac-spoofing',
'no-ip-spoofing',
'no-arp-spoofing']
- if CONF.use_ipv6:
- self._define_filter(self.nova_no_nd_reflection_filter)
- filter_set.append('nova-no-nd-reflection')
+ self._define_filter(self.nova_no_nd_reflection_filter)
+ filter_set.append('nova-no-nd-reflection')
self._define_filter(self._filter_container('nova-nodhcp', filter_set))
filter_set.append('allow-dhcp-server')
self._define_filter(self._filter_container('nova-base', filter_set))
@@ -317,9 +316,7 @@ class IptablesFirewallDriver(base_firewall.IptablesFirewallDriver):
def unfilter_instance(self, instance, network_info):
# NOTE(salvatore-orlando):
# Overriding base class method for applying nwfilter operation
- if self.instances.pop(instance['id'], None):
- # NOTE(vish): use the passed info instead of the stored info
- self.network_infos.pop(instance['id'])
+ if self.instance_info.pop(instance['id'], None):
self.remove_filters_for_instance(instance)
self.iptables.apply()
self.nwfilter.unfilter_instance(instance, network_info)
diff --git a/nova/virt/libvirt/imagebackend.py b/nova/virt/libvirt/imagebackend.py
index e9007896c7..ca46add554 100644
--- a/nova/virt/libvirt/imagebackend.py
+++ b/nova/virt/libvirt/imagebackend.py
@@ -35,8 +35,10 @@ from nova.virt.libvirt import utils as libvirt_utils
try:
+ import rados
import rbd
except ImportError:
+ rados = None
rbd = None
@@ -88,6 +90,11 @@ class Image(object):
self.is_block_dev = is_block_dev
self.preallocate = False
+ # NOTE(dripton): We store lines of json (path, disk_format) in this
+ # file, for some image types, to prevent attacks based on changing the
+ # disk_format.
+ self.disk_info_path = None
+
# NOTE(mikal): We need a lock directory which is shared along with
# instance files, to cover the scenario where multiple compute nodes
# are trying to create a base file at the same time
@@ -223,32 +230,88 @@ class Image(object):
'size': size})
raise exception.InstanceTypeDiskTooSmall()
- def snapshot_create(self):
- raise NotImplementedError()
-
def snapshot_extract(self, target, out_format):
raise NotImplementedError()
- def snapshot_delete(self):
- raise NotImplementedError()
+ def _get_driver_format(self):
+ return self.driver_format
+
+ def resolve_driver_format(self):
+ """Return the driver format for self.path.
+
+ First checks self.disk_info_path for an entry.
+ If it's not there, calls self._get_driver_format(), and then
+ stores the result in self.disk_info_path
+
+ See https://bugs.launchpad.net/nova/+bug/1221190
+ """
+ def _dict_from_line(line):
+ if not line:
+ return {}
+ try:
+ return jsonutils.loads(line)
+ except (TypeError, ValueError) as e:
+ msg = (_("Could not load line %(line)s, got error "
+ "%(error)s") %
+ {'line': line, 'error': unicode(e)})
+ raise exception.InvalidDiskInfo(reason=msg)
+
+ @utils.synchronized(self.disk_info_path, external=False,
+ lock_path=self.lock_path)
+ def write_to_disk_info_file():
+ # Use os.open to create it without group or world write permission.
+ fd = os.open(self.disk_info_path, os.O_RDWR | os.O_CREAT, 0o644)
+ with os.fdopen(fd, "r+") as disk_info_file:
+ line = disk_info_file.read().rstrip()
+ dct = _dict_from_line(line)
+ if self.path in dct:
+ msg = _("Attempted overwrite of an existing value.")
+ raise exception.InvalidDiskInfo(reason=msg)
+ dct.update({self.path: driver_format})
+ disk_info_file.seek(0)
+ disk_info_file.truncate()
+ disk_info_file.write('%s\n' % jsonutils.dumps(dct))
+ # Ensure the file is always owned by the nova user so qemu can't
+ # write it.
+ utils.chown(self.disk_info_path, owner_uid=os.getuid())
+
+ try:
+ if (self.disk_info_path is not None and
+ os.path.exists(self.disk_info_path)):
+ with open(self.disk_info_path) as disk_info_file:
+ line = disk_info_file.read().rstrip()
+ dct = _dict_from_line(line)
+ for path, driver_format in dct.iteritems():
+ if path == self.path:
+ return driver_format
+ driver_format = self._get_driver_format()
+ if self.disk_info_path is not None:
+ fileutils.ensure_tree(os.path.dirname(self.disk_info_path))
+ write_to_disk_info_file()
+ except OSError as e:
+ raise exception.DiskInfoReadWriteFail(reason=unicode(e))
+ return driver_format
class Raw(Image):
- def __init__(self, instance=None, disk_name=None, path=None,
- snapshot_name=None):
+ def __init__(self, instance=None, disk_name=None, path=None):
super(Raw, self).__init__("file", "raw", is_block_dev=False)
self.path = (path or
os.path.join(libvirt_utils.get_instance_path(instance),
disk_name))
- self.snapshot_name = snapshot_name
self.preallocate = CONF.preallocate_images != 'none'
+ self.disk_info_path = os.path.join(os.path.dirname(self.path),
+ 'disk.info')
self.correct_format()
+ def _get_driver_format(self):
+ data = images.qemu_img_info(self.path)
+ return data.file_format or 'raw'
+
def correct_format(self):
if os.path.exists(self.path):
- data = images.qemu_img_info(self.path)
- self.driver_format = data.file_format or 'raw'
+ self.driver_format = self.resolve_driver_format()
def create_image(self, prepare_template, base, size, *args, **kwargs):
@utils.synchronized(base, external=True, lock_path=self.lock_path)
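
resolve_driver_format above records the detected format in a per-instance disk.info file, a single line of JSON mapping image path to format, so a guest that rewrites its image header cannot trick Nova into re-probing a different format (bug 1221190). The sketch below shows the same read/record shape without the locking and chown that the real method adds; the function names are illustrative:

    import json
    import os

    def read_disk_info(disk_info_path, image_path):
        # disk.info holds one JSON object mapping image path -> driver format.
        if not os.path.exists(disk_info_path):
            return None
        with open(disk_info_path) as f:
            line = f.read().rstrip()
        return json.loads(line).get(image_path) if line else None

    def record_disk_info(disk_info_path, image_path, driver_format):
        # Create the file without group/world write so the qemu process
        # (running as another user) cannot tamper with the recorded format.
        fd = os.open(disk_info_path, os.O_RDWR | os.O_CREAT, 0o644)
        with os.fdopen(fd, 'r+') as f:
            line = f.read().rstrip()
            data = json.loads(line) if line else {}
            data[image_path] = driver_format
            f.seek(0)
            f.truncate()
            f.write(json.dumps(data) + '\n')
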
@@ -271,26 +334,21 @@ class Raw(Image):
copy_raw_image(base, self.path, size)
self.correct_format()
- def snapshot_create(self):
- pass
-
def snapshot_extract(self, target, out_format):
images.convert_image(self.path, target, out_format)
- def snapshot_delete(self):
- pass
-
class Qcow2(Image):
- def __init__(self, instance=None, disk_name=None, path=None,
- snapshot_name=None):
+ def __init__(self, instance=None, disk_name=None, path=None):
super(Qcow2, self).__init__("file", "qcow2", is_block_dev=False)
self.path = (path or
os.path.join(libvirt_utils.get_instance_path(instance),
disk_name))
- self.snapshot_name = snapshot_name
self.preallocate = CONF.preallocate_images != 'none'
+ self.disk_info_path = os.path.join(os.path.dirname(self.path),
+ 'disk.info')
+ self.resolve_driver_format()
def create_image(self, prepare_template, base, size, *args, **kwargs):
@utils.synchronized(base, external=True, lock_path=self.lock_path)
@@ -336,25 +394,18 @@ class Qcow2(Image):
with fileutils.remove_path_on_error(self.path):
copy_qcow2_image(base, self.path, size)
- def snapshot_create(self):
- libvirt_utils.create_snapshot(self.path, self.snapshot_name)
-
def snapshot_extract(self, target, out_format):
libvirt_utils.extract_snapshot(self.path, 'qcow2',
- self.snapshot_name, target,
+ target,
out_format)
- def snapshot_delete(self):
- libvirt_utils.delete_snapshot(self.path, self.snapshot_name)
-
class Lvm(Image):
@staticmethod
def escape(filename):
return filename.replace('_', '__')
- def __init__(self, instance=None, disk_name=None, path=None,
- snapshot_name=None):
+ def __init__(self, instance=None, disk_name=None, path=None):
super(Lvm, self).__init__("block", "raw", is_block_dev=True)
if path:
@@ -377,11 +428,6 @@ class Lvm(Image):
self.sparse = CONF.libvirt_sparse_logical_volumes
self.preallocate = not self.sparse
- if snapshot_name:
- self.snapshot_name = snapshot_name
- self.snapshot_path = os.path.join('/dev', self.vg,
- self.snapshot_name)
-
def _can_fallocate(self):
return False
@@ -419,25 +465,58 @@ class Lvm(Image):
with excutils.save_and_reraise_exception():
libvirt_utils.remove_logical_volumes(path)
- def snapshot_create(self):
- size = CONF.libvirt_lvm_snapshot_size
- cmd = ('lvcreate', '-L', size, '-s', '--name', self.snapshot_name,
- self.path)
- libvirt_utils.execute(*cmd, run_as_root=True, attempts=3)
-
def snapshot_extract(self, target, out_format):
- images.convert_image(self.snapshot_path, target, out_format,
+ images.convert_image(self.path, target, out_format,
run_as_root=True)
- def snapshot_delete(self):
- # NOTE (rmk): Snapshot volumes are automatically zeroed by LVM
- cmd = ('lvremove', '-f', self.snapshot_path)
- libvirt_utils.execute(*cmd, run_as_root=True, attempts=3)
+
+class RBDVolumeProxy(object):
+ """Context manager for dealing with an existing rbd volume.
+
+ This handles connecting to rados and opening an ioctx automatically, and
+ otherwise acts like a librbd Image object.
+
+ The underlying librados client and ioctx can be accessed as the attributes
+ 'client' and 'ioctx'.
+ """
+ def __init__(self, driver, name, pool=None):
+ client, ioctx = driver._connect_to_rados(pool)
+ try:
+ self.volume = driver.rbd.Image(ioctx, str(name), snapshot=None)
+ except driver.rbd.Error:
+ LOG.exception(_("error opening rbd image %s"), name)
+ driver._disconnect_from_rados(client, ioctx)
+ raise
+ self.driver = driver
+ self.client = client
+ self.ioctx = ioctx
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, type_, value, traceback):
+ try:
+ self.volume.close()
+ finally:
+ self.driver._disconnect_from_rados(self.client, self.ioctx)
+
+ def __getattr__(self, attrib):
+ return getattr(self.volume, attrib)
+
+
+def ascii_str(s):
+ """Convert a string to ascii, or return None if the input is None.
+
+ This is useful when a parameter is None by default, or a string. LibRBD
+ only accepts ascii, hence the need for conversion.
+ """
+ if s is None:
+ return s
+ return str(s)
class Rbd(Image):
- def __init__(self, instance=None, disk_name=None, path=None,
- snapshot_name=None, **kwargs):
+ def __init__(self, instance=None, disk_name=None, path=None, **kwargs):
super(Rbd, self).__init__("block", "rbd", is_block_dev=True)
if path:
try:
@@ -445,22 +524,41 @@ class Rbd(Image):
except IndexError:
raise exception.InvalidDevicePath(path=path)
else:
- self.rbd_name = '%s_%s' % (instance['name'], disk_name)
- self.snapshot_name = snapshot_name
+ self.rbd_name = '%s_%s' % (instance['uuid'], disk_name)
if not CONF.libvirt_images_rbd_pool:
raise RuntimeError(_('You should specify'
' libvirt_images_rbd_pool'
' flag to use rbd images.'))
self.pool = CONF.libvirt_images_rbd_pool
- self.ceph_conf = CONF.libvirt_images_rbd_ceph_conf
+ self.ceph_conf = ascii_str(CONF.libvirt_images_rbd_ceph_conf)
+ self.rbd_user = ascii_str(CONF.rbd_user)
self.rbd = kwargs.get('rbd', rbd)
+ self.rados = kwargs.get('rados', rados)
+
+ def _connect_to_rados(self, pool=None):
+ client = self.rados.Rados(rados_id=self.rbd_user,
+ conffile=self.ceph_conf)
+ try:
+ client.connect()
+ pool_to_open = str(pool or self.pool)
+ ioctx = client.open_ioctx(pool_to_open)
+ return client, ioctx
+ except self.rados.Error:
+ # shutdown cannot raise an exception
+ client.shutdown()
+ raise
+
+ def _disconnect_from_rados(self, client, ioctx):
+ # closing an ioctx cannot raise an exception
+ ioctx.close()
+ client.shutdown()
def _supports_layering(self):
return hasattr(self.rbd, 'RBD_FEATURE_LAYERING')
def _ceph_args(self):
args = []
- args.extend(['--id', CONF.rbd_user])
+ args.extend(['--id', self.rbd_user])
args.extend(['--conf', self.ceph_conf])
return args
@@ -482,7 +580,7 @@ class Rbd(Image):
return hosts, ports
def libvirt_info(self, disk_bus, disk_dev, device_type, cache_mode,
- extra_specs):
+ extra_specs, hypervisor_version):
"""Get `LibvirtConfigGuestDisk` filled for this image.
:disk_dev: Disk bus device name
@@ -526,6 +624,12 @@ class Rbd(Image):
return False
+ def _resize(self, volume_name, size):
+ size = int(size)
+
+ with RBDVolumeProxy(self, volume_name) as vol:
+ vol.resize(size)
+
def create_image(self, prepare_template, base, size, *args, **kwargs):
if self.rbd is None:
raise RuntimeError(_('rbd python libraries not found'))
@@ -549,16 +653,15 @@ class Rbd(Image):
args += self._ceph_args()
libvirt_utils.import_rbd_image(*args)
- def snapshot_create(self):
- pass
+ base_size = disk.get_disk_size(base)
+
+ if size and size > base_size:
+ self._resize(self.rbd_name, size)
def snapshot_extract(self, target, out_format):
snap = 'rbd:%s/%s' % (self.pool, self.rbd_name)
images.convert_image(snap, target, out_format)
- def snapshot_delete(self):
- pass
-
class Backend(object):
def __init__(self, use_cow):
@@ -589,12 +692,11 @@ class Backend(object):
backend = self.backend(image_type)
return backend(instance=instance, disk_name=disk_name)
- def snapshot(self, disk_path, snapshot_name, image_type=None):
+ def snapshot(self, disk_path, image_type=None):
"""Returns snapshot for given image
:path: path to image
- :snapshot_name: snapshot name
:image_type: type of image
"""
backend = self.backend(image_type)
- return backend(path=disk_path, snapshot_name=snapshot_name)
+ return backend(path=disk_path)
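
The new RBDVolumeProxy context manager added above owns the rados connection lifecycle: it connects, opens an ioctx on the pool, exposes the librbd Image API via __getattr__, and tears everything down in __exit__ even when the body raises. A short usage sketch, assuming the patched imagebackend module is importable; the helper name and parameter are illustrative, and rbd_image stands for an Rbd backend instance (the proxy's "driver" argument):

    from nova.virt.libvirt.imagebackend import RBDVolumeProxy

    def resize_rbd_volume(rbd_image, volume_name, new_size_bytes):
        # The with-block guarantees the librbd image and the rados client are
        # closed even if resize() fails partway through.
        with RBDVolumeProxy(rbd_image, volume_name) as vol:
            vol.resize(int(new_size_bytes))
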
diff --git a/nova/virt/libvirt/utils.py b/nova/virt/libvirt/utils.py
index d7c92b778a..c2171c1b68 100644
--- a/nova/virt/libvirt/utils.py
+++ b/nova/virt/libvirt/utils.py
@@ -504,33 +504,11 @@ def chown(path, owner):
execute('chown', owner, path, run_as_root=True)
-def create_snapshot(disk_path, snapshot_name):
- """Create a snapshot in a disk image
+def extract_snapshot(disk_path, source_fmt, out_path, dest_fmt):
+ """Extract a snapshot from a disk image.
+ Note that nobody should write to the disk image during this operation.
:param disk_path: Path to disk image
- :param snapshot_name: Name of snapshot in disk image
- """
- qemu_img_cmd = ('qemu-img', 'snapshot', '-c', snapshot_name, disk_path)
- # NOTE(vish): libvirt changes ownership of images
- execute(*qemu_img_cmd, run_as_root=True)
-
-
-def delete_snapshot(disk_path, snapshot_name):
- """Create a snapshot in a disk image
-
- :param disk_path: Path to disk image
- :param snapshot_name: Name of snapshot in disk image
- """
- qemu_img_cmd = ('qemu-img', 'snapshot', '-d', snapshot_name, disk_path)
- # NOTE(vish): libvirt changes ownership of images
- execute(*qemu_img_cmd, run_as_root=True)
-
-
-def extract_snapshot(disk_path, source_fmt, snapshot_name, out_path, dest_fmt):
- """Extract a named snapshot from a disk image
-
- :param disk_path: Path to disk image
- :param snapshot_name: Name of snapshot in disk image
:param out_path: Desired path of extracted snapshot
"""
# NOTE(markmc): ISO is just raw to qemu-img
@@ -543,11 +521,6 @@ def extract_snapshot(disk_path, source_fmt, snapshot_name, out_path, dest_fmt):
if CONF.libvirt_snapshot_compression and dest_fmt == "qcow2":
qemu_img_cmd += ('-c',)
- # When snapshot name is omitted we do a basic convert, which
- # is used by live snapshots.
- if snapshot_name is not None:
- qemu_img_cmd += ('-s', snapshot_name)
-
qemu_img_cmd += (disk_path, out_path)
execute(*qemu_img_cmd)
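
With the named-snapshot handling removed, extract_snapshot is now a plain qemu-img convert from the running disk to the upload path. The sketch below assembles the equivalent command line under the same assumptions as the function above (ISO treated as raw, optional qcow2 compression); it only builds the argument list and does not execute anything:

    def build_extract_command(disk_path, source_fmt, out_path, dest_fmt,
                              compress=False):
        # ISO output is just raw bytes as far as qemu-img is concerned.
        if dest_fmt == 'iso':
            dest_fmt = 'raw'
        cmd = ['qemu-img', 'convert', '-f', source_fmt, '-O', dest_fmt]
        if compress and dest_fmt == 'qcow2':
            cmd.append('-c')          # qcow2 supports compressed clusters
        cmd += [disk_path, out_path]
        return cmd
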
diff --git a/nova/virt/libvirt/volume.py b/nova/virt/libvirt/volume.py
index 942970366a..eb53be9e38 100644
--- a/nova/virt/libvirt/volume.py
+++ b/nova/virt/libvirt/volume.py
@@ -227,7 +227,8 @@ class LibvirtISCSIVolumeDriver(LibvirtBaseVolumeDriver):
return self._run_iscsiadm(iscsi_properties, iscsi_command, **kwargs)
def _get_target_portals_from_iscsiadm_output(self, output):
- return [line.split()[0] for line in output.splitlines()]
+ # return both portals and iqns
+ return [line.split() for line in output.splitlines()]
@utils.synchronized('connect_volume')
def connect_volume(self, connection_info, disk_info):
@@ -253,9 +254,10 @@ class LibvirtISCSIVolumeDriver(LibvirtBaseVolumeDriver):
check_exit_code=[0, 255])[0] \
or ""
- for ip in self._get_target_portals_from_iscsiadm_output(out):
+ for ip, iqn in self._get_target_portals_from_iscsiadm_output(out):
props = iscsi_properties.copy()
props['target_portal'] = ip
+ props['target_iqn'] = iqn
self._connect_to_iscsi_portal(props)
self._rescan_iscsi()
@@ -346,17 +348,42 @@ class LibvirtISCSIVolumeDriver(LibvirtBaseVolumeDriver):
if mpdev:
devices.append(mpdev)
+ # Do a discovery to find all targets.
+ # Targets for multiple paths for the same multipath device
+ # may not be the same.
+ out = self._run_iscsiadm_bare(['-m',
+ 'discovery',
+ '-t',
+ 'sendtargets',
+ '-p',
+ iscsi_properties['target_portal']],
+ check_exit_code=[0, 255])[0] \
+ or ""
+
+ ips_iqns = self._get_target_portals_from_iscsiadm_output(out)
+
if not devices:
# disconnect if no other multipath devices
- self._disconnect_mpath(iscsi_properties)
+ self._disconnect_mpath(iscsi_properties, ips_iqns)
return
+ # Get a target for all other multipath devices
other_iqns = [self._get_multipath_iqn(device)
for device in devices]
-
- if iscsi_properties['target_iqn'] not in other_iqns:
+ # Get all the targets for the current multipath device
+ current_iqns = [iqn for ip, iqn in ips_iqns]
+
+ in_use = False
+ for current in current_iqns:
+ if current in other_iqns:
+ in_use = True
+ break
+
+ # If no other multipath device attached has the same iqn
+ # as the current device
+ if not in_use:
# disconnect if no other multipath devices with same iqn
- self._disconnect_mpath(iscsi_properties)
+ self._disconnect_mpath(iscsi_properties, ips_iqns)
return
# else do not disconnect iscsi portals,
@@ -451,13 +478,11 @@ class LibvirtISCSIVolumeDriver(LibvirtBaseVolumeDriver):
return []
return [entry for entry in devices if entry.startswith("ip-")]
- def _disconnect_mpath(self, iscsi_properties):
- entries = self._get_iscsi_devices()
- ips = [ip.split("-")[1] for ip in entries
- if iscsi_properties['target_iqn'] in ip]
- for ip in ips:
+ def _disconnect_mpath(self, iscsi_properties, ips_iqns):
+ for ip, iqn in ips_iqns:
props = iscsi_properties.copy()
props['target_portal'] = ip
+ props['target_iqn'] = iqn
self._disconnect_from_iscsi_portal(props)
self._rescan_multipath()
@@ -526,9 +551,10 @@ class LibvirtISERVolumeDriver(LibvirtISCSIVolumeDriver):
iser_properties['target_portal']],
check_exit_code=[0, 255])[0] or ""
- for ip in self._get_target_portals_from_iscsiadm_output(out):
+ for ip, iqn in self._get_target_portals_from_iscsiadm_output(out):
props = iser_properties.copy()
props['target_portal'] = ip
+ props['target_iqn'] = iqn
self._connect_to_iser_portal(props)
self._rescan_iscsi()
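
The volume.py changes hinge on _get_target_portals_from_iscsiadm_output now returning (portal, iqn) pairs instead of portals only, so multipath connect and disconnect can address each target explicitly. A sendtargets discovery line has the shape "portal,tpgt iqn"; splitting on whitespace yields exactly those two fields. A small parsing sketch with an example line (the IQN shown is made up):

    def parse_sendtargets(output):
        """Turn 'portal,tpgt iqn' discovery lines into (portal, iqn) pairs."""
        # Example line:
        #   '192.168.1.10:3260,1 iqn.2010-10.org.openstack:volume-0001'
        targets = []
        for line in output.splitlines():
            parts = line.split()
            if len(parts) == 2:
                targets.append((parts[0], parts[1]))
        return targets
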
diff --git a/nova/virt/powervm/driver.py b/nova/virt/powervm/driver.py
index 164eb59211..a63de1d32d 100644
--- a/nova/virt/powervm/driver.py
+++ b/nova/virt/powervm/driver.py
@@ -92,9 +92,13 @@ class PowerVMDriver(driver.ComputeDriver):
def plug_vifs(self, instance, network_info):
"""Plug VIFs into networks."""
- LOG.debug(_('Network injection is not supported by the '
- 'PowerVM driver.'), instance)
- pass
+ msg = _("VIF plugging is not supported by the PowerVM driver.")
+ raise NotImplementedError(msg)
+
+ def unplug_vifs(self, instance, network_info):
+ """Unplug VIFs from networks."""
+ msg = _("VIF unplugging is not supported by the PowerVM driver.")
+ raise NotImplementedError(msg)
def macs_for_instance(self, instance):
return self._powervm.macs_for_instance(instance)
@@ -306,7 +310,7 @@ class PowerVMDriver(driver.ComputeDriver):
new_name = self._get_resize_name(instance['name'])
self._powervm.destroy(new_name)
- def finish_revert_migration(self, instance, network_info,
+ def finish_revert_migration(self, context, instance, network_info,
block_device_info=None, power_on=True):
"""Finish reverting a resize."""
diff --git a/nova/virt/vmwareapi/driver.py b/nova/virt/vmwareapi/driver.py
index 38e546772d..a31d1f7b50 100644
--- a/nova/virt/vmwareapi/driver.py
+++ b/nova/virt/vmwareapi/driver.py
@@ -372,14 +372,6 @@ class VMwareESXDriver(driver.ComputeDriver):
"""inject network info for specified instance."""
self._vmops.inject_network_info(instance, network_info)
- def plug_vifs(self, instance, network_info):
- """Plug VIFs into networks."""
- self._vmops.plug_vifs(instance, network_info)
-
- def unplug_vifs(self, instance, network_info):
- """Unplug VIFs from networks."""
- self._vmops.unplug_vifs(instance, network_info)
-
def list_instance_uuids(self):
"""List VM instance UUIDs."""
uuids = self._vmops.list_instances()
@@ -455,11 +447,11 @@ class VMwareVCDriver(VMwareESXDriver):
_vmops = self._get_vmops_for_compute_node(instance['node'])
_vmops.confirm_migration(migration, instance, network_info)
- def finish_revert_migration(self, instance, network_info,
+ def finish_revert_migration(self, context, instance, network_info,
block_device_info=None, power_on=True):
"""Finish reverting a resize, powering back on the instance."""
_vmops = self._get_vmops_for_compute_node(instance['node'])
- _vmops.finish_revert_migration(instance, network_info,
+ _vmops.finish_revert_migration(context, instance, network_info,
block_device_info, power_on)
def finish_migration(self, context, migration, instance, disk_info,
@@ -733,16 +725,6 @@ class VMwareVCDriver(VMwareESXDriver):
_vmops = self._get_vmops_for_compute_node(instance['node'])
_vmops.inject_network_info(instance, network_info)
- def plug_vifs(self, instance, network_info):
- """Plug VIFs into networks."""
- _vmops = self._get_vmops_for_compute_node(instance['node'])
- _vmops.plug_vifs(instance, network_info)
-
- def unplug_vifs(self, instance, network_info):
- """Unplug VIFs from networks."""
- _vmops = self._get_vmops_for_compute_node(instance['node'])
- _vmops.unplug_vifs(instance, network_info)
-
class VMwareAPISession(object):
"""
@@ -808,16 +790,8 @@ class VMwareAPISession(object):
def __del__(self):
"""Logs-out the session."""
- # Logout to avoid un-necessary increase in session count at the
- # ESX host
- try:
- # May not have been able to connect to VC, so vim is still None
- if self.vim:
- self.vim.Logout(self.vim.get_service_content().sessionManager)
- except Exception as excep:
- # It is just cautionary on our part to do a logout in del just
- # to ensure that the session is not left active.
- LOG.debug(excep)
+ if hasattr(self, 'vim') and self.vim:
+ self.vim.Logout(self.vim.get_service_content().sessionManager)
def _is_vim_object(self, module):
"""Check if the module is a VIM Object instance."""
@@ -910,6 +884,9 @@ class VMwareAPISession(object):
self._create_session()
return self.vim
+ def _stop_loop(self, loop):
+ loop.stop()
+
def _wait_for_task(self, instance_uuid, task_ref):
"""
Return a Deferred that will give the result of the given task.
@@ -920,8 +897,12 @@ class VMwareAPISession(object):
instance_uuid,
task_ref, done)
loop.start(CONF.vmware.task_poll_interval)
- ret_val = done.wait()
- loop.stop()
+ try:
+ ret_val = done.wait()
+ except Exception:
+ raise
+ finally:
+ self._stop_loop(loop)
return ret_val
def _poll_task(self, instance_uuid, task_ref, done):
diff --git a/nova/virt/vmwareapi/fake.py b/nova/virt/vmwareapi/fake.py
index dc77f64cba..19834a6cb5 100644
--- a/nova/virt/vmwareapi/fake.py
+++ b/nova/virt/vmwareapi/fake.py
@@ -694,8 +694,11 @@ class Datacenter(ManagedObject):
network_do = DataObject()
network_do.ManagedObjectReference = [net_ref]
self.set("network", network_do)
- datastore = DataObject()
- datastore.ManagedObjectReference = [ds_ref]
+ if ds_ref:
+ datastore = DataObject()
+ datastore.ManagedObjectReference = [ds_ref]
+ else:
+ datastore = None
self.set("datastore", datastore)
diff --git a/nova/virt/vmwareapi/host.py b/nova/virt/vmwareapi/host.py
index fb7b602db0..4420256107 100644..100755
--- a/nova/virt/vmwareapi/host.py
+++ b/nova/virt/vmwareapi/host.py
@@ -21,6 +21,7 @@ Management class for host-related functions (start, reboot, etc).
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
+from nova import utils
from nova.virt.vmwareapi import vim_util
from nova.virt.vmwareapi import vm_util
@@ -133,7 +134,8 @@ class HostState(object):
data["host_memory_free"] = data["host_memory_total"] - \
summary.quickStats.overallMemoryUsage
data["hypervisor_type"] = summary.config.product.name
- data["hypervisor_version"] = summary.config.product.version
+ data["hypervisor_version"] = utils.convert_version_to_int(
+ str(summary.config.product.version))
data["hypervisor_hostname"] = self._host_name
data["supported_instances"] = [('i686', 'vmware', 'hvm'),
('x86_64', 'vmware', 'hvm')]
@@ -186,7 +188,8 @@ class VCState(object):
data["host_memory_total"] = stats['mem']['total']
data["host_memory_free"] = stats['mem']['free']
data["hypervisor_type"] = about_info.name
- data["hypervisor_version"] = about_info.version
+ data["hypervisor_version"] = utils.convert_version_to_int(
+ str(about_info.version))
data["hypervisor_hostname"] = self._host_name
data["supported_instances"] = [('i686', 'vmware', 'hvm'),
('x86_64', 'vmware', 'hvm')]
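
Reporting hypervisor_version as an integer instead of a raw string lets the scheduler and database compare versions numerically. One common packing, and the one nova.utils appears to use, multiplies each dotted component by a power of 1000; the sketch below is an illustrative equivalent, not the exact Nova implementation:

    def version_to_int(version_str):
        # '5.1.0' -> (5, 1, 0) -> 5 * 1000**2 + 1 * 1000 + 0 = 5001000
        parts = [int(p) for p in version_str.split('.')]
        return sum(part * 1000 ** i for i, part in enumerate(reversed(parts)))

    # Packed values then compare numerically:
    #   version_to_int('5.1.0') > version_to_int('4.1.0')  ->  True
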
diff --git a/nova/virt/vmwareapi/vim.py b/nova/virt/vmwareapi/vim.py
index 5928eb45f9..4b64e66f21 100644
--- a/nova/virt/vmwareapi/vim.py
+++ b/nova/virt/vmwareapi/vim.py
@@ -93,8 +93,10 @@ class Vim:
self.url = Vim.get_soap_url(protocol, host)
self.client = suds.client.Client(self.wsdl_url, location=self.url,
plugins=[VIMMessagePlugin()])
- self._service_content = self.RetrieveServiceContent(
- "ServiceInstance")
+ self._service_content = self.retrieve_service_content()
+
+ def retrieve_service_content(self):
+ return self.RetrieveServiceContent("ServiceInstance")
@staticmethod
def get_wsdl_url(protocol, host_name):
@@ -162,7 +164,9 @@ class Vim:
return response
# Catch the VimFaultException that is raised by the fault
# check of the SOAP response
- except error_util.VimFaultException as excep:
+ except error_util.VimFaultException:
+ raise
+ except suds.MethodNotFound:
raise
except suds.WebFault as excep:
doc = excep.document
diff --git a/nova/virt/vmwareapi/vm_util.py b/nova/virt/vmwareapi/vm_util.py
index d833ec8b62..4edd3201c6 100644..100755
--- a/nova/virt/vmwareapi/vm_util.py
+++ b/nova/virt/vmwareapi/vm_util.py
@@ -23,13 +23,20 @@ The VMware API VM utility module to build SOAP object specs.
import collections
import copy
+from oslo.config import cfg
+
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
+from nova import utils
from nova.virt.vmwareapi import vim_util
+CONF = cfg.CONF
LOG = logging.getLogger(__name__)
+# the config key which stores the VNC port
+VNC_CONFIG_KEY = 'config.extraConfig["RemoteDisplay.vnc.port"]'
+
def build_datastore_path(datastore_name, path):
"""Build the datastore compliant path."""
@@ -50,11 +57,11 @@ def split_datastore_path(datastore_path):
return datastore_url, path.strip()
-def get_vm_create_spec(client_factory, instance, data_store_name,
+def get_vm_create_spec(client_factory, instance, name, data_store_name,
vif_infos, os_type="otherGuest"):
"""Builds the VM Create spec."""
config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
- config_spec.name = instance['uuid']
+ config_spec.name = name
config_spec.guestId = os_type
# Allow nested ESX instances to host 64 bit VMs.
@@ -293,12 +300,12 @@ def get_vm_extra_config_spec(client_factory, extra_opts):
return config_spec
-def get_vmdk_path_and_adapter_type(hardware_devices):
+def get_vmdk_path_and_adapter_type(hardware_devices, uuid=None):
"""Gets the vmdk file path and the storage adapter type."""
if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice":
hardware_devices = hardware_devices.VirtualDevice
vmdk_file_path = None
- vmdk_controler_key = None
+ vmdk_controller_key = None
disk_type = None
unit_number = 0
@@ -307,8 +314,12 @@ def get_vmdk_path_and_adapter_type(hardware_devices):
if device.__class__.__name__ == "VirtualDisk":
if device.backing.__class__.__name__ == \
"VirtualDiskFlatVer2BackingInfo":
- vmdk_file_path = device.backing.fileName
- vmdk_controler_key = device.controllerKey
+ if uuid:
+ if uuid in device.backing.fileName:
+ vmdk_file_path = device.backing.fileName
+ else:
+ vmdk_file_path = device.backing.fileName
+ vmdk_controller_key = device.controllerKey
if getattr(device.backing, 'thinProvisioned', False):
disk_type = "thin"
else:
@@ -327,9 +338,9 @@ def get_vmdk_path_and_adapter_type(hardware_devices):
elif device.__class__.__name__ == "VirtualLsiLogicSASController":
adapter_type_dict[device.key] = "lsiLogicsas"
- adapter_type = adapter_type_dict.get(vmdk_controler_key, "")
+ adapter_type = adapter_type_dict.get(vmdk_controller_key, "")
- return (vmdk_file_path, vmdk_controler_key, adapter_type,
+ return (vmdk_file_path, vmdk_controller_key, adapter_type,
disk_type, unit_number)
@@ -596,6 +607,45 @@ def get_vnc_config_spec(client_factory, port, password):
return virtual_machine_config_spec
+@utils.synchronized('vmware.get_vnc_port')
+def get_vnc_port(session):
+ """Return VNC port for an VM or None if there is no available port."""
+ min_port = CONF.vmware.vnc_port
+ port_total = CONF.vmware.vnc_port_total
+ allocated_ports = _get_allocated_vnc_ports(session)
+ max_port = min_port + port_total
+ for port in range(min_port, max_port):
+ if port not in allocated_ports:
+ return port
+ raise exception.ConsolePortRangeExhausted(min_port=min_port,
+ max_port=max_port)
+
+
+def _get_allocated_vnc_ports(session):
+ """Return an integer set of all allocated VNC ports."""
+ # TODO(rgerganov): bug #1256944
+ # The VNC port should be unique per host, not per vCenter
+ vnc_ports = set()
+ result = session._call_method(vim_util, "get_objects",
+ "VirtualMachine", [VNC_CONFIG_KEY])
+ while result:
+ for obj in result.objects:
+ if not hasattr(obj, 'propSet'):
+ continue
+ dynamic_prop = obj.propSet[0]
+ option_value = dynamic_prop.val
+ vnc_port = option_value.value
+ vnc_ports.add(int(vnc_port))
+ token = _get_token(result)
+ if token:
+ result = session._call_method(vim_util,
+ "continue_to_get_objects",
+ token)
+ else:
+ break
+ return vnc_ports
+
+
def search_datastore_spec(client_factory, file_name):
"""Builds the datastore search spec."""
search_spec = client_factory.create('ns0:HostDatastoreBrowserSearchSpec')
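get_vnc_port serialises allocation with @utils.synchronized and hands out the first port in [vnc_port, vnc_port + vnc_port_total) that no existing VM already uses, raising ConsolePortRangeExhausted when the whole range is taken. The search itself is a first-free scan over a set; a small standalone sketch of that scan (names are illustrative):

    def pick_free_port(allocated, min_port=5900, total=10000):
        # Return the first port in [min_port, min_port + total) not in use.
        for port in range(min_port, min_port + total):
            if port not in allocated:
                return port
        raise RuntimeError("no free VNC port in range %d-%d"
                           % (min_port, min_port + total))

    # Ports 5900 and 5901 are taken, so the next candidate, 5902, is returned.
    assert pick_free_port({5900, 5901}) == 5902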
@@ -993,13 +1043,14 @@ def get_vmdk_backed_disk_device(hardware_devices, uuid):
return device
-def get_vmdk_volume_disk(hardware_devices):
+def get_vmdk_volume_disk(hardware_devices, path=None):
if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice":
hardware_devices = hardware_devices.VirtualDevice
for device in hardware_devices:
if (device.__class__.__name__ == "VirtualDisk"):
- return device
+ if not path or path == device.backing.fileName:
+ return device
def get_res_pool_ref(session, cluster, node_mo_id):
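Both helpers now take an optional filter (the instance uuid, or an explicit vmdk path) so that the right disk is selected when a rescue VM has the original instance's root disk attached next to its own. The filter is only a substring or equality check on the backing file name; an illustrative sketch with stand-in device objects:

    class FakeDisk(object):
        def __init__(self, file_name):
            self.backing = type('Backing', (), {'fileName': file_name})()

    def find_disk_for_instance(devices, uuid):
        # Return the first disk whose backing file path contains the uuid.
        for dev in devices:
            if uuid in dev.backing.fileName:
                return dev

    disks = [FakeDisk('[ds1] other-vm/other-vm.vmdk'),
             FakeDisk('[ds1] my-uuid/my-uuid.vmdk')]
    assert find_disk_for_instance(disks, 'my-uuid') is disks[1]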
diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py
index d799f0df26..16a5eaf60b 100644..100755
--- a/nova/virt/vmwareapi/vmops.py
+++ b/nova/virt/vmwareapi/vmops.py
@@ -35,6 +35,7 @@ from nova.api.metadata import base as instance_metadata
from nova import compute
from nova.compute import power_state
from nova.compute import task_states
+from nova.compute import vm_states
from nova import context as nova_context
from nova import exception
from nova.openstack.common import excutils
@@ -171,7 +172,8 @@ class VMwareVMOps(object):
LOG.debug(_("Deleted the datastore file"), instance=instance)
def spawn(self, context, instance, image_meta, injected_files,
- admin_password, network_info, block_device_info=None):
+ admin_password, network_info, block_device_info=None,
+ instance_name=None):
"""
Creates a VM instance.
@@ -282,9 +284,13 @@ class VMwareVMOps(object):
vif_infos = _get_vif_infos()
+ # Get the instance name. In some cases this may differ from the 'uuid',
+ # for example when a rescue instance is spawned.
+ if not instance_name:
+ instance_name = instance['uuid']
# Get the create vm config spec
config_spec = vm_util.get_vm_create_spec(
- client_factory, instance,
+ client_factory, instance, instance_name,
data_store_name, vif_infos, os_type)
def _execute_create_vm():
@@ -300,7 +306,7 @@ class VMwareVMOps(object):
LOG.debug(_("Created VM on the ESX host"), instance=instance)
_execute_create_vm()
- vm_ref = vm_util.get_vm_ref(self._session, instance)
+ vm_ref = vm_util.get_vm_ref_from_name(self._session, instance_name)
# Set the machine.id parameter of the instance to inject
# the NIC configuration inside the VM
@@ -309,8 +315,8 @@ class VMwareVMOps(object):
# Set the vnc configuration of the instance, vnc port starts from 5900
if CONF.vnc_enabled:
- vnc_port = self._get_vnc_port(vm_ref)
vnc_pass = CONF.vmware.vnc_password or ''
+ vnc_port = vm_util.get_vnc_port(self._session)
self._set_vnc_config(client_factory, instance, vnc_port, vnc_pass)
def _create_virtual_disk():
@@ -693,7 +699,7 @@ class VMwareVMOps(object):
"VirtualMachine", "config.hardware.device")
(vmdk_file_path_before_snapshot, controller_key, adapter_type,
disk_type, unit_number) = vm_util.get_vmdk_path_and_adapter_type(
- hardware_devices)
+ hardware_devices, uuid=instance['uuid'])
datastore_name = vm_util.split_datastore_path(
vmdk_file_path_before_snapshot)[0]
os_type = self._session._call_method(vim_util,
@@ -844,8 +850,6 @@ class VMwareVMOps(object):
def reboot(self, instance, network_info):
"""Reboot a VM instance."""
vm_ref = vm_util.get_vm_ref(self._session, instance)
- self.plug_vifs(instance, network_info)
-
lst_properties = ["summary.guest.toolsStatus", "runtime.powerState",
"summary.guest.toolsRunningStatus"]
props = self._session._call_method(vim_util, "get_object_properties",
@@ -898,21 +902,18 @@ class VMwareVMOps(object):
except Exception as excep:
LOG.warn(_("In vmwareapi:vmops:delete, got this exception"
" while destroying the VM: %s") % str(excep))
-
- if network_info:
- self.unplug_vifs(instance, network_info)
except Exception as exc:
LOG.exception(exc, instance=instance)
- def destroy(self, instance, network_info, destroy_disks=True):
- """
- Destroy a VM instance. Steps followed are:
- 1. Power off the VM, if it is in poweredOn state.
- 2. Un-register a VM.
- 3. Delete the contents of the folder holding the VM related data.
- """
+ def _destroy_instance(self, instance, network_info, destroy_disks=True,
+ instance_name=None):
+ # Destroy a VM instance
+ # Get the instance name. In some cases this may differ from the 'uuid',
+ # for example when a rescue instance is spawned.
+ if not instance_name:
+ instance_name = instance['uuid']
try:
- vm_ref = vm_util.get_vm_ref(self._session, instance)
+ vm_ref = vm_util.get_vm_ref_from_name(self._session, instance_name)
lst_properties = ["config.files.vmPathName", "runtime.powerState",
"datastore"]
props = self._session._call_method(vim_util,
@@ -943,12 +944,9 @@ class VMwareVMOps(object):
"UnregisterVM", vm_ref)
LOG.debug(_("Unregistered the VM"), instance=instance)
except Exception as excep:
- LOG.warn(_("In vmwareapi:vmops:destroy, got this exception"
- " while un-registering the VM: %s") % str(excep))
-
- if network_info:
- self.unplug_vifs(instance, network_info)
-
+ LOG.warn(_("In vmwareapi:vmops:_destroy_instance, got this "
+ "exception while un-registering the VM: %s"),
+ excep)
# Delete the folder holding the VM related content on
# the datastore.
if destroy_disks:
@@ -976,13 +974,37 @@ class VMwareVMOps(object):
{'datastore_name': datastore_name},
instance=instance)
except Exception as excep:
- LOG.warn(_("In vmwareapi:vmops:destroy, "
- "got this exception while deleting"
- " the VM contents from the disk: %s")
- % str(excep))
+ LOG.warn(_("In vmwareapi:vmops:_destroy_instance, "
+ "got this exception while deleting "
+ "the VM contents from the disk: %s"),
+ excep)
except Exception as exc:
LOG.exception(exc, instance=instance)
+ def destroy(self, instance, network_info, destroy_disks=True):
+ """Destroy a VM instance.
+
+ Steps followed for each VM are:
+ 1. Power off, if it is in poweredOn state.
+ 2. Un-register.
+ 3. Delete the contents of the folder holding the VM related data.
+ """
+ # If there is a rescue VM then we need to destroy that one too.
+ LOG.debug(_("Destroying instance"), instance=instance)
+ if instance['vm_state'] == vm_states.RESCUED:
+ LOG.debug(_("Rescue VM configured"), instance=instance)
+ try:
+ self.unrescue(instance, power_on=False)
+ LOG.debug(_("Rescue VM destroyed"), instance=instance)
+ except Exception:
+ rescue_name = instance['uuid'] + self._rescue_suffix
+ self._destroy_instance(instance, network_info,
+ destroy_disks=destroy_disks,
+ instance_name=rescue_name)
+ self._destroy_instance(instance, network_info,
+ destroy_disks=destroy_disks)
+ LOG.debug(_("Instance destroyed"), instance=instance)
+
def pause(self, instance):
msg = _("pause not supported for vmwareapi")
raise NotImplementedError(msg)
@@ -1041,39 +1063,70 @@ class VMwareVMOps(object):
self.power_off(instance)
r_instance = copy.deepcopy(instance)
r_instance['name'] = r_instance['name'] + self._rescue_suffix
- r_instance['uuid'] = r_instance['uuid'] + self._rescue_suffix
+ instance_name = r_instance['uuid'] + self._rescue_suffix
self.spawn(context, r_instance, image_meta,
- None, None, network_info)
+ None, None, network_info,
+ instance_name=instance_name)
# Attach vmdk to the rescue VM
hardware_devices = self._session._call_method(vim_util,
"get_dynamic_property", vm_ref,
"VirtualMachine", "config.hardware.device")
- vmdk_path, controller_key, adapter_type, disk_type, unit_number \
- = vm_util.get_vmdk_path_and_adapter_type(hardware_devices)
+ (vmdk_path, controller_key, adapter_type, disk_type,
+ unit_number) = vm_util.get_vmdk_path_and_adapter_type(
+ hardware_devices, uuid=instance['uuid'])
+
# Figure out the correct unit number
unit_number = unit_number + 1
- rescue_vm_ref = vm_util.get_vm_ref_from_uuid(self._session,
- r_instance['uuid'])
- if rescue_vm_ref is None:
- rescue_vm_ref = vm_util.get_vm_ref_from_name(self._session,
- r_instance['name'])
+ rescue_vm_ref = vm_util.get_vm_ref_from_name(self._session,
+ instance_name)
self._volumeops.attach_disk_to_vm(
rescue_vm_ref, r_instance,
adapter_type, disk_type, vmdk_path,
controller_key=controller_key,
unit_number=unit_number)
- def unrescue(self, instance):
+ def unrescue(self, instance, power_on=True):
"""Unrescue the specified instance."""
+ # Get the original vmdk_path
+ vm_ref = vm_util.get_vm_ref(self._session, instance)
+ hardware_devices = self._session._call_method(vim_util,
+ "get_dynamic_property", vm_ref,
+ "VirtualMachine", "config.hardware.device")
+ (vmdk_path, controller_key, adapter_type, disk_type,
+ unit_number) = vm_util.get_vmdk_path_and_adapter_type(
+ hardware_devices, uuid=instance['uuid'])
r_instance = copy.deepcopy(instance)
+ instance_name = r_instance['uuid'] + self._rescue_suffix
r_instance['name'] = r_instance['name'] + self._rescue_suffix
- r_instance['uuid'] = r_instance['uuid'] + self._rescue_suffix
- self.destroy(r_instance, None)
- self._power_on(instance)
+ # detach the original instance disk from the rescue disk
+ vm_rescue_ref = vm_util.get_vm_ref_from_name(self._session,
+ instance_name)
+ hardware_devices = self._session._call_method(vim_util,
+ "get_dynamic_property", vm_rescue_ref,
+ "VirtualMachine", "config.hardware.device")
+ device = vm_util.get_vmdk_volume_disk(hardware_devices, path=vmdk_path)
+ self._power_off_vm_ref(vm_rescue_ref)
+ self._volumeops.detach_disk_from_vm(vm_rescue_ref, r_instance, device)
+ self._destroy_instance(r_instance, None, instance_name=instance_name)
+ if power_on:
+ self._power_on(instance)
+
+ def _power_off_vm_ref(self, vm_ref):
+ """Power off the specifed vm.
+
+ :param vm_ref: a reference object to the VM.
+ """
+ poweroff_task = self._session._call_method(
+ self._session._get_vim(),
+ "PowerOffVM_Task", vm_ref)
+ self._session._wait_for_task(None, poweroff_task)
def power_off(self, instance):
- """Power off the specified instance."""
+ """Power off the specified instance.
+
+ :param instance: nova.objects.instance.Instance
+ """
vm_ref = vm_util.get_vm_ref(self._session, instance)
pwr_state = self._session._call_method(vim_util,
@@ -1082,10 +1135,7 @@ class VMwareVMOps(object):
# Only PoweredOn VMs can be powered off.
if pwr_state == "poweredOn":
LOG.debug(_("Powering off the VM"), instance=instance)
- poweroff_task = self._session._call_method(
- self._session._get_vim(),
- "PowerOffVM_Task", vm_ref)
- self._session._wait_for_task(instance['uuid'], poweroff_task)
+ self._power_off_vm_ref(vm_ref)
LOG.debug(_("Powered off the VM"), instance=instance)
# Raise Exception if VM is suspended
elif pwr_state == "suspended":
@@ -1219,10 +1269,7 @@ class VMwareVMOps(object):
LOG.warn(_("In vmwareapi:vmops:confirm_migration, got this "
"exception while destroying the VM: %s") % str(excep))
- if network_info:
- self.unplug_vifs(instance, network_info)
-
- def finish_revert_migration(self, instance, network_info,
+ def finish_revert_migration(self, context, instance, network_info,
block_device_info, power_on=True):
"""Finish reverting a resize."""
# The original vm was suffixed with '-orig'; find it using
@@ -1351,9 +1398,17 @@ class VMwareVMOps(object):
def get_vnc_console(self, instance):
"""Return connection info for a vnc console."""
vm_ref = vm_util.get_vm_ref(self._session, instance)
+ opt_value = self._session._call_method(vim_util,
+ 'get_dynamic_property',
+ vm_ref, 'VirtualMachine',
+ vm_util.VNC_CONFIG_KEY)
+ if opt_value:
+ port = int(opt_value.value)
+ else:
+ raise exception.ConsoleTypeUnavailable(console_type='vnc')
return {'host': CONF.vmware.host_ip,
- 'port': self._get_vnc_port(vm_ref),
+ 'port': port,
'internal_access_path': None}
def get_vnc_console_vcenter(self, instance):
@@ -1377,14 +1432,6 @@ class VMwareVMOps(object):
return vnc_console
@staticmethod
- def _get_vnc_port(vm_ref):
- """Return VNC port for an VM."""
- vm_id = int(vm_ref.value.replace('vm-', ''))
- port = CONF.vmware.vnc_port + vm_id % CONF.vmware.vnc_port_total
-
- return port
-
- @staticmethod
def _get_machine_id_str(network_info):
machine_id_str = ''
for vif in network_info:
@@ -1586,14 +1633,6 @@ class VMwareVMOps(object):
client_factory = self._session._get_vim().client.factory
self._set_machine_id(client_factory, instance, network_info)
- def plug_vifs(self, instance, network_info):
- """Plug VIFs into networks."""
- pass
-
- def unplug_vifs(self, instance, network_info):
- """Unplug VIFs from networks."""
- pass
-
class VMwareVCVMOps(VMwareVMOps):
"""Management class for VM-related tasks.
@@ -1619,19 +1658,19 @@ class VMwareVCVMOps(VMwareVMOps):
while dcs:
token = vm_util._get_token(dcs)
for dco in dcs.objects:
- name = None
- vmFolder = None
dc_ref = dco.obj
ds_refs = []
- for p in dco.propSet:
- if p.name == 'name':
- name = p.val
- if p.name == 'datastore':
- datastore_refs = p.val.ManagedObjectReference
- for ds in datastore_refs:
- ds_refs.append(ds.value)
- if p.name == 'vmFolder':
- vmFolder = p.val
+ prop_dict = vm_util.propset_dict(dco.propSet)
+ name = prop_dict.get('name')
+ vmFolder = prop_dict.get('vmFolder')
+ datastore_refs = prop_dict.get('datastore')
+ if datastore_refs:
+ datastore_refs = datastore_refs.ManagedObjectReference
+ for ds in datastore_refs:
+ ds_refs.append(ds.value)
+ else:
+ LOG.debug("Datacenter %s doesn't have any datastore "
+ "associated with it, ignoring it", name)
for ds_ref in ds_refs:
self._datastore_dc_mapping[ds_ref] = DcInfo(ref=dc_ref,
name=name, vmFolder=vmFolder)
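vm_util.propset_dict is assumed to flatten a property set into a plain name -> value mapping, so a datacenter that lacks a property (for example one without any datastore) becomes a simple .get() miss instead of a special case inside the loop. A sketch of that flattening:

    def propset_dict(propset):
        # Turn objects carrying .name/.val pairs into a dict (assumed behaviour).
        if propset is None:
            return {}
        return dict((prop.name, prop.val) for prop in propset)

    class Prop(object):
        def __init__(self, name, val):
            self.name, self.val = name, val

    props = [Prop('name', 'dc1'), Prop('vmFolder', 'group-v3')]
    d = propset_dict(props)
    assert d['name'] == 'dc1'
    assert d.get('datastore') is None   # missing property, no exception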
diff --git a/nova/virt/xenapi/driver.py b/nova/virt/xenapi/driver.py
index d4bcf3832d..473f10d925 100644
--- a/nova/virt/xenapi/driver.py
+++ b/nova/virt/xenapi/driver.py
@@ -220,11 +220,12 @@ class XenAPIDriver(driver.ComputeDriver):
# TODO(Vek): Need to pass context in for access to auth_token
self._vmops.confirm_migration(migration, instance, network_info)
- def finish_revert_migration(self, instance, network_info,
+ def finish_revert_migration(self, context, instance, network_info,
block_device_info=None, power_on=True):
"""Finish reverting a resize."""
# NOTE(vish): Xen currently does not use network info.
- self._vmops.finish_revert_migration(instance, block_device_info,
+ self._vmops.finish_revert_migration(context, instance,
+ block_device_info,
power_on)
def finish_migration(self, context, migration, instance, disk_info,
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 611f3f4e21..75fb6f5479 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -239,7 +239,8 @@ class VMOps(object):
mount_device,
hotplug=False)
- def finish_revert_migration(self, instance, block_device_info=None,
+ def finish_revert_migration(self, context, instance,
+ block_device_info=None,
power_on=True):
self._restore_orig_vm_and_cleanup_orphan(instance, block_device_info,
power_on)
diff --git a/requirements.txt b/requirements.txt
index e6c8bbc5e6..65680878c8 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -8,12 +8,12 @@ eventlet>=0.13.0
Jinja2
kombu>=2.4.8
lxml>=2.3
-Routes>=1.12.3
+Routes>=1.12.3,!=2.0
WebOb>=1.2.3,<1.3
greenlet>=0.3.2
PasteDeploy>=1.5.0
Paste
-sqlalchemy-migrate>=0.7.2
+sqlalchemy-migrate>=0.7.2,!=0.9.2
netaddr
suds>=0.4
paramiko>=1.8.0
@@ -22,7 +22,7 @@ Babel>=1.3
iso8601>=0.1.8
jsonschema>=1.3.0,!=1.4.0
python-cinderclient>=1.0.6
-python-neutronclient>=2.3.0,<3
+python-neutronclient>=2.3.4,<3
python-glanceclient>=0.9.0
python-keystoneclient>=0.3.2
six>=1.4.1
diff --git a/setup.cfg b/setup.cfg
index 4273ee37d3..59d6deff9a 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,6 +1,6 @@
[metadata]
name = nova
-version = 2013.2.3
+version = 2013.2.4
summary = Cloud computing fabric controller
description-file =
README.rst
diff --git a/test-requirements.txt b/test-requirements.txt
index 2a05b889de..5477e34c9e 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -9,7 +9,7 @@ MySQL-python
psycopg2
pylint==0.25.2
python-subunit
-sphinx>=1.1.2,<1.2
+sphinx>=1.1.2,<1.1.999
oslo.sphinx
testrepository>=0.0.17
testtools>=0.9.32
diff --git a/tox.ini b/tox.ini
index 9d2df7deeb..8062febe6a 100644
--- a/tox.ini
+++ b/tox.ini
@@ -7,10 +7,13 @@ skipsdist = True
sitepackages = True
usedevelop = True
install_command = pip install -U {opts} {packages}
+# Note the hash seed is set to 0 until nova can be tested with a
+# random hash seed successfully.
setenv = VIRTUAL_ENV={envdir}
LANG=en_US.UTF-8
LANGUAGE=en_US:en
LC_ALL=C
+ PYTHONHASHSEED=0
deps = -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
commands =
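Pinning PYTHONHASHSEED=0 keeps string hashing, and therefore dict/set iteration order, identical from run to run, so tests that implicitly depend on that order stay deterministic until they are fixed. The effect is easy to see with a tiny script run under different seeds (the seed values here are only examples; ordering may or may not differ for such a small set):

    # Run twice and compare the printed ordering:
    #   PYTHONHASHSEED=0 python order_demo.py
    #   PYTHONHASHSEED=12345 python order_demo.py
    print(list({'spawn', 'reboot', 'rescue', 'destroy'}))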
@@ -28,13 +31,11 @@ commands =
{toxinidir}/tools/config/check_uptodate.sh
[testenv:pylint]
-setenv = VIRTUAL_ENV={envdir}
commands = bash tools/lintstack.sh
[testenv:cover]
# Also do not run test_coverage_ext tests while gathering coverage as those
# tests conflict with coverage.
-setenv = VIRTUAL_ENV={envdir}
commands =
python tools/patch_tox_venv.py
python setup.py testr --coverage \